Diffstat (limited to 'lib')
-rw-r--r--  lib/docs/main.js                              70
-rw-r--r--  lib/std/Build/Cache.zig                       41
-rw-r--r--  lib/std/Thread/Condition.zig                  11
-rw-r--r--  lib/std/Uri.zig                               76
-rw-r--r--  lib/std/c/freebsd.zig                         14
-rw-r--r--  lib/std/crypto/Certificate.zig                32
-rw-r--r--  lib/std/crypto/Certificate/Bundle.zig         37
-rw-r--r--  lib/std/crypto/Certificate/Bundle/macos.zig    4
-rw-r--r--  lib/std/debug.zig                             85
-rw-r--r--  lib/std/dynamic_library.zig                    4
-rw-r--r--  lib/std/enums.zig                              2
-rw-r--r--  lib/std/fmt.zig                                6
-rw-r--r--  lib/std/http.zig                               9
-rw-r--r--  lib/std/http/Client.zig                      720
-rw-r--r--  lib/std/http/Headers.zig                     386
-rw-r--r--  lib/std/http/Server.zig                      450
-rw-r--r--  lib/std/http/protocol.zig                      2
-rw-r--r--  lib/std/math/big/int.zig                       6
-rw-r--r--  lib/std/mem.zig                                3
-rw-r--r--  lib/std/os/windows/user32.zig                 12
-rw-r--r--  lib/std/target/riscv.zig                      1
-rw-r--r--  lib/std/zig/parser_test.zig                   24
-rw-r--r--  lib/std/zig/render.zig                        11
23 files changed, 1339 insertions, 667 deletions
diff --git a/lib/docs/main.js b/lib/docs/main.js
index aa5e10e539..b25323af98 100644
--- a/lib/docs/main.js
+++ b/lib/docs/main.js
@@ -759,7 +759,7 @@ const NAV_MODES = {
docsSource = protoSrcNode.docs;
}
if (docsSource != null) {
- domTldDocs.innerHTML = markdown(docsSource);
+ domTldDocs.innerHTML = markdown(docsSource, fnDecl);
domTldDocs.classList.remove("hidden");
}
domFnProto.classList.remove("hidden");
@@ -1816,23 +1816,23 @@ const NAV_MODES = {
name = "struct { ";
}
}
- if (structObj.fields.length > 1 && opts.wantHtml) { name += "</br>"; }
+ if (structObj.field_types.length > 1 && opts.wantHtml) { name += "</br>"; }
let indent = "";
- if (structObj.fields.length > 1 && opts.wantHtml) {
+ if (structObj.field_types.length > 1 && opts.wantHtml) {
indent = "&nbsp;&nbsp;&nbsp;&nbsp;"
}
- if (opts.indent && structObj.fields.length > 1) {
+ if (opts.indent && structObj.field_types.length > 1) {
indent = opts.indent + indent;
}
let structNode = getAstNode(structObj.src);
let field_end = ",";
- if (structObj.fields.length > 1 && opts.wantHtml) {
+ if (structObj.field_types.length > 1 && opts.wantHtml) {
field_end += "</br>";
} else {
field_end += " ";
}
- for (let i = 0; i < structObj.fields.length; i += 1) {
+ for (let i = 0; i < structObj.field_types.length; i += 1) {
let fieldNode = getAstNode(structNode.fields[i]);
let fieldName = fieldNode.name;
let html = indent;
@@ -1840,18 +1840,22 @@ const NAV_MODES = {
html += escapeHtml(fieldName);
}
- let fieldTypeExpr = structObj.fields[i];
+ let fieldTypeExpr = structObj.field_types[i];
if (!structObj.is_tuple) {
html += ": ";
}
html += exprName(fieldTypeExpr, { ...opts, indent: indent });
+ if (structObj.field_defaults[i] !== null) {
+ html += " = " + exprName(structObj.field_defaults[i], opts);
+ }
+
html += field_end;
name += html;
}
- if (opts.indent && structObj.fields.length > 1) {
+ if (opts.indent && structObj.field_types.length > 1) {
name += opts.indent;
}
name += "}";
@@ -1897,6 +1901,10 @@ const NAV_MODES = {
let fieldName = fieldNode.name;
let html = indent + escapeHtml(fieldName);
+ if (enumObj.values[i] !== null) {
+ html += " = " + exprName(enumObj.values[i], opts);
+ }
+
html += field_end;
name += html;
@@ -2537,7 +2545,10 @@ const NAV_MODES = {
let docs = getAstNode(decl.src).docs;
if (docs != null) {
- domTldDocs.innerHTML = markdown(docs);
+ // TODO: it shouldn't just be decl.parent_container, but rather
+ // the type that the decl holds (if the value is a type)
+ domTldDocs.innerHTML = markdown(docs, getType(decl.parent_container));
+
domTldDocs.classList.remove("hidden");
}
@@ -3190,6 +3201,7 @@ const NAV_MODES = {
let declIndex = parentType.pubDecls[i];
let childDecl = getDecl(declIndex);
if (childDecl.name === childName) {
+ childDecl.find_subdecl_idx = declIndex;
return childDecl;
} else if (childDecl.is_uns) {
let declValue = resolveValue(childDecl.value);
@@ -3206,6 +3218,7 @@ const NAV_MODES = {
let declIndex = parentType.privDecls[i];
let childDecl = getDecl(declIndex);
if (childDecl.name === childName) {
+ childDecl.find_subdecl_idx = declIndex;
return childDecl;
} else if (childDecl.is_uns) {
let declValue = resolveValue(childDecl.value);
@@ -3622,12 +3635,39 @@ function addDeclToSearchResults(decl, declIndex, pkgNames, item, list, stack) {
const components = text.split(".");
let curDeclOrType = undefined;
- if (context) {
- curDeclOrType = findSubDecl(context, components[0]);
- if (curDeclOrType) {
+ let curContext = context;
+ let limit = 10000;
+ while (curContext) {
+ limit -= 1;
+
+ if (limit == 0) {
+ throw "too many iterations";
+ }
+
+ curDeclOrType = findSubDecl(curContext, components[0]);
+
+ if (!curDeclOrType) {
+ if (curContext.parent_container == null) break;
+ curContext = getType(curContext.parent_container);
+ continue;
+ }
+
+ if (curContext == context) {
separator = '.';
result = location.hash + separator + components[0];
+ } else {
+ // We had to go up, which means we need a new path!
+ const canonPath = getCanonDeclPath(curDeclOrType.find_subdecl_idx);
+ if (!canonPath) return;
+
+ let lastPkgName = canonPath.pkgNames[canonPath.pkgNames.length - 1];
+ let fullPath = lastPkgName + ":" + canonPath.declNames.join(".");
+
+ separator = '.';
+ result = "#A;" + fullPath;
}
+
+ break;
}
if (!curDeclOrType) {
@@ -4077,6 +4117,7 @@ function addDeclToSearchResults(decl, declIndex, pkgNames, item, list, stack) {
value: decl[3],
decltest: decl[4],
is_uns: decl[5],
+ parent_container: decl[6],
};
}
@@ -4145,7 +4186,7 @@ function addDeclToSearchResults(decl, declIndex, pkgNames, item, list, stack) {
field_defaults: ty[6],
is_tuple: ty[7],
line_number: ty[8],
- outer_decl: ty[9],
+ parent_container: ty[9],
};
case 10: // ComptimeExpr
case 11: // ComptimeFloat
@@ -4186,6 +4227,7 @@ function addDeclToSearchResults(decl, declIndex, pkgNames, item, list, stack) {
tag: ty[5],
values: ty[6],
nonexhaustive: ty[7],
+ parent_container: ty[8],
};
case 20: // Union
return {
@@ -4197,6 +4239,7 @@ function addDeclToSearchResults(decl, declIndex, pkgNames, item, list, stack) {
field_types: ty[5],
tag: ty[6],
auto_tag: ty[7],
+ parent_container: ty[8],
};
case 21: // Fn
return {
@@ -4226,6 +4269,7 @@ function addDeclToSearchResults(decl, declIndex, pkgNames, item, list, stack) {
src: ty[2],
privDecls: ty[3],
pubDecls: ty[4],
+ parent_container: ty[5],
};
case 24: // Frame
case 25: // AnyFrame
diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig
index f8c83451cb..3b67f4b24c 100644
--- a/lib/std/Build/Cache.zig
+++ b/lib/std/Build/Cache.zig
@@ -434,24 +434,29 @@ pub const Manifest = struct {
}
}
} else {
- if (self.cache.manifest_dir.createFile(&manifest_file_path, .{
- .read = true,
- .truncate = false,
- .lock = .Exclusive,
- .lock_nonblocking = self.want_shared_lock,
- })) |manifest_file| {
- self.manifest_file = manifest_file;
- self.have_exclusive_lock = true;
- } else |err| switch (err) {
- // There are no dir components, so you would think that this was
- // unreachable, however we have observed on macOS two processes racing
- // to do openat() with O_CREAT manifest in ENOENT.
- error.WouldBlock, error.FileNotFound => {
- self.manifest_file = try self.cache.manifest_dir.openFile(&manifest_file_path, .{
- .lock = .Shared,
- });
- },
- else => |e| return e,
+ while (true) {
+ if (self.cache.manifest_dir.createFile(&manifest_file_path, .{
+ .read = true,
+ .truncate = false,
+ .lock = .Exclusive,
+ .lock_nonblocking = self.want_shared_lock,
+ })) |manifest_file| {
+ self.manifest_file = manifest_file;
+ self.have_exclusive_lock = true;
+ break;
+ } else |err| switch (err) {
+ error.WouldBlock => {
+ self.manifest_file = try self.cache.manifest_dir.openFile(&manifest_file_path, .{
+ .lock = .Shared,
+ });
+ break;
+ },
+ // There are no dir components, so you would think that this was
+ // unreachable. However, we have observed on macOS two processes
+ // racing to openat() the manifest with O_CREAT, resulting in ENOENT.
+ error.FileNotFound => continue,
+ else => |e| return e,
+ }
}
}
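
The rewritten branch above retries the whole create-or-open sequence: error.WouldBlock means another process holds the exclusive lock, so a shared lock on the now-existing file suffices, while error.FileNotFound means we lost a create/open race and should simply try again. A minimal sketch of the same idiom, with hypothetical `dir` and `path` arguments:

    const std = @import("std");

    fn openSharedOrExclusive(dir: std.fs.Dir, path: []const u8) !std.fs.File {
        while (true) {
            if (dir.createFile(path, .{
                .read = true,
                .truncate = false,
                .lock = .Exclusive,
                .lock_nonblocking = true,
            })) |file| {
                return file; // we won the race and hold the exclusive lock
            } else |err| switch (err) {
                // Another process holds the lock; a shared lock suffices.
                error.WouldBlock => return dir.openFile(path, .{ .lock = .Shared }),
                // We raced another O_CREAT and lost; try again.
                error.FileNotFound => continue,
                else => |e| return e,
            }
        }
    }
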
diff --git a/lib/std/Thread/Condition.zig b/lib/std/Thread/Condition.zig
index 793779dbdb..09911df883 100644
--- a/lib/std/Thread/Condition.zig
+++ b/lib/std/Thread/Condition.zig
@@ -18,10 +18,11 @@
//! }
//!
//! fn producer() void {
-//! m.lock();
-//! defer m.unlock();
-//!
-//! predicate = true;
+//! {
+//! m.lock();
+//! defer m.unlock();
+//! predicate = true;
+//! }
//! c.signal();
//! }
//!
@@ -37,7 +38,7 @@
//! thread-1: condition.wait(&mutex)
//!
//! thread-2: // mutex.lock() (without this, the following signal may not see the waiting thread-1)
-//! thread-2: // mutex.unlock() (this is optional for correctness once locked above, as signal can be called without holding the mutex)
+//! thread-2: // mutex.unlock() (this is optional for correctness once locked above, as signal can be called while holding the mutex)
//! thread-2: condition.signal()
//! ```
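
A runnable sketch pairing the narrowed producer above with a consumer, using the same globals the doc comment assumes; the consumer must re-check `predicate` in a loop because wait() may wake spuriously:

    const std = @import("std");

    var m = std.Thread.Mutex{};
    var c = std.Thread.Condition{};
    var predicate = false;

    fn consumer() void {
        m.lock();
        defer m.unlock();
        // Re-check the predicate on every wakeup; wait() may return spuriously.
        while (!predicate) c.wait(&m);
    }

    test "signal wakes a waiting consumer" {
        const t = try std.Thread.spawn(.{}, consumer, .{});
        {
            m.lock();
            defer m.unlock();
            predicate = true;
        }
        // Signaling outside the critical section is fine: the waiter
        // re-checks the predicate under the mutex either way.
        c.signal();
        t.join();
    }
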
diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig
index eb6311a19b..b010ce8662 100644
--- a/lib/std/Uri.zig
+++ b/lib/std/Uri.zig
@@ -27,6 +27,18 @@ pub fn escapeQuery(allocator: std.mem.Allocator, input: []const u8) error{OutOfM
return escapeStringWithFn(allocator, input, isQueryChar);
}
+pub fn writeEscapedString(writer: anytype, input: []const u8) !void {
+ return writeEscapedStringWithFn(writer, input, isUnreserved);
+}
+
+pub fn writeEscapedPath(writer: anytype, input: []const u8) !void {
+ return writeEscapedStringWithFn(writer, input, isPathChar);
+}
+
+pub fn writeEscapedQuery(writer: anytype, input: []const u8) !void {
+ return writeEscapedStringWithFn(writer, input, isQueryChar);
+}
+
pub fn escapeStringWithFn(allocator: std.mem.Allocator, input: []const u8, comptime keepUnescaped: fn (c: u8) bool) std.mem.Allocator.Error![]const u8 {
var outsize: usize = 0;
for (input) |c| {
@@ -52,6 +64,16 @@ pub fn escapeStringWithFn(allocator: std.mem.Allocator, input: []const u8, compt
return output;
}
+pub fn writeEscapedStringWithFn(writer: anytype, input: []const u8, comptime keepUnescaped: fn (c: u8) bool) @TypeOf(writer).Error!void {
+ for (input) |c| {
+ if (keepUnescaped(c)) {
+ try writer.writeByte(c);
+ } else {
+ try writer.print("%{X:0>2}", .{c});
+ }
+ }
+}
+
/// Parses a URI string and unescapes all %XX where XX is a valid hex number. Otherwise, verbatim copies
/// them to the output.
pub fn unescapeString(allocator: std.mem.Allocator, input: []const u8) error{OutOfMemory}![]const u8 {
@@ -184,6 +206,60 @@ pub fn parseWithoutScheme(text: []const u8) ParseError!Uri {
return uri;
}
+pub fn format(
+ uri: Uri,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+) @TypeOf(writer).Error!void {
+ _ = options;
+
+ const needs_absolute = comptime std.mem.indexOf(u8, fmt, "+") != null;
+ const needs_path = comptime std.mem.indexOf(u8, fmt, "/") != null or fmt.len == 0;
+
+ if (needs_absolute) {
+ try writer.writeAll(uri.scheme);
+ try writer.writeAll(":");
+ if (uri.host) |host| {
+ try writer.writeAll("//");
+
+ if (uri.user) |user| {
+ try writer.writeAll(user);
+ if (uri.password) |password| {
+ try writer.writeAll(":");
+ try writer.writeAll(password);
+ }
+ try writer.writeAll("@");
+ }
+
+ try writer.writeAll(host);
+
+ if (uri.port) |port| {
+ try writer.writeAll(":");
+ try std.fmt.formatInt(port, 10, .lower, .{}, writer);
+ }
+ }
+ }
+
+ if (needs_path) {
+ if (uri.path.len == 0) {
+ try writer.writeAll("/");
+ } else {
+ try Uri.writeEscapedPath(writer, uri.path);
+ }
+
+ if (uri.query) |q| {
+ try writer.writeAll("?");
+ try Uri.writeEscapedQuery(writer, q);
+ }
+
+ if (uri.fragment) |f| {
+ try writer.writeAll("#");
+ try Uri.writeEscapedQuery(writer, f);
+ }
+ }
+}
+
/// Parses the URI or returns an error.
/// The return value will contain unescaped strings pointing into the
/// original `text`. Each component that is provided, will be non-`null`.
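
A small usage sketch of the format method added above: a "+" in the specifier selects the scheme and authority, "/" (or an empty specifier) selects the escaped path, query, and fragment, so "{+/}" prints the full URI:

    const std = @import("std");

    test "Uri.format specifiers" {
        const uri = std.Uri{
            .scheme = "https",
            .user = null,
            .password = null,
            .host = "example.com",
            .port = 8080,
            .path = "/search results", // the space is escaped on output
            .query = "q=zig",
            .fragment = null,
        };
        var buf: [128]u8 = undefined;
        const s = try std.fmt.bufPrint(&buf, "{+/}", .{uri});
        try std.testing.expectEqualStrings("https://example.com:8080/search%20results?q=zig", s);
    }
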
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index 08d3d58425..f18a5bfa18 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -5,12 +5,21 @@ const maxInt = std.math.maxInt;
const iovec = std.os.iovec;
const iovec_const = std.os.iovec_const;
+pub const CPU_SETSIZE = 256;
+pub const cpuset_t = extern struct {
+ __bits: [(CPU_SETSIZE + (@bitSizeOf(c_long) - 1)) / @bitSizeOf(c_long)]c_long,
+};
+pub const cpulevel_t = c_int;
+pub const cpuwhich_t = c_int;
+pub const id_t = i64;
+
extern "c" fn __error() *c_int;
pub const _errno = __error;
pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize;
+pub extern "c" fn getentropy(buf_ptr: [*]u8, buf_len: usize) c_int;
pub extern "c" fn pthread_getthreadid_np() c_int;
pub extern "c" fn pthread_set_name_np(thread: std.c.pthread_t, name: [*:0]const u8) void;
@@ -26,6 +35,9 @@ pub extern "c" fn getpid() pid_t;
pub extern "c" fn kinfo_getfile(pid: pid_t, cntp: *c_int) ?[*]kinfo_file;
pub extern "c" fn kinfo_getvmmap(pid: pid_t, cntp: *c_int) ?[*]kinfo_vmentry;
+pub extern "c" fn cpuset_getaffinity(level: cpulevel_t, which: cpuwhich_t, id: id_t, setsize: usize, mask: *cpuset_t) c_int;
+pub extern "c" fn cpuset_setaffinity(level: cpulevel_t, which: cpuwhich_t, id: id_t, setsize: usize, mask: *const cpuset_t) c_int;
+
pub const sf_hdtr = extern struct {
headers: [*]const iovec_const,
hdr_cnt: c_int,
@@ -203,8 +215,6 @@ pub const clock_t = isize;
pub const socklen_t = u32;
pub const suseconds_t = c_long;
-pub const id_t = i64;
-
/// Renamed from `kevent` to `Kevent` to avoid conflict with function name.
pub const Kevent = extern struct {
/// Identifier for this event.
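
A hedged sketch of the new externs in use, counting the CPUs the current process may run on. The CPU_LEVEL_WHICH and CPU_WHICH_PID constants (and the id of -1 meaning the calling process) come from FreeBSD's <sys/cpuset.h> and are assumptions here, not part of this diff:

    const std = @import("std");
    const c = std.c; // resolves to the freebsd.zig declarations on FreeBSD

    // Assumed values from <sys/cpuset.h>; not declared in this diff.
    const CPU_LEVEL_WHICH = 3;
    const CPU_WHICH_PID = 2;

    fn countAffineCpus() !usize {
        var set: c.cpuset_t = undefined;
        if (c.cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, @sizeOf(c.cpuset_t), &set) != 0)
            return error.Unexpected;
        var n: usize = 0;
        for (set.__bits) |word| n += @popCount(@bitCast(c_ulong, word));
        return n;
    }
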
diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig
index 22513f9efe..0caffba363 100644
--- a/lib/std/crypto/Certificate.zig
+++ b/lib/std/crypto/Certificate.zig
@@ -371,7 +371,9 @@ test "Parsed.checkHostName" {
try expectEqual(false, Parsed.checkHostName("lang.org", "zig*.org"));
}
-pub fn parse(cert: Certificate) !Parsed {
+pub const ParseError = der.Element.ParseElementError || ParseVersionError || ParseTimeError || ParseEnumError || ParseBitStringError;
+
+pub fn parse(cert: Certificate) ParseError!Parsed {
const cert_bytes = cert.buffer;
const certificate = try der.Element.parse(cert_bytes, cert.index);
const tbs_certificate = try der.Element.parse(cert_bytes, certificate.slice.start);
@@ -514,14 +516,18 @@ pub fn contents(cert: Certificate, elem: der.Element) []const u8 {
return cert.buffer[elem.slice.start..elem.slice.end];
}
+pub const ParseBitStringError = error{ CertificateFieldHasWrongDataType, CertificateHasInvalidBitString };
+
pub fn parseBitString(cert: Certificate, elem: der.Element) !der.Element.Slice {
if (elem.identifier.tag != .bitstring) return error.CertificateFieldHasWrongDataType;
if (cert.buffer[elem.slice.start] != 0) return error.CertificateHasInvalidBitString;
return .{ .start = elem.slice.start + 1, .end = elem.slice.end };
}
+pub const ParseTimeError = error{ CertificateTimeInvalid, CertificateFieldHasWrongDataType };
+
/// Returns number of seconds since epoch.
-pub fn parseTime(cert: Certificate, elem: der.Element) !u64 {
+pub fn parseTime(cert: Certificate, elem: der.Element) ParseTimeError!u64 {
const bytes = cert.contents(elem);
switch (elem.identifier.tag) {
.utc_time => {
@@ -647,34 +653,38 @@ test parseYear4 {
try expectError(error.CertificateTimeInvalid, parseYear4("crap"));
}
-pub fn parseAlgorithm(bytes: []const u8, element: der.Element) !Algorithm {
+pub fn parseAlgorithm(bytes: []const u8, element: der.Element) ParseEnumError!Algorithm {
return parseEnum(Algorithm, bytes, element);
}
-pub fn parseAlgorithmCategory(bytes: []const u8, element: der.Element) !AlgorithmCategory {
+pub fn parseAlgorithmCategory(bytes: []const u8, element: der.Element) ParseEnumError!AlgorithmCategory {
return parseEnum(AlgorithmCategory, bytes, element);
}
-pub fn parseAttribute(bytes: []const u8, element: der.Element) !Attribute {
+pub fn parseAttribute(bytes: []const u8, element: der.Element) ParseEnumError!Attribute {
return parseEnum(Attribute, bytes, element);
}
-pub fn parseNamedCurve(bytes: []const u8, element: der.Element) !NamedCurve {
+pub fn parseNamedCurve(bytes: []const u8, element: der.Element) ParseEnumError!NamedCurve {
return parseEnum(NamedCurve, bytes, element);
}
-pub fn parseExtensionId(bytes: []const u8, element: der.Element) !ExtensionId {
+pub fn parseExtensionId(bytes: []const u8, element: der.Element) ParseEnumError!ExtensionId {
return parseEnum(ExtensionId, bytes, element);
}
-fn parseEnum(comptime E: type, bytes: []const u8, element: der.Element) !E {
+pub const ParseEnumError = error{ CertificateFieldHasWrongDataType, CertificateHasUnrecognizedObjectId };
+
+fn parseEnum(comptime E: type, bytes: []const u8, element: der.Element) ParseEnumError!E {
if (element.identifier.tag != .object_identifier)
return error.CertificateFieldHasWrongDataType;
const oid_bytes = bytes[element.slice.start..element.slice.end];
return E.map.get(oid_bytes) orelse return error.CertificateHasUnrecognizedObjectId;
}
-pub fn parseVersion(bytes: []const u8, version_elem: der.Element) !Version {
+pub const ParseVersionError = error{ UnsupportedCertificateVersion, CertificateFieldHasInvalidLength };
+
+pub fn parseVersion(bytes: []const u8, version_elem: der.Element) ParseVersionError!Version {
if (@bitCast(u8, version_elem.identifier) != 0xa0)
return .v1;
@@ -861,9 +871,9 @@ pub const der = struct {
pub const empty: Slice = .{ .start = 0, .end = 0 };
};
- pub const ParseError = error{CertificateFieldHasInvalidLength};
+ pub const ParseElementError = error{CertificateFieldHasInvalidLength};
- pub fn parse(bytes: []const u8, index: u32) ParseError!Element {
+ pub fn parse(bytes: []const u8, index: u32) ParseElementError!Element {
var i = index;
const identifier = @bitCast(Identifier, bytes[i]);
i += 1;
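
One payoff of spelling the error sets out: ParseError is now a closed union of the sets above, so a caller can enumerate every member with no `else` catch-all. A sketch, with `der_bytes` as hypothetical DER input:

    const std = @import("std");
    const Certificate = std.crypto.Certificate;

    fn tryParse(der_bytes: []const u8) ?Certificate.Parsed {
        const cert = Certificate{ .buffer = der_bytes, .index = 0 };
        // Exhaustive: these six errors are exactly Certificate.ParseError.
        return cert.parse() catch |err| switch (err) {
            error.CertificateFieldHasInvalidLength,
            error.CertificateFieldHasWrongDataType,
            error.CertificateHasInvalidBitString,
            error.CertificateHasUnrecognizedObjectId,
            error.CertificateTimeInvalid,
            error.UnsupportedCertificateVersion,
            => null,
        };
    }
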
diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig
index 1a5a45ae63..b3b5409d27 100644
--- a/lib/std/crypto/Certificate/Bundle.zig
+++ b/lib/std/crypto/Certificate/Bundle.zig
@@ -50,11 +50,13 @@ pub fn deinit(cb: *Bundle, gpa: Allocator) void {
cb.* = undefined;
}
+pub const RescanError = RescanLinuxError || RescanMacError || RescanWindowsError;
+
/// Clears the set of certificates and then scans the host operating system
/// file system standard locations for certificates.
/// For operating systems that do not have standard CA installations to be
/// found, this function clears the set of certificates.
-pub fn rescan(cb: *Bundle, gpa: Allocator) !void {
+pub fn rescan(cb: *Bundle, gpa: Allocator) RescanError!void {
switch (builtin.os.tag) {
.linux => return rescanLinux(cb, gpa),
.macos => return rescanMac(cb, gpa),
@@ -64,8 +66,11 @@ pub fn rescan(cb: *Bundle, gpa: Allocator) !void {
}
pub const rescanMac = @import("Bundle/macos.zig").rescanMac;
+pub const RescanMacError = @import("Bundle/macos.zig").RescanMacError;
+
+pub const RescanLinuxError = AddCertsFromFilePathError || AddCertsFromDirPathError;
-pub fn rescanLinux(cb: *Bundle, gpa: Allocator) !void {
+pub fn rescanLinux(cb: *Bundle, gpa: Allocator) RescanLinuxError!void {
// Possible certificate files; stop after finding one.
const cert_file_paths = [_][]const u8{
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
@@ -107,7 +112,9 @@ pub fn rescanLinux(cb: *Bundle, gpa: Allocator) !void {
cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
}
-pub fn rescanWindows(cb: *Bundle, gpa: Allocator) !void {
+pub const RescanWindowsError = Allocator.Error || ParseCertError || std.os.UnexpectedError || error{FileNotFound};
+
+pub fn rescanWindows(cb: *Bundle, gpa: Allocator) RescanWindowsError!void {
cb.bytes.clearRetainingCapacity();
cb.map.clearRetainingCapacity();
@@ -132,12 +139,14 @@ pub fn rescanWindows(cb: *Bundle, gpa: Allocator) !void {
cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
}
+pub const AddCertsFromDirPathError = fs.File.OpenError || AddCertsFromDirError;
+
pub fn addCertsFromDirPath(
cb: *Bundle,
gpa: Allocator,
dir: fs.Dir,
sub_dir_path: []const u8,
-) !void {
+) AddCertsFromDirPathError!void {
var iterable_dir = try dir.openIterableDir(sub_dir_path, .{});
defer iterable_dir.close();
return addCertsFromDir(cb, gpa, iterable_dir);
@@ -147,14 +156,16 @@ pub fn addCertsFromDirPathAbsolute(
cb: *Bundle,
gpa: Allocator,
abs_dir_path: []const u8,
-) !void {
+) AddCertsFromDirPathError!void {
assert(fs.path.isAbsolute(abs_dir_path));
var iterable_dir = try fs.openIterableDirAbsolute(abs_dir_path, .{});
defer iterable_dir.close();
return addCertsFromDir(cb, gpa, iterable_dir);
}
-pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.IterableDir) !void {
+pub const AddCertsFromDirError = AddCertsFromFilePathError;
+
+pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.IterableDir) AddCertsFromDirError!void {
var it = iterable_dir.iterate();
while (try it.next()) |entry| {
switch (entry.kind) {
@@ -166,11 +177,13 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.IterableDir
}
}
+pub const AddCertsFromFilePathError = fs.File.OpenError || AddCertsFromFileError;
+
pub fn addCertsFromFilePathAbsolute(
cb: *Bundle,
gpa: Allocator,
abs_file_path: []const u8,
-) !void {
+) AddCertsFromFilePathError!void {
assert(fs.path.isAbsolute(abs_file_path));
var file = try fs.openFileAbsolute(abs_file_path, .{});
defer file.close();
@@ -182,13 +195,15 @@ pub fn addCertsFromFilePath(
gpa: Allocator,
dir: fs.Dir,
sub_file_path: []const u8,
-) !void {
+) AddCertsFromFilePathError!void {
var file = try dir.openFile(sub_file_path, .{});
defer file.close();
return addCertsFromFile(cb, gpa, file);
}
-pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) !void {
+pub const AddCertsFromFileError = Allocator.Error || fs.File.GetSeekPosError || fs.File.ReadError || ParseCertError || std.base64.Error || error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker };
+
+pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFromFileError!void {
const size = try file.getEndPos();
// We borrow `bytes` as a temporary buffer for the base64-encoded data.
@@ -222,7 +237,9 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) !void {
}
}
-pub fn parseCert(cb: *Bundle, gpa: Allocator, decoded_start: u32, now_sec: i64) !void {
+pub const ParseCertError = Allocator.Error || Certificate.ParseError;
+
+pub fn parseCert(cb: *Bundle, gpa: Allocator, decoded_start: u32, now_sec: i64) ParseCertError!void {
// Even though we could only partially parse the certificate to find
// the subject name, we pre-parse all of them to make sure and only
// include in the bundle ones that we know will parse. This way we can
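
With RescanError spelled out, callers can name the full failure set instead of relying on an inferred one. A minimal sketch of the usual init-scan-deinit lifecycle:

    const std = @import("std");

    fn loadSystemBundle(gpa: std.mem.Allocator) !std.crypto.Certificate.Bundle {
        var cb = std.crypto.Certificate.Bundle{};
        errdefer cb.deinit(gpa);
        // RescanError is the union of the per-OS scan errors declared above.
        try cb.rescan(gpa);
        return cb;
    }
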
diff --git a/lib/std/crypto/Certificate/Bundle/macos.zig b/lib/std/crypto/Certificate/Bundle/macos.zig
index 5260aa61a6..dba0520795 100644
--- a/lib/std/crypto/Certificate/Bundle/macos.zig
+++ b/lib/std/crypto/Certificate/Bundle/macos.zig
@@ -5,7 +5,9 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const Bundle = @import("../Bundle.zig");
-pub fn rescanMac(cb: *Bundle, gpa: Allocator) !void {
+pub const RescanMacError = Allocator.Error || fs.File.OpenError || fs.File.ReadError || fs.File.SeekError || Bundle.ParseCertError || error{EndOfStream};
+
+pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
cb.bytes.clearRetainingCapacity();
cb.map.clearRetainingCapacity();
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 933792add4..663540c182 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -183,6 +183,11 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
return;
};
const tty_config = detectTTYConfig(io.getStdErr());
+ if (native_os == .windows) {
+ writeCurrentStackTraceWindows(stderr, debug_info, tty_config, ip) catch return;
+ return;
+ }
+
printSourceAtAddress(debug_info, stderr, ip, tty_config) catch return;
var it = StackIterator.init(null, bp);
while (it.next()) |return_address| {
@@ -595,7 +600,16 @@ pub noinline fn walkStackWindows(addresses: []usize) usize {
if (windows.ntdll.RtlLookupFunctionEntry(current_regs.ip, &image_base, &history_table)) |runtime_function| {
var handler_data: ?*anyopaque = null;
var establisher_frame: u64 = undefined;
- _ = windows.ntdll.RtlVirtualUnwind(windows.UNW_FLAG_NHANDLER, image_base, current_regs.ip, runtime_function, &context, &handler_data, &establisher_frame, null);
+ _ = windows.ntdll.RtlVirtualUnwind(
+ windows.UNW_FLAG_NHANDLER,
+ image_base,
+ current_regs.ip,
+ runtime_function,
+ &context,
+ &handler_data,
+ &establisher_frame,
+ null,
+ );
} else {
// leaf function
context.setIp(@intToPtr(*u64, current_regs.sp).*);
@@ -769,23 +783,29 @@ test "machoSearchSymbols" {
try testing.expectEqual(&symbols[2], machoSearchSymbols(&symbols, 5000).?);
}
+fn printUnknownSource(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void {
+ const module_name = debug_info.getModuleNameForAddress(address);
+ return printLineInfo(
+ out_stream,
+ null,
+ address,
+ "???",
+ module_name orelse "???",
+ tty_config,
+ printLineFromFileAnyOs,
+ );
+}
+
pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: anytype, address: usize, tty_config: TTY.Config) !void {
const module = debug_info.getModuleForAddress(address) catch |err| switch (err) {
- error.MissingDebugInfo, error.InvalidDebugInfo => {
- return printLineInfo(
- out_stream,
- null,
- address,
- "???",
- "???",
- tty_config,
- printLineFromFileAnyOs,
- );
- },
+ error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, out_stream, address, tty_config),
else => return err,
};
- const symbol_info = try module.getSymbolAtAddress(debug_info.allocator, address);
+ const symbol_info = module.getSymbolAtAddress(debug_info.allocator, address) catch |err| switch (err) {
+ error.MissingDebugInfo, error.InvalidDebugInfo => return printUnknownSource(debug_info, out_stream, address, tty_config),
+ else => return err,
+ };
defer symbol_info.deinit(debug_info.allocator);
return printLineInfo(
@@ -1275,15 +1295,16 @@ fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
}
}
-pub const ModuleInfo = struct {
+pub const WindowsModuleInfo = struct {
base_address: usize,
size: u32,
+ name: []const u8,
};
pub const DebugInfo = struct {
allocator: mem.Allocator,
address_map: std.AutoHashMap(usize, *ModuleDebugInfo),
- modules: if (native_os == .windows) std.ArrayListUnmanaged(ModuleInfo) else void,
+ modules: if (native_os == .windows) std.ArrayListUnmanaged(WindowsModuleInfo) else void,
pub fn init(allocator: mem.Allocator) !DebugInfo {
var debug_info = DebugInfo{
@@ -1299,7 +1320,6 @@ pub const DebugInfo = struct {
else => |err| return windows.unexpectedError(err),
}
}
-
defer windows.CloseHandle(handle);
var module_entry: windows.MODULEENTRY32 = undefined;
@@ -1313,6 +1333,7 @@ pub const DebugInfo = struct {
const module_info = try debug_info.modules.addOne(allocator);
module_info.base_address = @ptrToInt(module_entry.modBaseAddr);
module_info.size = module_entry.modBaseSize;
+ module_info.name = allocator.dupe(u8, mem.sliceTo(&module_entry.szModule, 0)) catch &.{};
module_valid = windows.kernel32.Module32Next(handle, &module_entry) == 1;
}
}
@@ -1328,7 +1349,12 @@ pub const DebugInfo = struct {
self.allocator.destroy(mdi);
}
self.address_map.deinit();
- if (native_os == .windows) self.modules.deinit(self.allocator);
+ if (native_os == .windows) {
+ for (self.modules.items) |module| {
+ self.allocator.free(module.name);
+ }
+ self.modules.deinit(self.allocator);
+ }
}
pub fn getModuleForAddress(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
@@ -1351,6 +1377,22 @@ pub const DebugInfo = struct {
}
}
+ pub fn getModuleNameForAddress(self: *DebugInfo, address: usize) ?[]const u8 {
+ if (builtin.zig_backend == .stage2_c) {
+ return null;
+ } else if (comptime builtin.target.isDarwin()) {
+ return null;
+ } else if (native_os == .windows) {
+ return self.lookupModuleNameWin32(address);
+ } else if (native_os == .haiku) {
+ return null;
+ } else if (comptime builtin.target.isWasm()) {
+ return null;
+ } else {
+ return null;
+ }
+ }
+
fn lookupModuleDyld(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
const image_count = std.c._dyld_image_count();
@@ -1428,6 +1470,15 @@ pub const DebugInfo = struct {
return error.MissingDebugInfo;
}
+ fn lookupModuleNameWin32(self: *DebugInfo, address: usize) ?[]const u8 {
+ for (self.modules.items) |module| {
+ if (address >= module.base_address and address < module.base_address + module.size) {
+ return module.name;
+ }
+ }
+ return null;
+ }
+
fn lookupModuleDl(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
var ctx: struct {
// Input
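
The fallback path above now names the owning module even when symbol data is missing. A small sketch of the same frame line a caller could print directly using the new lookup (`debug_info` and `addr` are hypothetical):

    const std = @import("std");

    fn printBareFrame(debug_info: *std.debug.DebugInfo, addr: usize) void {
        // Non-null only on Windows for now, per getModuleNameForAddress above.
        const module_name = debug_info.getModuleNameForAddress(addr) orelse "???";
        std.debug.print("0x{x} in ??? ({s})\n", .{ addr, module_name });
    }
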
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index 2ab798dcd7..099ba63204 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -348,7 +348,7 @@ pub const WindowsDynLib = struct {
pub fn lookup(self: *WindowsDynLib, comptime T: type, name: [:0]const u8) ?T {
if (windows.kernel32.GetProcAddress(self.dll, name.ptr)) |addr| {
- return @ptrCast(T, addr);
+ return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), addr));
} else {
return null;
}
@@ -382,7 +382,7 @@ pub const DlDynlib = struct {
// dlsym (and other dl-functions) secretly take shadow parameter - return address on stack
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66826
if (@call(.never_tail, system.dlsym, .{ self.handle, name.ptr })) |symbol| {
- return @ptrCast(T, symbol);
+ return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), symbol));
} else {
return null;
}
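
The added @alignCast matters because lookup() casts an untyped symbol address to a caller-chosen function pointer type. A sketch of such a lookup; the library name and symbol are illustrative only:

    const std = @import("std");

    fn loadCosf() !*const fn (f32) callconv(.C) f32 {
        var lib = try std.DynLib.open("libm.so.6");
        // Note: the pointer is only valid while `lib` stays open; a real
        // caller keeps the handle alive rather than let it go out of scope.
        return lib.lookup(*const fn (f32) callconv(.C) f32, "cosf") orelse error.SymbolNotFound;
    }
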
diff --git a/lib/std/enums.zig b/lib/std/enums.zig
index c8c999d4b1..a915d40f49 100644
--- a/lib/std/enums.zig
+++ b/lib/std/enums.zig
@@ -32,7 +32,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
/// Looks up the supplied fields in the given enum type.
/// Uses only the field names, field values are ignored.
/// The result array is in the same order as the input.
-pub fn valuesFromFields(comptime E: type, comptime fields: []const EnumField) []const E {
+pub inline fn valuesFromFields(comptime E: type, comptime fields: []const EnumField) []const E {
comptime {
var result: [fields.len]E = undefined;
for (fields, 0..) |f, i| {
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 1658f8a502..3f697f8117 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -2005,7 +2005,7 @@ pub fn bufPrintIntToSlice(buf: []u8, value: anytype, base: u8, case: Case, optio
return buf[0..formatIntBuf(buf, value, base, case, options)];
}
-pub fn comptimePrint(comptime fmt: []const u8, args: anytype) *const [count(fmt, args):0]u8 {
+pub inline fn comptimePrint(comptime fmt: []const u8, args: anytype) *const [count(fmt, args):0]u8 {
comptime {
var buf: [count(fmt, args):0]u8 = undefined;
_ = bufPrint(&buf, fmt, args) catch unreachable;
@@ -2016,8 +2016,8 @@ pub fn comptimePrint(comptime fmt: []const u8, args: anytype) *const [count(fmt,
test "comptimePrint" {
@setEvalBranchQuota(2000);
- try std.testing.expectEqual(*const [3:0]u8, @TypeOf(comptime comptimePrint("{}", .{100})));
- try std.testing.expectEqualSlices(u8, "100", comptime comptimePrint("{}", .{100}));
+ try std.testing.expectEqual(*const [3:0]u8, @TypeOf(comptimePrint("{}", .{100})));
+ try std.testing.expectEqualSlices(u8, "100", comptimePrint("{}", .{100}));
}
test "parse u64 digit too big" {
diff --git a/lib/std/http.zig b/lib/std/http.zig
index 6e5f4e0cd9..364cc4eeda 100644
--- a/lib/std/http.zig
+++ b/lib/std/http.zig
@@ -1,6 +1,10 @@
pub const Client = @import("http/Client.zig");
pub const Server = @import("http/Server.zig");
pub const protocol = @import("http/protocol.zig");
+const headers = @import("http/Headers.zig");
+
+pub const Headers = headers.Headers;
+pub const Field = headers.Field;
pub const Version = enum {
@"HTTP/1.0",
@@ -265,11 +269,6 @@ pub const Connection = enum {
close,
};
-pub const CustomHeader = struct {
- name: []const u8,
- value: []const u8,
-};
-
const std = @import("std.zig");
test {
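
The fixed CustomHeader name/value pair gives way to the Headers multimap from the new Headers.zig (diffed separately; see the diffstat). A sketch using only the operations visible elsewhere in this commit, namely append, contains, and getFirstValue:

    const std = @import("std");

    test "http.Headers basics" {
        var h = std.http.Headers{ .allocator = std.testing.allocator };
        defer h.deinit();

        try h.append("accept", "*/*");
        try h.append("accept", "text/html");

        try std.testing.expect(h.contains("accept"));
        try std.testing.expectEqualStrings("*/*", h.getFirstValue("accept").?);
    }
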
diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig
index 010c557f87..4ff29a215a 100644
--- a/lib/std/http/Client.zig
+++ b/lib/std/http/Client.zig
@@ -25,48 +25,7 @@ next_https_rescan_certs: bool = true,
/// The pool of connections that can be reused (and currently in use).
connection_pool: ConnectionPool = .{},
-/// The last error that occurred on this client. This is not threadsafe, do not expect it to be completely accurate.
-last_error: ?ExtraError = null,
-
-pub const ExtraError = union(enum) {
- fn impliedErrorSet(comptime f: anytype) type {
- const set = @typeInfo(@typeInfo(@TypeOf(f)).Fn.return_type.?).ErrorUnion.error_set;
- if (@typeName(set)[0] != '@') @compileError(@typeName(f) ++ " doesn't have an implied error set any more.");
- return set;
- }
-
- // There's apparently a dependency loop with using Client.DeflateDecompressor.
- const FakeTransferError = proto.HeadersParser.ReadError || error{ReadFailed};
- const FakeTransferReader = std.io.Reader(void, FakeTransferError, fakeRead);
- fn fakeRead(ctx: void, buf: []u8) FakeTransferError!usize {
- _ = .{ buf, ctx };
- return 0;
- }
-
- const FakeDeflateDecompressor = std.compress.zlib.ZlibStream(FakeTransferReader);
- const FakeGzipDecompressor = std.compress.gzip.Decompress(FakeTransferReader);
- const FakeZstdDecompressor = std.compress.zstd.DecompressStream(FakeTransferReader, .{});
-
- pub const TcpConnectError = std.net.TcpConnectToHostError;
- pub const TlsError = std.crypto.tls.Client.InitError(net.Stream);
- pub const WriteError = BufferedConnection.WriteError;
- pub const ReadError = BufferedConnection.ReadError || error{HttpChunkInvalid};
- pub const CaBundleError = impliedErrorSet(std.crypto.Certificate.Bundle.rescan);
-
- pub const ZlibInitError = error{ BadHeader, InvalidCompression, InvalidWindowSize, Unsupported, EndOfStream, OutOfMemory } || Request.TransferReadError;
- pub const GzipInitError = error{ BadHeader, InvalidCompression, OutOfMemory, WrongChecksum, EndOfStream, StreamTooLong } || Request.TransferReadError;
- // pub const DecompressError = Client.DeflateDecompressor.Error || Client.GzipDecompressor.Error || Client.ZstdDecompressor.Error;
- pub const DecompressError = FakeDeflateDecompressor.Error || FakeGzipDecompressor.Error || FakeZstdDecompressor.Error;
-
- zlib_init: ZlibInitError, // error.CompressionInitializationFailed
- gzip_init: GzipInitError, // error.CompressionInitializationFailed
- connect: TcpConnectError, // error.ConnectionFailed
- ca_bundle: CaBundleError, // error.CertificateAuthorityBundleFailed
- tls: TlsError, // error.TlsInitializationFailed
- write: WriteError, // error.WriteFailed
- read: ReadError, // error.ReadFailed
- decompress: DecompressError, // error.ReadFailed
-};
+proxy: ?HttpProxy = null,
/// A set of linked lists of connections that can be reused.
pub const ConnectionPool = struct {
@@ -82,6 +41,7 @@ pub const ConnectionPool = struct {
host: []u8,
port: u16,
+ proxied: bool = false,
closing: bool = false,
pub fn deinit(self: *StoredConnection, client: *Client) void {
@@ -158,7 +118,12 @@ pub const ConnectionPool = struct {
return client.allocator.destroy(popped);
}
- pool.free.append(node);
+ if (node.data.proxied) {
+ pool.free.prepend(node); // proxied connections go to the end of the queue, always try direct connections first
+ } else {
+ pool.free.append(node);
+ }
+
pool.free_len += 1;
}
@@ -202,30 +167,38 @@ pub const Connection = struct {
pub const Protocol = enum { plain, tls };
- pub fn read(conn: *Connection, buffer: []u8) !usize {
- switch (conn.protocol) {
- .plain => return conn.stream.read(buffer),
- .tls => return conn.tls_client.read(conn.stream, buffer),
- }
+ pub fn read(conn: *Connection, buffer: []u8) ReadError!usize {
+ return switch (conn.protocol) {
+ .plain => conn.stream.read(buffer),
+ .tls => conn.tls_client.read(conn.stream, buffer),
+ } catch |err| switch (err) {
+ error.TlsConnectionTruncated, error.TlsRecordOverflow, error.TlsDecodeError, error.TlsBadRecordMac, error.TlsBadLength, error.TlsIllegalParameter, error.TlsUnexpectedMessage => return error.TlsFailure,
+ error.TlsAlert => return error.TlsAlert,
+ error.ConnectionTimedOut => return error.ConnectionTimedOut,
+ error.ConnectionResetByPeer, error.BrokenPipe => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedReadFailure,
+ };
}
- pub fn readAtLeast(conn: *Connection, buffer: []u8, len: usize) !usize {
- switch (conn.protocol) {
- .plain => return conn.stream.readAtLeast(buffer, len),
- .tls => return conn.tls_client.readAtLeast(conn.stream, buffer, len),
- }
+ pub fn readAtLeast(conn: *Connection, buffer: []u8, len: usize) ReadError!usize {
+ return switch (conn.protocol) {
+ .plain => conn.stream.readAtLeast(buffer, len),
+ .tls => conn.tls_client.readAtLeast(conn.stream, buffer, len),
+ } catch |err| switch (err) {
+ error.TlsConnectionTruncated, error.TlsRecordOverflow, error.TlsDecodeError, error.TlsBadRecordMac, error.TlsBadLength, error.TlsIllegalParameter, error.TlsUnexpectedMessage => return error.TlsFailure,
+ error.TlsAlert => return error.TlsAlert,
+ error.ConnectionTimedOut => return error.ConnectionTimedOut,
+ error.ConnectionResetByPeer, error.BrokenPipe => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedReadFailure,
+ };
}
- pub const ReadError = net.Stream.ReadError || error{
- TlsConnectionTruncated,
- TlsRecordOverflow,
- TlsDecodeError,
+ pub const ReadError = error{
+ TlsFailure,
TlsAlert,
- TlsBadRecordMac,
- Overflow,
- TlsBadLength,
- TlsIllegalParameter,
- TlsUnexpectedMessage,
+ ConnectionTimedOut,
+ ConnectionResetByPeer,
+ UnexpectedReadFailure,
};
pub const Reader = std.io.Reader(*Connection, ReadError, read);
@@ -235,20 +208,30 @@ pub const Connection = struct {
}
pub fn writeAll(conn: *Connection, buffer: []const u8) !void {
- switch (conn.protocol) {
- .plain => return conn.stream.writeAll(buffer),
- .tls => return conn.tls_client.writeAll(conn.stream, buffer),
- }
+ return switch (conn.protocol) {
+ .plain => conn.stream.writeAll(buffer),
+ .tls => conn.tls_client.writeAll(conn.stream, buffer),
+ } catch |err| switch (err) {
+ error.BrokenPipe, error.ConnectionResetByPeer => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedWriteFailure,
+ };
}
pub fn write(conn: *Connection, buffer: []const u8) !usize {
- switch (conn.protocol) {
- .plain => return conn.stream.write(buffer),
- .tls => return conn.tls_client.write(conn.stream, buffer),
- }
+ return switch (conn.protocol) {
+ .plain => conn.stream.write(buffer),
+ .tls => conn.tls_client.write(conn.stream, buffer),
+ } catch |err| switch (err) {
+ error.BrokenPipe, error.ConnectionResetByPeer => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedWriteFailure,
+ };
}
- pub const WriteError = net.Stream.WriteError || error{};
+ pub const WriteError = error{
+ ConnectionResetByPeer,
+ UnexpectedWriteFailure,
+ };
+
pub const Writer = std.io.Writer(*Connection, WriteError, write);
pub fn writer(conn: *Connection) Writer {
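
The rewritten read/write paths collapse transport failures into the two small error sets above, so downstream code can enumerate every case. A sketch against the new ReadError (Connection as defined above; the function and error names here are hypothetical):

    fn readSome(conn: *Connection, buf: []u8) error{ TlsError, TransportError }!usize {
        return conn.read(buf) catch |err| switch (err) {
            error.TlsFailure, error.TlsAlert => error.TlsError,
            error.ConnectionTimedOut,
            error.ConnectionResetByPeer,
            error.UnexpectedReadFailure,
            => error.TransportError,
        };
    }
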
@@ -371,140 +354,125 @@ pub const Compression = union(enum) {
/// A HTTP response originating from a server.
pub const Response = struct {
- pub const Headers = struct {
- status: http.Status,
- version: http.Version,
- location: ?[]const u8 = null,
- content_length: ?u64 = null,
- transfer_encoding: ?http.TransferEncoding = null,
- transfer_compression: ?http.ContentEncoding = null,
- connection: http.Connection = .close,
- upgrade: ?[]const u8 = null,
-
- pub const ParseError = error{
- ShortHttpStatusLine,
- BadHttpVersion,
- HttpHeadersInvalid,
- HttpHeaderContinuationsUnsupported,
- HttpTransferEncodingUnsupported,
- HttpConnectionHeaderUnsupported,
- InvalidContentLength,
- CompressionNotSupported,
- };
-
- pub fn parse(bytes: []const u8) ParseError!Headers {
- var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
-
- const first_line = it.next() orelse return error.HttpHeadersInvalid;
- if (first_line.len < 12)
- return error.ShortHttpStatusLine;
-
- const version: http.Version = switch (int64(first_line[0..8])) {
- int64("HTTP/1.0") => .@"HTTP/1.0",
- int64("HTTP/1.1") => .@"HTTP/1.1",
- else => return error.BadHttpVersion,
- };
- if (first_line[8] != ' ') return error.HttpHeadersInvalid;
- const status = @intToEnum(http.Status, parseInt3(first_line[9..12].*));
-
- var headers: Headers = .{
- .version = version,
- .status = status,
- };
-
- while (it.next()) |line| {
- if (line.len == 0) return error.HttpHeadersInvalid;
- switch (line[0]) {
- ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
- else => {},
- }
+ pub const ParseError = Allocator.Error || error{
+ ShortHttpStatusLine,
+ BadHttpVersion,
+ HttpHeadersInvalid,
+ HttpHeaderContinuationsUnsupported,
+ HttpTransferEncodingUnsupported,
+ HttpConnectionHeaderUnsupported,
+ InvalidContentLength,
+ CompressionNotSupported,
+ };
- var line_it = mem.tokenize(u8, line, ": ");
- const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
- const header_value = line_it.rest();
- if (std.ascii.eqlIgnoreCase(header_name, "location")) {
- if (headers.location != null) return error.HttpHeadersInvalid;
- headers.location = header_value;
- } else if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
- if (headers.content_length != null) return error.HttpHeadersInvalid;
- headers.content_length = std.fmt.parseInt(u64, header_value, 10) catch return error.InvalidContentLength;
- } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
- // Transfer-Encoding: second, first
- // Transfer-Encoding: deflate, chunked
- var iter = mem.splitBackwards(u8, header_value, ",");
-
- if (iter.next()) |first| {
- const trimmed = mem.trim(u8, first, " ");
-
- if (std.meta.stringToEnum(http.TransferEncoding, trimmed)) |te| {
- if (headers.transfer_encoding != null) return error.HttpHeadersInvalid;
- headers.transfer_encoding = te;
- } else if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
- if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
- headers.transfer_compression = ce;
- } else {
- return error.HttpTransferEncodingUnsupported;
- }
- }
+ pub fn parse(res: *Response, bytes: []const u8) ParseError!void {
+ var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
- if (iter.next()) |second| {
- if (headers.transfer_compression != null) return error.HttpTransferEncodingUnsupported;
+ const first_line = it.next() orelse return error.HttpHeadersInvalid;
+ if (first_line.len < 12)
+ return error.ShortHttpStatusLine;
- const trimmed = mem.trim(u8, second, " ");
+ const version: http.Version = switch (int64(first_line[0..8])) {
+ int64("HTTP/1.0") => .@"HTTP/1.0",
+ int64("HTTP/1.1") => .@"HTTP/1.1",
+ else => return error.BadHttpVersion,
+ };
+ if (first_line[8] != ' ') return error.HttpHeadersInvalid;
+ const status = @intToEnum(http.Status, parseInt3(first_line[9..12].*));
+ const reason = mem.trimLeft(u8, first_line[12..], " ");
+
+ res.version = version;
+ res.status = status;
+ res.reason = reason;
+
+ while (it.next()) |line| {
+ if (line.len == 0) return error.HttpHeadersInvalid;
+ switch (line[0]) {
+ ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
+ else => {},
+ }
- if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
- headers.transfer_compression = ce;
- } else {
- return error.HttpTransferEncodingUnsupported;
- }
+ var line_it = mem.tokenize(u8, line, ": ");
+ const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
+ const header_value = line_it.rest();
+
+ try res.headers.append(header_name, header_value);
+
+ if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
+ if (res.content_length != null) return error.HttpHeadersInvalid;
+ res.content_length = std.fmt.parseInt(u64, header_value, 10) catch return error.InvalidContentLength;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
+ // Transfer-Encoding: second, first
+ // Transfer-Encoding: deflate, chunked
+ var iter = mem.splitBackwards(u8, header_value, ",");
+
+ if (iter.next()) |first| {
+ const trimmed = mem.trim(u8, first, " ");
+
+ if (std.meta.stringToEnum(http.TransferEncoding, trimmed)) |te| {
+ if (res.transfer_encoding != null) return error.HttpHeadersInvalid;
+ res.transfer_encoding = te;
+ } else if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ if (res.transfer_compression != null) return error.HttpHeadersInvalid;
+ res.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
}
+ }
- if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
- } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
- if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
+ if (iter.next()) |second| {
+ if (res.transfer_compression != null) return error.HttpTransferEncodingUnsupported;
- const trimmed = mem.trim(u8, header_value, " ");
+ const trimmed = mem.trim(u8, second, " ");
if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
- headers.transfer_compression = ce;
+ res.transfer_compression = ce;
} else {
return error.HttpTransferEncodingUnsupported;
}
- } else if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
- if (std.ascii.eqlIgnoreCase(header_value, "keep-alive")) {
- headers.connection = .keep_alive;
- } else if (std.ascii.eqlIgnoreCase(header_value, "close")) {
- headers.connection = .close;
- } else {
- return error.HttpConnectionHeaderUnsupported;
- }
- } else if (std.ascii.eqlIgnoreCase(header_name, "upgrade")) {
- headers.upgrade = header_value;
}
- }
- return headers;
- }
+ if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
+ if (res.transfer_compression != null) return error.HttpHeadersInvalid;
- inline fn int64(array: *const [8]u8) u64 {
- return @bitCast(u64, array.*);
- }
+ const trimmed = mem.trim(u8, header_value, " ");
- fn parseInt3(nnn: @Vector(3, u8)) u10 {
- const zero: @Vector(3, u8) = .{ '0', '0', '0' };
- const mmm: @Vector(3, u10) = .{ 100, 10, 1 };
- return @reduce(.Add, @as(@Vector(3, u10), nnn -% zero) *% mmm);
+ if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ res.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
+ }
+ }
}
+ }
- test parseInt3 {
- const expectEqual = testing.expectEqual;
- try expectEqual(@as(u10, 0), parseInt3("000".*));
- try expectEqual(@as(u10, 418), parseInt3("418".*));
- try expectEqual(@as(u10, 999), parseInt3("999".*));
- }
- };
+ inline fn int64(array: *const [8]u8) u64 {
+ return @bitCast(u64, array.*);
+ }
- headers: Headers = undefined,
+ fn parseInt3(nnn: @Vector(3, u8)) u10 {
+ const zero: @Vector(3, u8) = .{ '0', '0', '0' };
+ const mmm: @Vector(3, u10) = .{ 100, 10, 1 };
+ return @reduce(.Add, @as(@Vector(3, u10), nnn -% zero) *% mmm);
+ }
+
+ test parseInt3 {
+ const expectEqual = testing.expectEqual;
+ try expectEqual(@as(u10, 0), parseInt3("000".*));
+ try expectEqual(@as(u10, 418), parseInt3("418".*));
+ try expectEqual(@as(u10, 999), parseInt3("999".*));
+ }
+
+ version: http.Version,
+ status: http.Status,
+ reason: []const u8,
+
+ content_length: ?u64 = null,
+ transfer_encoding: ?http.TransferEncoding = null,
+ transfer_compression: ?http.ContentEncoding = null,
+
+ headers: http.Headers,
parser: proto.HeadersParser,
compression: Compression = .none,
skip: bool = false,
@@ -514,22 +482,14 @@ pub const Response = struct {
///
/// Order of operations: request[ -> write -> finish] -> do -> read
pub const Request = struct {
- pub const Headers = struct {
- version: http.Version = .@"HTTP/1.1",
- method: http.Method = .GET,
- user_agent: []const u8 = "zig (std.http)",
- connection: http.Connection = .keep_alive,
- transfer_encoding: RequestTransfer = .none,
-
- custom: []const http.CustomHeader = &[_]http.CustomHeader{},
- };
-
uri: Uri,
client: *Client,
connection: *ConnectionPool.Node,
- /// These are stored in Request so that they are available when following
- /// redirects.
- headers: Headers,
+
+ method: http.Method,
+ version: http.Version = .@"HTTP/1.1",
+ headers: http.Headers,
+ transfer_encoding: RequestTransfer = .none,
redirects_left: u32,
handle_redirects: bool,
@@ -549,80 +509,104 @@ pub const Request = struct {
}
if (req.response.parser.header_bytes_owned) {
+ req.response.headers.deinit();
req.response.parser.header_bytes.deinit(req.client.allocator);
}
if (!req.response.parser.done) {
// If the response wasn't fully read, then we need to close the connection.
req.connection.data.closing = true;
- req.client.connection_pool.release(req.client, req.connection);
}
+ req.client.connection_pool.release(req.client, req.connection);
+
req.arena.deinit();
req.* = undefined;
}
- pub fn start(req: *Request, uri: Uri, headers: Headers) !void {
+ pub const StartError = BufferedConnection.WriteError || error{ InvalidContentLength, UnsupportedTransferEncoding };
+
+ /// Send the request to the server.
+ pub fn start(req: *Request) StartError!void {
var buffered = std.io.bufferedWriter(req.connection.data.buffered.writer());
const w = buffered.writer();
- const escaped_path = try Uri.escapePath(req.client.allocator, uri.path);
- defer req.client.allocator.free(escaped_path);
-
- const escaped_query = if (uri.query) |q| try Uri.escapeQuery(req.client.allocator, q) else null;
- defer if (escaped_query) |q| req.client.allocator.free(q);
+ try w.writeAll(@tagName(req.method));
+ try w.writeByte(' ');
- const escaped_fragment = if (uri.fragment) |f| try Uri.escapeQuery(req.client.allocator, f) else null;
- defer if (escaped_fragment) |f| req.client.allocator.free(f);
+ if (req.method == .CONNECT) {
+ try w.writeAll(req.uri.host.?);
+ try w.writeByte(':');
+ try w.print("{}", .{req.uri.port.?});
+ } else if (req.connection.data.proxied) {
+ // proxied connections require the full uri
+ try w.print("{+/}", .{req.uri});
+ } else {
+ try w.print("{/}", .{req.uri});
+ }
- try w.writeAll(@tagName(headers.method));
try w.writeByte(' ');
- if (escaped_path.len == 0) {
- try w.writeByte('/');
- } else {
- try w.writeAll(escaped_path);
+ try w.writeAll(@tagName(req.version));
+ try w.writeAll("\r\n");
+
+ if (!req.headers.contains("host")) {
+ try w.writeAll("Host: ");
+ try w.writeAll(req.uri.host.?);
+ try w.writeAll("\r\n");
}
- if (escaped_query) |q| {
- try w.writeByte('?');
- try w.writeAll(q);
+
+ if (!req.headers.contains("user-agent")) {
+ try w.writeAll("User-Agent: zig/");
+ try w.writeAll(@import("builtin").zig_version_string);
+ try w.writeAll(" (std.http)\r\n");
}
- if (escaped_fragment) |f| {
- try w.writeByte('#');
- try w.writeAll(f);
+
+ if (!req.headers.contains("connection")) {
+ try w.writeAll("Connection: keep-alive\r\n");
}
- try w.writeByte(' ');
- try w.writeAll(@tagName(headers.version));
- try w.writeAll("\r\nHost: ");
- try w.writeAll(uri.host.?);
- try w.writeAll("\r\nUser-Agent: ");
- try w.writeAll(headers.user_agent);
- if (headers.connection == .close) {
- try w.writeAll("\r\nConnection: close");
- } else {
- try w.writeAll("\r\nConnection: keep-alive");
+
+ if (!req.headers.contains("accept-encoding")) {
+ try w.writeAll("Accept-Encoding: gzip, deflate, zstd\r\n");
}
- try w.writeAll("\r\nAccept-Encoding: gzip, deflate, zstd");
- try w.writeAll("\r\nTE: gzip, deflate"); // TODO: add trailers when someone finds a nice way to integrate them without completely invalidating all pointers to headers.
- switch (headers.transfer_encoding) {
- .chunked => try w.writeAll("\r\nTransfer-Encoding: chunked"),
- .content_length => |content_length| try w.print("\r\nContent-Length: {d}", .{content_length}),
- .none => {},
+ if (!req.headers.contains("te")) {
+ try w.writeAll("TE: gzip, deflate, trailers\r\n");
}
- for (headers.custom) |header| {
- try w.writeAll("\r\n");
- try w.writeAll(header.name);
- try w.writeAll(": ");
- try w.writeAll(header.value);
+ const has_transfer_encoding = req.headers.contains("transfer-encoding");
+ const has_content_length = req.headers.contains("content-length");
+
+ if (!has_transfer_encoding and !has_content_length) {
+ switch (req.transfer_encoding) {
+ .chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
+ .content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
+ .none => {},
+ }
+ } else {
+ if (has_content_length) {
+ const content_length = std.fmt.parseInt(u64, req.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
+
+ req.transfer_encoding = .{ .content_length = content_length };
+ } else if (has_transfer_encoding) {
+ const transfer_encoding = req.headers.getFirstValue("transfer-encoding").?;
+ if (std.mem.eql(u8, transfer_encoding, "chunked")) {
+ req.transfer_encoding = .chunked;
+ } else {
+ return error.UnsupportedTransferEncoding;
+ }
+ } else {
+ req.transfer_encoding = .none;
+ }
}
- try w.writeAll("\r\n\r\n");
+ try w.print("{}", .{req.headers});
+
+ try w.writeAll("\r\n");
try buffered.flush();
}
- pub const TransferReadError = proto.HeadersParser.ReadError || error{ReadFailed};
+ pub const TransferReadError = BufferedConnection.ReadError || proto.HeadersParser.ReadError;
pub const TransferReader = std.io.Reader(*Request, TransferReadError, transferRead);
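
Taken together with the Headers change, the request flow documented above ("request -> do -> read" for bodyless requests) now looks roughly like the following sketch. The empty options literal assumes the option fields (version, max_redirects, header_strategy) all have defaults, which this diff only shows in part:

    const std = @import("std");

    fn fetchBody(gpa: std.mem.Allocator, url: []const u8) ![]u8 {
        var client = std.http.Client{ .allocator = gpa };
        defer client.deinit();

        var headers = std.http.Headers{ .allocator = gpa };
        defer headers.deinit();
        try headers.append("accept", "*/*");

        // request() now takes the method explicitly; the Headers value is
        // kept on the request so redirects can reuse it.
        var req = try client.request(.GET, try std.Uri.parse(url), headers, .{});
        defer req.deinit();

        try req.do(); // waits for the response head, following redirects

        return req.reader().readAllAlloc(gpa, 1024 * 1024);
    }
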
@@ -635,10 +619,7 @@ pub const Request = struct {
var index: usize = 0;
while (index == 0) {
- const amt = req.response.parser.read(&req.connection.data.buffered, buf[index..], req.response.skip) catch |err| {
- req.client.last_error = .{ .read = err };
- return error.ReadFailed;
- };
+ const amt = try req.response.parser.read(&req.connection.data.buffered, buf[index..], req.response.skip);
if (amt == 0 and req.response.parser.done) break;
index += amt;
}
@@ -646,7 +627,7 @@ pub const Request = struct {
return index;
}
- pub const DoError = RequestError || TransferReadError || proto.HeadersParser.CheckCompleteHeadError || Response.Headers.ParseError || Uri.ParseError || error{ TooManyHttpRedirects, HttpRedirectMissingLocation, CompressionInitializationFailed };
+ pub const DoError = RequestError || TransferReadError || proto.HeadersParser.CheckCompleteHeadError || Response.ParseError || Uri.ParseError || error{ TooManyHttpRedirects, HttpRedirectMissingLocation, CompressionInitializationFailed, CompressionNotSupported };
/// Waits for a response from the server and parses any headers that are sent.
/// This function will block until the final response is received.
@@ -656,10 +637,7 @@ pub const Request = struct {
pub fn do(req: *Request) DoError!void {
while (true) { // handle redirects
while (true) { // read headers
- req.connection.data.buffered.fill() catch |err| {
- req.client.last_error = .{ .read = err };
- return error.ReadFailed;
- };
+ try req.connection.data.buffered.fill();
const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.data.buffered.peek());
req.connection.data.buffered.clear(@intCast(u16, nchecked));
@@ -667,27 +645,39 @@ pub const Request = struct {
if (req.response.parser.state.isContent()) break;
}
- req.response.headers = try Response.Headers.parse(req.response.parser.header_bytes.items);
+ req.response.headers = http.Headers{ .allocator = req.client.allocator, .owned = false };
+ try req.response.parse(req.response.parser.header_bytes.items);
+
+ if (req.response.status == .switching_protocols) {
+ req.connection.data.closing = false;
+ req.response.parser.done = true;
+ }
- if (req.response.headers.status == .switching_protocols) {
+ if (req.method == .CONNECT and req.response.status == .ok) {
req.connection.data.closing = false;
+ req.connection.data.proxied = true;
req.response.parser.done = true;
}
- if (req.headers.connection == .keep_alive and req.response.headers.connection == .keep_alive) {
+ const req_connection = req.headers.getFirstValue("connection");
+ const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);
+
+ const res_connection = req.response.headers.getFirstValue("connection");
+ const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?);
+ if (req_keepalive and res_keepalive) {
req.connection.data.closing = false;
} else {
req.connection.data.closing = true;
}
- if (req.response.headers.transfer_encoding) |te| {
+ if (req.response.transfer_encoding) |te| {
switch (te) {
.chunked => {
req.response.parser.next_chunk_length = 0;
req.response.parser.state = .chunk_head_size;
},
}
- } else if (req.response.headers.content_length) |cl| {
+ } else if (req.response.content_length) |cl| {
req.response.parser.next_chunk_length = cl;
if (cl == 0) req.response.parser.done = true;
@@ -695,7 +685,7 @@ pub const Request = struct {
req.response.parser.done = true;
}
- if (req.response.headers.status.class() == .redirect and req.handle_redirects) {
+ if (req.response.status.class() == .redirect and req.handle_redirects) {
req.response.skip = true;
const empty = @as([*]u8, undefined)[0..0];
@@ -703,7 +693,7 @@ pub const Request = struct {
if (req.redirects_left == 0) return error.TooManyHttpRedirects;
- const location = req.response.headers.location orelse
+ const location = req.response.headers.getFirstValue("location") orelse
return error.HttpRedirectMissingLocation;
const new_url = Uri.parse(location) catch try Uri.parseWithoutScheme(location);
@@ -714,7 +704,8 @@ pub const Request = struct {
req.arena.deinit();
req.arena = new_arena;
- const new_req = try req.client.request(resolved_url, req.headers, .{
+ const new_req = try req.client.request(req.method, resolved_url, req.headers, .{
+ .version = req.version,
.max_redirects = req.redirects_left - 1,
.header_strategy = if (req.response.parser.header_bytes_owned) .{
.dynamic = req.response.parser.max_header_bytes,
@@ -727,19 +718,13 @@ pub const Request = struct {
} else {
req.response.skip = false;
if (!req.response.parser.done) {
- if (req.response.headers.transfer_compression) |tc| switch (tc) {
+ if (req.response.transfer_compression) |tc| switch (tc) {
.compress => return error.CompressionNotSupported,
.deflate => req.response.compression = .{
- .deflate = std.compress.zlib.zlibStream(req.client.allocator, req.transferReader()) catch |err| {
- req.client.last_error = .{ .zlib_init = err };
- return error.CompressionInitializationFailed;
- },
+ .deflate = std.compress.zlib.zlibStream(req.client.allocator, req.transferReader()) catch return error.CompressionInitializationFailed,
},
.gzip => req.response.compression = .{
- .gzip = std.compress.gzip.decompress(req.client.allocator, req.transferReader()) catch |err| {
- req.client.last_error = .{ .gzip_init = err };
- return error.CompressionInitializationFailed;
- },
+ .gzip = std.compress.gzip.decompress(req.client.allocator, req.transferReader()) catch return error.CompressionInitializationFailed,
},
.zstd => req.response.compression = .{
.zstd = std.compress.zstd.decompressStream(req.client.allocator, req.transferReader()),
@@ -752,7 +737,7 @@ pub const Request = struct {
}
}
- pub const ReadError = TransferReadError || proto.HeadersParser.CheckCompleteHeadError;
+ pub const ReadError = TransferReadError || proto.HeadersParser.CheckCompleteHeadError || error{ DecompressionFailure, InvalidTrailers };
pub const Reader = std.io.Reader(*Request, ReadError, read);
@@ -762,57 +747,47 @@ pub const Request = struct {
/// Reads data from the response body. Must be called after `do`.
pub fn read(req: *Request, buffer: []u8) ReadError!usize {
- while (true) {
- const out_index = switch (req.response.compression) {
- .deflate => |*deflate| deflate.read(buffer) catch |err| {
- req.client.last_error = .{ .decompress = err };
- err catch {};
- return error.ReadFailed;
- },
- .gzip => |*gzip| gzip.read(buffer) catch |err| {
- req.client.last_error = .{ .decompress = err };
- err catch {};
- return error.ReadFailed;
- },
- .zstd => |*zstd| zstd.read(buffer) catch |err| {
- req.client.last_error = .{ .decompress = err };
- err catch {};
- return error.ReadFailed;
- },
- else => try req.transferRead(buffer),
- };
-
- if (out_index == 0) {
- while (!req.response.parser.state.isContent()) { // read trailing headers
- req.connection.data.buffered.fill() catch |err| {
- req.client.last_error = .{ .read = err };
- return error.ReadFailed;
- };
+ const out_index = switch (req.response.compression) {
+ .deflate => |*deflate| deflate.read(buffer) catch return error.DecompressionFailure,
+ .gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
+ .zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
+ else => try req.transferRead(buffer),
+ };
- const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.data.buffered.peek());
- req.connection.data.buffered.clear(@intCast(u16, nchecked));
- }
+ if (out_index == 0) {
+ const has_trail = !req.response.parser.state.isContent();
+
+ while (!req.response.parser.state.isContent()) { // read trailing headers
+ try req.connection.data.buffered.fill();
+
+ const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.data.buffered.peek());
+ req.connection.data.buffered.clear(@intCast(u16, nchecked));
}
- return out_index;
+ if (has_trail) {
+ req.response.headers = http.Headers{ .allocator = req.client.allocator, .owned = false };
+
+ // The headers parsed before the trailers are already known to be valid, so re-parsing them cannot fail.
+ // parse will *only* fail here on a malformed trailer.
+ req.response.parse(req.response.parser.header_bytes.items) catch return error.InvalidTrailers;
+ }
}
+
+ return out_index;
}
/// Reads data from the response body. Must be called after `do`.
pub fn readAll(req: *Request, buffer: []u8) !usize {
var index: usize = 0;
while (index < buffer.len) {
- const amt = read(req, buffer[index..]) catch |err| {
- req.client.last_error = .{ .read = err };
- return error.ReadFailed;
- };
+ const amt = try read(req, buffer[index..]);
if (amt == 0) break;
index += amt;
}
return index;
}
- pub const WriteError = error{ WriteFailed, NotWriteable, MessageTooLong };
+ pub const WriteError = BufferedConnection.WriteError || error{ NotWriteable, MessageTooLong };
pub const Writer = std.io.Writer(*Request, WriteError, write);
@@ -824,28 +799,16 @@ pub const Request = struct {
pub fn write(req: *Request, bytes: []const u8) WriteError!usize {
switch (req.headers.transfer_encoding) {
.chunked => {
- req.connection.data.conn.writer().print("{x}\r\n", .{bytes.len}) catch |err| {
- req.client.last_error = .{ .write = err };
- return error.WriteFailed;
- };
- req.connection.data.conn.writeAll(bytes) catch |err| {
- req.client.last_error = .{ .write = err };
- return error.WriteFailed;
- };
- req.connection.data.conn.writeAll("\r\n") catch |err| {
- req.client.last_error = .{ .write = err };
- return error.WriteFailed;
- };
+ try req.connection.data.conn.writer().print("{x}\r\n", .{bytes.len});
+ try req.connection.data.conn.writeAll(bytes);
+ try req.connection.data.conn.writeAll("\r\n");
return bytes.len;
},
.content_length => |*len| {
if (len.* < bytes.len) return error.MessageTooLong;
- const amt = req.connection.data.conn.write(bytes) catch |err| {
- req.client.last_error = .{ .write = err };
- return error.WriteFailed;
- };
+ const amt = try req.connection.data.conn.write(bytes);
len.* -= amt;
return amt;
},
@@ -853,19 +816,39 @@ pub const Request = struct {
}
}
+ pub fn writeAll(req: *Request, bytes: []const u8) WriteError!void {
+ var index: usize = 0;
+ while (index < bytes.len) {
+ index += try write(req, bytes[index..]);
+ }
+ }
+
+ pub const FinishError = WriteError || error{MessageNotCompleted};
+
/// Finish the body of a request. This notifies the server that you have no more data to send.
- pub fn finish(req: *Request) !void {
- switch (req.headers.transfer_encoding) {
- .chunked => req.connection.data.conn.writeAll("0\r\n") catch |err| {
- req.client.last_error = .{ .write = err };
- return error.WriteFailed;
- },
+ pub fn finish(req: *Request) FinishError!void {
+ switch (req.transfer_encoding) {
+ .chunked => try req.connection.data.conn.writeAll("0\r\n\r\n"),
.content_length => |len| if (len != 0) return error.MessageNotCompleted,
.none => {},
}
}
};
+pub const HttpProxy = struct {
+ pub const ProxyAuthentication = union(enum) {
+ basic: []const u8,
+ custom: []const u8,
+ };
+
+ protocol: Connection.Protocol,
+ host: []const u8,
+ port: ?u16 = null,
+
+ /// The value for the Proxy-Authorization header.
+ auth: ?ProxyAuthentication = null,
+};
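A minimal sketch of wiring a client to a forward proxy with the struct above (assuming the remaining Client fields have defaults and that `client.proxy` is set before the first request; the host and credentials are placeholders):

    var client = Client{ .allocator = gpa };
    defer client.deinit();

    // Route every request through the proxy; port defaults to 80 for .plain.
    client.proxy = .{
        .protocol = .plain,
        .host = "proxy.internal.example", // placeholder
        .auth = .{ .basic = "dXNlcjpwYXNz" }, // base64("user:pass"), placeholder
    };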
+
/// Release all associated resources with the client.
/// TODO: currently leaks all request allocated data
pub fn deinit(client: *Client) void {
@@ -875,11 +858,11 @@ pub fn deinit(client: *Client) void {
client.* = undefined;
}
-pub const ConnectError = Allocator.Error || error{ ConnectionFailed, TlsInitializationFailed };
+pub const ConnectUnproxiedError = Allocator.Error || error{ ConnectionRefused, NetworkUnreachable, ConnectionTimedOut, ConnectionResetByPeer, TemporaryNameServerFailure, NameServerFailure, UnknownHostName, HostLacksNetworkAddresses, UnexpectedConnectFailure, TlsInitializationFailed };
/// Connect to `host:port` using the specified protocol. This will reuse a connection if one is already open.
/// This function is threadsafe.
-pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) ConnectError!*ConnectionPool.Node {
+pub fn connectUnproxied(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) ConnectUnproxiedError!*ConnectionPool.Node {
if (client.connection_pool.findConnection(.{
.host = host,
.port = port,
@@ -891,9 +874,16 @@ pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connectio
errdefer client.allocator.destroy(conn);
conn.* = .{ .data = undefined };
- const stream = net.tcpConnectToHost(client.allocator, host, port) catch |err| {
- client.last_error = .{ .connect = err };
- return error.ConnectionFailed;
+ const stream = net.tcpConnectToHost(client.allocator, host, port) catch |err| switch (err) {
+ error.ConnectionRefused => return error.ConnectionRefused,
+ error.NetworkUnreachable => return error.NetworkUnreachable,
+ error.ConnectionTimedOut => return error.ConnectionTimedOut,
+ error.ConnectionResetByPeer => return error.ConnectionResetByPeer,
+ error.TemporaryNameServerFailure => return error.TemporaryNameServerFailure,
+ error.NameServerFailure => return error.NameServerFailure,
+ error.UnknownHostName => return error.UnknownHostName,
+ error.HostLacksNetworkAddresses => return error.HostLacksNetworkAddresses,
+ else => return error.UnexpectedConnectFailure,
};
errdefer stream.close();
@@ -914,10 +904,7 @@ pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connectio
conn.data.buffered.conn.tls_client = try client.allocator.create(std.crypto.tls.Client);
errdefer client.allocator.destroy(conn.data.buffered.conn.tls_client);
- conn.data.buffered.conn.tls_client.* = std.crypto.tls.Client.init(stream, client.ca_bundle, host) catch |err| {
- client.last_error = .{ .tls = err };
- return error.TlsInitializationFailed;
- };
+ conn.data.buffered.conn.tls_client.* = std.crypto.tls.Client.init(stream, client.ca_bundle, host) catch return error.TlsInitializationFailed;
// This is appropriate for HTTPS because the HTTP headers contain
// the content length which is used to detect truncation attacks.
conn.data.buffered.conn.tls_client.allow_truncation_attacks = true;
@@ -929,19 +916,51 @@ pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connectio
return conn;
}
-pub const RequestError = ConnectError || error{
+// Prevents a dependency loop in request()
+const ConnectErrorPartial = ConnectUnproxiedError || error{ UnsupportedUrlScheme, ConnectionRefused };
+pub const ConnectError = ConnectErrorPartial || RequestError;
+
+pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) ConnectError!*ConnectionPool.Node {
+ if (client.connection_pool.findConnection(.{
+ .host = host,
+ .port = port,
+ .is_tls = protocol == .tls,
+ })) |node|
+ return node;
+
+ if (client.proxy) |proxy| {
+ const proxy_port: u16 = proxy.port orelse switch (proxy.protocol) {
+ .plain => 80,
+ .tls => 443,
+ };
+
+ const conn = try client.connectUnproxied(proxy.host, proxy_port, proxy.protocol);
+ conn.data.proxied = true;
+
+ return conn;
+ } else {
+ return client.connectUnproxied(host, port, protocol);
+ }
+}
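Because `connect` now consults the connection pool and the proxy before dialing, a caller may also acquire a connection up front and hand it to `request` through the new `Options.connection` field (see `Options` below); a sketch, assuming `client`, `uri`, and `headers` already exist:

    const conn = try client.connect("example.com", 443, .tls);
    var req = try client.request(.GET, uri, headers, .{ .connection = conn });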
+
+pub const RequestError = ConnectUnproxiedError || ConnectErrorPartial || Request.StartError || std.fmt.ParseIntError || BufferedConnection.WriteError || error{
UnsupportedUrlScheme,
UriMissingHost,
- CertificateAuthorityBundleFailed,
- WriteFailed,
+ CertificateBundleLoadFailure,
+ UnsupportedTransferEncoding,
};
pub const Options = struct {
+ version: http.Version = .@"HTTP/1.1",
+
handle_redirects: bool = true,
max_redirects: u32 = 3,
header_strategy: HeaderStrategy = .{ .dynamic = 16 * 1024 },
+ /// Must be an already acquired connection.
+ connection: ?*ConnectionPool.Node = null,
+
pub const HeaderStrategy = union(enum) {
/// In this case, the client's Allocator will be used to store the
/// entire HTTP header. This value is the maximum total size of
@@ -965,7 +984,7 @@ pub const protocol_map = std.ComptimeStringMap(Connection.Protocol, .{
/// Form and send a http request to a server.
/// This function is threadsafe.
-pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Options) RequestError!Request {
+pub fn request(client: *Client, method: http.Method, uri: Uri, headers: http.Headers, options: Options) RequestError!Request {
const protocol = protocol_map.get(uri.scheme) orelse return error.UnsupportedUrlScheme;
const port: u16 = uri.port orelse switch (protocol) {
@@ -980,22 +999,27 @@ pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Opt
defer client.ca_bundle_mutex.unlock();
if (client.next_https_rescan_certs) {
- client.ca_bundle.rescan(client.allocator) catch |err| {
- client.last_error = .{ .ca_bundle = err };
- return error.CertificateAuthorityBundleFailed;
- };
+ client.ca_bundle.rescan(client.allocator) catch return error.CertificateBundleLoadFailure;
@atomicStore(bool, &client.next_https_rescan_certs, false, .Release);
}
}
+ const conn = options.connection orelse try client.connect(host, port, protocol);
+
var req: Request = .{
.uri = uri,
.client = client,
- .connection = try client.connect(host, port, protocol),
+ .connection = conn,
.headers = headers,
+ .method = method,
+ .version = options.version,
.redirects_left = options.max_redirects,
.handle_redirects = options.handle_redirects,
.response = .{
+ .status = undefined,
+ .reason = undefined,
+ .version = undefined,
+ .headers = undefined,
.parser = switch (options.header_strategy) {
.dynamic => |max| proto.HeadersParser.initDynamic(max),
.static => |buf| proto.HeadersParser.initStatic(buf),
@@ -1007,14 +1031,6 @@ pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Opt
req.arena = std.heap.ArenaAllocator.init(client.allocator);
- req.start(uri, headers) catch |err| {
- if (err == error.OutOfMemory) return error.OutOfMemory;
- const err_casted = @errSetCast(BufferedConnection.WriteError, err);
-
- client.last_error = .{ .write = err_casted };
- return error.WriteFailed;
- };
-
return req;
}
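Taken together, a hedged sketch of the updated client flow. This assumes `request` still writes the request head internally (its error set includes `Request.StartError`), that `Request` exposes a `deinit`, and that `gpa`, `client`, and `uri` are already initialized:

    var headers = http.Headers{ .allocator = gpa };
    defer headers.deinit();
    try headers.append("accept", "*/*");

    var req = try client.request(.GET, uri, headers, .{});
    defer req.deinit();

    try req.do(); // blocks until the final response head is parsed, following redirects

    var buf: [4096]u8 = undefined;
    const n = try req.readAll(&buf);
    std.debug.print("{s}", .{buf[0..n]});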
diff --git a/lib/std/http/Headers.zig b/lib/std/http/Headers.zig
new file mode 100644
index 0000000000..e84a890862
--- /dev/null
+++ b/lib/std/http/Headers.zig
@@ -0,0 +1,386 @@
+const std = @import("../std.zig");
+
+const Allocator = std.mem.Allocator;
+
+const testing = std.testing;
+const ascii = std.ascii;
+const assert = std.debug.assert;
+
+pub const HeaderList = std.ArrayListUnmanaged(Field);
+pub const HeaderIndexList = std.ArrayListUnmanaged(usize);
+pub const HeaderIndex = std.HashMapUnmanaged([]const u8, HeaderIndexList, CaseInsensitiveStringContext, std.hash_map.default_max_load_percentage);
+
+pub const CaseInsensitiveStringContext = struct {
+ pub fn hash(self: @This(), s: []const u8) u64 {
+ _ = self;
+ var buf: [64]u8 = undefined;
+ var i: usize = 0; // usize, not u8: header names longer than 255 bytes must not overflow the counter
+
+ var h = std.hash.Wyhash.init(0);
+ while (i < s.len) : (i += 64) {
+ const left = @min(64, s.len - i);
+ const ret = ascii.lowerString(buf[0..], s[i..][0..left]);
+ h.update(ret);
+ }
+
+ return h.final();
+ }
+
+ pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+ _ = self;
+ return ascii.eqlIgnoreCase(a, b);
+ }
+};
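The context hashes through a 64-byte stack buffer, lowercasing each chunk, so case-variant names land in the same bucket without allocating; a quick sketch of the invariant it provides:

    test "CaseInsensitiveStringContext is case-insensitive" {
        const ctx = CaseInsensitiveStringContext{};
        try testing.expect(ctx.eql("Content-Type", "content-type"));
        try testing.expectEqual(ctx.hash("X-Foo"), ctx.hash("x-foo"));
    }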
+
+pub const Field = struct {
+ name: []const u8,
+ value: []const u8,
+
+ pub fn modify(entry: *Field, allocator: Allocator, new_value: []const u8) !void {
+ if (entry.value.len == new_value.len) {
+ // Same length: reuse the existing buffer in place.
+ std.mem.copy(u8, @constCast(entry.value), new_value);
+ } else {
+ // Allocate first so that OutOfMemory leaves the entry intact, then free the old value.
+ // Copying a longer value into the old buffer would overflow it; a shorter one would leave stale bytes.
+ const duped = try allocator.dupe(u8, new_value);
+ allocator.free(entry.value);
+
+ entry.value = duped;
+ }
+ }
+
+ fn lessThan(ctx: void, a: Field, b: Field) bool {
+ _ = ctx;
+ if (a.name.ptr == b.name.ptr) return false;
+
+ return ascii.lessThanIgnoreCase(a.name, b.name);
+ }
+};
+
+pub const Headers = struct {
+ allocator: Allocator,
+ list: HeaderList = .{},
+ index: HeaderIndex = .{},
+
+ /// When this is false, names and values will not be duplicated.
+ /// Use with caution.
+ owned: bool = true,
+
+ pub fn init(allocator: Allocator) Headers {
+ return .{ .allocator = allocator };
+ }
+
+ pub fn deinit(headers: *Headers) void {
+ var it = headers.index.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.deinit(headers.allocator);
+
+ if (headers.owned) headers.allocator.free(entry.key_ptr.*);
+ }
+
+ for (headers.list.items) |entry| {
+ if (headers.owned) headers.allocator.free(entry.value);
+ }
+
+ headers.index.deinit(headers.allocator);
+ headers.list.deinit(headers.allocator);
+
+ headers.* = undefined;
+ }
+
+ /// Appends a header to the list. If the headers are `owned`, both name and value are copied.
+ pub fn append(headers: *Headers, name: []const u8, value: []const u8) !void {
+ const n = headers.list.items.len;
+
+ const value_duped = if (headers.owned) try headers.allocator.dupe(u8, value) else value;
+ errdefer if (headers.owned) headers.allocator.free(value_duped);
+
+ var entry = Field{ .name = undefined, .value = value_duped };
+
+ if (headers.index.getEntry(name)) |kv| {
+ entry.name = kv.key_ptr.*;
+ try kv.value_ptr.append(headers.allocator, n);
+ } else {
+ const name_duped = if (headers.owned) try headers.allocator.dupe(u8, name) else name;
+ errdefer if (headers.owned) headers.allocator.free(name_duped);
+
+ entry.name = name_duped;
+
+ var new_index = try HeaderIndexList.initCapacity(headers.allocator, 1);
+ errdefer new_index.deinit(headers.allocator);
+
+ new_index.appendAssumeCapacity(n);
+ try headers.index.put(headers.allocator, name_duped, new_index);
+ }
+
+ try headers.list.append(headers.allocator, entry);
+ }
+
+ pub fn contains(headers: Headers, name: []const u8) bool {
+ return headers.index.contains(name);
+ }
+
+ pub fn delete(headers: *Headers, name: []const u8) bool {
+ if (headers.index.fetchRemove(name)) |kv| {
+ var index = kv.value;
+
+ // iterate backwards
+ var i = index.items.len;
+ while (i > 0) {
+ i -= 1;
+ const data_index = index.items[i];
+ const removed = headers.list.orderedRemove(data_index);
+
+ assert(ascii.eqlIgnoreCase(removed.name, name)); // ensure the index hasn't been corrupted
+ if (headers.owned) headers.allocator.free(removed.value);
+ }
+
+ if (headers.owned) headers.allocator.free(kv.key);
+ index.deinit(headers.allocator);
+ headers.rebuildIndex();
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /// Returns the index of the first occurrence of a header with the given name.
+ pub fn firstIndexOf(headers: Headers, name: []const u8) ?usize {
+ const index = headers.index.get(name) orelse return null;
+
+ return index.items[0];
+ }
+
+ /// Returns the indices of all headers with the given name.
+ pub fn getIndices(headers: Headers, name: []const u8) ?[]const usize {
+ const index = headers.index.get(name) orelse return null;
+
+ return index.items;
+ }
+
+ /// Returns the entry of the first occurrence of a header with the given name.
+ pub fn getFirstEntry(headers: Headers, name: []const u8) ?Field {
+ const first_index = headers.firstIndexOf(name) orelse return null;
+
+ return headers.list.items[first_index];
+ }
+
+ /// Returns a slice containing each header with the given name.
+ /// The caller owns the returned slice, but NOT the values in the slice.
+ pub fn getEntries(headers: Headers, allocator: Allocator, name: []const u8) !?[]const Field {
+ const indices = headers.getIndices(name) orelse return null;
+
+ const buf = try allocator.alloc(Field, indices.len);
+ for (indices, 0..) |idx, n| {
+ buf[n] = headers.list.items[idx];
+ }
+
+ return buf;
+ }
+
+ /// Returns the value of the first occurrence of a header with the given name.
+ pub fn getFirstValue(headers: Headers, name: []const u8) ?[]const u8 {
+ const first_index = headers.firstIndexOf(name) orelse return null;
+
+ return headers.list.items[first_index].value;
+ }
+
+ /// Returns a slice containing the value of each header with the given name.
+ /// The caller owns the returned slice, but NOT the values in the slice.
+ pub fn getValues(headers: Headers, allocator: Allocator, name: []const u8) !?[]const []const u8 {
+ const indices = headers.getIndices(name) orelse return null;
+
+ const buf = try allocator.alloc([]const u8, indices.len);
+ for (indices, 0..) |idx, n| {
+ buf[n] = headers.list.items[idx].value;
+ }
+
+ return buf;
+ }
+
+ fn rebuildIndex(headers: *Headers) void {
+ // clear out the indexes
+ var it = headers.index.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.shrinkRetainingCapacity(0);
+ }
+
+ // fill up indexes again; we know capacity is fine from before
+ for (headers.list.items, 0..) |entry, i| {
+ headers.index.getEntry(entry.name).?.value_ptr.appendAssumeCapacity(i);
+ }
+ }
+
+ /// Sorts the headers in lexicographical order.
+ pub fn sort(headers: *Headers) void {
+ std.sort.sort(Field, headers.list.items, {}, Field.lessThan);
+ headers.rebuildIndex();
+ }
+
+ /// Writes the headers to the given stream.
+ pub fn format(
+ headers: Headers,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ out_stream: anytype,
+ ) !void {
+ _ = fmt;
+ _ = options;
+
+ for (headers.list.items) |entry| {
+ if (entry.value.len == 0) continue;
+
+ try out_stream.writeAll(entry.name);
+ try out_stream.writeAll(": ");
+ try out_stream.writeAll(entry.value);
+ try out_stream.writeAll("\r\n");
+ }
+ }
+
+ /// Writes all of the headers with the given name to the given stream, separated by commas.
+ ///
+ /// This is useful for headers that can appear multiple times, since a recipient may combine them into one comma-separated value (RFC 9110, Section 5.2). `Set-Cookie` is the notable exception and must not be combined this way.
+ pub fn formatCommaSeparated(
+ headers: Headers,
+ name: []const u8,
+ out_stream: anytype,
+ ) !void {
+ const indices = headers.getIndices(name) orelse return;
+
+ try out_stream.writeAll(name);
+ try out_stream.writeAll(": ");
+
+ for (indices, 0..) |idx, n| {
+ if (n != 0) try out_stream.writeAll(", ");
+ try out_stream.writeAll(headers.list.items[idx].value);
+ }
+
+ try out_stream.writeAll("\r\n");
+ }
+};
+
+test "Headers.append" {
+ var h = Headers{ .allocator = std.testing.allocator };
+ defer h.deinit();
+
+ try h.append("foo", "bar");
+ try h.append("hello", "world");
+
+ try testing.expect(h.contains("Foo"));
+ try testing.expect(!h.contains("Bar"));
+}
+
+test "Headers.delete" {
+ var h = Headers{ .allocator = std.testing.allocator };
+ defer h.deinit();
+
+ try h.append("foo", "bar");
+ try h.append("hello", "world");
+
+ try testing.expect(h.contains("Foo"));
+
+ _ = h.delete("Foo");
+
+ try testing.expect(!h.contains("foo"));
+}
+
+test "Headers consistency" {
+ var h = Headers{ .allocator = std.testing.allocator };
+ defer h.deinit();
+
+ try h.append("foo", "bar");
+ try h.append("hello", "world");
+ _ = h.delete("Foo");
+
+ try h.append("foo", "bar");
+ try h.append("bar", "world");
+ try h.append("foo", "baz");
+ try h.append("baz", "hello");
+
+ try testing.expectEqual(@as(?usize, 0), h.firstIndexOf("hello"));
+ try testing.expectEqual(@as(?usize, 1), h.firstIndexOf("foo"));
+ try testing.expectEqual(@as(?usize, 2), h.firstIndexOf("bar"));
+ try testing.expectEqual(@as(?usize, 4), h.firstIndexOf("baz"));
+ try testing.expectEqual(@as(?usize, null), h.firstIndexOf("pog"));
+
+ try testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("hello").?);
+ try testing.expectEqualSlices(usize, &[_]usize{ 1, 3 }, h.getIndices("foo").?);
+ try testing.expectEqualSlices(usize, &[_]usize{2}, h.getIndices("bar").?);
+ try testing.expectEqualSlices(usize, &[_]usize{4}, h.getIndices("baz").?);
+ try testing.expectEqual(@as(?[]const usize, null), h.getIndices("pog"));
+
+ try testing.expectEqualStrings("world", h.getFirstEntry("hello").?.value);
+ try testing.expectEqualStrings("bar", h.getFirstEntry("foo").?.value);
+ try testing.expectEqualStrings("world", h.getFirstEntry("bar").?.value);
+ try testing.expectEqualStrings("hello", h.getFirstEntry("baz").?.value);
+
+ const hello_entries = (try h.getEntries(testing.allocator, "hello")).?;
+ defer testing.allocator.free(hello_entries);
+ try testing.expectEqualDeep(@as([]const Field, &[_]Field{
+ .{ .name = "hello", .value = "world" },
+ }), hello_entries);
+
+ const foo_entries = (try h.getEntries(testing.allocator, "foo")).?;
+ defer testing.allocator.free(foo_entries);
+ try testing.expectEqualDeep(@as([]const Field, &[_]Field{
+ .{ .name = "foo", .value = "bar" },
+ .{ .name = "foo", .value = "baz" },
+ }), foo_entries);
+
+ const bar_entries = (try h.getEntries(testing.allocator, "bar")).?;
+ defer testing.allocator.free(bar_entries);
+ try testing.expectEqualDeep(@as([]const Field, &[_]Field{
+ .{ .name = "bar", .value = "world" },
+ }), bar_entries);
+
+ const baz_entries = (try h.getEntries(testing.allocator, "baz")).?;
+ defer testing.allocator.free(baz_entries);
+ try testing.expectEqualDeep(@as([]const Field, &[_]Field{
+ .{ .name = "baz", .value = "hello" },
+ }), baz_entries);
+
+ const pog_entries = (try h.getEntries(testing.allocator, "pog"));
+ try testing.expectEqual(@as(?[]const Field, null), pog_entries);
+
+ try testing.expectEqualStrings("world", h.getFirstValue("hello").?);
+ try testing.expectEqualStrings("bar", h.getFirstValue("foo").?);
+ try testing.expectEqualStrings("world", h.getFirstValue("bar").?);
+ try testing.expectEqualStrings("hello", h.getFirstValue("baz").?);
+ try testing.expectEqual(@as(?[]const u8, null), h.getFirstValue("pog"));
+
+ const hello_values = (try h.getValues(testing.allocator, "hello")).?;
+ defer testing.allocator.free(hello_values);
+ try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"world"}), hello_values);
+
+ const foo_values = (try h.getValues(testing.allocator, "foo")).?;
+ defer testing.allocator.free(foo_values);
+ try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{ "bar", "baz" }), foo_values);
+
+ const bar_values = (try h.getValues(testing.allocator, "bar")).?;
+ defer testing.allocator.free(bar_values);
+ try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"world"}), bar_values);
+
+ const baz_values = (try h.getValues(testing.allocator, "baz")).?;
+ defer testing.allocator.free(baz_values);
+ try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"hello"}), baz_values);
+
+ const pog_values = (try h.getValues(testing.allocator, "pog"));
+ try testing.expectEqual(@as(?[]const []const u8, null), pog_values);
+
+ h.sort();
+
+ try testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("bar").?);
+ try testing.expectEqualSlices(usize, &[_]usize{1}, h.getIndices("baz").?);
+ try testing.expectEqualSlices(usize, &[_]usize{ 2, 3 }, h.getIndices("foo").?);
+ try testing.expectEqualSlices(usize, &[_]usize{4}, h.getIndices("hello").?);
+
+ const formatted_values = try std.fmt.allocPrint(testing.allocator, "{}", .{h});
+ defer testing.allocator.free(formatted_values);
+
+ try testing.expectEqualStrings("bar: world\r\nbaz: hello\r\nfoo: bar\r\nfoo: baz\r\nhello: world\r\n", formatted_values);
+
+ var buf: [128]u8 = undefined;
+ var fbs = std.io.fixedBufferStream(&buf);
+ const writer = fbs.writer();
+
+ try h.formatCommaSeparated("foo", writer);
+ try testing.expectEqualStrings("foo: bar, baz\r\n", fbs.getWritten());
+}
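One non-obvious point in the API above: with `owned = false`, `append` stores the caller's slices directly and `deinit` frees only the index bookkeeping, which is how the client and server wrap trailer bytes (which live in the parser's buffer) in a `Headers` without copying. A sketch of the caveat, with `name` and `value` as placeholder slices that must outlive the set:

    var trailers = http.Headers{ .allocator = gpa, .owned = false };
    defer trailers.deinit(); // frees the index lists, but not names or values

    try trailers.append(name, value); // no duplication; the slices are borrowed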
diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig
index 85fbf25265..94efb94d79 100644
--- a/lib/std/http/Server.zig
+++ b/lib/std/http/Server.zig
@@ -23,21 +23,33 @@ pub const Connection = struct {
pub const Protocol = enum { plain };
- pub fn read(conn: *Connection, buffer: []u8) !usize {
- switch (conn.protocol) {
- .plain => return conn.stream.read(buffer),
+ pub fn read(conn: *Connection, buffer: []u8) ReadError!usize {
+ return switch (conn.protocol) {
+ .plain => conn.stream.read(buffer),
// .tls => return conn.tls_client.read(conn.stream, buffer),
- }
+ } catch |err| switch (err) {
+ error.ConnectionTimedOut => return error.ConnectionTimedOut,
+ error.ConnectionResetByPeer, error.BrokenPipe => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedReadFailure,
+ };
}
- pub fn readAtLeast(conn: *Connection, buffer: []u8, len: usize) !usize {
- switch (conn.protocol) {
- .plain => return conn.stream.readAtLeast(buffer, len),
+ pub fn readAtLeast(conn: *Connection, buffer: []u8, len: usize) ReadError!usize {
+ return switch (conn.protocol) {
+ .plain => conn.stream.readAtLeast(buffer, len),
// .tls => return conn.tls_client.readAtLeast(conn.stream, buffer, len),
- }
+ } catch |err| switch (err) {
+ error.ConnectionTimedOut => return error.ConnectionTimedOut,
+ error.ConnectionResetByPeer, error.BrokenPipe => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedReadFailure,
+ };
}
- pub const ReadError = net.Stream.ReadError;
+ pub const ReadError = error{
+ ConnectionTimedOut,
+ ConnectionResetByPeer,
+ UnexpectedReadFailure,
+ };
pub const Reader = std.io.Reader(*Connection, ReadError, read);
@@ -45,21 +57,31 @@ pub const Connection = struct {
return Reader{ .context = conn };
}
- pub fn writeAll(conn: *Connection, buffer: []const u8) !void {
- switch (conn.protocol) {
- .plain => return conn.stream.writeAll(buffer),
+ pub fn writeAll(conn: *Connection, buffer: []const u8) WriteError!void {
+ return switch (conn.protocol) {
+ .plain => conn.stream.writeAll(buffer),
// .tls => return conn.tls_client.writeAll(conn.stream, buffer),
- }
+ } catch |err| switch (err) {
+ error.BrokenPipe, error.ConnectionResetByPeer => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedWriteFailure,
+ };
}
- pub fn write(conn: *Connection, buffer: []const u8) !usize {
- switch (conn.protocol) {
- .plain => return conn.stream.write(buffer),
+ pub fn write(conn: *Connection, buffer: []const u8) WriteError!usize {
+ return switch (conn.protocol) {
+ .plain => conn.stream.write(buffer),
// .tls => return conn.tls_client.write(conn.stream, buffer),
- }
+ } catch |err| switch (err) {
+ error.BrokenPipe, error.ConnectionResetByPeer => return error.ConnectionResetByPeer,
+ else => return error.UnexpectedWriteFailure,
+ };
}
- pub const WriteError = net.Stream.WriteError || error{};
+ pub const WriteError = error{
+ ConnectionResetByPeer,
+ UnexpectedWriteFailure,
+ };
+
pub const Writer = std.io.Writer(*Connection, WriteError, write);
pub fn writer(conn: *Connection) Writer {
@@ -155,136 +177,142 @@ pub const BufferedConnection = struct {
}
};
-/// A HTTP request originating from a client.
-pub const Request = struct {
- pub const Headers = struct {
- method: http.Method,
- target: []const u8,
- version: http.Version,
- content_length: ?u64 = null,
- transfer_encoding: ?http.TransferEncoding = null,
- transfer_compression: ?http.ContentEncoding = null,
- connection: http.Connection = .close,
- host: ?[]const u8 = null,
-
- pub const ParseError = error{
- ShortHttpStatusLine,
- BadHttpVersion,
- UnknownHttpMethod,
- HttpHeadersInvalid,
- HttpHeaderContinuationsUnsupported,
- HttpTransferEncodingUnsupported,
- HttpConnectionHeaderUnsupported,
- InvalidCharacter,
- };
+/// The mode of transport for responses.
+pub const ResponseTransfer = union(enum) {
+ content_length: u64,
+ chunked: void,
+ none: void,
+};
- pub fn parse(bytes: []const u8) !Headers {
- var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
+/// The decompressor for request messages.
+pub const Compression = union(enum) {
+ pub const DeflateDecompressor = std.compress.zlib.ZlibStream(Response.TransferReader);
+ pub const GzipDecompressor = std.compress.gzip.Decompress(Response.TransferReader);
+ pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Response.TransferReader, .{});
- const first_line = it.next() orelse return error.HttpHeadersInvalid;
- if (first_line.len < 10)
- return error.ShortHttpStatusLine;
+ deflate: DeflateDecompressor,
+ gzip: GzipDecompressor,
+ zstd: ZstdDecompressor,
+ none: void,
+};
- const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
- const method_str = first_line[0..method_end];
- const method = std.meta.stringToEnum(http.Method, method_str) orelse return error.UnknownHttpMethod;
+/// An HTTP request originating from a client.
+pub const Request = struct {
+ pub const ParseError = Allocator.Error || error{
+ ShortHttpStatusLine,
+ BadHttpVersion,
+ UnknownHttpMethod,
+ HttpHeadersInvalid,
+ HttpHeaderContinuationsUnsupported,
+ HttpTransferEncodingUnsupported,
+ HttpConnectionHeaderUnsupported,
+ InvalidContentLength,
+ CompressionNotSupported,
+ };
- const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
- if (version_start == method_end) return error.HttpHeadersInvalid;
+ pub fn parse(req: *Request, bytes: []const u8) ParseError!void {
+ var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
- const version_str = first_line[version_start + 1 ..];
- if (version_str.len != 8) return error.HttpHeadersInvalid;
- const version: http.Version = switch (int64(version_str[0..8])) {
- int64("HTTP/1.0") => .@"HTTP/1.0",
- int64("HTTP/1.1") => .@"HTTP/1.1",
- else => return error.BadHttpVersion,
- };
+ const first_line = it.next() orelse return error.HttpHeadersInvalid;
+ if (first_line.len < 10)
+ return error.ShortHttpStatusLine;
- const target = first_line[method_end + 1 .. version_start];
+ const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
+ const method_str = first_line[0..method_end];
+ const method = std.meta.stringToEnum(http.Method, method_str) orelse return error.UnknownHttpMethod;
- var headers: Headers = .{
- .method = method,
- .target = target,
- .version = version,
- };
+ const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
+ if (version_start == method_end) return error.HttpHeadersInvalid;
- while (it.next()) |line| {
- if (line.len == 0) return error.HttpHeadersInvalid;
- switch (line[0]) {
- ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
- else => {},
- }
+ const version_str = first_line[version_start + 1 ..];
+ if (version_str.len != 8) return error.HttpHeadersInvalid;
+ const version: http.Version = switch (int64(version_str[0..8])) {
+ int64("HTTP/1.0") => .@"HTTP/1.0",
+ int64("HTTP/1.1") => .@"HTTP/1.1",
+ else => return error.BadHttpVersion,
+ };
- var line_it = mem.tokenize(u8, line, ": ");
- const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
- const header_value = line_it.rest();
- if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
- if (headers.content_length != null) return error.HttpHeadersInvalid;
- headers.content_length = try std.fmt.parseInt(u64, header_value, 10);
- } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
- // Transfer-Encoding: second, first
- // Transfer-Encoding: deflate, chunked
- var iter = mem.splitBackwards(u8, header_value, ",");
-
- if (iter.next()) |first| {
- const trimmed = mem.trim(u8, first, " ");
-
- if (std.meta.stringToEnum(http.TransferEncoding, trimmed)) |te| {
- if (headers.transfer_encoding != null) return error.HttpHeadersInvalid;
- headers.transfer_encoding = te;
- } else if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
- if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
- headers.transfer_compression = ce;
- } else {
- return error.HttpTransferEncodingUnsupported;
- }
- }
+ const target = first_line[method_end + 1 .. version_start];
- if (iter.next()) |second| {
- if (headers.transfer_compression != null) return error.HttpTransferEncodingUnsupported;
+ req.method = method;
+ req.target = target;
+ req.version = version;
- const trimmed = mem.trim(u8, second, " ");
+ while (it.next()) |line| {
+ if (line.len == 0) return error.HttpHeadersInvalid;
+ switch (line[0]) {
+ ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
+ else => {},
+ }
- if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
- headers.transfer_compression = ce;
- } else {
- return error.HttpTransferEncodingUnsupported;
- }
+ var line_it = mem.tokenize(u8, line, ": ");
+ const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
+ const header_value = line_it.rest();
+
+ try req.headers.append(header_name, header_value);
+
+ if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
+ if (req.content_length != null) return error.HttpHeadersInvalid;
+ req.content_length = std.fmt.parseInt(u64, header_value, 10) catch return error.InvalidContentLength;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
+ // Transfer-Encoding: second, first
+ // Transfer-Encoding: deflate, chunked
+ var iter = mem.splitBackwards(u8, header_value, ",");
+
+ if (iter.next()) |first| {
+ const trimmed = mem.trim(u8, first, " ");
+
+ if (std.meta.stringToEnum(http.TransferEncoding, trimmed)) |te| {
+ if (req.transfer_encoding != null) return error.HttpHeadersInvalid;
+ req.transfer_encoding = te;
+ } else if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ if (req.transfer_compression != null) return error.HttpHeadersInvalid;
+ req.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
}
+ }
- if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
- } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
- if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
+ if (iter.next()) |second| {
+ if (req.transfer_compression != null) return error.HttpTransferEncodingUnsupported;
- const trimmed = mem.trim(u8, header_value, " ");
+ const trimmed = mem.trim(u8, second, " ");
if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
- headers.transfer_compression = ce;
+ req.transfer_compression = ce;
} else {
return error.HttpTransferEncodingUnsupported;
}
- } else if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
- if (std.ascii.eqlIgnoreCase(header_value, "keep-alive")) {
- headers.connection = .keep_alive;
- } else if (std.ascii.eqlIgnoreCase(header_value, "close")) {
- headers.connection = .close;
- } else {
- return error.HttpConnectionHeaderUnsupported;
- }
- } else if (std.ascii.eqlIgnoreCase(header_name, "host")) {
- headers.host = header_value;
}
- }
- return headers;
- }
+ if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
+ if (req.transfer_compression != null) return error.HttpHeadersInvalid;
+
+ const trimmed = mem.trim(u8, header_value, " ");
- inline fn int64(array: *const [8]u8) u64 {
- return @bitCast(u64, array.*);
+ if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ req.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
+ }
+ }
}
- };
+ }
- headers: Headers = undefined,
+ inline fn int64(array: *const [8]u8) u64 {
+ return @bitCast(u64, array.*);
+ }
+
+ method: http.Method,
+ target: []const u8,
+ version: http.Version,
+
+ content_length: ?u64 = null,
+ transfer_encoding: ?http.TransferEncoding = null,
+ transfer_compression: ?http.ContentEncoding = null,
+
+ headers: http.Headers = undefined,
parser: proto.HeadersParser,
compression: Compression = .none,
};
@@ -295,23 +323,17 @@ pub const Request = struct {
/// Order of operations: accept -> wait -> do [ -> write -> finish][ -> reset /]
/// \ -> read /
pub const Response = struct {
- pub const Headers = struct {
- version: http.Version = .@"HTTP/1.1",
- status: http.Status = .ok,
- reason: ?[]const u8 = null,
+ version: http.Version = .@"HTTP/1.1",
+ status: http.Status = .ok,
+ reason: ?[]const u8 = null,
- server: ?[]const u8 = "zig (std.http)",
- connection: http.Connection = .keep_alive,
- transfer_encoding: RequestTransfer = .none,
-
- custom: []const http.CustomHeader = &[_]http.CustomHeader{},
- };
+ transfer_encoding: ResponseTransfer = .none,
server: *Server,
address: net.Address,
connection: BufferedConnection,
- headers: Headers = .{},
+ headers: http.Headers,
request: Request,
/// Reset this response to its initial state. This must be called before handling a second request on the same connection.
@@ -341,46 +363,61 @@ pub const Response = struct {
}
}
+ pub const DoError = BufferedConnection.WriteError || error{ UnsupportedTransferEncoding, InvalidContentLength };
+
/// Send the response headers.
pub fn do(res: *Response) !void {
var buffered = std.io.bufferedWriter(res.connection.writer());
const w = buffered.writer();
- try w.writeAll(@tagName(res.headers.version));
+ try w.writeAll(@tagName(res.version));
try w.writeByte(' ');
- try w.print("{d}", .{@enumToInt(res.headers.status)});
+ try w.print("{d}", .{@enumToInt(res.status)});
try w.writeByte(' ');
- if (res.headers.reason) |reason| {
+ if (res.reason) |reason| {
try w.writeAll(reason);
- } else if (res.headers.status.phrase()) |phrase| {
+ } else if (res.status.phrase()) |phrase| {
try w.writeAll(phrase);
}
+ try w.writeAll("\r\n");
- if (res.headers.server) |server| {
- try w.writeAll("\r\nServer: ");
- try w.writeAll(server);
+ if (!res.headers.contains("server")) {
+ try w.writeAll("Server: zig (std.http)\r\n");
}
- if (res.headers.connection == .close) {
- try w.writeAll("\r\nConnection: close");
- } else {
- try w.writeAll("\r\nConnection: keep-alive");
+ if (!res.headers.contains("connection")) {
+ try w.writeAll("Connection: keep-alive\r\n");
}
- switch (res.headers.transfer_encoding) {
- .chunked => try w.writeAll("\r\nTransfer-Encoding: chunked"),
- .content_length => |content_length| try w.print("\r\nContent-Length: {d}", .{content_length}),
- .none => {},
- }
+ const has_transfer_encoding = res.headers.contains("transfer-encoding");
+ const has_content_length = res.headers.contains("content-length");
- for (res.headers.custom) |header| {
- try w.writeAll("\r\n");
- try w.writeAll(header.name);
- try w.writeAll(": ");
- try w.writeAll(header.value);
+ if (!has_transfer_encoding and !has_content_length) {
+ switch (res.transfer_encoding) {
+ .chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
+ .content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
+ .none => {},
+ }
+ } else {
+ if (has_content_length) {
+ const content_length = std.fmt.parseInt(u64, res.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
+
+ res.transfer_encoding = .{ .content_length = content_length };
+ } else if (has_transfer_encoding) {
+ const transfer_encoding = res.headers.getFirstValue("transfer-encoding").?;
+ if (std.mem.eql(u8, transfer_encoding, "chunked")) {
+ res.transfer_encoding = .chunked;
+ } else {
+ return error.UnsupportedTransferEncoding;
+ }
+ } else {
+ res.transfer_encoding = .none;
+ }
}
- try w.writeAll("\r\n\r\n");
+ try w.print("{}", .{res.headers});
+
+ try w.writeAll("\r\n");
try buffered.flush();
}
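A hedged sketch of driving the rewritten response path from a handler, where `res` is the `*Response` returned by `accept` and the names match the fields above:

    res.status = .ok;
    try res.headers.append("content-type", "text/plain");
    res.transfer_encoding = .{ .content_length = 5 };

    try res.do(); // writes the status line plus headers
    try res.writeAll("hello");
    try res.finish(); // content_length has reached zero, so this succeeds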
@@ -393,23 +430,23 @@ pub const Response = struct {
return .{ .context = res };
}
- pub fn transferRead(res: *Response, buf: []u8) TransferReadError!usize {
- if (res.request.parser.isComplete()) return 0;
+ fn transferRead(res: *Response, buf: []u8) TransferReadError!usize {
+ if (res.request.parser.done) return 0;
var index: usize = 0;
while (index == 0) {
const amt = try res.request.parser.read(&res.connection, buf[index..], false);
- if (amt == 0 and res.request.parser.isComplete()) break;
+ if (amt == 0 and res.request.parser.done) break;
index += amt;
}
return index;
}
- pub const WaitForCompleteHeadError = BufferedConnection.ReadError || proto.HeadersParser.WaitForCompleteHeadError || Request.Headers.ParseError || error{ BadHeader, InvalidCompression, StreamTooLong, InvalidWindowSize } || error{CompressionNotSupported};
+ pub const WaitError = BufferedConnection.ReadError || proto.HeadersParser.CheckCompleteHeadError || Request.ParseError || error{ CompressionInitializationFailed, CompressionNotSupported };
/// Wait for the client to send a complete request head.
- pub fn wait(res: *Response) !void {
+ pub fn wait(res: *Response) WaitError!void {
while (true) {
try res.connection.fill();
@@ -419,22 +456,28 @@ pub const Response = struct {
if (res.request.parser.state.isContent()) break;
}
- res.request.headers = try Request.Headers.parse(res.request.parser.header_bytes.items);
+ res.request.headers = .{ .allocator = res.server.allocator, .owned = true };
+ try res.request.parse(res.request.parser.header_bytes.items);
+
+ const res_connection = res.headers.getFirstValue("connection");
+ const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?);
- if (res.headers.connection == .keep_alive and res.request.headers.connection == .keep_alive) {
+ const req_connection = res.request.headers.getFirstValue("connection");
+ const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);
+ if (res_keepalive and req_keepalive) {
res.connection.conn.closing = false;
} else {
res.connection.conn.closing = true;
}
- if (res.request.headers.transfer_encoding) |te| {
+ if (res.request.transfer_encoding) |te| {
switch (te) {
.chunked => {
res.request.parser.next_chunk_length = 0;
res.request.parser.state = .chunk_head_size;
},
}
- } else if (res.request.headers.content_length) |cl| {
+ } else if (res.request.content_length) |cl| {
res.request.parser.next_chunk_length = cl;
if (cl == 0) res.request.parser.done = true;
@@ -443,13 +486,13 @@ pub const Response = struct {
}
if (!res.request.parser.done) {
- if (res.request.headers.transfer_compression) |tc| switch (tc) {
+ if (res.request.transfer_compression) |tc| switch (tc) {
.compress => return error.CompressionNotSupported,
.deflate => res.request.compression = .{
- .deflate = try std.compress.zlib.zlibStream(res.server.allocator, res.transferReader()),
+ .deflate = std.compress.zlib.zlibStream(res.server.allocator, res.transferReader()) catch return error.CompressionInitializationFailed,
},
.gzip => res.request.compression = .{
- .gzip = try std.compress.gzip.decompress(res.server.allocator, res.transferReader()),
+ .gzip = std.compress.gzip.decompress(res.server.allocator, res.transferReader()) catch return error.CompressionInitializationFailed,
},
.zstd => res.request.compression = .{
.zstd = std.compress.zstd.decompressStream(res.server.allocator, res.transferReader()),
@@ -458,7 +501,7 @@ pub const Response = struct {
}
}
- pub const ReadError = Compression.DeflateDecompressor.Error || Compression.GzipDecompressor.Error || Compression.ZstdDecompressor.Error || WaitForCompleteHeadError;
+ pub const ReadError = TransferReadError || proto.HeadersParser.CheckCompleteHeadError || error{ DecompressionFailure, InvalidTrailers };
pub const Reader = std.io.Reader(*Response, ReadError, read);
@@ -467,12 +510,33 @@ pub const Response = struct {
}
pub fn read(res: *Response, buffer: []u8) ReadError!usize {
- return switch (res.request.compression) {
- .deflate => |*deflate| try deflate.read(buffer),
- .gzip => |*gzip| try gzip.read(buffer),
- .zstd => |*zstd| try zstd.read(buffer),
+ const out_index = switch (res.request.compression) {
+ .deflate => |*deflate| deflate.read(buffer) catch return error.DecompressionFailure,
+ .gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
+ .zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
else => try res.transferRead(buffer),
};
+
+ if (out_index == 0) {
+ const has_trail = !res.request.parser.state.isContent();
+
+ while (!res.request.parser.state.isContent()) { // read trailing headers
+ try res.connection.fill();
+
+ const nchecked = try res.request.parser.checkCompleteHead(res.server.allocator, res.connection.peek());
+ res.connection.clear(@intCast(u16, nchecked));
+ }
+
+ if (has_trail) {
+ res.request.headers = http.Headers{ .allocator = res.server.allocator, .owned = false };
+
+ // The request headers parsed before the trailers are already known to be valid, so re-parsing them cannot fail.
+ // parse will *only* fail here on a malformed trailer.
+ res.request.parse(res.request.parser.header_bytes.items) catch return error.InvalidTrailers;
+ }
+ }
+
+ return out_index;
}
pub fn readAll(res: *Response, buffer: []u8) !usize {
@@ -495,7 +559,7 @@ pub const Response = struct {
/// Write `bytes` to the server. The `transfer_encoding` request header determines how data will be sent.
pub fn write(res: *Response, bytes: []const u8) WriteError!usize {
- switch (res.headers.transfer_encoding) {
+ switch (res.transfer_encoding) {
.chunked => {
try res.connection.writer().print("{x}\r\n", .{bytes.len});
try res.connection.writeAll(bytes);
@@ -514,35 +578,25 @@ pub const Response = struct {
}
}
+ pub fn writeAll(res: *Response, bytes: []const u8) WriteError!void {
+ var index: usize = 0;
+ while (index < bytes.len) {
+ index += try write(res, bytes[index..]);
+ }
+ }
+
+ pub const FinishError = WriteError || error{MessageNotCompleted};
+
/// Finish the body of a request. This notifies the server that you have no more data to send.
- pub fn finish(res: *Response) !void {
- switch (res.headers.transfer_encoding) {
- .chunked => try res.connection.writeAll("0\r\n"),
+ pub fn finish(res: *Response) FinishError!void {
+ switch (res.transfer_encoding) {
+ .chunked => try res.connection.writeAll("0\r\n\r\n"),
.content_length => |len| if (len != 0) return error.MessageNotCompleted,
.none => {},
}
}
};
-/// The mode of transport for responses.
-pub const RequestTransfer = union(enum) {
- content_length: u64,
- chunked: void,
- none: void,
-};
-
-/// The decompressor for request messages.
-pub const Compression = union(enum) {
- pub const DeflateDecompressor = std.compress.zlib.ZlibStream(Response.TransferReader);
- pub const GzipDecompressor = std.compress.gzip.Decompress(Response.TransferReader);
- pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Response.TransferReader, .{});
-
- deflate: DeflateDecompressor,
- gzip: GzipDecompressor,
- zstd: ZstdDecompressor,
- none: void,
-};
-
pub fn init(allocator: Allocator, options: net.StreamServer.Options) Server {
return .{
.allocator = allocator,
@@ -588,7 +642,11 @@ pub fn accept(server: *Server, options: HeaderStrategy) AcceptError!*Response {
.stream = in.stream,
.protocol = .plain,
} },
+ .headers = .{ .allocator = server.allocator },
.request = .{
+ .version = undefined,
+ .method = undefined,
+ .target = undefined,
.parser = switch (options) {
.dynamic => |max| proto.HeadersParser.initDynamic(max),
.static => |buf| proto.HeadersParser.initStatic(buf),
diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig
index 5e63d3092b..0d661bb31f 100644
--- a/lib/std/http/protocol.zig
+++ b/lib/std/http/protocol.zig
@@ -1,4 +1,4 @@
-const std = @import("std");
+const std = @import("../std.zig");
const testing = std.testing;
const mem = std.mem;
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 4e4e7c489e..aa2fbd48dd 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -935,9 +935,6 @@ pub const Mutable = struct {
/// The upper bound for r limb count is `b.limbs.len`.
/// The upper bound for q limb count is given by `a.limbs`.
///
- /// If `allocator` is provided, it will be used for temporary storage to improve
- /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
- ///
/// `limbs_buffer` is used for temporary storage. The amount required is given by `calcDivLimbsBufferLen`.
pub fn divFloor(
q: *Mutable,
@@ -1065,9 +1062,6 @@ pub const Mutable = struct {
/// The upper bound for r limb count is `b.limbs.len`.
/// The upper bound for q limb count is given by `a.limbs.len`.
///
- /// If `allocator` is provided, it will be used for temporary storage to improve
- /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
- ///
/// `limbs_buffer` is used for temporary storage. The amount required is given by `calcDivLimbsBufferLen`.
pub fn divTrunc(
q: *Mutable,
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index fcdcc71282..6b48b7ebde 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -3492,8 +3492,7 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type {
if (comptime !trait.is(.Pointer)(B) or
(meta.Child(B) != [size]u8 and meta.Child(B) != [size:0]u8))
{
- comptime var buf: [100]u8 = undefined;
- @compileError(std.fmt.bufPrint(&buf, "expected *[{}]u8, passed " ++ @typeName(B), .{size}) catch unreachable);
+ @compileError(std.fmt.comptimePrint("expected *[{}]u8, passed " ++ @typeName(B), .{size}));
}
return CopyPtrAttrs(B, .One, T);
diff --git a/lib/std/os/windows/user32.zig b/lib/std/os/windows/user32.zig
index 2566d0179f..211b9d8462 100644
--- a/lib/std/os/windows/user32.zig
+++ b/lib/std/os/windows/user32.zig
@@ -28,13 +28,11 @@ const POINT = windows.POINT;
const HCURSOR = windows.HCURSOR;
const HBRUSH = windows.HBRUSH;
-fn selectSymbol(comptime function_static: anytype, function_dynamic: *const @TypeOf(function_static), comptime os: std.Target.Os.WindowsVersion) *const @TypeOf(function_static) {
- comptime {
- const sym_ok = builtin.os.isAtLeast(.windows, os);
- if (sym_ok == true) return function_static;
- if (sym_ok == null) return function_dynamic;
- if (sym_ok == false) @compileError("Target OS range does not support function, at least " ++ @tagName(os) ++ " is required");
- }
+inline fn selectSymbol(comptime function_static: anytype, function_dynamic: *const @TypeOf(function_static), comptime os: std.Target.Os.WindowsVersion) *const @TypeOf(function_static) {
+ const sym_ok = comptime builtin.os.isAtLeast(.windows, os);
+ if (sym_ok == true) return function_static;
+ if (sym_ok == null) return function_dynamic;
+ if (sym_ok == false) @compileError("Target OS range does not support function, at least " ++ @tagName(os) ++ " is required");
}
// === Messages ===
diff --git a/lib/std/target/riscv.zig b/lib/std/target/riscv.zig
index 58ae3588de..0e72fa6e96 100644
--- a/lib/std/target/riscv.zig
+++ b/lib/std/target/riscv.zig
@@ -748,6 +748,7 @@ pub const cpu = struct {
.name = "baseline_rv32",
.llvm_name = null,
.features = featureSet(&[_]Feature{
+ .@"32bit",
.a,
.c,
.d,
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 1afc0e2e18..5ad8f8a07e 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -18,6 +18,19 @@ test "zig fmt: transform old for loop syntax to new" {
);
}
+test "zig fmt: remove extra whitespace at start and end of file with comment between" {
+ try testTransform(
+ \\
+ \\
+ \\// hello
+ \\
+ \\
+ ,
+ \\// hello
+ \\
+ );
+}
+
test "zig fmt: tuple struct" {
try testCanonical(
\\const T = struct {
@@ -527,17 +540,6 @@ test "zig fmt: allow empty line before commment at start of block" {
);
}
-test "zig fmt: allow empty line before commment at start of block" {
- try testCanonical(
- \\test {
- \\
- \\ // foo
- \\ const x = 42;
- \\}
- \\
- );
-}
-
test "zig fmt: trailing comma in fn parameter list" {
try testCanonical(
\\pub fn f(
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 61d789c3c4..2cb1d170b4 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -2512,7 +2512,7 @@ const Space = enum {
/// In either case, a newline will be inserted afterwards.
semicolon,
/// Skip rendering whitespace and comments. If this is used, the caller
- /// *must* handle handle whitespace and comments manually.
+ /// *must* handle whitespace and comments manually.
skip,
};
@@ -2702,7 +2702,7 @@ fn renderIdentifierContents(writer: anytype, bytes: []const u8) !void {
}
},
0x00...('\\' - 1), ('\\' + 1)...0x7f => {
- const buf = [1]u8{@intCast(u8, byte)};
+ const buf = [1]u8{byte};
try std.fmt.format(writer, "{}", .{std.zig.fmtEscapes(&buf)});
pos += 1;
},
@@ -2782,7 +2782,7 @@ fn renderComments(ais: *Ais, tree: Ast, start: usize, end: usize) Error!bool {
const comment_content = mem.trimLeft(u8, trimmed_comment["//".len..], &std.ascii.whitespace);
if (ais.disabled_offset != null and mem.eql(u8, comment_content, "zig fmt: on")) {
// Write the source for which formatting was disabled directly
- // to the underlying writer, fixing up invaild whitespace.
+ // to the underlying writer, fixing up invalid whitespace.
const disabled_source = tree.source[ais.disabled_offset.?..comment_start];
try writeFixingWhitespace(ais.underlying_writer, disabled_source);
// Write with the canonical single space.
@@ -2799,7 +2799,10 @@ fn renderComments(ais: *Ais, tree: Ast, start: usize, end: usize) Error!bool {
}
if (index != start and mem.containsAtLeast(u8, tree.source[index - 1 .. end], 2, "\n")) {
- try ais.insertNewline();
+ // Don't leave any whitespace at the end of the file
+ if (end != tree.source.len) {
+ try ais.insertNewline();
+ }
}
return index != start;