aboutsummaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/std/build.zig33
-rw-r--r--lib/std/c.zig8
-rw-r--r--lib/std/c/darwin.zig14
-rw-r--r--lib/std/compress/deflate.zig110
-rw-r--r--lib/std/compress/zlib.zig13
-rw-r--r--lib/std/crypto.zig37
-rw-r--r--lib/std/crypto/25519/curve25519.zig8
-rw-r--r--lib/std/crypto/25519/ed25519.zig106
-rw-r--r--lib/std/crypto/25519/edwards25519.zig6
-rw-r--r--lib/std/crypto/25519/ristretto255.zig12
-rw-r--r--lib/std/crypto/25519/x25519.zig114
-rw-r--r--lib/std/crypto/aegis.zig6
-rw-r--r--lib/std/crypto/aes/aesni.zig12
-rw-r--r--lib/std/crypto/aes_gcm.zig6
-rw-r--r--lib/std/crypto/bcrypt.zig303
-rw-r--r--lib/std/crypto/benchmark.zig33
-rw-r--r--lib/std/crypto/blake2.zig24
-rw-r--r--lib/std/crypto/blake3.zig235
-rw-r--r--lib/std/crypto/chacha20.zig16
-rw-r--r--lib/std/crypto/gimli.zig8
-rw-r--r--lib/std/crypto/hkdf.zig6
-rw-r--r--lib/std/crypto/hmac.zig39
-rw-r--r--lib/std/crypto/salsa20.zig622
-rw-r--r--lib/std/elf.zig10
-rw-r--r--lib/std/event/loop.zig13
-rw-r--r--lib/std/fmt.zig7
-rw-r--r--lib/std/io/auto_indenting_stream.zig6
-rw-r--r--lib/std/io/change_detection_stream.zig6
-rw-r--r--lib/std/io/find_byte_out_stream.zig6
-rw-r--r--lib/std/math.zig23
-rw-r--r--lib/std/mem.zig14
-rw-r--r--lib/std/net.zig46
-rw-r--r--lib/std/net/test.zig75
-rw-r--r--lib/std/os.zig64
-rw-r--r--lib/std/os/bits/linux.zig58
-rw-r--r--lib/std/os/bits/linux/mips.zig3
-rw-r--r--lib/std/os/bits/linux/powerpc64.zig6
-rw-r--r--lib/std/os/bits/openbsd.zig4
-rw-r--r--lib/std/os/bits/windows.zig35
-rw-r--r--lib/std/os/linux.zig7
-rw-r--r--lib/std/os/linux/io_uring.zig1218
-rw-r--r--lib/std/os/linux/powerpc64.zig6
-rw-r--r--lib/std/os/test.zig2
-rw-r--r--lib/std/os/windows/ws2_32.zig42
-rw-r--r--lib/std/special/docs/main.js35
-rw-r--r--lib/std/testing.zig7
46 files changed, 3063 insertions, 401 deletions
diff --git a/lib/std/build.zig b/lib/std/build.zig
index e88a1a42ba..a1ac3f88f2 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -907,6 +907,9 @@ pub const Builder = struct {
install_dir: InstallDir,
dest_rel_path: []const u8,
) *InstallFileStep {
+ if (dest_rel_path.len == 0) {
+ panic("dest_rel_path must be non-empty", .{});
+ }
const install_step = self.allocator.create(InstallFileStep) catch unreachable;
install_step.* = InstallFileStep.init(self, src_path, install_dir, dest_rel_path);
return install_step;
@@ -1816,7 +1819,7 @@ pub const LibExeObjStep = struct {
},
else => {},
}
- out.print("pub const {z} = {};\n", .{ name, value }) catch unreachable;
+ out.print("pub const {z}: {} = {};\n", .{ name, @typeName(T), value }) catch unreachable;
}
/// The value is the path in the cache dir.
@@ -2751,6 +2754,34 @@ test "Builder.dupePkg()" {
std.testing.expect(dupe_deps[0].path.ptr != pkg_dep.path.ptr);
}
+test "LibExeObjStep.addBuildOption" {
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+ var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
+ defer arena.deinit();
+ var builder = try Builder.create(
+ &arena.allocator,
+ "test",
+ "test",
+ "test",
+ );
+ defer builder.destroy();
+
+ var exe = builder.addExecutable("not_an_executable", "/not/an/executable.zig");
+ exe.addBuildOption(usize, "option1", 1);
+ exe.addBuildOption(?usize, "option2", null);
+ exe.addBuildOption([]const u8, "string", "zigisthebest");
+ exe.addBuildOption(?[]const u8, "optional_string", null);
+
+ std.testing.expectEqualStrings(
+ \\pub const option1: usize = 1;
+ \\pub const option2: ?usize = null;
+ \\pub const string: []const u8 = "zigisthebest";
+ \\pub const optional_string: ?[]const u8 = null;
+ \\
+ , exe.build_options_contents.items);
+}
+
test "LibExeObjStep.addPackage" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
diff --git a/lib/std/c.zig b/lib/std/c.zig
index a1bca68976..7d2d200e7e 100644
--- a/lib/std/c.zig
+++ b/lib/std/c.zig
@@ -124,7 +124,7 @@ pub extern "c" fn readlinkat(dirfd: fd_t, noalias path: [*:0]const u8, noalias b
pub usingnamespace switch (builtin.os.tag) {
.macos, .ios, .watchos, .tvos => struct {
pub const realpath = @"realpath$DARWIN_EXTSN";
- pub const fstatat = @"fstatat$INODE64";
+ pub const fstatat = _fstatat;
},
else => struct {
pub extern "c" fn realpath(noalias file_name: [*:0]const u8, noalias resolved_name: [*]u8) ?[*:0]u8;
@@ -150,8 +150,8 @@ pub extern "c" fn socketpair(domain: c_uint, sock_type: c_uint, protocol: c_uint
pub extern "c" fn listen(sockfd: fd_t, backlog: c_uint) c_int;
pub extern "c" fn getsockname(sockfd: fd_t, noalias addr: *sockaddr, noalias addrlen: *socklen_t) c_int;
pub extern "c" fn connect(sockfd: fd_t, sock_addr: *const sockaddr, addrlen: socklen_t) c_int;
-pub extern "c" fn accept(sockfd: fd_t, addr: *sockaddr, addrlen: *socklen_t) c_int;
-pub extern "c" fn accept4(sockfd: fd_t, addr: *sockaddr, addrlen: *socklen_t, flags: c_uint) c_int;
+pub extern "c" fn accept(sockfd: fd_t, addr: ?*sockaddr, addrlen: ?*socklen_t) c_int;
+pub extern "c" fn accept4(sockfd: fd_t, addr: ?*sockaddr, addrlen: ?*socklen_t, flags: c_uint) c_int;
pub extern "c" fn getsockopt(sockfd: fd_t, level: u32, optname: u32, optval: ?*c_void, optlen: *socklen_t) c_int;
pub extern "c" fn setsockopt(sockfd: fd_t, level: u32, optname: u32, optval: ?*const c_void, optlen: socklen_t) c_int;
pub extern "c" fn send(sockfd: fd_t, buf: *const c_void, len: usize, flags: u32) isize;
@@ -194,7 +194,7 @@ pub usingnamespace switch (builtin.os.tag) {
// XXX: getdirentries -> _getdirentries64
pub extern "c" fn clock_getres(clk_id: c_int, tp: *timespec) c_int;
pub extern "c" fn clock_gettime(clk_id: c_int, tp: *timespec) c_int;
- pub const fstat = @"fstat$INODE64";
+ pub const fstat = _fstat;
pub extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
pub extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 22b4d14732..e0acd6c746 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -29,8 +29,18 @@ pub extern "c" fn fcopyfile(from: fd_t, to: fd_t, state: ?copyfile_state_t, flag
pub extern "c" fn @"realpath$DARWIN_EXTSN"(noalias file_name: [*:0]const u8, noalias resolved_name: [*]u8) ?[*:0]u8;
pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) isize;
-pub extern "c" fn @"fstat$INODE64"(fd: fd_t, buf: *Stat) c_int;
-pub extern "c" fn @"fstatat$INODE64"(dirfd: fd_t, path_name: [*:0]const u8, buf: *Stat, flags: u32) c_int;
+
+extern "c" fn fstat(fd: fd_t, buf: *Stat) c_int;
+/// On x86_64 Darwin, fstat has to be manually linked with $INODE64 suffix to force 64bit version.
+/// Note that this is fixed on aarch64 and no longer necessary.
+extern "c" fn @"fstat$INODE64"(fd: fd_t, buf: *Stat) c_int;
+pub const _fstat = if (builtin.arch == .aarch64) fstat else @"fstat$INODE64";
+
+extern "c" fn fstatat(dirfd: fd_t, path: [*:0]const u8, stat_buf: *Stat, flags: u32) c_int;
+/// On x86_64 Darwin, fstatat has to be manually linked with $INODE64 suffix to force 64bit version.
+/// Note that this is fixed on aarch64 and no longer necessary.
+extern "c" fn @"fstatat$INODE64"(dirfd: fd_t, path_name: [*:0]const u8, buf: *Stat, flags: u32) c_int;
+pub const _fstatat = if (builtin.arch == .aarch64) fstatat else @"fstatat$INODE64";
pub extern "c" fn mach_absolute_time() u64;
pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;
diff --git a/lib/std/compress/deflate.zig b/lib/std/compress/deflate.zig
index 9fe96cacb7..2697fd5b86 100644
--- a/lib/std/compress/deflate.zig
+++ b/lib/std/compress/deflate.zig
@@ -27,6 +27,8 @@ const FIXLCODES = 288;
const PREFIX_LUT_BITS = 9;
const Huffman = struct {
+ const LUTEntry = packed struct { symbol: u16 align(4), len: u16 };
+
// Number of codes for each possible length
count: [MAXBITS + 1]u16,
// Mapping between codes and symbols
@@ -40,19 +42,23 @@ const Huffman = struct {
// canonical Huffman code and we have to decode it using a slower method.
//
// [1] https://github.com/madler/zlib/blob/v1.2.11/doc/algorithm.txt#L58
- prefix_lut: [1 << PREFIX_LUT_BITS]u16,
- prefix_lut_len: [1 << PREFIX_LUT_BITS]u16,
+ prefix_lut: [1 << PREFIX_LUT_BITS]LUTEntry,
// The following info refer to the codes of length PREFIX_LUT_BITS+1 and are
// used to bootstrap the bit-by-bit reading method if the fast-path fails.
last_code: u16,
last_index: u16,
+ min_code_len: u16,
+
fn construct(self: *Huffman, code_length: []const u16) !void {
for (self.count) |*val| {
val.* = 0;
}
+ self.min_code_len = math.maxInt(u16);
for (code_length) |len| {
+ if (len != 0 and len < self.min_code_len)
+ self.min_code_len = len;
self.count[len] += 1;
}
@@ -85,39 +91,38 @@ const Huffman = struct {
}
}
- self.prefix_lut_len = mem.zeroes(@TypeOf(self.prefix_lut_len));
+ self.prefix_lut = mem.zeroes(@TypeOf(self.prefix_lut));
for (code_length) |len, symbol| {
if (len != 0) {
// Fill the symbol table.
// The symbols are assigned sequentially for each length.
self.symbol[offset[len]] = @truncate(u16, symbol);
- // Track the last assigned offset
+ // Track the last assigned offset.
offset[len] += 1;
}
if (len == 0 or len > PREFIX_LUT_BITS)
continue;
- // Given a Huffman code of length N we have to massage it so
- // that it becomes an index in the lookup table.
- // The bit order is reversed as the fast path reads the bit
- // sequence MSB to LSB using an &, the order is flipped wrt the
- // one obtained by reading bit-by-bit.
- // The codes are prefix-free, if the prefix matches we can
- // safely ignore the trail bits. We do so by replicating the
- // symbol info for each combination of the trailing bits.
+ // Given a Huffman code of length N we transform it into an index
+ // into the lookup table by reversing its bits and filling the
+ // remaining bits (PREFIX_LUT_BITS - N) with every possible
+ // combination of bits to act as a wildcard.
const bits_to_fill = @intCast(u5, PREFIX_LUT_BITS - len);
- const rev_code = bitReverse(codes[len], len);
- // Track the last used code, but only for lengths < PREFIX_LUT_BITS
+ const rev_code = bitReverse(u16, codes[len], len);
+
+ // Track the last used code, but only for lengths < PREFIX_LUT_BITS.
codes[len] += 1;
var j: usize = 0;
while (j < @as(usize, 1) << bits_to_fill) : (j += 1) {
const index = rev_code | (j << @intCast(u5, len));
- assert(self.prefix_lut_len[index] == 0);
- self.prefix_lut[index] = @truncate(u16, symbol);
- self.prefix_lut_len[index] = @truncate(u16, len);
+ assert(self.prefix_lut[index].len == 0);
+ self.prefix_lut[index] = .{
+ .symbol = @truncate(u16, symbol),
+ .len = @truncate(u16, len),
+ };
}
}
@@ -126,14 +131,10 @@ const Huffman = struct {
}
};
-// Reverse bit-by-bit a N-bit value
-fn bitReverse(x: usize, N: usize) usize {
- var tmp: usize = 0;
- var i: usize = 0;
- while (i < N) : (i += 1) {
- tmp |= ((x >> @intCast(u5, i)) & 1) << @intCast(u5, N - i - 1);
- }
- return tmp;
+// Reverse bit-by-bit a N-bit code.
+fn bitReverse(comptime T: type, value: T, N: usize) T {
+ const r = @bitReverse(T, value);
+ return r >> @intCast(math.Log2Int(T), @typeInfo(T).Int.bits - N);
}
pub fn InflateStream(comptime ReaderType: type) type {
@@ -269,8 +270,8 @@ pub fn InflateStream(comptime ReaderType: type) type {
hdist: *Huffman,
hlen: *Huffman,
- // Temporary buffer for the bitstream, only bits 0..`bits_left` are
- // considered valid.
+ // Temporary buffer for the bitstream.
+ // Bits 0..`bits_left` are filled with data, the remaining ones are zeros.
bits: u32,
bits_left: usize,
@@ -280,7 +281,8 @@ pub fn InflateStream(comptime ReaderType: type) type {
self.bits |= @as(u32, byte) << @intCast(u5, self.bits_left);
self.bits_left += 8;
}
- return self.bits & ((@as(u32, 1) << @intCast(u5, bits)) - 1);
+ const mask = (@as(u32, 1) << @intCast(u5, bits)) - 1;
+ return self.bits & mask;
}
fn readBits(self: *Self, bits: usize) !u32 {
const val = self.peekBits(bits);
@@ -293,8 +295,8 @@ pub fn InflateStream(comptime ReaderType: type) type {
}
fn stored(self: *Self) !void {
- // Discard the remaining bits, the lenght field is always
- // byte-aligned (and so is the data)
+ // Discard the remaining bits, the length field is always
+ // byte-aligned (and so is the data).
self.discardBits(self.bits_left);
const length = try self.inner_reader.readIntLittle(u16);
@@ -481,32 +483,52 @@ pub fn InflateStream(comptime ReaderType: type) type {
}
fn decode(self: *Self, h: *Huffman) !u16 {
- // Fast path, read some bits and hope they're prefixes of some code
- const prefix = try self.peekBits(PREFIX_LUT_BITS);
- if (h.prefix_lut_len[prefix] != 0) {
- self.discardBits(h.prefix_lut_len[prefix]);
- return h.prefix_lut[prefix];
+ // Using u32 instead of u16 to reduce the number of casts needed.
+ var prefix: u32 = 0;
+
+ // Fast path, read some bits and hope they're the prefix of some code.
+ // We can't read PREFIX_LUT_BITS as we don't want to read past the
+ // deflate stream end, use an incremental approach instead.
+ var code_len = h.min_code_len;
+ while (true) {
+ _ = try self.peekBits(code_len);
+ // Small optimization win, use as many bits as possible in the
+ // table lookup.
+ prefix = self.bits & ((1 << PREFIX_LUT_BITS) - 1);
+
+ const lut_entry = &h.prefix_lut[prefix];
+ // The code is longer than PREFIX_LUT_BITS!
+ if (lut_entry.len == 0)
+ break;
+ // If the code length doesn't increase we found a match.
+ if (lut_entry.len <= code_len) {
+ self.discardBits(code_len);
+ return lut_entry.symbol;
+ }
+
+ code_len = lut_entry.len;
}
// The sequence we've read is not a prefix of any code of length <=
- // PREFIX_LUT_BITS, keep decoding it using a slower method
- self.discardBits(PREFIX_LUT_BITS);
+ // PREFIX_LUT_BITS, keep decoding it using a slower method.
+ prefix = try self.readBits(PREFIX_LUT_BITS);
// Speed up the decoding by starting from the first code length
- // that's not covered by the table
+ // that's not covered by the table.
var len: usize = PREFIX_LUT_BITS + 1;
var first: usize = h.last_code;
var index: usize = h.last_index;
// Reverse the prefix so that the LSB becomes the MSB and make space
- // for the next bit
- var code = bitReverse(prefix, PREFIX_LUT_BITS + 1);
+ // for the next bit.
+ var code = bitReverse(u32, prefix, PREFIX_LUT_BITS + 1);
while (len <= MAXBITS) : (len += 1) {
code |= try self.readBits(1);
const count = h.count[len];
- if (code < first + count)
+ if (code < first + count) {
return h.symbol[index + (code - first)];
+ }
index += count;
first += count;
first <<= 1;
@@ -520,7 +542,7 @@ pub fn InflateStream(comptime ReaderType: type) type {
while (true) {
switch (self.state) {
.DecodeBlockHeader => {
- // The compressed stream is done
+ // The compressed stream is done.
if (self.seen_eos) return;
const last = @intCast(u1, try self.readBits(1));
@@ -528,7 +550,7 @@ pub fn InflateStream(comptime ReaderType: type) type {
self.seen_eos = last != 0;
- // The next state depends on the block type
+ // The next state depends on the block type.
switch (kind) {
0 => try self.stored(),
1 => try self.fixed(),
@@ -553,7 +575,7 @@ pub fn InflateStream(comptime ReaderType: type) type {
var tmp: [1]u8 = undefined;
if ((try self.inner_reader.read(&tmp)) != 1) {
// Unexpected end of stream, keep this error
- // consistent with the use of readBitsNoEof
+ // consistent with the use of readBitsNoEof.
return error.EndOfStream;
}
self.window.appendUnsafe(tmp[0]);
diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig
index d4bac4a8a4..63ef6c2aee 100644
--- a/lib/std/compress/zlib.zig
+++ b/lib/std/compress/zlib.zig
@@ -144,6 +144,19 @@ test "compressed data" {
);
}
+test "don't read past deflate stream's end" {
+ try testReader(
+ &[_]u8{
+ 0x08, 0xd7, 0x63, 0xf8, 0xcf, 0xc0, 0xc0, 0x00, 0xc1, 0xff,
+ 0xff, 0x43, 0x30, 0x03, 0x03, 0xc3, 0xff, 0xff, 0xff, 0x01,
+ 0x83, 0x95, 0x0b, 0xf5,
+ },
+ // SHA256 of
+ // 00ff 0000 00ff 0000 00ff 00ff ffff 00ff ffff 0000 0000 ffff ff
+ "3bbba1cc65408445c81abb61f3d2b86b1b60ee0d70b4c05b96d1499091a08c93",
+ );
+}
+
test "sanity checks" {
// Truncated header
testing.expectError(
diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig
index 0f6fa48d0a..eb45fb22e1 100644
--- a/lib/std/crypto.zig
+++ b/lib/std/crypto.zig
@@ -6,15 +6,18 @@
/// Authenticated Encryption with Associated Data
pub const aead = struct {
- const chacha20 = @import("crypto/chacha20.zig");
-
- pub const Gimli = @import("crypto/gimli.zig").Aead;
- pub const ChaCha20Poly1305 = chacha20.Chacha20Poly1305;
- pub const XChaCha20Poly1305 = chacha20.XChacha20Poly1305;
pub const Aegis128L = @import("crypto/aegis.zig").Aegis128L;
pub const Aegis256 = @import("crypto/aegis.zig").Aegis256;
+
pub const Aes128Gcm = @import("crypto/aes_gcm.zig").Aes128Gcm;
pub const Aes256Gcm = @import("crypto/aes_gcm.zig").Aes256Gcm;
+
+ pub const Gimli = @import("crypto/gimli.zig").Aead;
+
+ pub const ChaCha20Poly1305 = @import("crypto/chacha20.zig").Chacha20Poly1305;
+ pub const XChaCha20Poly1305 = @import("crypto/chacha20.zig").XChacha20Poly1305;
+
+ pub const XSalsa20Poly1305 = @import("crypto/salsa20.zig").XSalsa20Poly1305;
};
/// Authentication (MAC) functions.
@@ -50,13 +53,13 @@ pub const ecc = struct {
/// Hash functions.
pub const hash = struct {
+ pub const blake2 = @import("crypto/blake2.zig");
+ pub const Blake3 = @import("crypto/blake3.zig").Blake3;
+ pub const Gimli = @import("crypto/gimli.zig").Hash;
pub const Md5 = @import("crypto/md5.zig").Md5;
pub const Sha1 = @import("crypto/sha1.zig").Sha1;
pub const sha2 = @import("crypto/sha2.zig");
pub const sha3 = @import("crypto/sha3.zig");
- pub const blake2 = @import("crypto/blake2.zig");
- pub const Blake3 = @import("crypto/blake3.zig").Blake3;
- pub const Gimli = @import("crypto/gimli.zig").Hash;
};
/// Key derivation functions.
@@ -66,8 +69,8 @@ pub const kdf = struct {
/// MAC functions requiring single-use secret keys.
pub const onetimeauth = struct {
- pub const Poly1305 = @import("crypto/poly1305.zig").Poly1305;
pub const Ghash = @import("crypto/ghash.zig").Ghash;
+ pub const Poly1305 = @import("crypto/poly1305.zig").Poly1305;
};
/// A password hashing function derives a uniform key from low-entropy input material such as passwords.
@@ -87,6 +90,7 @@ pub const onetimeauth = struct {
///
/// Password hashing functions must be used whenever sensitive data has to be directly derived from a password.
pub const pwhash = struct {
+ pub const bcrypt = @import("crypto/bcrypt.zig");
pub const pbkdf2 = @import("crypto/pbkdf2.zig").pbkdf2;
};
@@ -99,8 +103,19 @@ pub const sign = struct {
/// Most applications should be using AEAD constructions instead of stream ciphers directly.
pub const stream = struct {
pub const ChaCha20IETF = @import("crypto/chacha20.zig").ChaCha20IETF;
- pub const XChaCha20IETF = @import("crypto/chacha20.zig").XChaCha20IETF;
pub const ChaCha20With64BitNonce = @import("crypto/chacha20.zig").ChaCha20With64BitNonce;
+ pub const XChaCha20IETF = @import("crypto/chacha20.zig").XChaCha20IETF;
+
+ pub const Salsa20 = @import("crypto/salsa20.zig").Salsa20;
+ pub const XSalsa20 = @import("crypto/salsa20.zig").XSalsa20;
+};
+
+pub const nacl = struct {
+ const salsa20 = @import("crypto/salsa20.zig");
+
+ pub const Box = salsa20.Box;
+ pub const SecretBox = salsa20.SecretBox;
+ pub const SealedBox = salsa20.SealedBox;
};
const std = @import("std.zig");
@@ -122,6 +137,7 @@ test "crypto" {
}
_ = @import("crypto/aes.zig");
+ _ = @import("crypto/bcrypt.zig");
_ = @import("crypto/blake2.zig");
_ = @import("crypto/blake3.zig");
_ = @import("crypto/chacha20.zig");
@@ -134,6 +150,7 @@ test "crypto" {
_ = @import("crypto/sha1.zig");
_ = @import("crypto/sha2.zig");
_ = @import("crypto/sha3.zig");
+ _ = @import("crypto/salsa20.zig");
_ = @import("crypto/siphash.zig");
_ = @import("crypto/25519/curve25519.zig");
_ = @import("crypto/25519/ed25519.zig");
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 7f180eac2f..3ca7af7a41 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -100,6 +100,14 @@ pub const Curve25519 = struct {
_ = ladder(p, cofactor, 4) catch |_| return error.WeakPublicKey;
return try ladder(p, s, 256);
}
+
+ /// Compute the Curve25519 equivalent to an Edwards25519 point.
+ pub fn fromEdwards25519(p: std.crypto.ecc.Edwards25519) !Curve25519 {
+ try p.clearCofactor().rejectIdentity();
+ const one = std.crypto.ecc.Edwards25519.Fe.one;
+ const x = one.add(p.y).mul(one.sub(p.y).invert()); // xMont=(1+yEd)/(1-yEd)
+ return Curve25519{ .x = x };
+ }
};
test "curve25519" {
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index abbba22f11..842b08d706 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -4,6 +4,8 @@
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
+const crypto = std.crypto;
+const debug = std.debug;
const fmt = std.fmt;
const mem = std.mem;
const Sha512 = std.crypto.hash.sha2.Sha512;
@@ -14,8 +16,8 @@ pub const Ed25519 = struct {
pub const Curve = @import("edwards25519.zig").Edwards25519;
/// Length (in bytes) of a seed required to create a key pair.
pub const seed_length = 32;
- /// Length (in bytes) of a compressed key pair.
- pub const keypair_length = 64;
+ /// Length (in bytes) of a compressed secret key.
+ pub const secret_length = 64;
/// Length (in bytes) of a compressed public key.
pub const public_length = 32;
/// Length (in bytes) of a signature.
@@ -23,41 +25,61 @@ pub const Ed25519 = struct {
/// Length (in bytes) of optional random bytes, for non-deterministic signatures.
pub const noise_length = 32;
- /// Derive a key pair from a secret seed.
- ///
- /// As in RFC 8032, an Ed25519 public key is generated by hashing
- /// the secret key using the SHA-512 function, and interpreting the
- /// bit-swapped, clamped lower-half of the output as the secret scalar.
- ///
- /// For this reason, an EdDSA secret key is commonly called a seed,
- /// from which the actual secret is derived.
- pub fn createKeyPair(seed: [seed_length]u8) ![keypair_length]u8 {
- var az: [Sha512.digest_length]u8 = undefined;
- var h = Sha512.init(.{});
- h.update(&seed);
- h.final(&az);
- const p = try Curve.basePoint.clampedMul(az[0..32].*);
- var keypair: [keypair_length]u8 = undefined;
- mem.copy(u8, &keypair, &seed);
- mem.copy(u8, keypair[seed_length..], &p.toBytes());
- return keypair;
- }
+ /// An Ed25519 key pair.
+ pub const KeyPair = struct {
+ /// Public part.
+ public_key: [public_length]u8,
+ /// Secret part. What we expose as a secret key is, under the hood, the concatenation of the seed and the public key.
+ secret_key: [secret_length]u8,
- /// Return the public key for a given key pair.
- pub fn publicKey(key_pair: [keypair_length]u8) [public_length]u8 {
- var public_key: [public_length]u8 = undefined;
- mem.copy(u8, public_key[0..], key_pair[seed_length..]);
- return public_key;
- }
+ /// Derive a key pair from an optional secret seed.
+ ///
+ /// As in RFC 8032, an Ed25519 public key is generated by hashing
+ /// the secret key using the SHA-512 function, and interpreting the
+ /// bit-swapped, clamped lower-half of the output as the secret scalar.
+ ///
+ /// For this reason, an EdDSA secret key is commonly called a seed,
+ /// from which the actual secret is derived.
+ pub fn create(seed: ?[seed_length]u8) !KeyPair {
+ const ss = seed orelse ss: {
+ var random_seed: [seed_length]u8 = undefined;
+ try crypto.randomBytes(&random_seed);
+ break :ss random_seed;
+ };
+ var az: [Sha512.digest_length]u8 = undefined;
+ var h = Sha512.init(.{});
+ h.update(&ss);
+ h.final(&az);
+ const p = try Curve.basePoint.clampedMul(az[0..32].*);
+ var sk: [secret_length]u8 = undefined;
+ mem.copy(u8, &sk, &ss);
+ const pk = p.toBytes();
+ mem.copy(u8, sk[seed_length..], &pk);
+
+ return KeyPair{ .public_key = pk, .secret_key = sk };
+ }
+
+ /// Create a KeyPair from a secret key.
+ pub fn fromSecretKey(secret_key: [secret_length]u8) KeyPair {
+ return KeyPair{
+ .secret_key = secret_key,
+ .public_key = secret_key[seed_length..].*,
+ };
+ }
+ };
/// Sign a message using a key pair, and optional random noise.
/// Having noise creates non-standard, non-deterministic signatures,
/// but has been proven to increase resilience against fault attacks.
- pub fn sign(msg: []const u8, key_pair: [keypair_length]u8, noise: ?[noise_length]u8) ![signature_length]u8 {
- const public_key = key_pair[32..];
+ pub fn sign(msg: []const u8, key_pair: KeyPair, noise: ?[noise_length]u8) ![signature_length]u8 {
+ const seed = key_pair.secret_key[0..seed_length];
+ const public_key = key_pair.secret_key[seed_length..];
+ if (!mem.eql(u8, public_key, &key_pair.public_key)) {
+ return error.KeyMismatch;
+ }
var az: [Sha512.digest_length]u8 = undefined;
var h = Sha512.init(.{});
- h.update(key_pair[0..seed_length]);
+ h.update(seed);
h.final(&az);
h = Sha512.init(.{});
@@ -186,50 +208,44 @@ pub const Ed25519 = struct {
test "ed25519 key pair creation" {
var seed: [32]u8 = undefined;
try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
- const key_pair = try Ed25519.createKeyPair(seed);
+ const key_pair = try Ed25519.KeyPair.create(seed);
var buf: [256]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
-
- const public_key = Ed25519.publicKey(key_pair);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{public_key}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair.secret_key}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair.public_key}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
}
test "ed25519 signature" {
var seed: [32]u8 = undefined;
try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
- const key_pair = try Ed25519.createKeyPair(seed);
+ const key_pair = try Ed25519.KeyPair.create(seed);
const sig = try Ed25519.sign("test", key_pair, null);
var buf: [128]u8 = undefined;
std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{sig}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
- const public_key = Ed25519.publicKey(key_pair);
- try Ed25519.verify(sig, "test", public_key);
- std.testing.expectError(error.InvalidSignature, Ed25519.verify(sig, "TEST", public_key));
+ try Ed25519.verify(sig, "test", key_pair.public_key);
+ std.testing.expectError(error.InvalidSignature, Ed25519.verify(sig, "TEST", key_pair.public_key));
}
test "ed25519 batch verification" {
var i: usize = 0;
while (i < 100) : (i += 1) {
- var seed: [32]u8 = undefined;
- try std.crypto.randomBytes(&seed);
- const key_pair = try Ed25519.createKeyPair(seed);
+ const key_pair = try Ed25519.KeyPair.create(null);
var msg1: [32]u8 = undefined;
var msg2: [32]u8 = undefined;
try std.crypto.randomBytes(&msg1);
try std.crypto.randomBytes(&msg2);
const sig1 = try Ed25519.sign(&msg1, key_pair, null);
const sig2 = try Ed25519.sign(&msg2, key_pair, null);
- const public_key = Ed25519.publicKey(key_pair);
var signature_batch = [_]Ed25519.BatchElement{
Ed25519.BatchElement{
.sig = sig1,
.msg = &msg1,
- .public_key = public_key,
+ .public_key = key_pair.public_key,
},
Ed25519.BatchElement{
.sig = sig2,
.msg = &msg2,
- .public_key = public_key,
+ .public_key = key_pair.public_key,
},
};
try Ed25519.verifyBatch(2, signature_batch);
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index 74ea89a952..3e34576f78 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -12,6 +12,8 @@ pub const Edwards25519 = struct {
pub const Fe = @import("field.zig").Fe;
/// Field arithmetic mod the order of the main subgroup.
pub const scalar = @import("scalar.zig");
+ /// Length in bytes of a compressed representation of a point.
+ pub const encoded_length: usize = 32;
x: Fe,
y: Fe,
@@ -21,7 +23,7 @@ pub const Edwards25519 = struct {
is_base: bool = false,
/// Decode an Edwards25519 point from its compressed (Y+sign) coordinates.
- pub fn fromBytes(s: [32]u8) !Edwards25519 {
+ pub fn fromBytes(s: [encoded_length]u8) !Edwards25519 {
const z = Fe.one;
const y = Fe.fromBytes(s);
var u = y.sq();
@@ -43,7 +45,7 @@ pub const Edwards25519 = struct {
}
/// Encode an Edwards25519 point.
- pub fn toBytes(p: Edwards25519) [32]u8 {
+ pub fn toBytes(p: Edwards25519) [encoded_length]u8 {
const zi = p.z.invert();
var s = p.y.mul(zi).toBytes();
s[31] ^= @as(u8, @boolToInt(p.x.mul(zi).isNegative())) << 7;
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index 4e6494ed38..16d301592a 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -14,6 +14,8 @@ pub const Ristretto255 = struct {
pub const Fe = Curve.Fe;
/// Field arithmetic mod the order of the main subgroup.
pub const scalar = Curve.scalar;
+ /// Length in byte of an encoded element.
+ pub const encoded_length: usize = 32;
p: Curve,
@@ -32,7 +34,7 @@ pub const Ristretto255 = struct {
return .{ .ratio_is_square = @boolToInt(has_m_root) | @boolToInt(has_p_root), .root = x.abs() };
}
- fn rejectNonCanonical(s: [32]u8) !void {
+ fn rejectNonCanonical(s: [encoded_length]u8) !void {
if ((s[0] & 1) != 0) {
return error.NonCanonical;
}
@@ -48,7 +50,7 @@ pub const Ristretto255 = struct {
pub const basePoint = Ristretto255{ .p = Curve.basePoint };
/// Decode a Ristretto255 representative.
- pub fn fromBytes(s: [32]u8) !Ristretto255 {
+ pub fn fromBytes(s: [encoded_length]u8) !Ristretto255 {
try rejectNonCanonical(s);
const s_ = Fe.fromBytes(s);
const ss = s_.sq(); // s^2
@@ -78,7 +80,7 @@ pub const Ristretto255 = struct {
}
/// Encode to a Ristretto255 representative.
- pub fn toBytes(e: Ristretto255) [32]u8 {
+ pub fn toBytes(e: Ristretto255) [encoded_length]u8 {
const p = &e.p;
var u1_ = p.z.add(p.y); // Z+Y
const zmy = p.z.sub(p.y); // Z-Y
@@ -151,7 +153,7 @@ pub const Ristretto255 = struct {
/// Multiply a Ristretto255 element with a scalar.
/// Return error.WeakPublicKey if the resulting element is
/// the identity element.
- pub inline fn mul(p: Ristretto255, s: [32]u8) !Ristretto255 {
+ pub inline fn mul(p: Ristretto255, s: [encoded_length]u8) !Ristretto255 {
return Ristretto255{ .p = try p.p.mul(s) };
}
@@ -170,7 +172,7 @@ test "ristretto255" {
var buf: [256]u8 = undefined;
std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
- var r: [32]u8 = undefined;
+ var r: [Ristretto255.encoded_length]u8 = undefined;
try fmt.hexToBytes(r[0..], "6a493210f7499cd17fecb510ae0cea23a110e8d5b901f8acadd3095c73a3b919");
var q = try Ristretto255.fromBytes(r);
q = q.dbl().add(p);
diff --git a/lib/std/crypto/25519/x25519.zig b/lib/std/crypto/25519/x25519.zig
index dc1bd5a5ef..3b3ff551fe 100644
--- a/lib/std/crypto/25519/x25519.zig
+++ b/lib/std/crypto/25519/x25519.zig
@@ -4,59 +4,90 @@
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
+const crypto = std.crypto;
const mem = std.mem;
const fmt = std.fmt;
+const Sha512 = crypto.hash.sha2.Sha512;
+
/// X25519 DH function.
pub const X25519 = struct {
/// The underlying elliptic curve.
pub const Curve = @import("curve25519.zig").Curve25519;
/// Length (in bytes) of a secret key.
pub const secret_length = 32;
+ /// Length (in bytes) of a public key.
+ pub const public_length = 32;
/// Length (in bytes) of the output of the DH function.
- pub const key_length = 32;
+ pub const shared_length = 32;
+ /// Seed (for key pair creation) length in bytes.
+ pub const seed_length = 32;
+
+ /// An X25519 key pair.
+ pub const KeyPair = struct {
+ /// Public part.
+ public_key: [public_length]u8,
+ /// Secret part.
+ secret_key: [secret_length]u8,
+
+ /// Create a new key pair using an optional seed.
+ pub fn create(seed: ?[seed_length]u8) !KeyPair {
+ const sk = seed orelse sk: {
+ var random_seed: [seed_length]u8 = undefined;
+ try crypto.randomBytes(&random_seed);
+ break :sk random_seed;
+ };
+ var kp: KeyPair = undefined;
+ mem.copy(u8, &kp.secret_key, sk[0..]);
+ kp.public_key = try X25519.recoverPublicKey(sk);
+ return kp;
+ }
- /// Compute the public key for a given private key.
- pub fn createPublicKey(public_key: []u8, private_key: []const u8) bool {
- std.debug.assert(private_key.len >= key_length);
- std.debug.assert(public_key.len >= key_length);
- var s: [32]u8 = undefined;
- mem.copy(u8, &s, private_key[0..32]);
- if (Curve.basePoint.clampedMul(s)) |q| {
- mem.copy(u8, public_key, q.toBytes()[0..]);
- return true;
- } else |_| {
- return false;
+ /// Create a key pair from an Ed25519 key pair
+ pub fn fromEd25519(ed25519_key_pair: crypto.sign.Ed25519.KeyPair) !KeyPair {
+ const seed = ed25519_key_pair.secret_key[0..32];
+ var az: [Sha512.digest_length]u8 = undefined;
+ Sha512.hash(seed, &az, .{});
+ var sk = az[0..32].*;
+ Curve.scalar.clamp(&sk);
+ const pk = try publicKeyFromEd25519(ed25519_key_pair.public_key);
+ return KeyPair{
+ .public_key = pk,
+ .secret_key = sk,
+ };
}
+ };
+
+ /// Compute the public key for a given private key.
+ pub fn recoverPublicKey(secret_key: [secret_length]u8) ![public_length]u8 {
+ const q = try Curve.basePoint.clampedMul(secret_key);
+ return q.toBytes();
+ }
+
+    /// Compute the X25519 equivalent to an Ed25519 public key.
+ pub fn publicKeyFromEd25519(ed25519_public_key: [crypto.sign.Ed25519.public_length]u8) ![public_length]u8 {
+ const pk_ed = try crypto.ecc.Edwards25519.fromBytes(ed25519_public_key);
+ const pk = try Curve.fromEdwards25519(pk_ed);
+ return pk.toBytes();
}
/// Compute the scalar product of a public key and a secret scalar.
/// Note that the output should not be used as a shared secret without
/// hashing it first.
- pub fn create(out: []u8, private_key: []const u8, public_key: []const u8) bool {
- std.debug.assert(out.len >= secret_length);
- std.debug.assert(private_key.len >= key_length);
- std.debug.assert(public_key.len >= key_length);
- var s: [32]u8 = undefined;
- var b: [32]u8 = undefined;
- mem.copy(u8, &s, private_key[0..32]);
- mem.copy(u8, &b, public_key[0..32]);
- if (Curve.fromBytes(b).clampedMul(s)) |q| {
- mem.copy(u8, out, q.toBytes()[0..]);
- return true;
- } else |_| {
- return false;
- }
+ pub fn scalarmult(secret_key: [secret_length]u8, public_key: [public_length]u8) ![shared_length]u8 {
+ const q = try Curve.fromBytes(public_key).clampedMul(secret_key);
+ return q.toBytes();
}
};
+const htest = @import("../test.zig");
+
test "x25519 public key calculation from secret key" {
var sk: [32]u8 = undefined;
var pk_expected: [32]u8 = undefined;
- var pk_calculated: [32]u8 = undefined;
try fmt.hexToBytes(sk[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
try fmt.hexToBytes(pk_expected[0..], "f1814f0e8ff1043d8a44d25babff3cedcae6c22c3edaa48f857ae70de2baae50");
- std.testing.expect(X25519.createPublicKey(pk_calculated[0..], &sk));
+ const pk_calculated = try X25519.recoverPublicKey(sk);
std.testing.expectEqual(pk_calculated, pk_expected);
}
@@ -66,9 +97,7 @@ test "x25519 rfc7748 vector1" {
const expected_output = [32]u8{ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 };
- var output: [32]u8 = undefined;
-
- std.testing.expect(X25519.create(output[0..], secret_key[0..], public_key[0..]));
+ const output = try X25519.scalarmult(secret_key, public_key);
std.testing.expectEqual(output, expected_output);
}
@@ -78,9 +107,7 @@ test "x25519 rfc7748 vector2" {
const expected_output = [32]u8{ 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8, 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 };
- var output: [32]u8 = undefined;
-
- std.testing.expect(X25519.create(output[0..], secret_key[0..], public_key[0..]));
+ const output = try X25519.scalarmult(secret_key, public_key);
std.testing.expectEqual(output, expected_output);
}
@@ -93,9 +120,7 @@ test "x25519 rfc7748 one iteration" {
var i: usize = 0;
while (i < 1) : (i += 1) {
- var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], &k, &u));
-
+ const output = try X25519.scalarmult(k, u);
mem.copy(u8, u[0..], k[0..]);
mem.copy(u8, k[0..], output[0..]);
}
@@ -117,9 +142,7 @@ test "x25519 rfc7748 1,000 iterations" {
var i: usize = 0;
while (i < 1000) : (i += 1) {
- var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], &k, &u));
-
+ const output = try X25519.scalarmult(&k, &u);
mem.copy(u8, u[0..], k[0..]);
mem.copy(u8, k[0..], output[0..]);
}
@@ -140,12 +163,17 @@ test "x25519 rfc7748 1,000,000 iterations" {
var i: usize = 0;
while (i < 1000000) : (i += 1) {
- var output: [32]u8 = undefined;
- std.testing.expect(X25519.create(output[0..], &k, &u));
-
+ const output = try X25519.scalarmult(&k, &u);
mem.copy(u8, u[0..], k[0..]);
mem.copy(u8, k[0..], output[0..]);
}
std.testing.expectEqual(k[0..], expected_output);
}
+
+test "edwards25519 -> curve25519 map" {
+ const ed_kp = try crypto.sign.Ed25519.KeyPair.create([_]u8{0x42} ** 32);
+ const mont_kp = try X25519.KeyPair.fromEd25519(ed_kp);
+ htest.assertEqual("90e7595fc89e52fdfddce9c6a43d74dbf6047025ee0462d2d172e8b6a2841d6e", &mont_kp.secret_key);
+ htest.assertEqual("cc4f2cdb695dd766f34118eb67b98652fed1d8bc49c330b119bbfa8a64989378", &mont_kp.public_key);
+}
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index f91c13a06c..f3060ef615 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
diff --git a/lib/std/crypto/aes/aesni.zig b/lib/std/crypto/aes/aesni.zig
index 5f605a4e60..3d694875bf 100644
--- a/lib/std/crypto/aes/aesni.zig
+++ b/lib/std/crypto/aes/aesni.zig
@@ -100,8 +100,18 @@ pub const Block = struct {
/// Perform operations on multiple blocks in parallel.
pub const parallel = struct {
+ const cpu = std.Target.x86.cpu;
+
/// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation.
- pub const optimal_parallel_blocks = 8;
+ pub const optimal_parallel_blocks = switch (std.Target.current.cpu.model) {
+ &cpu.westmere => 6,
+ &cpu.sandybridge, &cpu.ivybridge => 8,
+ &cpu.haswell, &cpu.broadwell => 7,
+ &cpu.cannonlake, &cpu.skylake, &cpu.skylake_avx512 => 4,
+ &cpu.icelake_client, &cpu.icelake_server => 6,
+ &cpu.znver1, &cpu.znver2 => 8,
+ else => 8,
+ };
/// Encrypt multiple blocks in parallel, each their own round key.
pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
diff --git a/lib/std/crypto/aes_gcm.zig b/lib/std/crypto/aes_gcm.zig
index ee88804388..e57decb2b2 100644
--- a/lib/std/crypto/aes_gcm.zig
+++ b/lib/std/crypto/aes_gcm.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
const std = @import("std");
const assert = std.debug.assert;
const builtin = std.builtin;
diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig
new file mode 100644
index 0000000000..179d904494
--- /dev/null
+++ b/lib/std/crypto/bcrypt.zig
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
+const std = @import("std");
+const crypto = std.crypto;
+const fmt = std.fmt;
+const math = std.math;
+const mem = std.mem;
+const debug = std.debug;
+const testing = std.testing;
+
+const salt_length: usize = 16;
+const salt_str_length: usize = 22;
+const ct_str_length: usize = 31;
+const ct_length: usize = 24;
+
+/// Length (in bytes) of a password hash
+pub const hash_length: usize = 60;
+
+pub const BcryptError = error{
+ /// The hashed password cannot be decoded.
+ InvalidEncoding,
+ /// The hash is not valid for the given password.
+ InvalidPassword,
+};
+
+const State = struct {
+ sboxes: [4][256]u32 = [4][256]u32{
+ .{ 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 
0x4afcb56c, 0x2dd1d35b, 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a },
+ .{ 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 
0x319ee9d5, 0xc021b8f7, 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7 },
+ .{ 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 
0x7745ae04, 0xd736fccc, 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0 },
+ .{ 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 
0x8df9317c, 0xe0b12b4f, 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6 },
+ },
+ subkeys: [18]u32 = [18]u32{ 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b },
+
+ fn toWord(data: []const u8, current: *usize) u32 {
+ var t: u32 = 0;
+ var j = current.*;
+ var i: usize = 0;
+ while (i < 4) : (i += 1) {
+ if (j >= data.len) j = 0;
+ t = (t << 8) | data[j];
+ j += 1;
+ }
+ current.* = j;
+ return t;
+ }
+
+ fn expand0(state: *State, key: []const u8) void {
+ var i: usize = 0;
+ var j: usize = 0;
+ var t: u32 = undefined;
+ while (i < state.subkeys.len) : (i += 1) {
+ state.subkeys[i] ^= toWord(key, &j);
+ }
+
+ var halves = Halves{ .l = 0, .r = 0 };
+ i = 0;
+ while (i < 18) : (i += 2) {
+ state.encipher(&halves);
+ state.subkeys[i] = halves.l;
+ state.subkeys[i + 1] = halves.r;
+ }
+
+ i = 0;
+ while (i < 4) : (i += 1) {
+ var k: usize = 0;
+ while (k < 256) : (k += 2) {
+ state.encipher(&halves);
+ state.sboxes[i][k] = halves.l;
+ state.sboxes[i][k + 1] = halves.r;
+ }
+ }
+ }
+
+ fn expand(state: *State, data: []const u8, key: []const u8) void {
+ var i: usize = 0;
+ var j: usize = 0;
+ var t: u32 = undefined;
+ while (i < state.subkeys.len) : (i += 1) {
+ state.subkeys[i] ^= toWord(key, &j);
+ }
+
+ var halves = Halves{ .l = 0, .r = 0 };
+ i = 0;
+ j = 0;
+ while (i < 18) : (i += 2) {
+ halves.l ^= toWord(data, &j);
+ halves.r ^= toWord(data, &j);
+ state.encipher(&halves);
+ state.subkeys[i] = halves.l;
+ state.subkeys[i + 1] = halves.r;
+ }
+
+ i = 0;
+ while (i < 4) : (i += 1) {
+ var k: usize = 0;
+ while (k < 256) : (k += 2) {
+ halves.l ^= toWord(data, &j);
+ halves.r ^= toWord(data, &j);
+ state.encipher(&halves);
+ state.sboxes[i][k] = halves.l;
+ state.sboxes[i][k + 1] = halves.r;
+ }
+ }
+ }
+
+ const Halves = struct {
+ l: u32, r: u32
+ };
+
+ fn feistelF(state: State, x: u32) u32 {
+ var r = state.sboxes[0][@truncate(u8, x >> 24)];
+ r +%= state.sboxes[1][@truncate(u8, x >> 16)];
+ r ^= state.sboxes[2][@truncate(u8, x >> 8)];
+ r +%= state.sboxes[3][@truncate(u8, x)];
+ return r;
+ }
+
+ fn halfRound(state: State, i: u32, j: u32, n: usize) u32 {
+ return i ^ state.feistelF(j) ^ state.subkeys[n];
+ }
+
+ fn encipher(state: State, halves: *Halves) void {
+ halves.l ^= state.subkeys[0];
+ var i: usize = 1;
+ while (i < 16) : (i += 2) {
+ halves.r = state.halfRound(halves.r, halves.l, i);
+ halves.l = state.halfRound(halves.l, halves.r, i + 1);
+ }
+ const halves_last = Halves{ .l = halves.r ^ state.subkeys[i], .r = halves.l };
+ halves.* = halves_last;
+ }
+
+ fn encrypt(state: State, data: []u32) void {
+ debug.assert(data.len % 2 == 0);
+ var i: usize = 0;
+ while (i < data.len) : (i += 2) {
+ var halves = Halves{ .l = data[i], .r = data[i + 1] };
+ state.encipher(&halves);
+ data[i] = halves.l;
+ data[i + 1] = halves.r;
+ }
+ }
+};
+
+// bcrypt has its own variant of base64, with its own alphabet and no padding
+const Codec = struct {
+ const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+
+ fn encode(b64: []u8, bin: []const u8) void {
+ var i: usize = 0;
+ var j: usize = 0;
+ while (i < bin.len) {
+ var c1 = bin[i];
+ i += 1;
+ b64[j] = alphabet[c1 >> 2];
+ j += 1;
+ c1 = (c1 & 3) << 4;
+ if (i >= bin.len) {
+ b64[j] = alphabet[c1];
+ j += 1;
+ break;
+ }
+ var c2 = bin[i];
+ i += 1;
+ c1 |= (c2 >> 4) & 0x0f;
+ b64[j] = alphabet[c1];
+ j += 1;
+ c1 = (c2 & 0x0f) << 2;
+ if (i >= bin.len) {
+ b64[j] = alphabet[c1];
+ j += 1;
+ break;
+ }
+ c2 = bin[i];
+ i += 1;
+ c1 |= (c2 >> 6) & 3;
+ b64[j] = alphabet[c1];
+ b64[j + 1] = alphabet[c2 & 0x3f];
+ j += 2;
+ }
+ debug.assert(j == b64.len);
+ }
+
+ fn decode(bin: []u8, b64: []const u8) BcryptError!void {
+ var i: usize = 0;
+ var j: usize = 0;
+ while (j < bin.len) {
+ const c1 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i]) orelse return error.InvalidEncoding);
+ const c2 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i + 1]) orelse return error.InvalidEncoding);
+ bin[j] = (c1 << 2) | ((c2 & 0x30) >> 4);
+ j += 1;
+ if (j >= bin.len) {
+ break;
+ }
+ const c3 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i + 2]) orelse return error.InvalidEncoding);
+ bin[j] = ((c2 & 0x0f) << 4) | ((c3 & 0x3c) >> 2);
+ j += 1;
+ if (j >= bin.len) {
+ break;
+ }
+ const c4 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i + 3]) orelse return error.InvalidEncoding);
+ bin[j] = ((c3 & 0x03) << 6) | c4;
+ j += 1;
+ i += 4;
+ }
+ }
+};
+
+fn strHashInternal(password: []const u8, rounds_log: u6, salt: [salt_length]u8) BcryptError![hash_length]u8 {
+ var state = State{};
+ var password_buf: [73]u8 = undefined;
+ const trimmed_len = math.min(password.len, password_buf.len - 1);
+ mem.copy(u8, password_buf[0..], password[0..trimmed_len]);
+ password_buf[trimmed_len] = 0;
+ var passwordZ = password_buf[0 .. trimmed_len + 1];
+ state.expand(salt[0..], passwordZ);
+
+ const rounds: u64 = @as(u64, 1) << rounds_log;
+ var k: u64 = 0;
+ while (k < rounds) : (k += 1) {
+ state.expand0(passwordZ);
+ state.expand0(salt[0..]);
+ }
+ mem.secureZero(u8, &password_buf);
+
+ var cdata = [6]u32{ 0x4f727068, 0x65616e42, 0x65686f6c, 0x64657253, 0x63727944, 0x6f756274 }; // "OrpheanBeholderScryDoubt"
+ k = 0;
+ while (k < 64) : (k += 1) {
+ state.encrypt(&cdata);
+ }
+
+ var ct: [ct_length]u8 = undefined;
+ for (cdata) |c, i| {
+ mem.writeIntBig(u32, ct[i * 4 ..][0..4], c);
+ }
+
+ var salt_str: [salt_str_length]u8 = undefined;
+ Codec.encode(salt_str[0..], salt[0..]);
+
+ var ct_str: [ct_str_length]u8 = undefined;
+ Codec.encode(ct_str[0..], ct[0 .. ct.len - 1]);
+
+ var s_buf: [hash_length]u8 = undefined;
+ const s = fmt.bufPrint(s_buf[0..], "$2b${}{}${}{}", .{ rounds_log / 10, rounds_log % 10, salt_str, ct_str }) catch unreachable;
+ debug.assert(s.len == s_buf.len);
+ return s_buf;
+}
+
+/// Compute a hash of a password using 2^rounds_log rounds of the bcrypt key stretching function.
+/// bcrypt is a computationally expensive and cache-hard function, explicitly designed to slow down exhaustive searches.
+///
+/// The function returns a string that includes all the parameters required for verification.
+///
+/// IMPORTANT: by design, bcrypt silently truncates passwords to 72 bytes.
+/// If this is an issue for your application, hash the password first using a function such as SHA-512,
+/// and then use the resulting hash as the password parameter for bcrypt.
+pub fn strHash(password: []const u8, rounds_log: u6) ![hash_length]u8 {
+ var salt: [salt_length]u8 = undefined;
+ try crypto.randomBytes(&salt);
+ return strHashInternal(password, rounds_log, salt);
+}
+
+/// Verify that a previously computed hash is valid for a given password.
+pub fn strVerify(h: [hash_length]u8, password: []const u8) BcryptError!void {
+ if (!mem.eql(u8, "$2", h[0..2])) return error.InvalidEncoding;
+ if (h[3] != '$' or h[6] != '$') return error.InvalidEncoding;
+ const rounds_log_str = h[4..][0..2];
+ const salt_str = h[7..][0..salt_str_length];
+ var salt: [salt_length]u8 = undefined;
+ try Codec.decode(salt[0..], salt_str[0..]);
+ const rounds_log = fmt.parseInt(u6, rounds_log_str[0..], 10) catch return error.InvalidEncoding;
+ const wanted_s = try strHashInternal(password, rounds_log, salt);
+ if (!mem.eql(u8, wanted_s[0..], h[0..])) {
+ return error.InvalidPassword;
+ }
+}
+
+test "bcrypt codec" {
+ var salt: [salt_length]u8 = undefined;
+ try crypto.randomBytes(&salt);
+ var salt_str: [salt_str_length]u8 = undefined;
+ Codec.encode(salt_str[0..], salt[0..]);
+ var salt2: [salt_length]u8 = undefined;
+ try Codec.decode(salt2[0..], salt_str[0..]);
+ testing.expectEqualSlices(u8, salt[0..], salt2[0..]);
+}
+
+test "bcrypt" {
+ const s = try strHash("password", 5);
+ try strVerify(s, "password");
+ testing.expectError(error.InvalidPassword, strVerify(s, "invalid password"));
+
+ const long_s = try strHash("password" ** 100, 5);
+ try strVerify(long_s, "password" ** 100);
+ try strVerify(long_s, "password" ** 101);
+
+ try strVerify("$2b$08$WUQKyBCaKpziCwUXHiMVvu40dYVjkTxtWJlftl0PpjY2BxWSvFIEe".*, "The devil himself");
+}
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index 27da9f1f42..5b6a815f0c 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -96,20 +96,22 @@ pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 {
const exchanges = [_]Crypto{Crypto{ .ty = crypto.dh.X25519, .name = "x25519" }};
pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_count: comptime_int) !u64 {
- std.debug.assert(DhKeyExchange.key_length >= DhKeyExchange.secret_length);
+ std.debug.assert(DhKeyExchange.shared_length >= DhKeyExchange.secret_length);
- var in: [DhKeyExchange.key_length]u8 = undefined;
- prng.random.bytes(in[0..]);
+ var secret: [DhKeyExchange.shared_length]u8 = undefined;
+ prng.random.bytes(secret[0..]);
- var out: [DhKeyExchange.key_length]u8 = undefined;
- prng.random.bytes(out[0..]);
+ var public: [DhKeyExchange.shared_length]u8 = undefined;
+ prng.random.bytes(public[0..]);
var timer = try Timer.start();
const start = timer.lap();
{
var i: usize = 0;
while (i < exchange_count) : (i += 1) {
- _ = DhKeyExchange.create(out[0..], out[0..], in[0..]);
+ const out = try DhKeyExchange.scalarmult(secret, public);
+ mem.copy(u8, secret[0..16], out[0..16]);
+ mem.copy(u8, public[0..16], out[16..32]);
mem.doNotOptimizeAway(&out);
}
}
@@ -124,10 +126,8 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c
const signatures = [_]Crypto{Crypto{ .ty = crypto.sign.Ed25519, .name = "ed25519" }};
pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
- var seed: [Signature.seed_length]u8 = undefined;
- prng.random.bytes(seed[0..]);
const msg = [_]u8{0} ** 64;
- const key_pair = try Signature.createKeyPair(seed);
+ const key_pair = try Signature.KeyPair.create(null);
var timer = try Timer.start();
const start = timer.lap();
@@ -149,11 +149,8 @@ pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count
const signature_verifications = [_]Crypto{Crypto{ .ty = crypto.sign.Ed25519, .name = "ed25519" }};
pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
- var seed: [Signature.seed_length]u8 = undefined;
- prng.random.bytes(seed[0..]);
const msg = [_]u8{0} ** 64;
- const key_pair = try Signature.createKeyPair(seed);
- const public_key = Signature.publicKey(key_pair);
+ const key_pair = try Signature.KeyPair.create(null);
const sig = try Signature.sign(&msg, key_pair, null);
var timer = try Timer.start();
@@ -161,7 +158,7 @@ pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime sign
{
var i: usize = 0;
while (i < signatures_count) : (i += 1) {
- try Signature.verify(sig, &msg, public_key);
+ try Signature.verify(sig, &msg, key_pair.public_key);
mem.doNotOptimizeAway(&sig);
}
}
@@ -176,16 +173,13 @@ pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime sign
const batch_signature_verifications = [_]Crypto{Crypto{ .ty = crypto.sign.Ed25519, .name = "ed25519" }};
pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
- var seed: [Signature.seed_length]u8 = undefined;
- prng.random.bytes(seed[0..]);
const msg = [_]u8{0} ** 64;
- const key_pair = try Signature.createKeyPair(seed);
- const public_key = Signature.publicKey(key_pair);
+ const key_pair = try Signature.KeyPair.create(null);
const sig = try Signature.sign(&msg, key_pair, null);
var batch: [64]Signature.BatchElement = undefined;
for (batch) |*element| {
- element.* = Signature.BatchElement{ .sig = sig, .msg = &msg, .public_key = public_key };
+ element.* = Signature.BatchElement{ .sig = sig, .msg = &msg, .public_key = key_pair.public_key };
}
var timer = try Timer.start();
@@ -208,6 +202,7 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime
const aeads = [_]Crypto{
Crypto{ .ty = crypto.aead.ChaCha20Poly1305, .name = "chacha20Poly1305" },
Crypto{ .ty = crypto.aead.XChaCha20Poly1305, .name = "xchacha20Poly1305" },
+ Crypto{ .ty = crypto.aead.XSalsa20Poly1305, .name = "xsalsa20Poly1305" },
Crypto{ .ty = crypto.aead.Gimli, .name = "gimli-aead" },
Crypto{ .ty = crypto.aead.Aegis128L, .name = "aegis-128l" },
Crypto{ .ty = crypto.aead.Aegis256, .name = "aegis-256" },
diff --git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig
index 4a32eb69ba..f8b42b7a32 100644
--- a/lib/std/crypto/blake2.zig
+++ b/lib/std/crypto/blake2.zig
@@ -44,7 +44,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
pub const key_length_min = 0;
pub const key_length_max = 32;
pub const key_length = 32; // recommended key length
- pub const Options = struct { key: ?[]const u8 = null, salt: ?[8]u8 = null, context: ?[8]u8 = null };
+ pub const Options = struct { key: ?[]const u8 = null, salt: ?[8]u8 = null, context: ?[8]u8 = null, expected_out_bits: usize = out_bits };
const iv = [8]u32{
0x6A09E667,
@@ -77,14 +77,14 @@ pub fn Blake2s(comptime out_bits: usize) type {
buf_len: u8,
pub fn init(options: Options) Self {
- debug.assert(8 <= out_bits and out_bits <= 256);
+ comptime debug.assert(8 <= out_bits and out_bits <= 256);
var d: Self = undefined;
mem.copy(u32, d.h[0..], iv[0..]);
const key_len = if (options.key) |key| key.len else 0;
// default parameters
- d.h[0] ^= 0x01010000 ^ @truncate(u32, key_len << 8) ^ @intCast(u32, out_bits >> 3);
+ d.h[0] ^= 0x01010000 ^ @truncate(u32, key_len << 8) ^ @intCast(u32, options.expected_out_bits >> 3);
d.t = 0;
d.buf_len = 0;
@@ -125,7 +125,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
// Full middle blocks.
while (off + 64 < b.len) : (off += 64) {
d.t += 64;
- d.round(b[off .. off + 64], false);
+ d.round(b[off..][0..64], false);
}
// Copy any remainder for next pass.
@@ -145,9 +145,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
}
}
- fn round(d: *Self, b: []const u8, last: bool) void {
- debug.assert(b.len == 64);
-
+ fn round(d: *Self, b: *const [64]u8, last: bool) void {
var m: [16]u32 = undefined;
var v: [16]u32 = undefined;
@@ -387,7 +385,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
pub const key_length_min = 0;
pub const key_length_max = 64;
pub const key_length = 32; // recommended key length
- pub const Options = struct { key: ?[]const u8 = null, salt: ?[16]u8 = null, context: ?[16]u8 = null };
+ pub const Options = struct { key: ?[]const u8 = null, salt: ?[16]u8 = null, context: ?[16]u8 = null, expected_out_bits: usize = out_bits };
const iv = [8]u64{
0x6a09e667f3bcc908,
@@ -422,14 +420,14 @@ pub fn Blake2b(comptime out_bits: usize) type {
buf_len: u8,
pub fn init(options: Options) Self {
- debug.assert(8 <= out_bits and out_bits <= 512);
+ comptime debug.assert(8 <= out_bits and out_bits <= 512);
var d: Self = undefined;
mem.copy(u64, d.h[0..], iv[0..]);
const key_len = if (options.key) |key| key.len else 0;
// default parameters
- d.h[0] ^= 0x01010000 ^ (key_len << 8) ^ (out_bits >> 3);
+ d.h[0] ^= 0x01010000 ^ (key_len << 8) ^ (options.expected_out_bits >> 3);
d.t = 0;
d.buf_len = 0;
@@ -470,7 +468,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
// Full middle blocks.
while (off + 128 < b.len) : (off += 128) {
d.t += 128;
- d.round(b[off .. off + 128], false);
+ d.round(b[off..][0..128], false);
}
// Copy any remainder for next pass.
@@ -490,9 +488,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
}
}
- fn round(d: *Self, b: []const u8, last: bool) void {
- debug.assert(b.len == 128);
-
+ fn round(d: *Self, b: *const [128]u8, last: bool) void {
var m: [16]u64 = undefined;
var v: [16]u64 = undefined;
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 601eefbb7c..72fd1028ab 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -11,6 +11,7 @@ const fmt = std.fmt;
const math = std.math;
const mem = std.mem;
const testing = std.testing;
+const Vector = std.meta.Vector;
const ChunkIterator = struct {
slice: []u8,
@@ -61,87 +62,173 @@ const KEYED_HASH: u8 = 1 << 4;
const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
-// The mixing function, G, which mixes either a column or a diagonal.
-fn g(state: *[16]u32, a: usize, b: usize, c: usize, d: usize, mx: u32, my: u32) void {
- _ = @addWithOverflow(u32, state[a], state[b], &state[a]);
- _ = @addWithOverflow(u32, state[a], mx, &state[a]);
- state[d] = math.rotr(u32, state[d] ^ state[a], 16);
- _ = @addWithOverflow(u32, state[c], state[d], &state[c]);
- state[b] = math.rotr(u32, state[b] ^ state[c], 12);
- _ = @addWithOverflow(u32, state[a], state[b], &state[a]);
- _ = @addWithOverflow(u32, state[a], my, &state[a]);
- state[d] = math.rotr(u32, state[d] ^ state[a], 8);
- _ = @addWithOverflow(u32, state[c], state[d], &state[c]);
- state[b] = math.rotr(u32, state[b] ^ state[c], 7);
-}
+const CompressVectorized = struct {
+ const Lane = Vector(4, u32);
+ const Rows = [4]Lane;
-fn round(state: *[16]u32, msg: [16]u32, schedule: [16]u8) void {
- // Mix the columns.
- g(state, 0, 4, 8, 12, msg[schedule[0]], msg[schedule[1]]);
- g(state, 1, 5, 9, 13, msg[schedule[2]], msg[schedule[3]]);
- g(state, 2, 6, 10, 14, msg[schedule[4]], msg[schedule[5]]);
- g(state, 3, 7, 11, 15, msg[schedule[6]], msg[schedule[7]]);
-
- // Mix the diagonals.
- g(state, 0, 5, 10, 15, msg[schedule[8]], msg[schedule[9]]);
- g(state, 1, 6, 11, 12, msg[schedule[10]], msg[schedule[11]]);
- g(state, 2, 7, 8, 13, msg[schedule[12]], msg[schedule[13]]);
- g(state, 3, 4, 9, 14, msg[schedule[14]], msg[schedule[15]]);
-}
+ inline fn rot(x: Lane, comptime n: u5) Lane {
+ return (x >> @splat(4, @as(u5, n))) | (x << @splat(4, @as(u5, 1 +% ~n)));
+ }
-fn compress(
- chaining_value: [8]u32,
- block_words: [16]u32,
- block_len: u32,
- counter: u64,
- flags: u8,
-) [16]u32 {
- var state = [16]u32{
- chaining_value[0],
- chaining_value[1],
- chaining_value[2],
- chaining_value[3],
- chaining_value[4],
- chaining_value[5],
- chaining_value[6],
- chaining_value[7],
- IV[0],
- IV[1],
- IV[2],
- IV[3],
- @truncate(u32, counter),
- @truncate(u32, counter >> 32),
- block_len,
- flags,
- };
- for (MSG_SCHEDULE) |schedule| {
- round(&state, block_words, schedule);
+ inline fn g(comptime even: bool, rows: *Rows, m: Lane) void {
+ rows[0] +%= rows[1] +% m;
+ rows[3] ^= rows[0];
+ rows[3] = rot(rows[3], if (even) 8 else 16);
+ rows[2] +%= rows[3];
+ rows[1] ^= rows[2];
+ rows[1] = rot(rows[1], if (even) 7 else 12);
}
- for (chaining_value) |_, i| {
- state[i] ^= state[i + 8];
- state[i + 8] ^= chaining_value[i];
+
+ inline fn diagonalize(rows: *Rows) void {
+ rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 3, 0, 1, 2 });
+ rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 });
+ rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 1, 2, 3, 0 });
}
- return state;
-}
+
+ inline fn undiagonalize(rows: *Rows) void {
+ rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 1, 2, 3, 0 });
+ rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 });
+ rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 3, 0, 1, 2 });
+ }
+
+ fn compress(
+ chaining_value: [8]u32,
+ block_words: [16]u32,
+ block_len: u32,
+ counter: u64,
+ flags: u8,
+ ) [16]u32 {
+ const md = Lane{ @truncate(u32, counter), @truncate(u32, counter >> 32), block_len, @as(u32, flags) };
+ var rows = Rows{ chaining_value[0..4].*, chaining_value[4..8].*, IV[0..4].*, md };
+
+ var m = Rows{ block_words[0..4].*, block_words[4..8].*, block_words[8..12].*, block_words[12..16].* };
+ var t0 = @shuffle(u32, m[0], m[1], [_]i32{ 0, 2, (-1 - 0), (-1 - 2) });
+ g(false, &rows, t0);
+ var t1 = @shuffle(u32, m[0], m[1], [_]i32{ 1, 3, (-1 - 1), (-1 - 3) });
+ g(true, &rows, t1);
+ diagonalize(&rows);
+ var t2 = @shuffle(u32, m[2], m[3], [_]i32{ 0, 2, (-1 - 0), (-1 - 2) });
+ t2 = @shuffle(u32, t2, undefined, [_]i32{ 3, 0, 1, 2 });
+ g(false, &rows, t2);
+ var t3 = @shuffle(u32, m[2], m[3], [_]i32{ 1, 3, (-1 - 1), (-1 - 3) });
+ t3 = @shuffle(u32, t3, undefined, [_]i32{ 3, 0, 1, 2 });
+ g(true, &rows, t3);
+ undiagonalize(&rows);
+ m = Rows{ t0, t1, t2, t3 };
+
+ var i: usize = 0;
+ while (i < 6) : (i += 1) {
+ t0 = @shuffle(u32, m[0], m[1], [_]i32{ 2, 1, (-1 - 1), (-1 - 3) });
+ t0 = @shuffle(u32, t0, undefined, [_]i32{ 1, 2, 3, 0 });
+ g(false, &rows, t0);
+ t1 = @shuffle(u32, m[2], m[3], [_]i32{ 2, 2, (-1 - 3), (-1 - 3) });
+ var tt = @shuffle(u32, m[0], undefined, [_]i32{ 3, 3, 0, 0 });
+ t1 = @shuffle(u32, tt, t1, [_]i32{ 0, (-1 - 1), 2, (-1 - 3) });
+ g(true, &rows, t1);
+ diagonalize(&rows);
+ t2 = @shuffle(u32, m[3], m[1], [_]i32{ 0, 1, (-1 - 0), (-1 - 1) });
+ tt = @shuffle(u32, t2, m[2], [_]i32{ 0, 1, 2, (-1 - 3) });
+ t2 = @shuffle(u32, tt, undefined, [_]i32{ 0, 2, 3, 1 });
+ g(false, &rows, t2);
+ t3 = @shuffle(u32, m[1], m[3], [_]i32{ 2, (-1 - 2), 3, (-1 - 3) });
+ tt = @shuffle(u32, m[2], t3, [_]i32{ 0, (-1 - 0), 1, (-1 - 1) });
+ t3 = @shuffle(u32, tt, undefined, [_]i32{ 2, 3, 1, 0 });
+ g(true, &rows, t3);
+ undiagonalize(&rows);
+ m = Rows{ t0, t1, t2, t3 };
+ }
+
+ rows[0] ^= rows[2];
+ rows[1] ^= rows[3];
+ rows[2] ^= Vector(4, u32){ chaining_value[0], chaining_value[1], chaining_value[2], chaining_value[3] };
+ rows[3] ^= Vector(4, u32){ chaining_value[4], chaining_value[5], chaining_value[6], chaining_value[7] };
+
+ return @bitCast([16]u32, rows);
+ }
+};
+
+const CompressGeneric = struct {
+ fn g(state: *[16]u32, comptime a: usize, comptime b: usize, comptime c: usize, comptime d: usize, mx: u32, my: u32) void {
+ state[a] +%= state[b] +% mx;
+ state[d] = math.rotr(u32, state[d] ^ state[a], 16);
+ state[c] +%= state[d];
+ state[b] = math.rotr(u32, state[b] ^ state[c], 12);
+ state[a] +%= state[b] +% my;
+ state[d] = math.rotr(u32, state[d] ^ state[a], 8);
+ state[c] +%= state[d];
+ state[b] = math.rotr(u32, state[b] ^ state[c], 7);
+ }
+
+ fn round(state: *[16]u32, msg: [16]u32, schedule: [16]u8) void {
+ // Mix the columns.
+ g(state, 0, 4, 8, 12, msg[schedule[0]], msg[schedule[1]]);
+ g(state, 1, 5, 9, 13, msg[schedule[2]], msg[schedule[3]]);
+ g(state, 2, 6, 10, 14, msg[schedule[4]], msg[schedule[5]]);
+ g(state, 3, 7, 11, 15, msg[schedule[6]], msg[schedule[7]]);
+
+ // Mix the diagonals.
+ g(state, 0, 5, 10, 15, msg[schedule[8]], msg[schedule[9]]);
+ g(state, 1, 6, 11, 12, msg[schedule[10]], msg[schedule[11]]);
+ g(state, 2, 7, 8, 13, msg[schedule[12]], msg[schedule[13]]);
+ g(state, 3, 4, 9, 14, msg[schedule[14]], msg[schedule[15]]);
+ }
+
+ fn compress(
+ chaining_value: [8]u32,
+ block_words: [16]u32,
+ block_len: u32,
+ counter: u64,
+ flags: u8,
+ ) [16]u32 {
+ var state = [16]u32{
+ chaining_value[0],
+ chaining_value[1],
+ chaining_value[2],
+ chaining_value[3],
+ chaining_value[4],
+ chaining_value[5],
+ chaining_value[6],
+ chaining_value[7],
+ IV[0],
+ IV[1],
+ IV[2],
+ IV[3],
+ @truncate(u32, counter),
+ @truncate(u32, counter >> 32),
+ block_len,
+ flags,
+ };
+ for (MSG_SCHEDULE) |schedule| {
+ round(&state, block_words, schedule);
+ }
+ for (chaining_value) |_, i| {
+ state[i] ^= state[i + 8];
+ state[i + 8] ^= chaining_value[i];
+ }
+ return state;
+ }
+};
+
+const compress = if (std.Target.current.cpu.arch == .x86_64) CompressVectorized.compress else CompressGeneric.compress;
fn first8Words(words: [16]u32) [8]u32 {
return @ptrCast(*const [8]u32, &words).*;
}
-fn wordsFromLittleEndianBytes(words: []u32, bytes: []const u8) void {
- var byte_slice = bytes;
- for (words) |*word| {
- word.* = mem.readIntSliceLittle(u32, byte_slice);
- byte_slice = byte_slice[4..];
+fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 {
+ var words: [count]u32 = undefined;
+ for (words) |*word, i| {
+ word.* = mem.readIntSliceLittle(u32, bytes[4 * i ..]);
}
+ return words;
}
// Each chunk or parent node can produce either an 8-word chaining value or, by
// setting the ROOT flag, any number of final output bytes. The Output struct
// captures the state just prior to choosing between those two possibilities.
const Output = struct {
- input_chaining_value: [8]u32,
- block_words: [16]u32,
+ input_chaining_value: [8]u32 align(16),
+ block_words: [16]u32 align(16),
block_len: u32,
counter: u64,
flags: u8,
@@ -181,9 +268,9 @@ const Output = struct {
};
const ChunkState = struct {
- chaining_value: [8]u32,
+ chaining_value: [8]u32 align(16),
chunk_counter: u64,
- block: [BLOCK_LEN]u8 = [_]u8{0} ** BLOCK_LEN,
+ block: [BLOCK_LEN]u8 align(16) = [_]u8{0} ** BLOCK_LEN,
block_len: u8 = 0,
blocks_compressed: u8 = 0,
flags: u8,
@@ -218,8 +305,7 @@ const ChunkState = struct {
// If the block buffer is full, compress it and clear it. More
// input is coming, so this compression is not CHUNK_END.
if (self.block_len == BLOCK_LEN) {
- var block_words: [16]u32 = undefined;
- wordsFromLittleEndianBytes(block_words[0..], self.block[0..]);
+ const block_words = wordsFromLittleEndianBytes(16, self.block);
self.chaining_value = first8Words(compress(
self.chaining_value,
block_words,
@@ -238,8 +324,7 @@ const ChunkState = struct {
}
fn output(self: *const ChunkState) Output {
- var block_words: [16]u32 = undefined;
- wordsFromLittleEndianBytes(block_words[0..], self.block[0..]);
+ const block_words = wordsFromLittleEndianBytes(16, self.block);
return Output{
.input_chaining_value = self.chaining_value,
.block_words = block_words,
@@ -256,7 +341,7 @@ fn parentOutput(
key: [8]u32,
flags: u8,
) Output {
- var block_words: [16]u32 = undefined;
+ var block_words: [16]u32 align(16) = undefined;
mem.copy(u32, block_words[0..8], left_child_cv[0..]);
mem.copy(u32, block_words[8..], right_child_cv[0..]);
return Output{
@@ -303,8 +388,7 @@ pub const Blake3 = struct {
/// Construct a new `Blake3` for the hash function, with an optional key
pub fn init(options: Options) Blake3 {
if (options.key) |key| {
- var key_words: [8]u32 = undefined;
- wordsFromLittleEndianBytes(key_words[0..], key[0..]);
+ const key_words = wordsFromLittleEndianBytes(8, key);
return Blake3.init_internal(key_words, KEYED_HASH);
} else {
return Blake3.init_internal(IV, 0);
@@ -318,8 +402,7 @@ pub const Blake3 = struct {
context_hasher.update(context);
var context_key: [KEY_LEN]u8 = undefined;
context_hasher.final(context_key[0..]);
- var context_key_words: [8]u32 = undefined;
- wordsFromLittleEndianBytes(context_key_words[0..], context_key[0..]);
+ const context_key_words = wordsFromLittleEndianBytes(8, context_key);
return Blake3.init_internal(context_key_words, DERIVE_KEY_MATERIAL);
}
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index ecf9abbef9..32d1aa6dce 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -100,7 +100,7 @@ const ChaCha20VecImpl = struct {
x[3] +%= ctx[3];
}
- fn chaCha20Internal(out: []u8, in: []const u8, key: [8]u32, counter: [4]u32) void {
+ fn chacha20Xor(out: []u8, in: []const u8, key: [8]u32, counter: [4]u32) void {
var ctx = initContext(key, counter);
var x: BlockVec = undefined;
var buf: [64]u8 = undefined;
@@ -239,7 +239,7 @@ const ChaCha20NonVecImpl = struct {
}
}
- fn chaCha20Internal(out: []u8, in: []const u8, key: [8]u32, counter: [4]u32) void {
+ fn chacha20Xor(out: []u8, in: []const u8, key: [8]u32, counter: [4]u32) void {
var ctx = initContext(key, counter);
var x: BlockVec = undefined;
var buf: [64]u8 = undefined;
@@ -325,7 +325,7 @@ pub const ChaCha20IETF = struct {
c[1] = mem.readIntLittle(u32, nonce[0..4]);
c[2] = mem.readIntLittle(u32, nonce[4..8]);
c[3] = mem.readIntLittle(u32, nonce[8..12]);
- ChaCha20Impl.chaCha20Internal(out, in, keyToWords(key), c);
+ ChaCha20Impl.chacha20Xor(out, in, keyToWords(key), c);
}
};
@@ -351,7 +351,7 @@ pub const ChaCha20With64BitNonce = struct {
// first partial big block
if (((@intCast(u64, maxInt(u32) - @truncate(u32, counter)) + 1) << 6) < in.len) {
- ChaCha20Impl.chaCha20Internal(out[cursor..big_block], in[cursor..big_block], k, c);
+ ChaCha20Impl.chacha20Xor(out[cursor..big_block], in[cursor..big_block], k, c);
cursor = big_block - cursor;
c[1] += 1;
if (comptime @sizeOf(usize) > 4) {
@@ -359,14 +359,14 @@ pub const ChaCha20With64BitNonce = struct {
var remaining_blocks: u32 = @intCast(u32, (in.len / big_block));
var i: u32 = 0;
while (remaining_blocks > 0) : (remaining_blocks -= 1) {
- ChaCha20Impl.chaCha20Internal(out[cursor .. cursor + big_block], in[cursor .. cursor + big_block], k, c);
- c[1] += 1; // upper 32-bit of counter, generic chaCha20Internal() doesn't know about this.
+ ChaCha20Impl.chacha20Xor(out[cursor .. cursor + big_block], in[cursor .. cursor + big_block], k, c);
+ c[1] += 1; // upper 32-bit of counter, generic chacha20Xor() doesn't know about this.
cursor += big_block;
}
}
}
- ChaCha20Impl.chaCha20Internal(out[cursor..], in[cursor..], k, c);
+ ChaCha20Impl.chacha20Xor(out[cursor..], in[cursor..], k, c);
}
};
@@ -694,7 +694,7 @@ fn chacha20poly1305OpenDetached(dst: []u8, ciphertext: []const u8, tag: *const [
// See https://github.com/ziglang/zig/issues/1776
var acc: u8 = 0;
for (computedTag) |_, i| {
- acc |= (computedTag[i] ^ tag[i]);
+ acc |= computedTag[i] ^ tag[i];
}
if (acc != 0) {
return error.AuthenticationFailed;
diff --git a/lib/std/crypto/gimli.zig b/lib/std/crypto/gimli.zig
index 42fd38d393..52708158ab 100644
--- a/lib/std/crypto/gimli.zig
+++ b/lib/std/crypto/gimli.zig
@@ -39,13 +39,13 @@ pub const State = struct {
}
/// TODO follow the span() convention instead of having this and `toSliceConst`
- pub fn toSlice(self: *Self) []u8 {
- return mem.sliceAsBytes(self.data[0..]);
+ pub fn toSlice(self: *Self) *[BLOCKBYTES]u8 {
+ return mem.asBytes(&self.data);
}
/// TODO follow the span() convention instead of having this and `toSlice`
- pub fn toSliceConst(self: *Self) []const u8 {
- return mem.sliceAsBytes(self.data[0..]);
+ pub fn toSliceConst(self: *const Self) *const [BLOCKBYTES]u8 {
+ return mem.asBytes(&self.data);
}
fn permute_unrolled(self: *Self) void {
diff --git a/lib/std/crypto/hkdf.zig b/lib/std/crypto/hkdf.zig
index 7ac3603637..c62583a932 100644
--- a/lib/std/crypto/hkdf.zig
+++ b/lib/std/crypto/hkdf.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
const std = @import("../std.zig");
const assert = std.debug.assert;
const hmac = std.crypto.auth.hmac;
diff --git a/lib/std/crypto/hmac.zig b/lib/std/crypto/hmac.zig
index e0972ecb7e..3978ff6b81 100644
--- a/lib/std/crypto/hmac.zig
+++ b/lib/std/crypto/hmac.zig
@@ -26,41 +26,41 @@ pub fn Hmac(comptime Hash: type) type {
pub const key_length = 32; // recommended key length
o_key_pad: [Hash.block_length]u8,
- i_key_pad: [Hash.block_length]u8,
- scratch: [Hash.block_length]u8,
hash: Hash,
// HMAC(k, m) = H(o_key_pad || H(i_key_pad || msg)) where || is concatenation
- pub fn create(out: []u8, msg: []const u8, key: []const u8) void {
+ pub fn create(out: *[mac_length]u8, msg: []const u8, key: []const u8) void {
var ctx = Self.init(key);
ctx.update(msg);
- ctx.final(out[0..]);
+ ctx.final(out);
}
pub fn init(key: []const u8) Self {
var ctx: Self = undefined;
+ var scratch: [Hash.block_length]u8 = undefined;
+ var i_key_pad: [Hash.block_length]u8 = undefined;
// Normalize key length to block size of hash
if (key.len > Hash.block_length) {
- Hash.hash(key, ctx.scratch[0..mac_length], .{});
- mem.set(u8, ctx.scratch[mac_length..Hash.block_length], 0);
+ Hash.hash(key, scratch[0..mac_length], .{});
+ mem.set(u8, scratch[mac_length..Hash.block_length], 0);
} else if (key.len < Hash.block_length) {
- mem.copy(u8, ctx.scratch[0..key.len], key);
- mem.set(u8, ctx.scratch[key.len..Hash.block_length], 0);
+ mem.copy(u8, scratch[0..key.len], key);
+ mem.set(u8, scratch[key.len..Hash.block_length], 0);
} else {
- mem.copy(u8, ctx.scratch[0..], key);
+ mem.copy(u8, scratch[0..], key);
}
for (ctx.o_key_pad) |*b, i| {
- b.* = ctx.scratch[i] ^ 0x5c;
+ b.* = scratch[i] ^ 0x5c;
}
- for (ctx.i_key_pad) |*b, i| {
- b.* = ctx.scratch[i] ^ 0x36;
+ for (i_key_pad) |*b, i| {
+ b.* = scratch[i] ^ 0x36;
}
ctx.hash = Hash.init(.{});
- ctx.hash.update(ctx.i_key_pad[0..]);
+ ctx.hash.update(&i_key_pad);
return ctx;
}
@@ -68,14 +68,13 @@ pub fn Hmac(comptime Hash: type) type {
ctx.hash.update(msg);
}
- pub fn final(ctx: *Self, out: []u8) void {
- debug.assert(Hash.block_length >= out.len and out.len >= mac_length);
-
- ctx.hash.final(ctx.scratch[0..mac_length]);
+ pub fn final(ctx: *Self, out: *[mac_length]u8) void {
+ var scratch: [mac_length]u8 = undefined;
+ ctx.hash.final(&scratch);
var ohash = Hash.init(.{});
- ohash.update(ctx.o_key_pad[0..]);
- ohash.update(ctx.scratch[0..mac_length]);
- ohash.final(out[0..mac_length]);
+ ohash.update(&ctx.o_key_pad);
+ ohash.update(&scratch);
+ ohash.final(out);
}
};
}
diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig
new file mode 100644
index 0000000000..ccfb6ecbad
--- /dev/null
+++ b/lib/std/crypto/salsa20.zig
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
+const std = @import("std");
+const crypto = std.crypto;
+const debug = std.debug;
+const math = std.math;
+const mem = std.mem;
+const Vector = std.meta.Vector;
+
+const Poly1305 = crypto.onetimeauth.Poly1305;
+const Blake2b = crypto.hash.blake2.Blake2b;
+const X25519 = crypto.dh.X25519;
+
+const Salsa20VecImpl = struct {
+ const Lane = Vector(4, u32);
+ const Half = Vector(2, u32);
+ const BlockVec = [4]Lane;
+
+ fn initContext(key: [8]u32, d: [4]u32) BlockVec {
+ const c = "expand 32-byte k";
+ const constant_le = comptime [4]u32{
+ mem.readIntLittle(u32, c[0..4]),
+ mem.readIntLittle(u32, c[4..8]),
+ mem.readIntLittle(u32, c[8..12]),
+ mem.readIntLittle(u32, c[12..16]),
+ };
+ return BlockVec{
+ Lane{ key[0], key[1], key[2], key[3] },
+ Lane{ key[4], key[5], key[6], key[7] },
+ Lane{ constant_le[0], constant_le[1], constant_le[2], constant_le[3] },
+ Lane{ d[0], d[1], d[2], d[3] },
+ };
+ }
+
+ inline fn rot(x: Lane, comptime n: u5) Lane {
+ return (x << @splat(4, @as(u5, n))) | (x >> @splat(4, @as(u5, 1 +% ~n)));
+ }
+
+ inline fn salsa20Core(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
+ const n1n2n3n0 = Lane{ input[3][1], input[3][2], input[3][3], input[3][0] };
+ const n1n2 = Half{ n1n2n3n0[0], n1n2n3n0[1] };
+ const n3n0 = Half{ n1n2n3n0[2], n1n2n3n0[3] };
+ const k0k1 = Half{ input[0][0], input[0][1] };
+ const k2k3 = Half{ input[0][2], input[0][3] };
+ const k4k5 = Half{ input[1][0], input[1][1] };
+ const k6k7 = Half{ input[1][2], input[1][3] };
+ const n0k0 = Half{ n3n0[1], k0k1[0] };
+ const k0n0 = Half{ n0k0[1], n0k0[0] };
+ const k4k5k0n0 = Lane{ k4k5[0], k4k5[1], k0n0[0], k0n0[1] };
+ const k1k6 = Half{ k0k1[1], k6k7[0] };
+ const k6k1 = Half{ k1k6[1], k1k6[0] };
+ const n1n2k6k1 = Lane{ n1n2[0], n1n2[1], k6k1[0], k6k1[1] };
+ const k7n3 = Half{ k6k7[1], n3n0[0] };
+ const n3k7 = Half{ k7n3[1], k7n3[0] };
+ const k2k3n3k7 = Lane{ k2k3[0], k2k3[1], n3k7[0], n3k7[1] };
+
+ var diag0 = input[2];
+ var diag1 = @shuffle(u32, k4k5k0n0, undefined, [_]i32{ 1, 2, 3, 0 });
+ var diag2 = @shuffle(u32, n1n2k6k1, undefined, [_]i32{ 1, 2, 3, 0 });
+ var diag3 = @shuffle(u32, k2k3n3k7, undefined, [_]i32{ 1, 2, 3, 0 });
+
+ const start0 = diag0;
+ const start1 = diag1;
+ const start2 = diag2;
+ const start3 = diag3;
+
+ var i: usize = 0;
+ while (i < 20) : (i += 2) {
+ var a0 = diag1 +% diag0;
+ diag3 ^= rot(a0, 7);
+ var a1 = diag0 +% diag3;
+ diag2 ^= rot(a1, 9);
+ var a2 = diag3 +% diag2;
+ diag1 ^= rot(a2, 13);
+ var a3 = diag2 +% diag1;
+ diag0 ^= rot(a3, 18);
+
+ var diag3_shift = @shuffle(u32, diag3, undefined, [_]i32{ 3, 0, 1, 2 });
+ var diag2_shift = @shuffle(u32, diag2, undefined, [_]i32{ 2, 3, 0, 1 });
+ var diag1_shift = @shuffle(u32, diag1, undefined, [_]i32{ 1, 2, 3, 0 });
+ diag3 = diag3_shift;
+ diag2 = diag2_shift;
+ diag1 = diag1_shift;
+
+ a0 = diag3 +% diag0;
+ diag1 ^= rot(a0, 7);
+ a1 = diag0 +% diag1;
+ diag2 ^= rot(a1, 9);
+ a2 = diag1 +% diag2;
+ diag3 ^= rot(a2, 13);
+ a3 = diag2 +% diag3;
+ diag0 ^= rot(a3, 18);
+
+ diag1_shift = @shuffle(u32, diag1, undefined, [_]i32{ 3, 0, 1, 2 });
+ diag2_shift = @shuffle(u32, diag2, undefined, [_]i32{ 2, 3, 0, 1 });
+ diag3_shift = @shuffle(u32, diag3, undefined, [_]i32{ 1, 2, 3, 0 });
+ diag1 = diag1_shift;
+ diag2 = diag2_shift;
+ diag3 = diag3_shift;
+ }
+
+ if (feedback) {
+ diag0 +%= start0;
+ diag1 +%= start1;
+ diag2 +%= start2;
+ diag3 +%= start3;
+ }
+
+ const x0x1x10x11 = Lane{ diag0[0], diag1[1], diag0[2], diag1[3] };
+ const x12x13x6x7 = Lane{ diag1[0], diag2[1], diag1[2], diag2[3] };
+ const x8x9x2x3 = Lane{ diag2[0], diag3[1], diag2[2], diag3[3] };
+ const x4x5x14x15 = Lane{ diag3[0], diag0[1], diag3[2], diag0[3] };
+
+ x[0] = Lane{ x0x1x10x11[0], x0x1x10x11[1], x8x9x2x3[2], x8x9x2x3[3] };
+ x[1] = Lane{ x4x5x14x15[0], x4x5x14x15[1], x12x13x6x7[2], x12x13x6x7[3] };
+ x[2] = Lane{ x8x9x2x3[0], x8x9x2x3[1], x0x1x10x11[2], x0x1x10x11[3] };
+ x[3] = Lane{ x12x13x6x7[0], x12x13x6x7[1], x4x5x14x15[2], x4x5x14x15[3] };
+ }
+
+ fn hashToBytes(out: *[64]u8, x: BlockVec) void {
+ var i: usize = 0;
+ while (i < 4) : (i += 1) {
+ mem.writeIntLittle(u32, out[16 * i + 0 ..][0..4], x[i][0]);
+ mem.writeIntLittle(u32, out[16 * i + 4 ..][0..4], x[i][1]);
+ mem.writeIntLittle(u32, out[16 * i + 8 ..][0..4], x[i][2]);
+ mem.writeIntLittle(u32, out[16 * i + 12 ..][0..4], x[i][3]);
+ }
+ }
+
+    fn salsa20Xor(out: []u8, in: []const u8, key: [8]u32, d: [4]u32) void { // XOR `in` with the Salsa20 keystream into `out` (lengths assumed equal by callers).
+        var ctx = initContext(key, d);
+        var x: BlockVec = undefined;
+        var buf: [64]u8 = undefined;
+        var i: usize = 0;
+        while (i + 64 <= in.len) : (i += 64) { // full 64-byte blocks
+            salsa20Core(x[0..], ctx, true);
+            hashToBytes(buf[0..], x);
+            var xout = out[i..];
+            const xin = in[i..];
+            var j: usize = 0;
+            while (j < 64) : (j += 1) { // copy first so `out` may alias `in`
+                xout[j] = xin[j];
+            }
+            j = 0;
+            while (j < 64) : (j += 1) {
+                xout[j] ^= buf[j];
+            }
+            ctx[3][2] +%= 1; // 64-bit block counter lives in row 3 as d[2] (lo) / d[3] (hi); row 2 is the constant row and must not change
+            if (ctx[3][2] == 0) {
+                ctx[3][3] += 1; // carry into the high counter word on wraparound
+            }
+        }
+        if (i < in.len) { // trailing partial block
+            salsa20Core(x[0..], ctx, true);
+            hashToBytes(buf[0..], x);
+
+            var xout = out[i..];
+            const xin = in[i..];
+            var j: usize = 0;
+            while (j < in.len % 64) : (j += 1) {
+                xout[j] = xin[j] ^ buf[j];
+            }
+        }
+    }
+
+ fn hsalsa20(input: [16]u8, key: [32]u8) [32]u8 {
+ var c: [4]u32 = undefined;
+ for (c) |_, i| {
+ c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
+ }
+ const ctx = initContext(keyToWords(key), c);
+ var x: BlockVec = undefined;
+ salsa20Core(x[0..], ctx, false);
+ var out: [32]u8 = undefined;
+ mem.writeIntLittle(u32, out[0..4], x[0][0]);
+ mem.writeIntLittle(u32, out[4..8], x[1][1]);
+ mem.writeIntLittle(u32, out[8..12], x[2][2]);
+ mem.writeIntLittle(u32, out[12..16], x[3][3]);
+ mem.writeIntLittle(u32, out[16..20], x[1][2]);
+ mem.writeIntLittle(u32, out[20..24], x[1][3]);
+ mem.writeIntLittle(u32, out[24..28], x[2][0]);
+ mem.writeIntLittle(u32, out[28..32], x[2][1]);
+ return out;
+ }
+};
+
+const Salsa20NonVecImpl = struct {
+ const BlockVec = [16]u32;
+
+ fn initContext(key: [8]u32, d: [4]u32) BlockVec {
+ const c = "expand 32-byte k";
+ const constant_le = comptime [4]u32{
+ mem.readIntLittle(u32, c[0..4]),
+ mem.readIntLittle(u32, c[4..8]),
+ mem.readIntLittle(u32, c[8..12]),
+ mem.readIntLittle(u32, c[12..16]),
+ };
+ return BlockVec{
+ constant_le[0], key[0], key[1], key[2],
+ key[3], constant_le[1], d[0], d[1],
+ d[2], d[3], constant_le[2], key[4],
+ key[5], key[6], key[7], constant_le[3],
+ };
+ }
+
+ const QuarterRound = struct {
+ a: usize,
+ b: usize,
+ c: usize,
+ d: u6,
+ };
+
+ inline fn Rp(a: usize, b: usize, c: usize, d: u6) QuarterRound {
+ return QuarterRound{
+ .a = a,
+ .b = b,
+ .c = c,
+ .d = d,
+ };
+ }
+
+ inline fn salsa20Core(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
+ const arx_steps = comptime [_]QuarterRound{
+ Rp(4, 0, 12, 7), Rp(8, 4, 0, 9), Rp(12, 8, 4, 13), Rp(0, 12, 8, 18),
+ Rp(9, 5, 1, 7), Rp(13, 9, 5, 9), Rp(1, 13, 9, 13), Rp(5, 1, 13, 18),
+ Rp(14, 10, 6, 7), Rp(2, 14, 10, 9), Rp(6, 2, 14, 13), Rp(10, 6, 2, 18),
+ Rp(3, 15, 11, 7), Rp(7, 3, 15, 9), Rp(11, 7, 3, 13), Rp(15, 11, 7, 18),
+ Rp(1, 0, 3, 7), Rp(2, 1, 0, 9), Rp(3, 2, 1, 13), Rp(0, 3, 2, 18),
+ Rp(6, 5, 4, 7), Rp(7, 6, 5, 9), Rp(4, 7, 6, 13), Rp(5, 4, 7, 18),
+ Rp(11, 10, 9, 7), Rp(8, 11, 10, 9), Rp(9, 8, 11, 13), Rp(10, 9, 8, 18),
+ Rp(12, 15, 14, 7), Rp(13, 12, 15, 9), Rp(14, 13, 12, 13), Rp(15, 14, 13, 18),
+ };
+ x.* = input;
+ var j: usize = 0;
+ while (j < 20) : (j += 2) {
+ inline for (arx_steps) |r| {
+ x[r.a] ^= math.rotl(u32, x[r.b] +% x[r.c], r.d);
+ }
+ }
+ if (feedback) {
+ j = 0;
+ while (j < 16) : (j += 1) {
+ x[j] +%= input[j];
+ }
+ }
+ }
+
+ fn hashToBytes(out: *[64]u8, x: BlockVec) void {
+ for (x) |w, i| {
+ mem.writeIntLittle(u32, out[i * 4 ..][0..4], w);
+ }
+ }
+
+ fn salsa20Xor(out: []u8, in: []const u8, key: [8]u32, d: [4]u32) void {
+ var ctx = initContext(key, d);
+ var x: BlockVec = undefined;
+ var buf: [64]u8 = undefined;
+ var i: usize = 0;
+ while (i + 64 <= in.len) : (i += 64) {
+ salsa20Core(x[0..], ctx, true);
+ hashToBytes(buf[0..], x);
+ var xout = out[i..];
+ const xin = in[i..];
+ var j: usize = 0;
+ while (j < 64) : (j += 1) {
+ xout[j] = xin[j];
+ }
+ j = 0;
+ while (j < 64) : (j += 1) {
+ xout[j] ^= buf[j];
+ }
+ ctx[9] += @boolToInt(@addWithOverflow(u32, ctx[8], 1, &ctx[8]));
+ }
+ if (i < in.len) {
+ salsa20Core(x[0..], ctx, true);
+ hashToBytes(buf[0..], x);
+
+ var xout = out[i..];
+ const xin = in[i..];
+ var j: usize = 0;
+ while (j < in.len % 64) : (j += 1) {
+ xout[j] = xin[j] ^ buf[j];
+ }
+ }
+ }
+
+ fn hsalsa20(input: [16]u8, key: [32]u8) [32]u8 {
+ var c: [4]u32 = undefined;
+ for (c) |_, i| {
+ c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
+ }
+ const ctx = initContext(keyToWords(key), c);
+ var x: BlockVec = undefined;
+ salsa20Core(x[0..], ctx, false);
+ var out: [32]u8 = undefined;
+ mem.writeIntLittle(u32, out[0..4], x[0]);
+ mem.writeIntLittle(u32, out[4..8], x[5]);
+ mem.writeIntLittle(u32, out[8..12], x[10]);
+ mem.writeIntLittle(u32, out[12..16], x[15]);
+ mem.writeIntLittle(u32, out[16..20], x[6]);
+ mem.writeIntLittle(u32, out[20..24], x[7]);
+ mem.writeIntLittle(u32, out[24..28], x[8]);
+ mem.writeIntLittle(u32, out[28..32], x[9]);
+ return out;
+ }
+};
+
+const Salsa20Impl = if (std.Target.current.cpu.arch == .x86_64) Salsa20VecImpl else Salsa20NonVecImpl;
+
+fn keyToWords(key: [32]u8) [8]u32 {
+ var k: [8]u32 = undefined;
+ var i: usize = 0;
+ while (i < 8) : (i += 1) {
+ k[i] = mem.readIntLittle(u32, key[i * 4 ..][0..4]);
+ }
+ return k;
+}
+
+fn extend(key: [32]u8, nonce: [24]u8) struct { key: [32]u8, nonce: [8]u8 } {
+ return .{
+ .key = Salsa20Impl.hsalsa20(nonce[0..16].*, key),
+ .nonce = nonce[16..24].*,
+ };
+}
+
+/// The Salsa20 stream cipher.
+pub const Salsa20 = struct {
+ /// Nonce length in bytes.
+ pub const nonce_length = 8;
+ /// Key length in bytes.
+ pub const key_length = 32;
+
+    /// Add the output of the Salsa20 stream cipher to `in` and store the result into `out`.
+ /// WARNING: This function doesn't provide authenticated encryption.
+ /// Using the AEAD or one of the `box` versions is usually preferred.
+ pub fn xor(out: []u8, in: []const u8, counter: u64, key: [key_length]u8, nonce: [nonce_length]u8) void {
+ debug.assert(in.len == out.len);
+
+ var d: [4]u32 = undefined;
+ d[0] = mem.readIntLittle(u32, nonce[0..4]);
+ d[1] = mem.readIntLittle(u32, nonce[4..8]);
+ d[2] = @truncate(u32, counter);
+ d[3] = @truncate(u32, counter >> 32);
+ Salsa20Impl.salsa20Xor(out, in, keyToWords(key), d);
+ }
+};
+
+/// The XSalsa20 stream cipher.
+pub const XSalsa20 = struct {
+ /// Nonce length in bytes.
+ pub const nonce_length = 24;
+ /// Key length in bytes.
+ pub const key_length = 32;
+
+    /// Add the output of the XSalsa20 stream cipher to `in` and store the result into `out`.
+ /// WARNING: This function doesn't provide authenticated encryption.
+ /// Using the AEAD or one of the `box` versions is usually preferred.
+ pub fn xor(out: []u8, in: []const u8, counter: u64, key: [key_length]u8, nonce: [nonce_length]u8) void {
+ const extended = extend(key, nonce);
+ Salsa20.xor(out, in, counter, extended.key, extended.nonce);
+ }
+};
+
+/// The XSalsa20 stream cipher, combined with the Poly1305 MAC
+pub const XSalsa20Poly1305 = struct {
+ /// Authentication tag length in bytes.
+ pub const tag_length = Poly1305.mac_length;
+ /// Nonce length in bytes.
+ pub const nonce_length = XSalsa20.nonce_length;
+ /// Key length in bytes.
+ pub const key_length = XSalsa20.key_length;
+
+ /// c: ciphertext: output buffer should be of size m.len
+ /// tag: authentication tag: output MAC
+ /// m: message
+ /// ad: Associated Data
+ /// npub: public nonce
+ /// k: private key
+ pub fn encrypt(c: []u8, tag: *[tag_length]u8, m: []const u8, ad: []const u8, npub: [nonce_length]u8, k: [key_length]u8) void {
+ debug.assert(c.len == m.len);
+ const extended = extend(k, npub);
+ var block0 = [_]u8{0} ** 64;
+ const mlen0 = math.min(32, m.len);
+ mem.copy(u8, block0[32..][0..mlen0], m[0..mlen0]);
+ Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce);
+ mem.copy(u8, c[0..mlen0], block0[32..][0..mlen0]);
+ Salsa20.xor(c[mlen0..], m[mlen0..], 1, extended.key, extended.nonce);
+ var mac = Poly1305.init(block0[0..32]);
+ mac.update(ad);
+ mac.update(c);
+ mac.final(tag);
+ }
+
+ /// m: message: output buffer should be of size c.len
+ /// c: ciphertext
+ /// tag: authentication tag
+ /// ad: Associated Data
+ /// npub: public nonce
+ /// k: private key
+ pub fn decrypt(m: []u8, c: []const u8, tag: [tag_length]u8, ad: []const u8, npub: [nonce_length]u8, k: [key_length]u8) !void {
+ debug.assert(c.len == m.len);
+ const extended = extend(k, npub);
+ var block0 = [_]u8{0} ** 64;
+ const mlen0 = math.min(32, c.len);
+ mem.copy(u8, block0[32..][0..mlen0], c[0..mlen0]);
+ Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce);
+ var mac = Poly1305.init(block0[0..32]);
+ mac.update(ad);
+ mac.update(c);
+ var computedTag: [tag_length]u8 = undefined;
+ mac.final(&computedTag);
+ var acc: u8 = 0;
+ for (computedTag) |_, i| {
+ acc |= computedTag[i] ^ tag[i];
+ }
+ if (acc != 0) {
+ mem.secureZero(u8, &computedTag);
+ return error.AuthenticationFailed;
+ }
+ mem.copy(u8, m[0..mlen0], block0[32..][0..mlen0]);
+ Salsa20.xor(m[mlen0..], c[mlen0..], 1, extended.key, extended.nonce);
+ }
+};
+
+/// NaCl-compatible secretbox API.
+///
+/// A secretbox contains both an encrypted message and an authentication tag to verify that it hasn't been tampered with.
+/// A secret key shared by all the recipients must be already known in order to use this API.
+///
+/// Nonces are 192-bit large and can safely be chosen with a random number generator.
+pub const SecretBox = struct {
+ /// Key length in bytes.
+ pub const key_length = XSalsa20Poly1305.key_length;
+ /// Nonce length in bytes.
+ pub const nonce_length = XSalsa20Poly1305.nonce_length;
+ /// Authentication tag length in bytes.
+ pub const tag_length = XSalsa20Poly1305.tag_length;
+
+ /// Encrypt and authenticate `m` using a nonce `npub` and a key `k`.
+ /// `c` must be exactly `tag_length` longer than `m`, as it will store both the ciphertext and the authentication tag.
+ pub fn seal(c: []u8, m: []const u8, npub: [nonce_length]u8, k: [key_length]u8) void {
+ debug.assert(c.len == tag_length + m.len);
+ XSalsa20Poly1305.encrypt(c[tag_length..], c[0..tag_length], m, "", npub, k);
+ }
+
+ /// Verify and decrypt `c` using a nonce `npub` and a key `k`.
+ /// `m` must be exactly `tag_length` smaller than `c`, as `c` includes an authentication tag in addition to the encrypted message.
+ pub fn open(m: []u8, c: []const u8, npub: [nonce_length]u8, k: [key_length]u8) !void {
+ if (c.len < tag_length) {
+ return error.AuthenticationFailed;
+ }
+ debug.assert(m.len == c.len - tag_length);
+ return XSalsa20Poly1305.decrypt(m, c[tag_length..], c[0..tag_length].*, "", npub, k);
+ }
+};
+
+/// NaCl-compatible box API.
+///
+/// A box contains both an encrypted message and an authentication tag to verify that it hasn't been tampered with.
+/// This construction uses public-key cryptography. A shared secret doesn't have to be known in advance by both parties.
+/// Instead, a message is encrypted using a sender's secret key and a recipient's public key,
+/// and is decrypted using the recipient's secret key and the sender's public key.
+///
+/// Nonces are 192-bit large and can safely be chosen with a random number generator.
+pub const Box = struct {
+ /// Public key length in bytes.
+ pub const public_length = X25519.public_length;
+ /// Secret key length in bytes.
+ pub const secret_length = X25519.secret_length;
+ /// Shared key length in bytes.
+ pub const shared_length = XSalsa20Poly1305.key_length;
+ /// Seed (for key pair creation) length in bytes.
+ pub const seed_length = X25519.seed_length;
+ /// Nonce length in bytes.
+ pub const nonce_length = XSalsa20Poly1305.nonce_length;
+ /// Authentication tag length in bytes.
+ pub const tag_length = XSalsa20Poly1305.tag_length;
+
+ /// A key pair.
+ pub const KeyPair = X25519.KeyPair;
+
+    /// Compute a secret suitable for `secretbox` given a recipient's public key and a sender's secret key.
+ pub fn createSharedSecret(public_key: [public_length]u8, secret_key: [secret_length]u8) ![shared_length]u8 {
+ const p = try X25519.scalarmult(secret_key, public_key);
+ const zero = [_]u8{0} ** 16;
+ return Salsa20Impl.hsalsa20(zero, p);
+ }
+
+ /// Encrypt and authenticate a message using a recipient's public key `public_key` and a sender's `secret_key`.
+ pub fn seal(c: []u8, m: []const u8, npub: [nonce_length]u8, public_key: [public_length]u8, secret_key: [secret_length]u8) !void {
+ const shared_key = try createSharedSecret(public_key, secret_key);
+ return SecretBox.seal(c, m, npub, shared_key);
+ }
+
+    /// Verify and decrypt a message using a recipient's secret key `secret_key` and a sender's `public_key`.
+ pub fn open(m: []u8, c: []const u8, npub: [nonce_length]u8, public_key: [public_length]u8, secret_key: [secret_length]u8) !void {
+ const shared_key = try createSharedSecret(public_key, secret_key);
+ return SecretBox.open(m, c, npub, shared_key);
+ }
+};
+
+/// libsodium-compatible sealed boxes
+///
+/// Sealed boxes are designed to anonymously send messages to a recipient given their public key.
+/// Only the recipient can decrypt these messages, using their private key.
+/// While the recipient can verify the integrity of the message, it cannot verify the identity of the sender.
+///
+/// A message is encrypted using an ephemeral key pair, whose secret part is destroyed right after the encryption process.
+pub const SealedBox = struct {
+ pub const public_length = Box.public_length;
+ pub const secret_length = Box.secret_length;
+ pub const seed_length = Box.seed_length;
+ pub const seal_length = Box.public_length + Box.tag_length;
+
+ /// A key pair.
+ pub const KeyPair = Box.KeyPair;
+
+ fn createNonce(pk1: [public_length]u8, pk2: [public_length]u8) [Box.nonce_length]u8 {
+ var hasher = Blake2b(Box.nonce_length * 8).init(.{});
+ hasher.update(&pk1);
+ hasher.update(&pk2);
+ var nonce: [Box.nonce_length]u8 = undefined;
+ hasher.final(&nonce);
+ return nonce;
+ }
+
+ /// Encrypt a message `m` for a recipient whose public key is `public_key`.
+ /// `c` must be `seal_length` bytes larger than `m`, so that the required metadata can be added.
+ pub fn seal(c: []u8, m: []const u8, public_key: [public_length]u8) !void {
+ debug.assert(c.len == m.len + seal_length);
+ var ekp = try KeyPair.create(null);
+ const nonce = createNonce(ekp.public_key, public_key);
+ mem.copy(u8, c[0..public_length], ekp.public_key[0..]);
+ try Box.seal(c[Box.public_length..], m, nonce, public_key, ekp.secret_key);
+ mem.secureZero(u8, ekp.secret_key[0..]);
+ }
+
+ /// Decrypt a message using a key pair.
+ /// `m` must be exactly `seal_length` bytes smaller than `c`, as `c` also includes metadata.
+ pub fn open(m: []u8, c: []const u8, keypair: KeyPair) !void {
+ if (c.len < seal_length) {
+ return error.AuthenticationFailed;
+ }
+ const epk = c[0..public_length];
+ const nonce = createNonce(epk.*, keypair.public_key);
+ return Box.open(m, c[public_length..], nonce, epk.*, keypair.secret_key);
+ }
+};
+
+const htest = @import("test.zig");
+
+test "(x)salsa20" {
+ const key = [_]u8{0x69} ** 32;
+ const nonce = [_]u8{0x42} ** 8;
+ const msg = [_]u8{0} ** 20;
+ var c: [msg.len]u8 = undefined;
+
+ Salsa20.xor(&c, msg[0..], 0, key, nonce);
+ htest.assertEqual("30ff9933aa6534ff5207142593cd1fca4b23bdd8", c[0..]);
+
+ const extended_nonce = [_]u8{0x42} ** 24;
+ XSalsa20.xor(&c, msg[0..], 0, key, extended_nonce);
+ htest.assertEqual("b4ab7d82e750ec07644fa3281bce6cd91d4243f9", c[0..]);
+}
+
+test "xsalsa20poly1305" {
+ var msg: [100]u8 = undefined;
+ var msg2: [msg.len]u8 = undefined;
+ var c: [msg.len]u8 = undefined;
+ var key: [XSalsa20Poly1305.key_length]u8 = undefined;
+ var nonce: [XSalsa20Poly1305.nonce_length]u8 = undefined;
+ var tag: [XSalsa20Poly1305.tag_length]u8 = undefined;
+ try crypto.randomBytes(&msg);
+ try crypto.randomBytes(&key);
+ try crypto.randomBytes(&nonce);
+
+ XSalsa20Poly1305.encrypt(c[0..], &tag, msg[0..], "ad", nonce, key);
+ try XSalsa20Poly1305.decrypt(msg2[0..], c[0..], tag, "ad", nonce, key);
+}
+
+test "xsalsa20poly1305 secretbox" {
+ var msg: [100]u8 = undefined;
+ var msg2: [msg.len]u8 = undefined;
+ var key: [XSalsa20Poly1305.key_length]u8 = undefined;
+ var nonce: [Box.nonce_length]u8 = undefined;
+ var boxed: [msg.len + Box.tag_length]u8 = undefined;
+ try crypto.randomBytes(&msg);
+ try crypto.randomBytes(&key);
+ try crypto.randomBytes(&nonce);
+
+ SecretBox.seal(boxed[0..], msg[0..], nonce, key);
+ try SecretBox.open(msg2[0..], boxed[0..], nonce, key);
+}
+
+test "xsalsa20poly1305 box" {
+ var msg: [100]u8 = undefined;
+ var msg2: [msg.len]u8 = undefined;
+ var nonce: [Box.nonce_length]u8 = undefined;
+ var boxed: [msg.len + Box.tag_length]u8 = undefined;
+ try crypto.randomBytes(&msg);
+ try crypto.randomBytes(&nonce);
+
+ var kp1 = try Box.KeyPair.create(null);
+ var kp2 = try Box.KeyPair.create(null);
+ try Box.seal(boxed[0..], msg[0..], nonce, kp1.public_key, kp2.secret_key);
+ try Box.open(msg2[0..], boxed[0..], nonce, kp2.public_key, kp1.secret_key);
+}
+
+test "xsalsa20poly1305 sealedbox" {
+ var msg: [100]u8 = undefined;
+ var msg2: [msg.len]u8 = undefined;
+ var boxed: [msg.len + SealedBox.seal_length]u8 = undefined;
+ try crypto.randomBytes(&msg);
+
+ var kp = try Box.KeyPair.create(null);
+ try SealedBox.seal(boxed[0..], msg[0..], kp.public_key);
+ try SealedBox.open(msg2[0..], boxed[0..], kp);
+}
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index 9f56721214..983dc6f9fd 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -937,6 +937,16 @@ pub const Verdaux = switch (@sizeOf(usize)) {
8 => Elf64_Verdaux,
else => @compileError("expected pointer size of 32 or 64"),
};
+pub const Addr = switch (@sizeOf(usize)) {
+ 4 => Elf32_Addr,
+ 8 => Elf64_Addr,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
+pub const Half = switch (@sizeOf(usize)) {
+ 4 => Elf32_Half,
+ 8 => Elf64_Half,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
/// Machine architectures
/// See current registered ELF machine architectures at:
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 80dc94d184..8fd7df0d6f 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -467,6 +467,7 @@ pub const Loop = struct {
}};
_ = os.poll(&pfd, -1) catch |poll_err| switch (poll_err) {
error.SystemResources,
+ error.NetworkSubsystemFailed,
error.Unexpected,
=> {
// Even poll() didn't work. The best we can do now is sleep for a
@@ -772,7 +773,7 @@ pub const Loop = struct {
self.delay_queue.waiters.insert(&entry);
// Speculatively wake up the timer thread when we add a new entry.
- // If the timer thread is sleeping on a longer entry, we need to
+ // If the timer thread is sleeping on a longer entry, we need to
// interrupt it so that our entry can be expired in time.
self.delay_queue.event.set();
}
@@ -784,7 +785,7 @@ pub const Loop = struct {
thread: *std.Thread,
event: std.AutoResetEvent,
is_running: bool,
-
+
/// Initialize the delay queue by spawning the timer thread
/// and starting any timer resources.
fn init(self: *DelayQueue) !void {
@@ -799,7 +800,7 @@ pub const Loop = struct {
};
}
- /// Entry point for the timer thread
+ /// Entry point for the timer thread
/// which waits for timer entries to expire and reschedules them.
fn run(self: *DelayQueue) void {
const loop = @fieldParentPtr(Loop, "delay_queue", self);
@@ -847,12 +848,12 @@ pub const Loop = struct {
const entry = self.peekExpiringEntry() orelse return null;
if (entry.expires > now)
return null;
-
+
assert(self.entries.remove(&entry.node));
return entry;
}
-
- /// Returns an estimate for the amount of time
+
+ /// Returns an estimate for the amount of time
/// to wait until the next waiting entry expires.
fn nextExpire(self: *Waiters) ?u64 {
const entry = self.peekExpiringEntry() orelse return null;
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index f8bc707bd3..c3f0209e16 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -496,6 +496,7 @@ pub fn formatType(
const buffer = [_]u8{'.'} ++ @tagName(value);
return formatType(buffer, fmt, options, writer, max_depth);
},
+ .Null => return formatBuf("null", options, writer),
else => @compileError("Unable to format type '" ++ @typeName(T) ++ "'"),
}
}
@@ -1908,3 +1909,9 @@ test "sci float padding" {
try testFmt("center-pad: *3.141e+00*\n", "center-pad: {e:*^11.3}\n", .{number});
try testFmt("right-pad: 3.141e+00**\n", "right-pad: {e:*<11.3}\n", .{number});
}
+
+test "null" {
+ const inst = null;
+ try testFmt("null", "{}", .{inst});
+}
+
diff --git a/lib/std/io/auto_indenting_stream.zig b/lib/std/io/auto_indenting_stream.zig
index d08878e851..bea4af7519 100644
--- a/lib/std/io/auto_indenting_stream.zig
+++ b/lib/std/io/auto_indenting_stream.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
diff --git a/lib/std/io/change_detection_stream.zig b/lib/std/io/change_detection_stream.zig
index 5ba2bb3c10..52c3372094 100644
--- a/lib/std/io/change_detection_stream.zig
+++ b/lib/std/io/change_detection_stream.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
diff --git a/lib/std/io/find_byte_out_stream.zig b/lib/std/io/find_byte_out_stream.zig
index b8689b7992..70e1e190b1 100644
--- a/lib/std/io/find_byte_out_stream.zig
+++ b/lib/std/io/find_byte_out_stream.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
const std = @import("../std.zig");
const io = std.io;
const assert = std.debug.assert;
diff --git a/lib/std/math.zig b/lib/std/math.zig
index f0c4f74d73..ac1ff2b071 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -405,7 +405,14 @@ test "math.shr" {
/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: anytype) T {
- if (@typeInfo(T).Int.is_signed) {
+ if (@typeInfo(T) == .Vector) {
+ const C = @typeInfo(T).Vector.child;
+ if (@typeInfo(C).Int.is_signed) {
+ @compileError("cannot rotate signed integers");
+ }
+ const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits));
+ return (x >> @splat(@typeInfo(T).Vector.len, ar)) | (x << @splat(@typeInfo(T).Vector.len, 1 + ~ar));
+ } else if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
const ar = @mod(r, @typeInfo(T).Int.bits);
@@ -419,12 +426,21 @@ test "math.rotr" {
testing.expect(rotr(u8, 0b00000001, @as(usize, 8)) == 0b00000001);
testing.expect(rotr(u8, 0b00000001, @as(usize, 4)) == 0b00010000);
testing.expect(rotr(u8, 0b00000001, @as(isize, -1)) == 0b00000010);
+ testing.expect(rotr(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1}, @as(usize, 1))[0] == @as(u32, 1) << 31);
+ testing.expect(rotr(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1}, @as(isize, -1))[0] == @as(u32, 1) << 1);
}
/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: anytype) T {
- if (@typeInfo(T).Int.is_signed) {
+ if (@typeInfo(T) == .Vector) {
+ const C = @typeInfo(T).Vector.child;
+ if (@typeInfo(C).Int.is_signed) {
+ @compileError("cannot rotate signed integers");
+ }
+ const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits));
+ return (x << @splat(@typeInfo(T).Vector.len, ar)) | (x >> @splat(@typeInfo(T).Vector.len, 1 +% ~ar));
+ } else if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
const ar = @mod(r, @typeInfo(T).Int.bits);
@@ -438,6 +454,8 @@ test "math.rotl" {
testing.expect(rotl(u8, 0b00000001, @as(usize, 8)) == 0b00000001);
testing.expect(rotl(u8, 0b00000001, @as(usize, 4)) == 0b00010000);
testing.expect(rotl(u8, 0b00000001, @as(isize, -1)) == 0b10000000);
+ testing.expect(rotl(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1 << 31}, @as(usize, 1))[0] == 1);
+ testing.expect(rotl(std.meta.Vector(1, u32), std.meta.Vector(1, u32){1 << 31}, @as(isize, -1))[0] == @as(u32, 1) << 30);
}
pub fn Log2Int(comptime T: type) type {
@@ -1141,4 +1159,3 @@ test "math.comptime" {
comptime const v = sin(@as(f32, 1)) + ln(@as(f32, 5));
testing.expect(v == sin(@as(f32, 1)) + ln(@as(f32, 5)));
}
-
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index dd1c736626..fec59fe13f 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -16,6 +16,10 @@ const testing = std.testing;
/// https://github.com/ziglang/zig/issues/2564
pub const page_size = switch (builtin.arch) {
.wasm32, .wasm64 => 64 * 1024,
+ .aarch64 => switch (builtin.os.tag) {
+ .macos, .ios, .watchos, .tvos => 16 * 1024,
+ else => 4 * 1024,
+ },
else => 4 * 1024,
};
@@ -907,7 +911,7 @@ pub fn lastIndexOf(comptime T: type, haystack: []const T, needle: []const T) ?us
if (needle.len > haystack.len) return null;
if (needle.len == 0) return haystack.len;
- if (!meta.trait.hasUniqueRepresentation(T) or haystack.len < 32 or needle.len <= 2)
+ if (!meta.trait.hasUniqueRepresentation(T) or haystack.len < 52 or needle.len <= 4)
return lastIndexOfLinear(T, haystack, needle);
const haystack_bytes = sliceAsBytes(haystack);
@@ -951,10 +955,10 @@ pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, nee
}
test "mem.indexOf" {
- testing.expect(indexOf(u8, "one two three four five six seven eight nine ten", "three four").? == 8);
- testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten", "three four").? == 8);
- testing.expect(indexOf(u8, "one two three four five six seven eight nine ten", "two two") == null);
- testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten", "two two") == null);
+ testing.expect(indexOf(u8, "one two three four five six seven eight nine ten eleven", "three four").? == 8);
+ testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten eleven", "three four").? == 8);
+ testing.expect(indexOf(u8, "one two three four five six seven eight nine ten eleven", "two two") == null);
+ testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten eleven", "two two") == null);
testing.expect(indexOf(u8, "one two three four five six seven eight nine ten", "").? == 0);
testing.expect(lastIndexOf(u8, "one two three four five six seven eight nine ten", "").? == 48);
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 5ac22948fa..c041c55e19 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -11,11 +11,7 @@ const mem = std.mem;
const os = std.os;
const fs = std.fs;
-test "" {
- _ = @import("net/test.zig");
-}
-
-const has_unix_sockets = @hasDecl(os, "sockaddr_un");
+pub const has_unix_sockets = @hasDecl(os, "sockaddr_un");
pub const Address = extern union {
any: os.sockaddr,
@@ -610,7 +606,7 @@ pub fn connectUnixSocket(path: []const u8) !fs.File {
os.SOCK_STREAM | os.SOCK_CLOEXEC | opt_non_block,
0,
);
- errdefer os.close(sockfd);
+ errdefer os.closeSocket(sockfd);
var addr = try std.net.Address.initUnix(path);
@@ -629,7 +625,7 @@ pub fn connectUnixSocket(path: []const u8) !fs.File {
fn if_nametoindex(name: []const u8) !u32 {
var ifr: os.ifreq = undefined;
var sockfd = try os.socket(os.AF_UNIX, os.SOCK_DGRAM | os.SOCK_CLOEXEC, 0);
- defer os.close(sockfd);
+ defer os.closeSocket(sockfd);
std.mem.copy(u8, &ifr.ifrn.name, name);
ifr.ifrn.name[name.len] = 0;
@@ -677,7 +673,7 @@ pub fn tcpConnectToAddress(address: Address) !fs.File {
const sock_flags = os.SOCK_STREAM | nonblock |
(if (builtin.os.tag == .windows) 0 else os.SOCK_CLOEXEC);
const sockfd = try os.socket(address.any.family, sock_flags, os.IPPROTO_TCP);
- errdefer os.close(sockfd);
+ errdefer os.closeSocket(sockfd);
if (std.io.is_async) {
const loop = std.event.Loop.instance orelse return error.WouldBlock;
@@ -912,7 +908,7 @@ fn linuxLookupName(
var prefixlen: i32 = 0;
const sock_flags = os.SOCK_DGRAM | os.SOCK_CLOEXEC;
if (os.socket(addr.addr.any.family, sock_flags, os.IPPROTO_UDP)) |fd| syscalls: {
- defer os.close(fd);
+ defer os.closeSocket(fd);
os.connect(fd, da, dalen) catch break :syscalls;
key |= DAS_USABLE;
os.getsockname(fd, sa, &salen) catch break :syscalls;
@@ -1392,7 +1388,7 @@ fn resMSendRc(
},
else => |e| return e,
};
- defer os.close(fd);
+ defer os.closeSocket(fd);
try os.bind(fd, &sa.any, sl);
// Past this point, there are no errors. Each individual query will
@@ -1546,16 +1542,14 @@ fn dnsParseCallback(ctx: dpc_ctx, rr: u8, data: []const u8, packet: []const u8)
if (data.len != 4) return error.InvalidDnsARecord;
const new_addr = try ctx.addrs.addOne();
new_addr.* = LookupAddr{
- // TODO slice [0..4] to make this *[4]u8 without @ptrCast
- .addr = Address.initIp4(@ptrCast(*const [4]u8, data.ptr).*, ctx.port),
+ .addr = Address.initIp4(data[0..4].*, ctx.port),
};
},
os.RR_AAAA => {
if (data.len != 16) return error.InvalidDnsAAAARecord;
const new_addr = try ctx.addrs.addOne();
new_addr.* = LookupAddr{
- // TODO slice [0..16] to make this *[16]u8 without @ptrCast
- .addr = Address.initIp6(@ptrCast(*const [16]u8, data.ptr).*, ctx.port, 0, 0),
+ .addr = Address.initIp6(data[0..16].*, ctx.port, 0, 0),
};
},
os.RR_CNAME => {
@@ -1573,19 +1567,19 @@ fn dnsParseCallback(ctx: dpc_ctx, rr: u8, data: []const u8, packet: []const u8)
pub const StreamServer = struct {
/// Copied from `Options` on `init`.
- kernel_backlog: u32,
+ kernel_backlog: u31,
reuse_address: bool,
/// `undefined` until `listen` returns successfully.
listen_address: Address,
- sockfd: ?os.fd_t,
+ sockfd: ?os.socket_t,
pub const Options = struct {
/// How many connections the kernel will accept on the application's behalf.
/// If more than this many connections pool in the kernel, clients will start
/// seeing "Connection refused".
- kernel_backlog: u32 = 128,
+ kernel_backlog: u31 = 128,
/// Enable SO_REUSEADDR on the socket.
reuse_address: bool = false,
@@ -1616,13 +1610,13 @@ pub const StreamServer = struct {
const sockfd = try os.socket(address.any.family, sock_flags, proto);
self.sockfd = sockfd;
errdefer {
- os.close(sockfd);
+ os.closeSocket(sockfd);
self.sockfd = null;
}
if (self.reuse_address) {
try os.setsockopt(
- self.sockfd.?,
+ sockfd,
os.SOL_SOCKET,
os.SO_REUSEADDR,
&mem.toBytes(@as(c_int, 1)),
@@ -1640,7 +1634,7 @@ pub const StreamServer = struct {
/// not listening.
pub fn close(self: *StreamServer) void {
if (self.sockfd) |fd| {
- os.close(fd);
+ os.closeSocket(fd);
self.sockfd = null;
self.listen_address = undefined;
}
@@ -1670,6 +1664,14 @@ pub const StreamServer = struct {
/// Permission to create a socket of the specified type and/or
/// protocol is denied.
PermissionDenied,
+
+ FileDescriptorNotASocket,
+
+ ConnectionResetByPeer,
+
+ NetworkSubsystemFailed,
+
+ OperationNotSupported,
} || os.UnexpectedError;
pub const Connection = struct {
@@ -1701,3 +1703,7 @@ pub const StreamServer = struct {
}
}
};
+
+test "" {
+ _ = @import("net/test.zig");
+}
diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig
index 815ee81d7f..9f40bb5a3b 100644
--- a/lib/std/net/test.zig
+++ b/lib/std/net/test.zig
@@ -95,22 +95,81 @@ test "parse and render IPv4 addresses" {
}
test "resolve DNS" {
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
if (std.builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
- if (builtin.os.tag == .wasi) {
- // DNS resolution not implemented on Windows yet.
- return error.SkipZigTest;
+ defer {
+ if (std.builtin.os.tag == .windows) {
+ std.os.windows.WSACleanup() catch unreachable;
+ }
}
- const address_list = net.getAddressList(testing.allocator, "example.com", 80) catch |err| switch (err) {
+ // Resolve localhost, this should not fail.
+ {
+ const localhost_v4 = try net.Address.parseIp("127.0.0.1", 80);
+ const localhost_v6 = try net.Address.parseIp("::2", 80);
+
+ const result = try net.getAddressList(testing.allocator, "localhost", 80);
+ defer result.deinit();
+ for (result.addrs) |addr| {
+ if (addr.eql(localhost_v4) or addr.eql(localhost_v6)) break;
+ } else @panic("unexpected address for localhost");
+ }
+
+ {
// The tests are required to work even when there is no Internet connection,
// so some of these errors we must accept and skip the test.
- error.UnknownHostName => return error.SkipZigTest,
- error.TemporaryNameServerFailure => return error.SkipZigTest,
- else => return err,
+ const result = net.getAddressList(testing.allocator, "example.com", 80) catch |err| switch (err) {
+ error.UnknownHostName => return error.SkipZigTest,
+ error.TemporaryNameServerFailure => return error.SkipZigTest,
+ else => return err,
+ };
+ result.deinit();
+ }
+}
+
+test "listen on a port, send bytes, receive bytes" {
+ if (builtin.single_threaded) return error.SkipZigTest;
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+ if (std.builtin.os.tag == .windows) {
+ _ = try std.os.windows.WSAStartup(2, 2);
+ }
+ defer {
+ if (std.builtin.os.tag == .windows) {
+ std.os.windows.WSACleanup() catch unreachable;
+ }
+ }
+
+ // Try only the IPv4 variant as some CI builders have no IPv6 localhost
+ // configured.
+ const localhost = try net.Address.parseIp("127.0.0.1", 8080);
+
+ var server = net.StreamServer.init(.{});
+ defer server.deinit();
+
+ try server.listen(localhost);
+
+ const S = struct {
+ fn clientFn(server_address: net.Address) !void {
+ const socket = try net.tcpConnectToAddress(server_address);
+ defer socket.close();
+
+ _ = try socket.writer().writeAll("Hello world!");
+ }
};
- address_list.deinit();
+
+ const t = try std.Thread.spawn(server.listen_address, S.clientFn);
+ defer t.wait();
+
+ var client = try server.accept();
+ var buf: [16]u8 = undefined;
+ const n = try client.file.reader().read(&buf);
+
+ testing.expectEqual(@as(usize, 12), n);
+ testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
}
test "listen on a port, send bytes, receive bytes" {
diff --git a/lib/std/os.zig b/lib/std/os.zig
index be1fe3e5f4..5aa81ac54b 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -2687,6 +2687,14 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t
}
}
+pub fn closeSocket(sock: socket_t) void {
+ if (builtin.os.tag == .windows) {
+ windows.closesocket(sock) catch unreachable;
+ } else {
+ close(sock);
+ }
+}
+
pub const BindError = error{
/// The address is protected, and the user is not the superuser.
/// For UNIX domain sockets: Search permission is denied on a component
@@ -2731,8 +2739,8 @@ pub const BindError = error{
/// addr is `*const T` where T is one of the sockaddr
pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!void {
- const rc = system.bind(sock, addr, len);
if (builtin.os.tag == .windows) {
+ const rc = windows.bind(sock, addr, len);
if (rc == windows.ws2_32.SOCKET_ERROR) {
switch (windows.ws2_32.WSAGetLastError()) {
.WSANOTINITIALISED => unreachable, // not initialized WSA
@@ -2750,6 +2758,7 @@ pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!voi
}
return;
} else {
+ const rc = system.bind(sock, addr, len);
switch (errno(rc)) {
0 => return,
EACCES => return error.AccessDenied,
@@ -2800,8 +2809,8 @@ const ListenError = error{
} || UnexpectedError;
pub fn listen(sock: socket_t, backlog: u31) ListenError!void {
- const rc = system.listen(sock, backlog);
if (builtin.os.tag == .windows) {
+ const rc = windows.listen(sock, backlog);
if (rc == windows.ws2_32.SOCKET_ERROR) {
switch (windows.ws2_32.WSAGetLastError()) {
.WSANOTINITIALISED => unreachable, // not initialized WSA
@@ -2818,6 +2827,7 @@ pub fn listen(sock: socket_t, backlog: u31) ListenError!void {
}
return;
} else {
+ const rc = system.listen(sock, backlog);
switch (errno(rc)) {
0 => return,
EADDRINUSE => return error.AddressInUse,
@@ -2905,6 +2915,8 @@ pub fn accept(
const accepted_sock = while (true) {
const rc = if (have_accept4)
system.accept4(sock, addr, addr_size, flags)
+ else if (builtin.os.tag == .windows)
+ windows.accept(sock, addr, addr_size)
else
system.accept(sock, addr, addr_size);
@@ -3077,8 +3089,8 @@ pub const GetSockNameError = error{
} || UnexpectedError;
pub fn getsockname(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSockNameError!void {
- const rc = system.getsockname(sock, addr, addrlen);
if (builtin.os.tag == .windows) {
+ const rc = windows.getsockname(sock, addr, addrlen);
if (rc == windows.ws2_32.SOCKET_ERROR) {
switch (windows.ws2_32.WSAGetLastError()) {
.WSANOTINITIALISED => unreachable,
@@ -3091,6 +3103,7 @@ pub fn getsockname(sock: socket_t, addr: *sockaddr, addrlen: *socklen_t) GetSock
}
return;
} else {
+ const rc = system.getsockname(sock, addr, addrlen);
switch (errno(rc)) {
0 => return,
else => |err| return unexpectedErrno(err),
@@ -5378,22 +5391,41 @@ pub const SetSockOptError = error{
/// Insufficient resources are available in the system to complete the call.
SystemResources,
+
+ NetworkSubsystemFailed,
+ FileDescriptorNotASocket,
+ SocketNotBound,
} || UnexpectedError;
/// Set a socket's options.
-pub fn setsockopt(fd: fd_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void {
- switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)))) {
- 0 => {},
- EBADF => unreachable, // always a race condition
- ENOTSOCK => unreachable, // always a race condition
- EINVAL => unreachable,
- EFAULT => unreachable,
- EDOM => return error.TimeoutTooBig,
- EISCONN => return error.AlreadyConnected,
- ENOPROTOOPT => return error.InvalidProtocolOption,
- ENOMEM => return error.SystemResources,
- ENOBUFS => return error.SystemResources,
- else => |err| return unexpectedErrno(err),
+pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void {
+ if (builtin.os.tag == .windows) {
+ const rc = windows.ws2_32.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len));
+ if (rc == windows.ws2_32.SOCKET_ERROR) {
+ switch (windows.ws2_32.WSAGetLastError()) {
+ .WSANOTINITIALISED => unreachable,
+ .WSAENETDOWN => return error.NetworkSubsystemFailed,
+ .WSAEFAULT => unreachable,
+ .WSAENOTSOCK => return error.FileDescriptorNotASocket,
+ .WSAEINVAL => return error.SocketNotBound,
+ else => |err| return windows.unexpectedWSAError(err),
+ }
+ }
+ return;
+ } else {
+ switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)))) {
+ 0 => {},
+ EBADF => unreachable, // always a race condition
+ ENOTSOCK => unreachable, // always a race condition
+ EINVAL => unreachable,
+ EFAULT => unreachable,
+ EDOM => return error.TimeoutTooBig,
+ EISCONN => return error.AlreadyConnected,
+ ENOPROTOOPT => return error.InvalidProtocolOption,
+ ENOMEM => return error.SystemResources,
+ ENOBUFS => return error.SystemResources,
+ else => |err| return unexpectedErrno(err),
+ }
}
}
diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig
index 19cc3be775..8dbf21fcf2 100644
--- a/lib/std/os/bits/linux.zig
+++ b/lib/std/os/bits/linux.zig
@@ -1200,6 +1200,8 @@ pub const IORING_FEAT_NODROP = 1 << 1;
pub const IORING_FEAT_SUBMIT_STABLE = 1 << 2;
pub const IORING_FEAT_RW_CUR_POS = 1 << 3;
pub const IORING_FEAT_CUR_PERSONALITY = 1 << 4;
+pub const IORING_FEAT_FAST_POLL = 1 << 5;
+pub const IORING_FEAT_POLL_32BITS = 1 << 6;
// io_uring_params.flags
@@ -1252,6 +1254,9 @@ pub const io_sqring_offsets = extern struct {
/// needs io_uring_enter wakeup
pub const IORING_SQ_NEED_WAKEUP = 1 << 0;
+/// kernel has cqes waiting beyond the cq ring
+pub const IORING_SQ_CQ_OVERFLOW = 1 << 1;
+
pub const io_cqring_offsets = extern struct {
head: u32,
tail: u32,
@@ -1263,48 +1268,19 @@ pub const io_cqring_offsets = extern struct {
};
pub const io_uring_sqe = extern struct {
- pub const union1 = extern union {
- off: u64,
- addr2: u64,
- };
-
- pub const union2 = extern union {
- rw_flags: kernel_rwf,
- fsync_flags: u32,
- poll_events: u16,
- sync_range_flags: u32,
- msg_flags: u32,
- timeout_flags: u32,
- accept_flags: u32,
- cancel_flags: u32,
- open_flags: u32,
- statx_flags: u32,
- fadvise_flags: u32,
- };
-
- pub const union3 = extern union {
- struct1: extern struct {
- /// index into fixed buffers, if used
- buf_index: u16,
-
- /// personality to use, if used
- personality: u16,
- },
- __pad2: [3]u64,
- };
opcode: IORING_OP,
flags: u8,
ioprio: u16,
fd: i32,
-
- union1: union1,
+ off: u64,
addr: u64,
len: u32,
-
- union2: union2,
+ rw_flags: u32,
user_data: u64,
-
- union3: union3,
+ buf_index: u16,
+ personality: u16,
+ splice_fd_in: i32,
+ __pad2: [2]u64
};
pub const IOSQE_BIT = extern enum(u8) {
@@ -1313,7 +1289,8 @@ pub const IOSQE_BIT = extern enum(u8) {
IO_LINK,
IO_HARDLINK,
ASYNC,
-
+ BUFFER_SELECT,
+
_,
};
@@ -1332,7 +1309,10 @@ pub const IOSQE_IO_LINK = 1 << @enumToInt(IOSQE_BIT.IO_LINK);
pub const IOSQE_IO_HARDLINK = 1 << @enumToInt(IOSQE_BIT.IO_HARDLINK);
/// always go async
-pub const IOSQE_ASYNC = 1 << IOSQE_BIT.ASYNC;
+pub const IOSQE_ASYNC = 1 << @enumToInt(IOSQE_BIT.ASYNC);
+
+/// select buffer from buf_group
+pub const IOSQE_BUFFER_SELECT = 1 << @enumToInt(IOSQE_BIT.BUFFER_SELECT);
pub const IORING_OP = extern enum(u8) {
NOP,
@@ -1365,6 +1345,10 @@ pub const IORING_OP = extern enum(u8) {
RECV,
OPENAT2,
EPOLL_CTL,
+ SPLICE,
+ PROVIDE_BUFFERS,
+ REMOVE_BUFFERS,
+ TEE,
_,
};
diff --git a/lib/std/os/bits/linux/mips.zig b/lib/std/os/bits/linux/mips.zig
index 4b81c6e622..f3e590fcea 100644
--- a/lib/std/os/bits/linux/mips.zig
+++ b/lib/std/os/bits/linux/mips.zig
@@ -383,6 +383,9 @@ pub const SYS = extern enum(usize) {
statx = Linux + 366,
rseq = Linux + 367,
io_pgetevents = Linux + 368,
+ io_uring_setup = Linux + 425,
+ io_uring_enter = Linux + 426,
+ io_uring_register = Linux + 427,
openat2 = Linux + 437,
pidfd_getfd = Linux + 438,
diff --git a/lib/std/os/bits/linux/powerpc64.zig b/lib/std/os/bits/linux/powerpc64.zig
index adc6c87c1a..769b7e614d 100644
--- a/lib/std/os/bits/linux/powerpc64.zig
+++ b/lib/std/os/bits/linux/powerpc64.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
const std = @import("../../../std.zig");
const linux = std.os.linux;
const socklen_t = linux.socklen_t;
diff --git a/lib/std/os/bits/openbsd.zig b/lib/std/os/bits/openbsd.zig
index 1419e7f27a..5a9dfab119 100644
--- a/lib/std/os/bits/openbsd.zig
+++ b/lib/std/os/bits/openbsd.zig
@@ -33,10 +33,10 @@ pub const Kevent = extern struct {
};
pub const dl_phdr_info = extern struct {
- dlpi_addr: usize,
+ dlpi_addr: std.elf.Addr,
dlpi_name: ?[*:0]const u8,
dlpi_phdr: [*]std.elf.Phdr,
- dlpi_phnum: u16,
+ dlpi_phnum: std.elf.Half,
};
pub const Flock = extern struct {
diff --git a/lib/std/os/bits/windows.zig b/lib/std/os/bits/windows.zig
index a92961881e..dda57208f8 100644
--- a/lib/std/os/bits/windows.zig
+++ b/lib/std/os/bits/windows.zig
@@ -256,6 +256,41 @@ pub const POLLERR = ws2_32.POLLERR;
pub const POLLHUP = ws2_32.POLLHUP;
pub const POLLNVAL = ws2_32.POLLNVAL;
+pub const SOL_SOCKET = ws2_32.SOL_SOCKET;
+
+pub const SO_DEBUG = ws2_32.SO_DEBUG;
+pub const SO_ACCEPTCONN = ws2_32.SO_ACCEPTCONN;
+pub const SO_REUSEADDR = ws2_32.SO_REUSEADDR;
+pub const SO_KEEPALIVE = ws2_32.SO_KEEPALIVE;
+pub const SO_DONTROUTE = ws2_32.SO_DONTROUTE;
+pub const SO_BROADCAST = ws2_32.SO_BROADCAST;
+pub const SO_USELOOPBACK = ws2_32.SO_USELOOPBACK;
+pub const SO_LINGER = ws2_32.SO_LINGER;
+pub const SO_OOBINLINE = ws2_32.SO_OOBINLINE;
+
+pub const SO_DONTLINGER = ws2_32.SO_DONTLINGER;
+pub const SO_EXCLUSIVEADDRUSE = ws2_32.SO_EXCLUSIVEADDRUSE;
+
+pub const SO_SNDBUF = ws2_32.SO_SNDBUF;
+pub const SO_RCVBUF = ws2_32.SO_RCVBUF;
+pub const SO_SNDLOWAT = ws2_32.SO_SNDLOWAT;
+pub const SO_RCVLOWAT = ws2_32.SO_RCVLOWAT;
+pub const SO_SNDTIMEO = ws2_32.SO_SNDTIMEO;
+pub const SO_RCVTIMEO = ws2_32.SO_RCVTIMEO;
+pub const SO_ERROR = ws2_32.SO_ERROR;
+pub const SO_TYPE = ws2_32.SO_TYPE;
+
+pub const SO_GROUP_ID = ws2_32.SO_GROUP_ID;
+pub const SO_GROUP_PRIORITY = ws2_32.SO_GROUP_PRIORITY;
+pub const SO_MAX_MSG_SIZE = ws2_32.SO_MAX_MSG_SIZE;
+pub const SO_PROTOCOL_INFOA = ws2_32.SO_PROTOCOL_INFOA;
+pub const SO_PROTOCOL_INFOW = ws2_32.SO_PROTOCOL_INFOW;
+
+pub const PVD_CONFIG = ws2_32.PVD_CONFIG;
+pub const SO_CONDITIONAL_ACCEPT = ws2_32.SO_CONDITIONAL_ACCEPT;
+
+pub const TCP_NODELAY = ws2_32.TCP_NODELAY;
+
pub const O_RDONLY = 0o0;
pub const O_WRONLY = 0o1;
pub const O_RDWR = 0o2;
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index e38d9bc10d..a126231db1 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -31,6 +31,7 @@ pub usingnamespace switch (builtin.arch) {
pub usingnamespace @import("bits.zig");
pub const tls = @import("linux/tls.zig");
pub const BPF = @import("linux/bpf.zig");
+pub usingnamespace @import("linux/io_uring.zig");
/// Set by startup code, used by `getauxval`.
pub var elf_aux_maybe: ?[*]std.elf.Auxv = null;
@@ -1003,14 +1004,14 @@ pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: [2]i32) usiz
return syscall4(.socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @ptrToInt(&fd[0]));
}
-pub fn accept(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
+pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize {
if (builtin.arch == .i386) {
return socketcall(SC_accept, &[4]usize{ fd, addr, len, 0 });
}
return accept4(fd, addr, len, 0);
}
-pub fn accept4(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t, flags: u32) usize {
+pub fn accept4(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t, flags: u32) usize {
if (builtin.arch == .i386) {
return socketcall(SC_accept4, &[4]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len), flags });
}
@@ -1279,7 +1280,7 @@ pub fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: ?*const rlimit,
@bitCast(usize, @as(isize, pid)),
@bitCast(usize, @as(isize, @enumToInt(resource))),
@ptrToInt(new_limit),
- @ptrToInt(old_limit)
+ @ptrToInt(old_limit),
);
}
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
new file mode 100644
index 0000000000..b2d42bab93
--- /dev/null
+++ b/lib/std/os/linux/io_uring.zig
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+const std = @import("../../std.zig");
+const assert = std.debug.assert;
+const builtin = std.builtin;
+const mem = std.mem;
+const net = std.net;
+const os = std.os;
+const linux = os.linux;
+const testing = std.testing;
+
+const io_uring_params = linux.io_uring_params;
+const io_uring_sqe = linux.io_uring_sqe;
+const io_uring_cqe = linux.io_uring_cqe;
+
+pub const IO_Uring = struct {
+ fd: os.fd_t = -1,
+ sq: SubmissionQueue,
+ cq: CompletionQueue,
+ flags: u32,
+ features: u32,
+
+ /// A friendly way to setup an io_uring, with default io_uring_params.
+ /// `entries` must be a power of two between 1 and 4096, although the kernel will make the final
+ /// call on how many entries the submission and completion queues will ultimately have,
+ /// see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L8027-L8050.
+ /// Matches the interface of io_uring_queue_init() in liburing.
+ pub fn init(entries: u12, flags: u32) !IO_Uring {
+ var params = mem.zeroInit(io_uring_params, .{
+ .flags = flags,
+ .sq_thread_idle = 1000
+ });
+ return try IO_Uring.init_params(entries, &params);
+ }
+
+ /// A powerful way to setup an io_uring, if you want to tweak io_uring_params such as submission
+ /// queue thread cpu affinity or thread idle timeout (the kernel and our default is 1 second).
+ /// `params` is passed by reference because the kernel needs to modify the parameters.
+ /// You may only set the `flags`, `sq_thread_cpu` and `sq_thread_idle` parameters.
+ /// Every other parameter belongs to the kernel and must be zeroed.
+ /// Matches the interface of io_uring_queue_init_params() in liburing.
+ pub fn init_params(entries: u12, p: *io_uring_params) !IO_Uring {
+ if (entries == 0) return error.EntriesZero;
+ if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;
+
+ assert(p.sq_entries == 0);
+ assert(p.cq_entries == 0);
+ assert(p.features == 0);
+ assert(p.wq_fd == 0);
+ assert(p.resv[0] == 0);
+ assert(p.resv[1] == 0);
+ assert(p.resv[2] == 0);
+
+ const res = linux.io_uring_setup(entries, p);
+ switch (linux.getErrno(res)) {
+ 0 => {},
+ linux.EFAULT => return error.ParamsOutsideAccessibleAddressSpace,
+ // The resv array contains non-zero data, p.flags contains an unsupported flag,
+ // entries out of bounds, IORING_SETUP_SQ_AFF was specified without IORING_SETUP_SQPOLL,
+ // or IORING_SETUP_CQSIZE was specified but io_uring_params.cq_entries was invalid:
+ linux.EINVAL => return error.ArgumentsInvalid,
+ linux.EMFILE => return error.ProcessFdQuotaExceeded,
+ linux.ENFILE => return error.SystemFdQuotaExceeded,
+ linux.ENOMEM => return error.SystemResources,
+ // IORING_SETUP_SQPOLL was specified but effective user ID lacks sufficient privileges,
+ // or a container seccomp policy prohibits io_uring syscalls:
+ linux.EPERM => return error.PermissionDenied,
+ linux.ENOSYS => return error.SystemOutdated,
+ else => |errno| return os.unexpectedErrno(errno)
+ }
+ const fd = @intCast(os.fd_t, res);
+ assert(fd >= 0);
+ errdefer os.close(fd);
+
+ // Kernel versions 5.4 and up use only one mmap() for the submission and completion queues.
+ // This is not an optional feature for us... if the kernel does it, we have to do it.
+ // The thinking on this by the kernel developers was that both the submission and the
+ // completion queue rings have sizes just over a power of two, but the submission queue ring
+ // is significantly smaller with u32 slots. By bundling both in a single mmap, the kernel
+ // gets the submission queue ring for free.
+ // See https://patchwork.kernel.org/patch/11115257 for the kernel patch.
+ // We do not support the double mmap() done before 5.4, because we want to keep the
+ // init/deinit mmap paths simple and because io_uring has had many bug fixes even since 5.4.
+ if ((p.features & linux.IORING_FEAT_SINGLE_MMAP) == 0) {
+ return error.SystemOutdated;
+ }
+
+ // Check that the kernel has actually set params and that "impossible is nothing".
+ assert(p.sq_entries != 0);
+ assert(p.cq_entries != 0);
+ assert(p.cq_entries >= p.sq_entries);
+
+ // From here on, we only need to read from params, so pass `p` by value as immutable.
+ // The completion queue shares the mmap with the submission queue, so pass `sq` there too.
+ var sq = try SubmissionQueue.init(fd, p.*);
+ errdefer sq.deinit();
+ var cq = try CompletionQueue.init(fd, p.*, sq);
+ errdefer cq.deinit();
+
+ // Check that our starting state is as we expect.
+ assert(sq.head.* == 0);
+ assert(sq.tail.* == 0);
+ assert(sq.mask == p.sq_entries - 1);
+ // Allow flags.* to be non-zero, since the kernel may set IORING_SQ_NEED_WAKEUP at any time.
+ assert(sq.dropped.* == 0);
+ assert(sq.array.len == p.sq_entries);
+ assert(sq.sqes.len == p.sq_entries);
+ assert(sq.sqe_head == 0);
+ assert(sq.sqe_tail == 0);
+
+ assert(cq.head.* == 0);
+ assert(cq.tail.* == 0);
+ assert(cq.mask == p.cq_entries - 1);
+ assert(cq.overflow.* == 0);
+ assert(cq.cqes.len == p.cq_entries);
+
+ return IO_Uring {
+ .fd = fd,
+ .sq = sq,
+ .cq = cq,
+ .flags = p.flags,
+ .features = p.features
+ };
+ }
+
+ pub fn deinit(self: *IO_Uring) void {
+ assert(self.fd >= 0);
+ // The mmaps depend on the fd, so the order of these calls is important:
+ self.cq.deinit();
+ self.sq.deinit();
+ os.close(self.fd);
+ self.fd = -1;
+ }
+
+ /// Returns a pointer to a vacant SQE, or an error if the submission queue is full.
+ /// We follow the implementation (and atomics) of liburing's `io_uring_get_sqe()` exactly.
+    /// However, instead of returning null we return an error to force safe handling.
+ /// Any situation where the submission queue is full tends more towards a control flow error,
+ /// and the null return in liburing is more a C idiom than anything else, for lack of a better
+ /// alternative. In Zig, we have first-class error handling... so let's use it.
+ /// Matches the implementation of io_uring_get_sqe() in liburing.
+ pub fn get_sqe(self: *IO_Uring) !*io_uring_sqe {
+ const head = @atomicLoad(u32, self.sq.head, .Acquire);
+ // Remember that these head and tail offsets wrap around every four billion operations.
+ // We must therefore use wrapping addition and subtraction to avoid a runtime crash.
+ const next = self.sq.sqe_tail +% 1;
+ if (next -% head > self.sq.sqes.len) return error.SubmissionQueueFull;
+ var sqe = &self.sq.sqes[self.sq.sqe_tail & self.sq.mask];
+ self.sq.sqe_tail = next;
+ return sqe;
+ }
+
+ /// Submits the SQEs acquired via get_sqe() to the kernel. You can call this once after you have
+ /// called get_sqe() multiple times to setup multiple I/O requests.
+ /// Returns the number of SQEs submitted.
+ /// Matches the implementation of io_uring_submit() in liburing.
+ pub fn submit(self: *IO_Uring) !u32 {
+ return self.submit_and_wait(0);
+ }
+
+ /// Like submit(), but allows waiting for events as well.
+ /// Returns the number of SQEs submitted.
+ /// Matches the implementation of io_uring_submit_and_wait() in liburing.
+ pub fn submit_and_wait(self: *IO_Uring, wait_nr: u32) !u32 {
+ var submitted = self.flush_sq();
+ var flags: u32 = 0;
+ if (self.sq_ring_needs_enter(submitted, &flags) or wait_nr > 0) {
+ if (wait_nr > 0 or (self.flags & linux.IORING_SETUP_IOPOLL) != 0) {
+ flags |= linux.IORING_ENTER_GETEVENTS;
+ }
+ return try self.enter(submitted, wait_nr, flags);
+ }
+ return submitted;
+ }
+
+ /// Tell the kernel we have submitted SQEs and/or want to wait for CQEs.
+ /// Returns the number of SQEs submitted.
+ pub fn enter(self: *IO_Uring, to_submit: u32, min_complete: u32, flags: u32) !u32 {
+ assert(self.fd >= 0);
+ const res = linux.io_uring_enter(self.fd, to_submit, min_complete, flags, null);
+ switch (linux.getErrno(res)) {
+ 0 => {},
+ // The kernel was unable to allocate memory or ran out of resources for the request.
+ // The application should wait for some completions and try again:
+ linux.EAGAIN => return error.SystemResources,
+ // The SQE `fd` is invalid, or IOSQE_FIXED_FILE was set but no files were registered:
+ linux.EBADF => return error.FileDescriptorInvalid,
+ // The file descriptor is valid, but the ring is not in the right state.
+ // See io_uring_register(2) for how to enable the ring.
+ linux.EBADFD => return error.FileDescriptorInBadState,
+ // The application attempted to overcommit the number of requests it can have pending.
+ // The application should wait for some completions and try again:
+ linux.EBUSY => return error.CompletionQueueOvercommitted,
+ // The SQE is invalid, or valid but the ring was setup with IORING_SETUP_IOPOLL:
+ linux.EINVAL => return error.SubmissionQueueEntryInvalid,
+ // The buffer is outside the process' accessible address space, or IORING_OP_READ_FIXED
+ // or IORING_OP_WRITE_FIXED was specified but no buffers were registered, or the range
+ // described by `addr` and `len` is not within the buffer registered at `buf_index`:
+ linux.EFAULT => return error.BufferInvalid,
+ linux.ENXIO => return error.RingShuttingDown,
+ // The kernel believes our `self.fd` does not refer to an io_uring instance,
+ // or the opcode is valid but not supported by this kernel (more likely):
+ linux.EOPNOTSUPP => return error.OpcodeNotSupported,
+ // The operation was interrupted by a delivery of a signal before it could complete.
+ // This can happen while waiting for events with IORING_ENTER_GETEVENTS:
+ linux.EINTR => return error.SignalInterrupt,
+ else => |errno| return os.unexpectedErrno(errno)
+ }
+ return @intCast(u32, res);
+ }
+
+ /// Sync internal state with kernel ring state on the SQ side.
+ /// Returns the number of all pending events in the SQ ring, for the shared ring.
+ /// This return value includes previously flushed SQEs, as per liburing.
+ /// The rationale is to suggest that an io_uring_enter() call is needed rather than not.
+ /// Matches the implementation of __io_uring_flush_sq() in liburing.
+ pub fn flush_sq(self: *IO_Uring) u32 {
+ if (self.sq.sqe_head != self.sq.sqe_tail) {
+ // Fill in SQEs that we have queued up, adding them to the kernel ring.
+ const to_submit = self.sq.sqe_tail -% self.sq.sqe_head;
+ var tail = self.sq.tail.*;
+ var i: usize = 0;
+ while (i < to_submit) : (i += 1) {
+ self.sq.array[tail & self.sq.mask] = self.sq.sqe_head & self.sq.mask;
+ tail +%= 1;
+ self.sq.sqe_head +%= 1;
+ }
+ // Ensure that the kernel can actually see the SQE updates when it sees the tail update.
+ @atomicStore(u32, self.sq.tail, tail, .Release);
+ }
+ return self.sq_ready();
+ }
+
+ /// Returns true if we are not using an SQ thread (thus nobody submits but us),
+ /// or if IORING_SQ_NEED_WAKEUP is set and the SQ thread must be explicitly awakened.
+ /// For the latter case, we set the SQ thread wakeup flag.
+ /// Matches the implementation of sq_ring_needs_enter() in liburing.
+ pub fn sq_ring_needs_enter(self: *IO_Uring, submitted: u32, flags: *u32) bool {
+ assert(flags.* == 0);
+ if ((self.flags & linux.IORING_SETUP_SQPOLL) == 0 and submitted > 0) return true;
+ if ((@atomicLoad(u32, self.sq.flags, .Unordered) & linux.IORING_SQ_NEED_WAKEUP) != 0) {
+ flags.* |= linux.IORING_ENTER_SQ_WAKEUP;
+ return true;
+ }
+ return false;
+ }
+
+ /// Returns the number of flushed and unflushed SQEs pending in the submission queue.
+ /// In other words, this is the number of SQEs in the submission queue, i.e. its length.
+ /// These are SQEs that the kernel is yet to consume.
+ /// Matches the implementation of io_uring_sq_ready in liburing.
+ pub fn sq_ready(self: *IO_Uring) u32 {
+ // Always use the shared ring state (i.e. head and not sqe_head) to avoid going out of sync,
+ // see https://github.com/axboe/liburing/issues/92.
+ return self.sq.sqe_tail -% @atomicLoad(u32, self.sq.head, .Acquire);
+ }
+
+ /// Returns the number of CQEs in the completion queue, i.e. its length.
+ /// These are CQEs that the application is yet to consume.
+ /// Matches the implementation of io_uring_cq_ready in liburing.
+ pub fn cq_ready(self: *IO_Uring) u32 {
+ return @atomicLoad(u32, self.cq.tail, .Acquire) -% self.cq.head.*;
+ }
+
+ /// Copies as many CQEs as are ready, and that can fit into the destination `cqes` slice.
+ /// If none are available, enters into the kernel to wait for at most `wait_nr` CQEs.
+ /// Returns the number of CQEs copied, advancing the CQ ring.
+ /// Provides all the wait/peek methods found in liburing, but with batching and a single method.
+ /// The rationale for copying CQEs rather than copying pointers is that pointers are 8 bytes
+ /// whereas CQEs are not much more at only 16 bytes, and this provides a safer faster interface.
+ /// Safer, because you no longer need to call cqe_seen(), avoiding idempotency bugs.
+ /// Faster, because we can now amortize the atomic store release to `cq.head` across the batch.
+ /// See https://github.com/axboe/liburing/issues/103#issuecomment-686665007.
+ /// Matches the implementation of io_uring_peek_batch_cqe() in liburing, but supports waiting.
+ pub fn copy_cqes(self: *IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) !u32 {
+ const count = self.copy_cqes_ready(cqes, wait_nr);
+ if (count > 0) return count;
+ if (self.cq_ring_needs_flush() or wait_nr > 0) {
+ _ = try self.enter(0, wait_nr, linux.IORING_ENTER_GETEVENTS);
+ return self.copy_cqes_ready(cqes, wait_nr);
+ }
+ return 0;
+ }
+
+ fn copy_cqes_ready(self: *IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) u32 {
+ const ready = self.cq_ready();
+ const count = std.math.min(cqes.len, ready);
+ var head = self.cq.head.*;
+ var tail = head +% count;
+ // TODO Optimize this by using 1 or 2 memcpy's (if the tail wraps) rather than a loop.
+ var i: usize = 0;
+ // Do not use "less-than" operator since head and tail may wrap:
+ while (head != tail) {
+ cqes[i] = self.cq.cqes[head & self.cq.mask]; // Copy struct by value.
+ head +%= 1;
+ i += 1;
+ }
+ self.cq_advance(count);
+ return count;
+ }
+
+ /// Returns a copy of an I/O completion, waiting for it if necessary, and advancing the CQ ring.
+ /// A convenience method for `copy_cqes()` for when you don't need to batch or peek.
+ pub fn copy_cqe(ring: *IO_Uring) !io_uring_cqe {
+ var cqes: [1]io_uring_cqe = undefined;
+ const count = try ring.copy_cqes(&cqes, 1);
+ assert(count == 1);
+ return cqes[0];
+ }
+
+ /// Matches the implementation of cq_ring_needs_flush() in liburing.
+ pub fn cq_ring_needs_flush(self: *IO_Uring) bool {
+ return (@atomicLoad(u32, self.sq.flags, .Unordered) & linux.IORING_SQ_CQ_OVERFLOW) != 0;
+ }
+
+ /// For advanced use cases only that implement custom completion queue methods.
+ /// If you use copy_cqes() or copy_cqe() you must not call cqe_seen() or cq_advance().
+ /// Must be called exactly once after a zero-copy CQE has been processed by your application.
+ /// Not idempotent, calling more than once will result in other CQEs being lost.
+ /// Matches the implementation of cqe_seen() in liburing.
+ pub fn cqe_seen(self: *IO_Uring, cqe: *io_uring_cqe) void {
+ self.cq_advance(1);
+ }
+
+ /// For advanced use cases only that implement custom completion queue methods.
+ /// Matches the implementation of cq_advance() in liburing.
+ pub fn cq_advance(self: *IO_Uring, count: u32) void {
+ if (count > 0) {
+ // Ensure the kernel only sees the new head value after the CQEs have been read.
+ @atomicStore(u32, self.cq.head, self.cq.head.* +% count, .Release);
+ }
+ }
+
+ /// Queues (but does not submit) an SQE to perform an `fsync(2)`.
+ /// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
+ /// For example, for `fdatasync()` you can set `IORING_FSYNC_DATASYNC` in the SQE's `rw_flags`.
+ /// N.B. While SQEs are initiated in the order in which they appear in the submission queue,
+ /// operations execute in parallel and completions are unordered. Therefore, an application that
+ /// submits a write followed by an fsync in the submission queue cannot expect the fsync to
+ /// apply to the write, since the fsync may complete before the write is issued to the disk.
+ /// You should preferably use `link_with_next_sqe()` on a write's SQE to link it with an fsync,
+ /// or else insert a full write barrier using `drain_previous_sqes()` when queueing an fsync.
+ pub fn fsync(self: *IO_Uring, user_data: u64, fd: os.fd_t, flags: u32) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_fsync(sqe, fd, flags);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a no-op.
+ /// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
+ /// A no-op is more useful than may appear at first glance.
+ /// For example, you could call `drain_previous_sqes()` on the returned SQE, to use the no-op to
+ /// know when the ring is idle before acting on a kill signal.
+ pub fn nop(self: *IO_Uring, user_data: u64) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_nop(sqe);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a `read(2)`.
+ /// Returns a pointer to the SQE.
+ pub fn read(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ buffer: []u8,
+ offset: u64
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_read(sqe, fd, buffer, offset);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a `write(2)`.
+ /// Returns a pointer to the SQE.
+ pub fn write(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ buffer: []const u8,
+ offset: u64
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_write(sqe, fd, buffer, offset);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a `preadv()`.
+ /// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
+ /// For example, if you want to do a `preadv2()` then set `rw_flags` on the returned SQE.
+ /// See https://linux.die.net/man/2/preadv.
+ pub fn readv(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ iovecs: []const os.iovec,
+ offset: u64
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_readv(sqe, fd, iovecs, offset);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a `pwritev()`.
+ /// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
+ /// For example, if you want to do a `pwritev2()` then set `rw_flags` on the returned SQE.
+ /// See https://linux.die.net/man/2/pwritev.
+ pub fn writev(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ iovecs: []const os.iovec_const,
+ offset: u64
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_writev(sqe, fd, iovecs, offset);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform an `accept4(2)` on a socket.
+ /// Returns a pointer to the SQE.
+ pub fn accept(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ addr: *os.sockaddr,
+ addrlen: *os.socklen_t,
+ flags: u32
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queue (but does not submit) an SQE to perform a `connect(2)` on a socket.
+ /// Returns a pointer to the SQE.
+ pub fn connect(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ addr: *const os.sockaddr,
+ addrlen: os.socklen_t
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_connect(sqe, fd, addr, addrlen);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a `recv(2)`.
+ /// Returns a pointer to the SQE.
+ pub fn recv(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ buffer: []u8,
+ flags: u32
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_recv(sqe, fd, buffer, flags);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a `send(2)`.
+ /// Returns a pointer to the SQE.
+ pub fn send(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ buffer: []const u8,
+ flags: u32
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_send(sqe, fd, buffer, flags);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform an `openat(2)`.
+ /// Returns a pointer to the SQE.
+ pub fn openat(
+ self: *IO_Uring,
+ user_data: u64,
+ fd: os.fd_t,
+ path: [*:0]const u8,
+ flags: u32,
+ mode: os.mode_t
+ ) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_openat(sqe, fd, path, flags, mode);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+ /// Queues (but does not submit) an SQE to perform a `close(2)`.
+ /// Returns a pointer to the SQE.
+ pub fn close(self: *IO_Uring, user_data: u64, fd: os.fd_t) !*io_uring_sqe {
+ const sqe = try self.get_sqe();
+ io_uring_prep_close(sqe, fd);
+ sqe.user_data = user_data;
+ return sqe;
+ }
+
+    /// Registers an array of file descriptors.
+    /// Every time a file descriptor is put in an SQE and submitted to the kernel, the kernel must
+    /// retrieve a reference to the file, and once I/O has completed the file reference must be
+    /// dropped. The atomic nature of this file reference can be a slowdown for high IOPS workloads.
+    /// This slowdown can be avoided by pre-registering file descriptors.
+    /// To refer to a registered file descriptor, IOSQE_FIXED_FILE must be set in the SQE's flags,
+    /// and the SQE's fd must be set to the index of the file descriptor in the registered array.
+    /// Registering file descriptors will wait for the ring to idle.
+    /// Files are automatically unregistered by the kernel when the ring is torn down.
+    /// An application need unregister only if it wants to register a new array of file descriptors.
+    pub fn register_files(self: *IO_Uring, fds: []const os.fd_t) !void {
+        assert(self.fd >= 0);
+        // The io_uring_register(2) ABI takes an array of c_int fds; verify os.fd_t matches it.
+        comptime assert(@sizeOf(os.fd_t) == @sizeOf(c_int));
+        const res = linux.io_uring_register(
+            self.fd,
+            .REGISTER_FILES,
+            @ptrCast(*const c_void, fds.ptr),
+            @intCast(u32, fds.len)
+        );
+        switch (linux.getErrno(res)) {
+            0 => {},
+            // One or more fds in the array are invalid, or the kernel does not support sparse sets:
+            linux.EBADF => return error.FileDescriptorInvalid,
+            linux.EBUSY => return error.FilesAlreadyRegistered,
+            linux.EINVAL => return error.FilesEmpty,
+            // Adding `nr_args` file references would exceed the maximum allowed number of files the
+            // user is allowed to have according to the per-user RLIMIT_NOFILE resource limit and
+            // the CAP_SYS_RESOURCE capability is not set, or `nr_args` exceeds the maximum allowed
+            // for a fixed file set (older kernels have a limit of 1024 files vs 64K files):
+            linux.EMFILE => return error.UserFdQuotaExceeded,
+            // Insufficient kernel resources, or the caller had a non-zero RLIMIT_MEMLOCK soft
+            // resource limit but tried to lock more memory than the limit permitted (not enforced
+            // when the process is privileged with CAP_IPC_LOCK):
+            linux.ENOMEM => return error.SystemResources,
+            // Attempt to register files on a ring already registering files or being torn down:
+            linux.ENXIO => return error.RingShuttingDownOrAlreadyRegisteringFiles,
+            else => |errno| return os.unexpectedErrno(errno)
+        }
+    }
+
+    /// Unregisters all registered file descriptors previously associated with the ring.
+    /// Returns error.FilesNotRegistered if no files are currently registered.
+    pub fn unregister_files(self: *IO_Uring) !void {
+        assert(self.fd >= 0);
+        const res = linux.io_uring_register(self.fd, .UNREGISTER_FILES, null, 0);
+        switch (linux.getErrno(res)) {
+            0 => {},
+            linux.ENXIO => return error.FilesNotRegistered,
+            else => |errno| return os.unexpectedErrno(errno)
+        }
+    }
+};
+
+/// User-space view of the kernel's submission queue (SQ) ring, mapped via mmap.
+pub const SubmissionQueue = struct {
+    head: *u32,
+    tail: *u32,
+    // The ring mask is constant after setup, so we load it once at init rather than keep a pointer.
+    mask: u32,
+    flags: *u32,
+    dropped: *u32,
+    array: []u32,
+    sqes: []io_uring_sqe,
+    mmap: []align(mem.page_size) u8,
+    mmap_sqes: []align(mem.page_size) u8,
+
+    // We use `sqe_head` and `sqe_tail` in the same way as liburing:
+    // We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
+    // We then set `tail` to `sqe_tail` once, only when these events are actually submitted.
+    // This allows us to amortize the cost of the @atomicStore to `tail` across multiple SQEs.
+    sqe_head: u32 = 0,
+    sqe_tail: u32 = 0,
+
+    /// Maps the SQ ring and the SQE array for the ring owned by `fd`, using the geometry in `p`.
+    /// Requires IORING_FEAT_SINGLE_MMAP, i.e. the SQ and CQ rings share a single mapping.
+    pub fn init(fd: os.fd_t, p: io_uring_params) !SubmissionQueue {
+        assert(fd >= 0);
+        assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
+        // The SQ and CQ rings share one mapping, so it must be sized for the larger of the two.
+        const size = std.math.max(
+            p.sq_off.array + p.sq_entries * @sizeOf(u32),
+            p.cq_off.cqes + p.cq_entries * @sizeOf(io_uring_cqe)
+        );
+        const mmap = try os.mmap(
+            null,
+            size,
+            os.PROT_READ | os.PROT_WRITE,
+            os.MAP_SHARED | os.MAP_POPULATE,
+            fd,
+            linux.IORING_OFF_SQ_RING,
+        );
+        errdefer os.munmap(mmap);
+        assert(mmap.len == size);
+
+        // The motivation for the `sqes` and `array` indirection is to make it possible for the
+        // application to preallocate static io_uring_sqe entries and then replay them when needed.
+        const size_sqes = p.sq_entries * @sizeOf(io_uring_sqe);
+        const mmap_sqes = try os.mmap(
+            null,
+            size_sqes,
+            os.PROT_READ | os.PROT_WRITE,
+            os.MAP_SHARED | os.MAP_POPULATE,
+            fd,
+            linux.IORING_OFF_SQES,
+        );
+        errdefer os.munmap(mmap_sqes);
+        assert(mmap_sqes.len == size_sqes);
+
+        // All ring pointers are located at kernel-provided byte offsets within the mapping.
+        const array = @ptrCast([*]u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.array]));
+        const sqes = @ptrCast([*]io_uring_sqe, @alignCast(@alignOf(io_uring_sqe), &mmap_sqes[0]));
+        // We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries,
+        // see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844.
+        assert(
+            p.sq_entries ==
+            @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*
+        );
+        return SubmissionQueue {
+            .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.head])),
+            .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.tail])),
+            .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_mask])).*,
+            .flags = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.flags])),
+            .dropped = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.dropped])),
+            .array = array[0..p.sq_entries],
+            .sqes = sqes[0..p.sq_entries],
+            .mmap = mmap,
+            .mmap_sqes = mmap_sqes
+        };
+    }
+
+    /// Unmaps both ring mappings. Deallocation must not fail, so this returns void.
+    pub fn deinit(self: *SubmissionQueue) void {
+        os.munmap(self.mmap_sqes);
+        os.munmap(self.mmap);
+    }
+};
+
+/// User-space view of the kernel's completion queue (CQ) ring.
+pub const CompletionQueue = struct {
+    head: *u32,
+    tail: *u32,
+    // The ring mask is constant after setup, so we load it once at init rather than keep a pointer.
+    mask: u32,
+    overflow: *u32,
+    cqes: []io_uring_cqe,
+
+    /// Initializes the CQ view over the mapping already created by `SubmissionQueue.init()`:
+    /// with IORING_FEAT_SINGLE_MMAP the CQ ring lives in the same mmap as the SQ ring.
+    pub fn init(fd: os.fd_t, p: io_uring_params, sq: SubmissionQueue) !CompletionQueue {
+        assert(fd >= 0);
+        assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
+        const mmap = sq.mmap;
+        const cqes = @ptrCast(
+            [*]io_uring_cqe,
+            @alignCast(@alignOf(io_uring_cqe), &mmap[p.cq_off.cqes])
+        );
+        // The kernel writes the ring size back through p.cq_off.ring_entries; sanity-check it.
+        assert(
+            p.cq_entries ==
+            @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*
+        );
+        return CompletionQueue {
+            .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.head])),
+            .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.tail])),
+            .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_mask])).*,
+            .overflow = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.overflow])),
+            .cqes = cqes[0..p.cq_entries]
+        };
+    }
+
+    pub fn deinit(self: *CompletionQueue) void {
+        // A no-op since we now share the mmap with the submission queue.
+        // Here for symmetry with the submission queue, and for any future feature support.
+    }
+};
+
+/// Fills `sqe` for a no-op operation, zeroing every field so no stale state is submitted.
+pub fn io_uring_prep_nop(sqe: *io_uring_sqe) void {
+    sqe.* = .{
+        .opcode = .NOP,
+        .flags = 0,
+        .ioprio = 0,
+        .fd = 0,
+        .off = 0,
+        .addr = 0,
+        .len = 0,
+        .rw_flags = 0,
+        .user_data = 0,
+        .buf_index = 0,
+        .personality = 0,
+        .splice_fd_in = 0,
+        .__pad2 = [2]u64{ 0, 0 }
+    };
+}
+
+/// Fills `sqe` for an fsync operation on `fd`.
+/// `flags` is stored in `rw_flags`, which the kernel interprets as the fsync flags.
+pub fn io_uring_prep_fsync(sqe: *io_uring_sqe, fd: os.fd_t, flags: u32) void {
+    sqe.* = .{
+        .opcode = .FSYNC,
+        .flags = 0,
+        .ioprio = 0,
+        .fd = fd,
+        .off = 0,
+        .addr = 0,
+        .len = 0,
+        .rw_flags = flags,
+        .user_data = 0,
+        .buf_index = 0,
+        .personality = 0,
+        .splice_fd_in = 0,
+        .__pad2 = [2]u64{ 0, 0 }
+    };
+}
+
+/// Common helper used by most prep functions: fills every SQE field for opcode `op`, storing
+/// the pointer `addr` as an integer, a 32-bit `len`, and a 64-bit `offset`.
+/// `addr` is `anytype` so both single-item and many-item pointers can be passed.
+/// All remaining fields are zeroed so no stale state leaks into the submission.
+pub fn io_uring_prep_rw(
+    op: linux.IORING_OP,
+    sqe: *io_uring_sqe,
+    fd: os.fd_t,
+    addr: anytype,
+    len: usize,
+    offset: u64
+) void {
+    sqe.* = .{
+        .opcode = op,
+        .flags = 0,
+        .ioprio = 0,
+        .fd = fd,
+        .off = offset,
+        .addr = @ptrToInt(addr),
+        .len = @intCast(u32, len),
+        .rw_flags = 0,
+        .user_data = 0,
+        .buf_index = 0,
+        .personality = 0,
+        .splice_fd_in = 0,
+        .__pad2 = [2]u64{ 0, 0 }
+    };
+}
+
+/// Fills `sqe` for a `read(2)`-like operation into `buffer` at file `offset` (kernel 5.6+).
+pub fn io_uring_prep_read(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []u8, offset: u64) void {
+    io_uring_prep_rw(.READ, sqe, fd, buffer.ptr, buffer.len, offset);
+}
+
+/// Fills `sqe` for a `write(2)`-like operation from `buffer` at file `offset` (kernel 5.6+).
+pub fn io_uring_prep_write(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []const u8, offset: u64) void {
+    io_uring_prep_rw(.WRITE, sqe, fd, buffer.ptr, buffer.len, offset);
+}
+
+/// Fills `sqe` for a `preadv(2)`-like vectored read into `iovecs` at file `offset`.
+pub fn io_uring_prep_readv(
+    sqe: *io_uring_sqe,
+    fd: os.fd_t,
+    iovecs: []const os.iovec,
+    offset: u64
+) void {
+    io_uring_prep_rw(.READV, sqe, fd, iovecs.ptr, iovecs.len, offset);
+}
+
+/// Fills `sqe` for a `pwritev(2)`-like vectored write from `iovecs` at file `offset`.
+pub fn io_uring_prep_writev(
+    sqe: *io_uring_sqe,
+    fd: os.fd_t,
+    iovecs: []const os.iovec_const,
+    offset: u64
+) void {
+    io_uring_prep_rw(.WRITEV, sqe, fd, iovecs.ptr, iovecs.len, offset);
+}
+
+/// Fills `sqe` for an `accept4(2)`-style operation on listening socket `fd`.
+/// `flags` is stored in `rw_flags` (the accept flags).
+pub fn io_uring_prep_accept(
+    sqe: *io_uring_sqe,
+    fd: os.fd_t,
+    addr: *os.sockaddr,
+    addrlen: *os.socklen_t,
+    flags: u32
+) void {
+    // `addr` holds a pointer to `sockaddr`, and `addr2` holds a pointer to socklen_t`.
+    // `addr2` maps to `sqe.off` (u64) instead of `sqe.len` (which is only a u32).
+    io_uring_prep_rw(.ACCEPT, sqe, fd, addr, 0, @ptrToInt(addrlen));
+    sqe.rw_flags = flags;
+}
+
+/// Fills `sqe` for a `connect(2)` on socket `fd` to the address in `addr`.
+pub fn io_uring_prep_connect(
+    sqe: *io_uring_sqe,
+    fd: os.fd_t,
+    addr: *const os.sockaddr,
+    addrlen: os.socklen_t
+) void {
+    // `addrlen` maps to `sqe.off` (u64) instead of `sqe.len` (which is only a u32).
+    io_uring_prep_rw(.CONNECT, sqe, fd, addr, 0, addrlen);
+}
+
+/// Fills `sqe` for a `recv(2)` into `buffer`; `flags` is stored in `rw_flags`.
+pub fn io_uring_prep_recv(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void {
+    io_uring_prep_rw(.RECV, sqe, fd, buffer.ptr, buffer.len, 0);
+    sqe.rw_flags = flags;
+}
+
+/// Fills `sqe` for a `send(2)` from `buffer`; `flags` is stored in `rw_flags`.
+pub fn io_uring_prep_send(sqe: *io_uring_sqe, fd: os.fd_t, buffer: []const u8, flags: u32) void {
+    io_uring_prep_rw(.SEND, sqe, fd, buffer.ptr, buffer.len, 0);
+    sqe.rw_flags = flags;
+}
+
+/// Fills `sqe` for an `openat(2)`: `path` goes in `addr`, `mode` is carried in `len`, and the
+/// open `flags` go in `rw_flags`, matching the kernel's field layout for this opcode.
+pub fn io_uring_prep_openat(
+    sqe: *io_uring_sqe,
+    fd: os.fd_t,
+    path: [*:0]const u8,
+    flags: u32,
+    mode: os.mode_t
+) void {
+    io_uring_prep_rw(.OPENAT, sqe, fd, path, mode, 0);
+    sqe.rw_flags = flags;
+}
+
+/// Fills `sqe` for a `close(2)` of `fd`, zeroing all other fields.
+pub fn io_uring_prep_close(sqe: *io_uring_sqe, fd: os.fd_t) void {
+    sqe.* = .{
+        .opcode = .CLOSE,
+        .flags = 0,
+        .ioprio = 0,
+        .fd = fd,
+        .off = 0,
+        .addr = 0,
+        .len = 0,
+        .rw_flags = 0,
+        .user_data = 0,
+        .buf_index = 0,
+        .personality = 0,
+        .splice_fd_in = 0,
+        .__pad2 = [2]u64{ 0, 0 }
+    };
+}
+
+// Pins the kernel ABI: struct sizes, the fixed mmap offsets, and init() argument validation.
+test "structs/offsets/entries" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    testing.expectEqual(@as(usize, 120), @sizeOf(io_uring_params));
+    testing.expectEqual(@as(usize, 64), @sizeOf(io_uring_sqe));
+    testing.expectEqual(@as(usize, 16), @sizeOf(io_uring_cqe));
+
+    testing.expectEqual(0, linux.IORING_OFF_SQ_RING);
+    testing.expectEqual(0x8000000, linux.IORING_OFF_CQ_RING);
+    testing.expectEqual(0x10000000, linux.IORING_OFF_SQES);
+
+    // Entries must be a nonzero power of two; both invalid cases must be rejected up front.
+    testing.expectError(error.EntriesZero, IO_Uring.init(0, 0));
+    testing.expectError(error.EntriesNotPowerOfTwo, IO_Uring.init(3, 0));
+}
+
+// Submits a NOP SQE and verifies SQ/CQ head/tail accounting across submission and completion,
+// then repeats with IOSQE_IO_DRAIN to check that flagged SQEs complete normally too.
+test "nop" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
+        error.SystemOutdated => return error.SkipZigTest,
+        error.PermissionDenied => return error.SkipZigTest,
+        else => return err
+    };
+    defer {
+        ring.deinit();
+        // deinit() must reset the ring's fd to -1.
+        testing.expectEqual(@as(os.fd_t, -1), ring.fd);
+    }
+
+    const sqe = try ring.nop(0xaaaaaaaa);
+    testing.expectEqual(io_uring_sqe {
+        .opcode = .NOP,
+        .flags = 0,
+        .ioprio = 0,
+        .fd = 0,
+        .off = 0,
+        .addr = 0,
+        .len = 0,
+        .rw_flags = 0,
+        .user_data = 0xaaaaaaaa,
+        .buf_index = 0,
+        .personality = 0,
+        .splice_fd_in = 0,
+        .__pad2 = [2]u64{ 0, 0 }
+    }, sqe.*);
+
+    // Before submit(): only the local sqe_tail has advanced, not the shared ring tail.
+    testing.expectEqual(@as(u32, 0), ring.sq.sqe_head);
+    testing.expectEqual(@as(u32, 1), ring.sq.sqe_tail);
+    testing.expectEqual(@as(u32, 0), ring.sq.tail.*);
+    testing.expectEqual(@as(u32, 0), ring.cq.head.*);
+    testing.expectEqual(@as(u32, 1), ring.sq_ready());
+    testing.expectEqual(@as(u32, 0), ring.cq_ready());
+
+    testing.expectEqual(@as(u32, 1), try ring.submit());
+    testing.expectEqual(@as(u32, 1), ring.sq.sqe_head);
+    testing.expectEqual(@as(u32, 1), ring.sq.sqe_tail);
+    testing.expectEqual(@as(u32, 1), ring.sq.tail.*);
+    testing.expectEqual(@as(u32, 0), ring.cq.head.*);
+    testing.expectEqual(@as(u32, 0), ring.sq_ready());
+
+    testing.expectEqual(io_uring_cqe {
+        .user_data = 0xaaaaaaaa,
+        .res = 0,
+        .flags = 0
+    }, try ring.copy_cqe());
+    testing.expectEqual(@as(u32, 1), ring.cq.head.*);
+    testing.expectEqual(@as(u32, 0), ring.cq_ready());
+
+    const sqe_barrier = try ring.nop(0xbbbbbbbb);
+    sqe_barrier.flags |= linux.IOSQE_IO_DRAIN;
+    testing.expectEqual(@as(u32, 1), try ring.submit());
+    testing.expectEqual(io_uring_cqe {
+        .user_data = 0xbbbbbbbb,
+        .res = 0,
+        .flags = 0
+    }, try ring.copy_cqe());
+    testing.expectEqual(@as(u32, 2), ring.sq.sqe_head);
+    testing.expectEqual(@as(u32, 2), ring.sq.sqe_tail);
+    testing.expectEqual(@as(u32, 2), ring.sq.tail.*);
+    testing.expectEqual(@as(u32, 2), ring.cq.head.*);
+}
+
+// Exercises readv() through a registered (fixed) file descriptor and verifies that a full
+// ring rejects further SQEs with error.SubmissionQueueFull.
+test "readv" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
+        error.SystemOutdated => return error.SkipZigTest,
+        error.PermissionDenied => return error.SkipZigTest,
+        else => return err
+    };
+    defer ring.deinit();
+
+    const fd = try os.openZ("/dev/zero", os.O_RDONLY | os.O_CLOEXEC, 0);
+    defer os.close(fd);
+
+    // Linux Kernel 5.4 supports IORING_REGISTER_FILES but not sparse fd sets (i.e. an fd of -1).
+    // Linux Kernel 5.5 adds support for sparse fd sets.
+    // Compare:
+    // https://github.com/torvalds/linux/blob/v5.4/fs/io_uring.c#L3119-L3124 vs
+    // https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L6687-L6691
+    // We therefore avoid stressing sparse fd sets here:
+    var registered_fds = [_]os.fd_t{0} ** 1;
+    const fd_index = 0;
+    registered_fds[fd_index] = fd;
+    try ring.register_files(registered_fds[0..]);
+
+    var buffer = [_]u8{42} ** 128;
+    var iovecs = [_]os.iovec{ os.iovec { .iov_base = &buffer, .iov_len = buffer.len } };
+    // With IOSQE_FIXED_FILE below, the SQE's fd is the index into the registered array.
+    const sqe = try ring.readv(0xcccccccc, fd_index, iovecs[0..], 0);
+    testing.expectEqual(linux.IORING_OP.READV, sqe.opcode);
+    sqe.flags |= linux.IOSQE_FIXED_FILE;
+
+    // The ring has only one entry and it is in use, so a second SQE must be refused.
+    testing.expectError(error.SubmissionQueueFull, ring.nop(0));
+    testing.expectEqual(@as(u32, 1), try ring.submit());
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0xcccccccc,
+        .res = buffer.len,
+        .flags = 0,
+    }, try ring.copy_cqe());
+    // /dev/zero must have overwritten the 42s with zeroes.
+    testing.expectEqualSlices(u8, &([_]u8{0} ** buffer.len), buffer[0..]);
+
+    try ring.unregister_files();
+}
+
+// Chains writev -> fsync -> readv with IOSQE_IO_LINK and verifies the three CQEs arrive in
+// submission order with the expected results.
+test "writev/fsync/readv" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    var ring = IO_Uring.init(4, 0) catch |err| switch (err) {
+        error.SystemOutdated => return error.SkipZigTest,
+        error.PermissionDenied => return error.SkipZigTest,
+        else => return err
+    };
+    defer ring.deinit();
+
+    const path = "test_io_uring_writev_fsync_readv";
+    const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
+    defer file.close();
+    defer std.fs.cwd().deleteFile(path) catch {};
+    const fd = file.handle;
+
+    const buffer_write = [_]u8{42} ** 128;
+    const iovecs_write = [_]os.iovec_const {
+        os.iovec_const { .iov_base = &buffer_write, .iov_len = buffer_write.len }
+    };
+    var buffer_read = [_]u8{0} ** 128;
+    var iovecs_read = [_]os.iovec {
+        os.iovec { .iov_base = &buffer_read, .iov_len = buffer_read.len }
+    };
+
+    // Offset 17 is arbitrary and nonzero to verify that `off` is honored end-to-end.
+    const sqe_writev = try ring.writev(0xdddddddd, fd, iovecs_write[0..], 17);
+    testing.expectEqual(linux.IORING_OP.WRITEV, sqe_writev.opcode);
+    testing.expectEqual(@as(u64, 17), sqe_writev.off);
+    sqe_writev.flags |= linux.IOSQE_IO_LINK;
+
+    const sqe_fsync = try ring.fsync(0xeeeeeeee, fd, 0);
+    testing.expectEqual(linux.IORING_OP.FSYNC, sqe_fsync.opcode);
+    testing.expectEqual(fd, sqe_fsync.fd);
+    sqe_fsync.flags |= linux.IOSQE_IO_LINK;
+
+    const sqe_readv = try ring.readv(0xffffffff, fd, iovecs_read[0..], 17);
+    testing.expectEqual(linux.IORING_OP.READV, sqe_readv.opcode);
+    testing.expectEqual(@as(u64, 17), sqe_readv.off);
+
+    testing.expectEqual(@as(u32, 3), ring.sq_ready());
+    testing.expectEqual(@as(u32, 3), try ring.submit_and_wait(3));
+    testing.expectEqual(@as(u32, 0), ring.sq_ready());
+    testing.expectEqual(@as(u32, 3), ring.cq_ready());
+
+    // Linked SQEs must complete in order: writev, then fsync, then readv.
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0xdddddddd,
+        .res = buffer_write.len,
+        .flags = 0,
+    }, try ring.copy_cqe());
+    testing.expectEqual(@as(u32, 2), ring.cq_ready());
+
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0xeeeeeeee,
+        .res = 0,
+        .flags = 0,
+    }, try ring.copy_cqe());
+    testing.expectEqual(@as(u32, 1), ring.cq_ready());
+
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0xffffffff,
+        .res = buffer_read.len,
+        .flags = 0,
+    }, try ring.copy_cqe());
+    testing.expectEqual(@as(u32, 0), ring.cq_ready());
+
+    testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
+}
+
+// Round-trips a buffer through linked WRITE/READ (non-vectored) SQEs at a nonzero offset,
+// skipping on kernels older than 5.6 that lack these opcodes.
+test "write/read" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    var ring = IO_Uring.init(2, 0) catch |err| switch (err) {
+        error.SystemOutdated => return error.SkipZigTest,
+        error.PermissionDenied => return error.SkipZigTest,
+        else => return err
+    };
+    defer ring.deinit();
+
+    const path = "test_io_uring_write_read";
+    const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
+    defer file.close();
+    defer std.fs.cwd().deleteFile(path) catch {};
+    const fd = file.handle;
+
+    const buffer_write = [_]u8{97} ** 20;
+    var buffer_read = [_]u8{98} ** 20;
+    const sqe_write = try ring.write(0x11111111, fd, buffer_write[0..], 10);
+    testing.expectEqual(linux.IORING_OP.WRITE, sqe_write.opcode);
+    testing.expectEqual(@as(u64, 10), sqe_write.off);
+    sqe_write.flags |= linux.IOSQE_IO_LINK;
+    const sqe_read = try ring.read(0x22222222, fd, buffer_read[0..], 10);
+    testing.expectEqual(linux.IORING_OP.READ, sqe_read.opcode);
+    testing.expectEqual(@as(u64, 10), sqe_read.off);
+    testing.expectEqual(@as(u32, 2), try ring.submit());
+
+    const cqe_write = try ring.copy_cqe();
+    const cqe_read = try ring.copy_cqe();
+    // Prior to Linux Kernel 5.6 this is the only way to test for read/write support:
+    // https://lwn.net/Articles/809820/
+    if (cqe_write.res == -linux.EINVAL) return error.SkipZigTest;
+    if (cqe_read.res == -linux.EINVAL) return error.SkipZigTest;
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0x11111111,
+        .res = buffer_write.len,
+        .flags = 0,
+    }, cqe_write);
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0x22222222,
+        .res = buffer_read.len,
+        .flags = 0,
+    }, cqe_read);
+    testing.expectEqualSlices(u8, buffer_write[0..], buffer_read[0..]);
+}
+
+// Opens (and creates) a file via an OPENAT SQE relative to AT_FDCWD, verifying the exact SQE
+// field layout and that the CQE's `res` carries the new file descriptor.
+test "openat" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
+        error.SystemOutdated => return error.SkipZigTest,
+        error.PermissionDenied => return error.SkipZigTest,
+        else => return err
+    };
+    defer ring.deinit();
+
+    const path = "test_io_uring_openat";
+    defer std.fs.cwd().deleteFile(path) catch {};
+
+    const flags: u32 = os.O_CLOEXEC | os.O_RDWR | os.O_CREAT;
+    const mode: os.mode_t = 0o666;
+    const sqe_openat = try ring.openat(0x33333333, linux.AT_FDCWD, path, flags, mode);
+    // Note the OPENAT field mapping: path in `addr`, mode in `len`, open flags in `rw_flags`.
+    testing.expectEqual(io_uring_sqe {
+        .opcode = .OPENAT,
+        .flags = 0,
+        .ioprio = 0,
+        .fd = linux.AT_FDCWD,
+        .off = 0,
+        .addr = @ptrToInt(path),
+        .len = mode,
+        .rw_flags = flags,
+        .user_data = 0x33333333,
+        .buf_index = 0,
+        .personality = 0,
+        .splice_fd_in = 0,
+        .__pad2 = [2]u64{ 0, 0 }
+    }, sqe_openat.*);
+    testing.expectEqual(@as(u32, 1), try ring.submit());
+
+    const cqe_openat = try ring.copy_cqe();
+    testing.expectEqual(@as(u64, 0x33333333), cqe_openat.user_data);
+    if (cqe_openat.res == -linux.EINVAL) return error.SkipZigTest;
+    // AT_FDCWD is not fully supported before kernel 5.6:
+    // See https://lore.kernel.org/io-uring/20200207155039.12819-1-axboe@kernel.dk/T/
+    // We use IORING_FEAT_RW_CUR_POS to know if we are pre-5.6 since that feature was added in 5.6.
+    if (cqe_openat.res == -linux.EBADF and (ring.features & linux.IORING_FEAT_RW_CUR_POS) == 0) {
+        return error.SkipZigTest;
+    }
+    if (cqe_openat.res <= 0) std.debug.print("\ncqe_openat.res={}\n", .{ cqe_openat.res });
+    testing.expect(cqe_openat.res > 0);
+    testing.expectEqual(@as(u32, 0), cqe_openat.flags);
+
+    os.close(cqe_openat.res);
+}
+
+// Closes a freshly created file through a CLOSE SQE and verifies a zero-result CQE.
+test "close" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
+        error.SystemOutdated => return error.SkipZigTest,
+        error.PermissionDenied => return error.SkipZigTest,
+        else => return err
+    };
+    defer ring.deinit();
+
+    const path = "test_io_uring_close";
+    const file = try std.fs.cwd().createFile(path, .{});
+    // errdefer (not defer): on success the ring closes the fd, so closing again would be wrong.
+    errdefer file.close();
+    defer std.fs.cwd().deleteFile(path) catch {};
+
+    const sqe_close = try ring.close(0x44444444, file.handle);
+    testing.expectEqual(linux.IORING_OP.CLOSE, sqe_close.opcode);
+    testing.expectEqual(file.handle, sqe_close.fd);
+    testing.expectEqual(@as(u32, 1), try ring.submit());
+
+    const cqe_close = try ring.copy_cqe();
+    if (cqe_close.res == -linux.EINVAL) return error.SkipZigTest;
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0x44444444,
+        .res = 0,
+        .flags = 0,
+    }, cqe_close);
+}
+
+// End-to-end TCP loopback test: ACCEPT + CONNECT CQEs (in either order), then a linked
+// SEND -> RECV pair, verifying the received prefix matches what was sent.
+test "accept/connect/send/recv" {
+    if (builtin.os.tag != .linux) return error.SkipZigTest;
+
+    var ring = IO_Uring.init(16, 0) catch |err| switch (err) {
+        error.SystemOutdated => return error.SkipZigTest,
+        error.PermissionDenied => return error.SkipZigTest,
+        else => return err
+    };
+    defer ring.deinit();
+
+    const address = try net.Address.parseIp4("127.0.0.1", 3131);
+    const kernel_backlog = 1;
+    const server = try os.socket(address.any.family, os.SOCK_STREAM | os.SOCK_CLOEXEC, 0);
+    defer os.close(server);
+    try os.setsockopt(server, os.SOL_SOCKET, os.SO_REUSEADDR, &mem.toBytes(@as(c_int, 1)));
+    try os.bind(server, &address.any, address.getOsSockLen());
+    try os.listen(server, kernel_backlog);
+
+    // buffer_recv is deliberately shorter than buffer_send: recv() returns a partial read.
+    const buffer_send = [_]u8{ 1,0,1,0,1,0,1,0,1,0 };
+    var buffer_recv = [_]u8{ 0,1,0,1,0 };
+
+    var accept_addr: os.sockaddr = undefined;
+    var accept_addr_len: os.socklen_t = @sizeOf(@TypeOf(accept_addr));
+    // NOTE(review): the returned SQE pointers (`accept`, `connect`, `recv`) are unused below;
+    // harmless here, though newer Zig compilers reject unused locals.
+    const accept = try ring.accept(0xaaaaaaaa, server, &accept_addr, &accept_addr_len, 0);
+    testing.expectEqual(@as(u32, 1), try ring.submit());
+
+    const client = try os.socket(address.any.family, os.SOCK_STREAM | os.SOCK_CLOEXEC, 0);
+    defer os.close(client);
+    const connect = try ring.connect(0xcccccccc, client, &address.any, address.getOsSockLen());
+    testing.expectEqual(@as(u32, 1), try ring.submit());
+
+    var cqe_accept = try ring.copy_cqe();
+    if (cqe_accept.res == -linux.EINVAL) return error.SkipZigTest;
+    var cqe_connect = try ring.copy_cqe();
+    if (cqe_connect.res == -linux.EINVAL) return error.SkipZigTest;
+
+    // The accept/connect CQEs may arrive in any order, the connect CQE will sometimes come first:
+    if (cqe_accept.user_data == 0xcccccccc and cqe_connect.user_data == 0xaaaaaaaa) {
+        const a = cqe_accept;
+        const b = cqe_connect;
+        cqe_accept = b;
+        cqe_connect = a;
+    }
+
+    testing.expectEqual(@as(u64, 0xaaaaaaaa), cqe_accept.user_data);
+    if (cqe_accept.res <= 0) std.debug.print("\ncqe_accept.res={}\n", .{ cqe_accept.res });
+    // The accept CQE's `res` is the accepted connection's file descriptor.
+    testing.expect(cqe_accept.res > 0);
+    testing.expectEqual(@as(u32, 0), cqe_accept.flags);
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0xcccccccc,
+        .res = 0,
+        .flags = 0,
+    }, cqe_connect);
+
+    const send = try ring.send(0xeeeeeeee, client, buffer_send[0..], 0);
+    send.flags |= linux.IOSQE_IO_LINK;
+    const recv = try ring.recv(0xffffffff, cqe_accept.res, buffer_recv[0..], 0);
+    testing.expectEqual(@as(u32, 2), try ring.submit());
+
+    const cqe_send = try ring.copy_cqe();
+    if (cqe_send.res == -linux.EINVAL) return error.SkipZigTest;
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0xeeeeeeee,
+        .res = buffer_send.len,
+        .flags = 0,
+    }, cqe_send);
+
+    const cqe_recv = try ring.copy_cqe();
+    if (cqe_recv.res == -linux.EINVAL) return error.SkipZigTest;
+    testing.expectEqual(linux.io_uring_cqe {
+        .user_data = 0xffffffff,
+        .res = buffer_recv.len,
+        .flags = 0,
+    }, cqe_recv);
+
+    testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
+}
diff --git a/lib/std/os/linux/powerpc64.zig b/lib/std/os/linux/powerpc64.zig
index 337a6aa30a..7252000f24 100644
--- a/lib/std/os/linux/powerpc64.zig
+++ b/lib/std/os/linux/powerpc64.zig
@@ -1,3 +1,9 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2020 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
usingnamespace @import("../bits.zig");
pub fn syscall0(number: SYS) usize {
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index 58c2b311b1..7df05df2cc 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -594,7 +594,7 @@ test "fsync" {
test "getrlimit and setrlimit" {
// TODO enable for other systems when implemented
- if(builtin.os.tag != .linux){
+ if (builtin.os.tag != .linux) {
return error.SkipZigTest;
}
diff --git a/lib/std/os/windows/ws2_32.zig b/lib/std/os/windows/ws2_32.zig
index cfd48e4ff5..f0bbf75854 100644
--- a/lib/std/os/windows/ws2_32.zig
+++ b/lib/std/os/windows/ws2_32.zig
@@ -718,6 +718,41 @@ const IOC_WS2 = 0x08000000;
pub const SIO_BASE_HANDLE = IOC_OUT | IOC_WS2 | 34;
+// Socket option level for SO_* options (winsock2.h).
+pub const SOL_SOCKET = 0xffff;
+
+// Socket-level option constants from winsock2.h.
+pub const SO_DEBUG = 0x0001;
+pub const SO_ACCEPTCONN = 0x0002;
+pub const SO_REUSEADDR = 0x0004;
+pub const SO_KEEPALIVE = 0x0008;
+pub const SO_DONTROUTE = 0x0010;
+pub const SO_BROADCAST = 0x0020;
+pub const SO_USELOOPBACK = 0x0040;
+pub const SO_LINGER = 0x0080;
+pub const SO_OOBINLINE = 0x0100;
+
+// Windows defines these two as the bitwise complements of their counterparts.
+pub const SO_DONTLINGER = ~@as(u32, SO_LINGER);
+pub const SO_EXCLUSIVEADDRUSE = ~@as(u32, SO_REUSEADDR);
+
+pub const SO_SNDBUF = 0x1001;
+pub const SO_RCVBUF = 0x1002;
+pub const SO_SNDLOWAT = 0x1003;
+pub const SO_RCVLOWAT = 0x1004;
+pub const SO_SNDTIMEO = 0x1005;
+pub const SO_RCVTIMEO = 0x1006;
+pub const SO_ERROR = 0x1007;
+pub const SO_TYPE = 0x1008;
+
+// WSA socket-group options.
+pub const SO_GROUP_ID = 0x2001;
+pub const SO_GROUP_PRIORITY = 0x2002;
+pub const SO_MAX_MSG_SIZE = 0x2003;
+pub const SO_PROTOCOL_INFOA = 0x2004;
+pub const SO_PROTOCOL_INFOW = 0x2005;
+
+pub const PVD_CONFIG = 0x3001;
+pub const SO_CONDITIONAL_ACCEPT = 0x3002;
+
+// TCP-level option (used with the TCP protocol level, not SOL_SOCKET).
+pub const TCP_NODELAY = 0x0001;
+
pub extern "ws2_32" fn WSAStartup(
wVersionRequired: WORD,
lpWSAData: *WSADATA,
@@ -835,3 +870,10 @@ pub extern "ws2_32" fn getsockname(
name: *sockaddr,
namelen: *c_int,
) callconv(.Stdcall) c_int;
+/// Winsock `setsockopt`: sets option `optname` at level `level` on socket `s` to the
+/// `optlen` bytes pointed to by `optval`.
+pub extern "ws2_32" fn setsockopt(
+    s: SOCKET,
+    level: u32,
+    optname: u32,
+    optval: ?*const c_void,
+    optlen: socklen_t,
+) callconv(.Stdcall) c_int;
diff --git a/lib/std/special/docs/main.js b/lib/std/special/docs/main.js
index 1e7a23cb9e..b95a93d5dd 100644
--- a/lib/std/special/docs/main.js
+++ b/lib/std/special/docs/main.js
@@ -540,7 +540,23 @@
return typeIndexName(value, wantHtml, wantLink);
case typeKinds.Fn:
var fnObj = zigAnalysis.fns[value];
- return typeIndexName(fnObj.type, wantHtml, wantLink);
+ var declPath = fnObj.decl && getCanonDeclPath(fnObj.decl);
+ var fnName = declPath ? declPath.declNames.join('.') : '(unknown)';
+
+ if (!wantHtml) {
+ return fnName;
+ }
+
+ var str = '<span class="tok-fn">';
+ if (wantLink && declPath != null) {
+ str += '<a href="' + navLink(declPath.pkgNames, declPath.declNames) + '">';
+ str += escapeHtml(fnName);
+ str += '</a>';
+ } else {
+ str += escapeHtml(fnName);
+ }
+ str += '</span>';
+ return str;
case typeKinds.Int:
return token(value, tokenKinds.Number, wantHtml);
case typeKinds.Optional:
@@ -566,6 +582,13 @@
name += "]";
name += typeIndexName(typeObj.elem, wantHtml, wantSubLink, null);
return name;
+ case typeKinds.Vector:
+ var name = "Vector(";
+ name += token(typeObj.len, tokenKinds.Number, wantHtml);
+ name += ", ";
+ name += typeIndexName(typeObj.elem, wantHtml, wantSubLink, null);
+ name += ")";
+ return name;
case typeKinds.Optional:
return "?" + typeIndexName(typeObj.child, wantHtml, wantSubLink, fnDecl, linkFnNameDecl);
case typeKinds.Pointer:
@@ -721,6 +744,16 @@
payloadHtml += token('var', tokenKinds.Keyword, wantHtml);
}
return payloadHtml;
+ case typeKinds.Frame:
+ var fnObj = zigAnalysis.fns[typeObj.fn];
+ return '@Frame(' + getValueText(fnObj.type, typeObj.fn, wantHtml, wantSubLink) + ')';
+ case typeKinds.AnyFrame:
+ var name = token('anyframe', tokenKinds.Keyword, wantHtml);
+ if (typeObj.result) {
+ name += "->";
+ name += typeIndexName(typeObj.result, wantHtml, wantSubLink, null);
+ }
+ return name;
default:
if (wantHtml) {
return escapeHtml(typeObj.name);
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 4b388adb67..5f2cb112bb 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -53,7 +53,12 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) void {
.Void,
=> return,
- .Type,
+ .Type => {
+ if (actual != expected) {
+ std.debug.panic("expected type {}, found type {}", .{ @typeName(expected), @typeName(actual) });
+ }
+ },
+
.Bool,
.Int,
.Float,