Diffstat (limited to 'lib/std/crypto')
-rw-r--r--  lib/std/crypto/25519/ed25519.zig | 18
-rw-r--r--  lib/std/crypto/25519/edwards25519.zig | 8
-rw-r--r--  lib/std/crypto/25519/scalar.zig | 6
-rw-r--r--  lib/std/crypto/25519/x25519.zig | 14
-rw-r--r--  lib/std/crypto/Certificate.zig | 14
-rw-r--r--  lib/std/crypto/aegis.zig | 50
-rw-r--r--  lib/std/crypto/aes_gcm.zig | 4
-rw-r--r--  lib/std/crypto/aes_ocb.zig | 20
-rw-r--r--  lib/std/crypto/argon2.zig | 16
-rw-r--r--  lib/std/crypto/ascon.zig | 14
-rw-r--r--  lib/std/crypto/bcrypt.zig | 6
-rw-r--r--  lib/std/crypto/benchmark.zig | 4
-rw-r--r--  lib/std/crypto/blake2.zig | 30
-rw-r--r--  lib/std/crypto/blake3.zig | 8
-rw-r--r--  lib/std/crypto/chacha20.zig | 8
-rw-r--r--  lib/std/crypto/ecdsa.zig | 20
-rw-r--r--  lib/std/crypto/hkdf.zig | 2
-rw-r--r--  lib/std/crypto/hmac.zig | 8
-rw-r--r--  lib/std/crypto/isap.zig | 2
-rw-r--r--  lib/std/crypto/keccak_p.zig | 16
-rw-r--r--  lib/std/crypto/kyber_d00.zig | 32
-rw-r--r--  lib/std/crypto/md5.zig | 11
-rw-r--r--  lib/std/crypto/modes.zig | 6
-rw-r--r--  lib/std/crypto/pbkdf2.zig | 4
-rw-r--r--  lib/std/crypto/pcurves/common.zig | 4
-rw-r--r--  lib/std/crypto/pcurves/p256.zig | 6
-rw-r--r--  lib/std/crypto/pcurves/p256/scalar.zig | 10
-rw-r--r--  lib/std/crypto/pcurves/p384.zig | 6
-rw-r--r--  lib/std/crypto/pcurves/p384/scalar.zig | 8
-rw-r--r--  lib/std/crypto/pcurves/secp256k1.zig | 6
-rw-r--r--  lib/std/crypto/pcurves/secp256k1/scalar.zig | 10
-rw-r--r--  lib/std/crypto/phc_encoding.zig | 2
-rw-r--r--  lib/std/crypto/salsa20.zig | 12
-rw-r--r--  lib/std/crypto/scrypt.zig | 6
-rw-r--r--  lib/std/crypto/sha1.zig | 8
-rw-r--r--  lib/std/crypto/sha2.zig | 18
-rw-r--r--  lib/std/crypto/sha3.zig | 4
-rw-r--r--  lib/std/crypto/siphash.zig | 9
-rw-r--r--  lib/std/crypto/tls.zig | 4
-rw-r--r--  lib/std/crypto/tls/Client.zig | 30
-rw-r--r--  lib/std/crypto/utils.zig | 12
41 files changed, 237 insertions(+), 239 deletions(-)
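
The changes below consistently replace std.mem.copy/std.mem.set with the @memcpy/@memset builtins, use direct array assignment where both lengths are comptime-known, and swap math.min/math.max for @min/@max. A minimal sketch of the recurring pattern follows; the function, buffer names, and sizes are illustrative and not taken from this commit.

// Illustrative migration sketch, not code from this commit.
fn migrationSketch(dst: []u8, src: []const u8, fixed: *[32]u8, seed: [32]u8) void {
    // Old: mem.copy(u8, dst, src) tolerated a destination longer than the source.
    // New: @memcpy requires both operands to have the same length, so the
    // destination is sliced to src.len explicitly; mem.set(u8, dst, 0) becomes @memset.
    @memset(dst, 0);
    @memcpy(dst[0..src.len], src);

    // When both lengths are comptime-known, a plain array copy replaces mem.copy.
    fixed.* = seed;
}

test "migration sketch" {
    var buf = [_]u8{0} ** 8;
    var fixed: [32]u8 = undefined;
    migrationSketch(buf[0..], "hi", &fixed, [_]u8{1} ** 32);
}
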
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index 66dcf3705b..6b9c1028b6 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -79,8 +79,8 @@ pub const Ed25519 = struct {
const r_bytes = r.toBytes();
var t: [64]u8 = undefined;
- mem.copy(u8, t[0..32], &r_bytes);
- mem.copy(u8, t[32..], &public_key.bytes);
+ t[0..32].* = r_bytes;
+ t[32..].* = public_key.bytes;
var h = Sha512.init(.{});
h.update(&t);
@@ -200,8 +200,8 @@ pub const Ed25519 = struct {
/// Return the raw signature (r, s) in little-endian format.
pub fn toBytes(self: Signature) [encoded_length]u8 {
var bytes: [encoded_length]u8 = undefined;
- mem.copy(u8, bytes[0 .. encoded_length / 2], &self.r);
- mem.copy(u8, bytes[encoded_length / 2 ..], &self.s);
+ bytes[0 .. encoded_length / 2].* = self.r;
+ bytes[encoded_length / 2 ..].* = self.s;
return bytes;
}
@@ -260,8 +260,8 @@ pub const Ed25519 = struct {
const pk_p = Curve.basePoint.clampedMul(az[0..32].*) catch return error.IdentityElement;
const pk_bytes = pk_p.toBytes();
var sk_bytes: [SecretKey.encoded_length]u8 = undefined;
- mem.copy(u8, &sk_bytes, &ss);
- mem.copy(u8, sk_bytes[seed_length..], &pk_bytes);
+ sk_bytes[0..ss.len].* = ss;
+ sk_bytes[seed_length..].* = pk_bytes;
return KeyPair{
.public_key = PublicKey.fromBytes(pk_bytes) catch unreachable,
.secret_key = try SecretKey.fromBytes(sk_bytes),
@@ -373,7 +373,7 @@ pub const Ed25519 = struct {
var z_batch: [count]Curve.scalar.CompressedScalar = undefined;
for (&z_batch) |*z| {
crypto.random.bytes(z[0..16]);
- mem.set(u8, z[16..], 0);
+ @memset(z[16..], 0);
}
var zs_sum = Curve.scalar.zero;
@@ -444,8 +444,8 @@ pub const Ed25519 = struct {
};
var prefix: [64]u8 = undefined;
- mem.copy(u8, prefix[0..32], h[32..64]);
- mem.copy(u8, prefix[32..64], blind_h[32..64]);
+ prefix[0..32].* = h[32..64].*;
+ prefix[32..64].* = blind_h[32..64].*;
const blind_secret_key = BlindSecretKey{
.prefix = prefix,
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index df4f8467f9..a8ca8e2fb6 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -306,7 +306,7 @@ pub const Edwards25519 = struct {
var pcs: [count][9]Edwards25519 = undefined;
var bpc: [9]Edwards25519 = undefined;
- mem.copy(Edwards25519, bpc[0..], basePointPc[0..bpc.len]);
+ @memcpy(&bpc, basePointPc[0..bpc.len]);
for (ps, 0..) |p, i| {
if (p.is_base) {
@@ -439,7 +439,7 @@ pub const Edwards25519 = struct {
var u: [n * H.digest_length]u8 = undefined;
var i: usize = 0;
while (i < n * H.digest_length) : (i += H.digest_length) {
- mem.copy(u8, u[i..][0..H.digest_length], u_0[0..]);
+ u[i..][0..H.digest_length].* = u_0;
var j: usize = 0;
while (i > 0 and j < H.digest_length) : (j += 1) {
u[i + j] ^= u[i + j - H.digest_length];
@@ -455,8 +455,8 @@ pub const Edwards25519 = struct {
var px: [n]Edwards25519 = undefined;
i = 0;
while (i < n) : (i += 1) {
- mem.set(u8, u_0[0 .. H.digest_length - h_l], 0);
- mem.copy(u8, u_0[H.digest_length - h_l ..][0..h_l], u[i * h_l ..][0..h_l]);
+ @memset(u_0[0 .. H.digest_length - h_l], 0);
+ u_0[H.digest_length - h_l ..][0..h_l].* = u[i * h_l ..][0..h_l].*;
px[i] = fromHash(u_0);
}
return px;
diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig
index ff2e6aff80..fd6d42aebe 100644
--- a/lib/std/crypto/25519/scalar.zig
+++ b/lib/std/crypto/25519/scalar.zig
@@ -83,8 +83,8 @@ pub fn add(a: CompressedScalar, b: CompressedScalar) CompressedScalar {
pub fn neg(s: CompressedScalar) CompressedScalar {
const fs: [64]u8 = field_order_s ++ [_]u8{0} ** 32;
var sx: [64]u8 = undefined;
- mem.copy(u8, sx[0..32], s[0..]);
- mem.set(u8, sx[32..], 0);
+ sx[0..32].* = s;
+ @memset(sx[32..], 0);
var carry: u32 = 0;
var i: usize = 0;
while (i < 64) : (i += 1) {
@@ -593,7 +593,7 @@ const ScalarDouble = struct {
limbs[i] = mem.readIntLittle(u64, bytes[i * 7 ..][0..8]) & 0xffffffffffffff;
}
limbs[i] = @as(u64, mem.readIntLittle(u32, bytes[i * 7 ..][0..4]));
- mem.set(u64, limbs[5..], 0);
+ @memset(limbs[5..], 0);
return ScalarDouble{ .limbs = limbs };
}
diff --git a/lib/std/crypto/25519/x25519.zig b/lib/std/crypto/25519/x25519.zig
index 22bcf00136..b746a51968 100644
--- a/lib/std/crypto/25519/x25519.zig
+++ b/lib/std/crypto/25519/x25519.zig
@@ -37,7 +37,7 @@ pub const X25519 = struct {
break :sk random_seed;
};
var kp: KeyPair = undefined;
- mem.copy(u8, &kp.secret_key, sk[0..]);
+ kp.secret_key = sk;
kp.public_key = try X25519.recoverPublicKey(sk);
return kp;
}
@@ -120,8 +120,8 @@ test "x25519 rfc7748 one iteration" {
var i: usize = 0;
while (i < 1) : (i += 1) {
const output = try X25519.scalarmult(k, u);
- mem.copy(u8, u[0..], k[0..]);
- mem.copy(u8, k[0..], output[0..]);
+ u = k;
+ k = output;
}
try std.testing.expectEqual(k, expected_output);
@@ -142,8 +142,8 @@ test "x25519 rfc7748 1,000 iterations" {
var i: usize = 0;
while (i < 1000) : (i += 1) {
const output = try X25519.scalarmult(&k, &u);
- mem.copy(u8, u[0..], k[0..]);
- mem.copy(u8, k[0..], output[0..]);
+ u = k;
+ k = output;
}
try std.testing.expectEqual(k, expected_output);
@@ -163,8 +163,8 @@ test "x25519 rfc7748 1,000,000 iterations" {
var i: usize = 0;
while (i < 1000000) : (i += 1) {
const output = try X25519.scalarmult(&k, &u);
- mem.copy(u8, u[0..], k[0..]);
- mem.copy(u8, k[0..], output[0..]);
+ u = k;
+ k = output;
}
try std.testing.expectEqual(k[0..], expected_output);
diff --git a/lib/std/crypto/Certificate.zig b/lib/std/crypto/Certificate.zig
index 0caffba363..ec4766322c 100644
--- a/lib/std/crypto/Certificate.zig
+++ b/lib/std/crypto/Certificate.zig
@@ -928,7 +928,7 @@ pub const rsa = struct {
pub const PSSSignature = struct {
pub fn fromBytes(comptime modulus_len: usize, msg: []const u8) [modulus_len]u8 {
var result = [1]u8{0} ** modulus_len;
- std.mem.copy(u8, &result, msg);
+ std.mem.copyForwards(u8, &result, msg);
return result;
}
@@ -1025,9 +1025,9 @@ pub const rsa = struct {
// initial zero octets.
var m_p = try allocator.alloc(u8, 8 + Hash.digest_length + sLen);
defer allocator.free(m_p);
- std.mem.copy(u8, m_p, &([_]u8{0} ** 8));
- std.mem.copy(u8, m_p[8..], &mHash);
- std.mem.copy(u8, m_p[(8 + Hash.digest_length)..], salt);
+ std.mem.copyForwards(u8, m_p, &([_]u8{0} ** 8));
+ std.mem.copyForwards(u8, m_p[8..], &mHash);
+ std.mem.copyForwards(u8, m_p[(8 + Hash.digest_length)..], salt);
// 13. Let H' = Hash(M'), an octet string of length hLen.
var h_p: [Hash.digest_length]u8 = undefined;
@@ -1047,7 +1047,7 @@ pub const rsa = struct {
var hash = try allocator.alloc(u8, seed.len + c.len);
defer allocator.free(hash);
- std.mem.copy(u8, hash, seed);
+ std.mem.copyForwards(u8, hash, seed);
var hashed: [Hash.digest_length]u8 = undefined;
while (idx < len) {
@@ -1056,10 +1056,10 @@ pub const rsa = struct {
c[2] = @intCast(u8, (counter >> 8) & 0xFF);
c[3] = @intCast(u8, counter & 0xFF);
- std.mem.copy(u8, hash[seed.len..], &c);
+ std.mem.copyForwards(u8, hash[seed.len..], &c);
Hash.hash(hash, &hashed, .{});
- std.mem.copy(u8, out[idx..], &hashed);
+ std.mem.copyForwards(u8, out[idx..], &hashed);
idx += hashed.len;
counter += 1;
diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig
index d7305b444a..2d37bacc3a 100644
--- a/lib/std/crypto/aegis.zig
+++ b/lib/std/crypto/aegis.zig
@@ -152,8 +152,8 @@ fn Aegis128LGeneric(comptime tag_bits: u9) type {
state.absorb(ad[i..][0..32]);
}
if (ad.len % 32 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. ad.len % 32], ad[i .. i + ad.len % 32]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. ad.len % 32], ad[i..][0 .. ad.len % 32]);
state.absorb(&src);
}
i = 0;
@@ -161,10 +161,10 @@ fn Aegis128LGeneric(comptime tag_bits: u9) type {
state.enc(c[i..][0..32], m[i..][0..32]);
}
if (m.len % 32 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. m.len % 32], m[i .. i + m.len % 32]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. m.len % 32], m[i..][0 .. m.len % 32]);
state.enc(&dst, &src);
- mem.copy(u8, c[i .. i + m.len % 32], dst[0 .. m.len % 32]);
+ @memcpy(c[i..][0 .. m.len % 32], dst[0 .. m.len % 32]);
}
tag.* = state.mac(tag_bits, ad.len, m.len);
}
@@ -185,8 +185,8 @@ fn Aegis128LGeneric(comptime tag_bits: u9) type {
state.absorb(ad[i..][0..32]);
}
if (ad.len % 32 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. ad.len % 32], ad[i .. i + ad.len % 32]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. ad.len % 32], ad[i..][0 .. ad.len % 32]);
state.absorb(&src);
}
i = 0;
@@ -194,11 +194,11 @@ fn Aegis128LGeneric(comptime tag_bits: u9) type {
state.dec(m[i..][0..32], c[i..][0..32]);
}
if (m.len % 32 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. m.len % 32], c[i .. i + m.len % 32]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. m.len % 32], c[i..][0 .. m.len % 32]);
state.dec(&dst, &src);
- mem.copy(u8, m[i .. i + m.len % 32], dst[0 .. m.len % 32]);
- mem.set(u8, dst[0 .. m.len % 32], 0);
+ @memcpy(m[i..][0 .. m.len % 32], dst[0 .. m.len % 32]);
+ @memset(dst[0 .. m.len % 32], 0);
const blocks = &state.blocks;
blocks[0] = blocks[0].xorBlocks(AesBlock.fromBytes(dst[0..16]));
blocks[4] = blocks[4].xorBlocks(AesBlock.fromBytes(dst[16..32]));
@@ -334,8 +334,8 @@ fn Aegis256Generic(comptime tag_bits: u9) type {
state.enc(&dst, ad[i..][0..16]);
}
if (ad.len % 16 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. ad.len % 16], ad[i .. i + ad.len % 16]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. ad.len % 16], ad[i..][0 .. ad.len % 16]);
state.enc(&dst, &src);
}
i = 0;
@@ -343,10 +343,10 @@ fn Aegis256Generic(comptime tag_bits: u9) type {
state.enc(c[i..][0..16], m[i..][0..16]);
}
if (m.len % 16 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. m.len % 16], m[i .. i + m.len % 16]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. m.len % 16], m[i..][0 .. m.len % 16]);
state.enc(&dst, &src);
- mem.copy(u8, c[i .. i + m.len % 16], dst[0 .. m.len % 16]);
+ @memcpy(c[i..][0 .. m.len % 16], dst[0 .. m.len % 16]);
}
tag.* = state.mac(tag_bits, ad.len, m.len);
}
@@ -367,8 +367,8 @@ fn Aegis256Generic(comptime tag_bits: u9) type {
state.enc(&dst, ad[i..][0..16]);
}
if (ad.len % 16 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. ad.len % 16], ad[i .. i + ad.len % 16]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. ad.len % 16], ad[i..][0 .. ad.len % 16]);
state.enc(&dst, &src);
}
i = 0;
@@ -376,11 +376,11 @@ fn Aegis256Generic(comptime tag_bits: u9) type {
state.dec(m[i..][0..16], c[i..][0..16]);
}
if (m.len % 16 != 0) {
- mem.set(u8, src[0..], 0);
- mem.copy(u8, src[0 .. m.len % 16], c[i .. i + m.len % 16]);
+ @memset(src[0..], 0);
+ @memcpy(src[0 .. m.len % 16], c[i..][0 .. m.len % 16]);
state.dec(&dst, &src);
- mem.copy(u8, m[i .. i + m.len % 16], dst[0 .. m.len % 16]);
- mem.set(u8, dst[0 .. m.len % 16], 0);
+ @memcpy(m[i..][0 .. m.len % 16], dst[0 .. m.len % 16]);
+ @memset(dst[0 .. m.len % 16], 0);
const blocks = &state.blocks;
blocks[0] = blocks[0].xorBlocks(AesBlock.fromBytes(&dst));
}
@@ -457,7 +457,7 @@ fn AegisMac(comptime T: type) type {
self.msg_len += b.len;
const len_partial = @min(b.len, block_length - self.off);
- mem.copy(u8, self.buf[self.off..][0..len_partial], b[0..len_partial]);
+ @memcpy(self.buf[self.off..][0..len_partial], b[0..len_partial]);
self.off += len_partial;
if (self.off < block_length) {
return;
@@ -470,7 +470,7 @@ fn AegisMac(comptime T: type) type {
self.state.absorb(b[i..][0..block_length]);
}
if (i != b.len) {
- mem.copy(u8, self.buf[0..], b[i..]);
+ @memcpy(self.buf[0..], b[i..]);
self.off = b.len - i;
}
}
@@ -479,7 +479,7 @@ fn AegisMac(comptime T: type) type {
pub fn final(self: *Self, out: *[mac_length]u8) void {
if (self.off > 0) {
var pad = [_]u8{0} ** block_length;
- mem.copy(u8, pad[0..], self.buf[0..self.off]);
+ @memcpy(pad[0..self.off], self.buf[0..self.off]);
self.state.absorb(&pad);
}
out.* = self.state.mac(T.tag_length * 8, self.msg_len, 0);
diff --git a/lib/std/crypto/aes_gcm.zig b/lib/std/crypto/aes_gcm.zig
index 4ec53371e6..660073d3ae 100644
--- a/lib/std/crypto/aes_gcm.zig
+++ b/lib/std/crypto/aes_gcm.zig
@@ -31,7 +31,7 @@ fn AesGcm(comptime Aes: anytype) type {
var t: [16]u8 = undefined;
var j: [16]u8 = undefined;
- mem.copy(u8, j[0..nonce_length], npub[0..]);
+ j[0..nonce_length].* = npub;
mem.writeIntBig(u32, j[nonce_length..][0..4], 1);
aes.encrypt(&t, &j);
@@ -64,7 +64,7 @@ fn AesGcm(comptime Aes: anytype) type {
var t: [16]u8 = undefined;
var j: [16]u8 = undefined;
- mem.copy(u8, j[0..nonce_length], npub[0..]);
+ j[0..nonce_length].* = npub;
mem.writeIntBig(u32, j[nonce_length..][0..4], 1);
aes.encrypt(&t, &j);
diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig
index 83e33e5fca..6d5ce3779a 100644
--- a/lib/std/crypto/aes_ocb.zig
+++ b/lib/std/crypto/aes_ocb.zig
@@ -75,7 +75,7 @@ fn AesOcb(comptime Aes: anytype) type {
if (leftover > 0) {
xorWith(&offset, lx.star);
var padded = [_]u8{0} ** 16;
- mem.copy(u8, padded[0..leftover], a[i * 16 ..][0..leftover]);
+ @memcpy(padded[0..leftover], a[i * 16 ..][0..leftover]);
padded[leftover] = 1;
var e = xorBlocks(offset, padded);
aes_enc_ctx.encrypt(&e, &e);
@@ -88,7 +88,7 @@ fn AesOcb(comptime Aes: anytype) type {
var nx = [_]u8{0} ** 16;
nx[0] = @intCast(u8, @truncate(u7, tag_length * 8) << 1);
nx[16 - nonce_length - 1] = 1;
- mem.copy(u8, nx[16 - nonce_length ..], &npub);
+ nx[nx.len - nonce_length ..].* = npub;
const bottom = @truncate(u6, nx[15]);
nx[15] &= 0xc0;
@@ -132,14 +132,14 @@ fn AesOcb(comptime Aes: anytype) type {
xorWith(&offset, lt[@ctz(i + 1 + j)]);
offsets[j] = offset;
const p = m[(i + j) * 16 ..][0..16].*;
- mem.copy(u8, es[j * 16 ..][0..16], &xorBlocks(p, offsets[j]));
+ es[j * 16 ..][0..16].* = xorBlocks(p, offsets[j]);
xorWith(&sum, p);
}
aes_enc_ctx.encryptWide(wb, &es, &es);
j = 0;
while (j < wb) : (j += 1) {
const e = es[j * 16 ..][0..16].*;
- mem.copy(u8, c[(i + j) * 16 ..][0..16], &xorBlocks(e, offsets[j]));
+ c[(i + j) * 16 ..][0..16].* = xorBlocks(e, offsets[j]);
}
}
while (i < full_blocks) : (i += 1) {
@@ -147,7 +147,7 @@ fn AesOcb(comptime Aes: anytype) type {
const p = m[i * 16 ..][0..16].*;
var e = xorBlocks(p, offset);
aes_enc_ctx.encrypt(&e, &e);
- mem.copy(u8, c[i * 16 ..][0..16], &xorBlocks(e, offset));
+ c[i * 16 ..][0..16].* = xorBlocks(e, offset);
xorWith(&sum, p);
}
const leftover = m.len % 16;
@@ -159,7 +159,7 @@ fn AesOcb(comptime Aes: anytype) type {
c[i * 16 + j] = pad[j] ^ x;
}
var e = [_]u8{0} ** 16;
- mem.copy(u8, e[0..leftover], m[i * 16 ..][0..leftover]);
+ @memcpy(e[0..leftover], m[i * 16 ..][0..leftover]);
e[leftover] = 0x80;
xorWith(&sum, e);
}
@@ -196,13 +196,13 @@ fn AesOcb(comptime Aes: anytype) type {
xorWith(&offset, lt[@ctz(i + 1 + j)]);
offsets[j] = offset;
const q = c[(i + j) * 16 ..][0..16].*;
- mem.copy(u8, es[j * 16 ..][0..16], &xorBlocks(q, offsets[j]));
+ es[j * 16 ..][0..16].* = xorBlocks(q, offsets[j]);
}
aes_dec_ctx.decryptWide(wb, &es, &es);
j = 0;
while (j < wb) : (j += 1) {
const p = xorBlocks(es[j * 16 ..][0..16].*, offsets[j]);
- mem.copy(u8, m[(i + j) * 16 ..][0..16], &p);
+ m[(i + j) * 16 ..][0..16].* = p;
xorWith(&sum, p);
}
}
@@ -212,7 +212,7 @@ fn AesOcb(comptime Aes: anytype) type {
var e = xorBlocks(q, offset);
aes_dec_ctx.decrypt(&e, &e);
const p = xorBlocks(e, offset);
- mem.copy(u8, m[i * 16 ..][0..16], &p);
+ m[i * 16 ..][0..16].* = p;
xorWith(&sum, p);
}
const leftover = m.len % 16;
@@ -224,7 +224,7 @@ fn AesOcb(comptime Aes: anytype) type {
m[i * 16 + j] = pad[j] ^ x;
}
var e = [_]u8{0} ** 16;
- mem.copy(u8, e[0..leftover], m[i * 16 ..][0..leftover]);
+ @memcpy(e[0..leftover], m[i * 16 ..][0..leftover]);
e[leftover] = 0x80;
xorWith(&sum, e);
}
diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig
index 0112e81c6a..43dbe3e332 100644
--- a/lib/std/crypto/argon2.zig
+++ b/lib/std/crypto/argon2.zig
@@ -149,7 +149,7 @@ fn blake2bLong(out: []u8, in: []const u8) void {
h.update(&outlen_bytes);
h.update(in);
h.final(&out_buf);
- mem.copy(u8, out, out_buf[0..out.len]);
+ @memcpy(out, out_buf[0..out.len]);
return;
}
@@ -158,19 +158,19 @@ fn blake2bLong(out: []u8, in: []const u8) void {
h.update(in);
h.final(&out_buf);
var out_slice = out;
- mem.copy(u8, out_slice, out_buf[0 .. H.digest_length / 2]);
+ out_slice[0 .. H.digest_length / 2].* = out_buf[0 .. H.digest_length / 2].*;
out_slice = out_slice[H.digest_length / 2 ..];
var in_buf: [H.digest_length]u8 = undefined;
while (out_slice.len > H.digest_length) {
- mem.copy(u8, &in_buf, &out_buf);
+ in_buf = out_buf;
H.hash(&in_buf, &out_buf, .{});
- mem.copy(u8, out_slice, out_buf[0 .. H.digest_length / 2]);
+ out_slice[0 .. H.digest_length / 2].* = out_buf[0 .. H.digest_length / 2].*;
out_slice = out_slice[H.digest_length / 2 ..];
}
- mem.copy(u8, &in_buf, &out_buf);
+ in_buf = out_buf;
H.hash(&in_buf, &out_buf, .{ .expected_out_bits = out_slice.len * 8 });
- mem.copy(u8, out_slice, out_buf[0..out_slice.len]);
+ @memcpy(out_slice, out_buf[0..out_slice.len]);
}
fn initBlocks(
@@ -494,7 +494,7 @@ pub fn kdf(
if (params.t < 1 or params.p < 1) return KdfError.WeakParameters;
var h0 = initHash(password, salt, params, derived_key.len, mode);
- const memory = math.max(
+ const memory = @max(
params.m / (sync_points * params.p) * (sync_points * params.p),
2 * sync_points * params.p,
);
@@ -877,7 +877,7 @@ test "kdf" {
.hash = "1640b932f4b60e272f5d2207b9a9c626ffa1bd88d2349016",
},
};
- inline for (test_vectors) |v| {
+ for (test_vectors) |v| {
var want: [24]u8 = undefined;
_ = try std.fmt.hexToBytes(&want, v.hash);
diff --git a/lib/std/crypto/ascon.zig b/lib/std/crypto/ascon.zig
index f37d9acea5..ae4bb57d29 100644
--- a/lib/std/crypto/ascon.zig
+++ b/lib/std/crypto/ascon.zig
@@ -34,7 +34,7 @@ pub fn State(comptime endian: builtin.Endian) type {
/// Initialize the state from a slice of bytes.
pub fn init(initial_state: [block_bytes]u8) Self {
var state = Self{ .st = undefined };
- mem.copy(u8, state.asBytes(), &initial_state);
+ @memcpy(state.asBytes(), &initial_state);
state.endianSwap();
return state;
}
@@ -87,7 +87,7 @@ pub fn State(comptime endian: builtin.Endian) type {
}
if (i < bytes.len) {
var padded = [_]u8{0} ** 8;
- mem.copy(u8, padded[0 .. bytes.len - i], bytes[i..]);
+ @memcpy(padded[0 .. bytes.len - i], bytes[i..]);
self.st[i / 8] = mem.readInt(u64, padded[0..], endian);
}
}
@@ -109,7 +109,7 @@ pub fn State(comptime endian: builtin.Endian) type {
}
if (i < bytes.len) {
var padded = [_]u8{0} ** 8;
- mem.copy(u8, padded[0 .. bytes.len - i], bytes[i..]);
+ @memcpy(padded[0 .. bytes.len - i], bytes[i..]);
self.st[i / 8] ^= mem.readInt(u64, padded[0..], endian);
}
}
@@ -123,7 +123,7 @@ pub fn State(comptime endian: builtin.Endian) type {
if (i < out.len) {
var padded = [_]u8{0} ** 8;
mem.writeInt(u64, padded[0..], self.st[i / 8], endian);
- mem.copy(u8, out[i..], padded[0 .. out.len - i]);
+ @memcpy(out[i..], padded[0 .. out.len - i]);
}
}
@@ -138,16 +138,16 @@ pub fn State(comptime endian: builtin.Endian) type {
}
if (i < in.len) {
var padded = [_]u8{0} ** 8;
- mem.copy(u8, padded[0 .. in.len - i], in[i..]);
+ @memcpy(padded[0 .. in.len - i], in[i..]);
const x = mem.readIntNative(u64, &padded) ^ mem.nativeTo(u64, self.st[i / 8], endian);
mem.writeIntNative(u64, &padded, x);
- mem.copy(u8, out[i..], padded[0 .. in.len - i]);
+ @memcpy(out[i..], padded[0 .. in.len - i]);
}
}
/// Set the words storing the bytes of a given range to zero.
pub fn clear(self: *Self, from: usize, to: usize) void {
- mem.set(u64, self.st[from / 8 .. (to + 7) / 8], 0);
+ @memset(self.st[from / 8 .. (to + 7) / 8], 0);
}
/// Clear the entire state, disabling compiler optimizations.
diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig
index 2191ab0d9e..dda5f5e377 100644
--- a/lib/std/crypto/bcrypt.zig
+++ b/lib/std/crypto/bcrypt.zig
@@ -416,8 +416,8 @@ pub fn bcrypt(
) [dk_length]u8 {
var state = State{};
var password_buf: [73]u8 = undefined;
- const trimmed_len = math.min(password.len, password_buf.len - 1);
- mem.copy(u8, password_buf[0..], password[0..trimmed_len]);
+ const trimmed_len = @min(password.len, password_buf.len - 1);
+ @memcpy(password_buf[0..trimmed_len], password[0..trimmed_len]);
password_buf[trimmed_len] = 0;
var passwordZ = password_buf[0 .. trimmed_len + 1];
state.expand(salt[0..], passwordZ);
@@ -626,7 +626,7 @@ const CryptFormatHasher = struct {
crypto.random.bytes(&salt);
const hash = crypt_format.strHashInternal(password, salt, params);
- mem.copy(u8, buf, &hash);
+ @memcpy(buf[0..hash.len], &hash);
return buf[0..pwhash_str_length];
}
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index f512c513e7..696a9c3107 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -113,8 +113,8 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c
var i: usize = 0;
while (i < exchange_count) : (i += 1) {
const out = try DhKeyExchange.scalarmult(secret, public);
- mem.copy(u8, secret[0..16], out[0..16]);
- mem.copy(u8, public[0..16], out[16..32]);
+ secret[0..16].* = out[0..16].*;
+ public[0..16].* = out[16..32].*;
mem.doNotOptimizeAway(&out);
}
}
diff --git a/lib/std/crypto/blake2.zig b/lib/std/crypto/blake2.zig
index 85c26ce599..316ea5e6b7 100644
--- a/lib/std/crypto/blake2.zig
+++ b/lib/std/crypto/blake2.zig
@@ -76,7 +76,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
comptime debug.assert(8 <= out_bits and out_bits <= 256);
var d: Self = undefined;
- mem.copy(u32, d.h[0..], iv[0..]);
+ d.h = iv;
const key_len = if (options.key) |key| key.len else 0;
// default parameters
@@ -93,7 +93,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
d.h[7] ^= mem.readIntLittle(u32, context[4..8]);
}
if (key_len > 0) {
- mem.set(u8, d.buf[key_len..], 0);
+ @memset(d.buf[key_len..], 0);
d.update(options.key.?);
d.buf_len = 64;
}
@@ -112,7 +112,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 64) {
off += 64 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ @memcpy(d.buf[d.buf_len..][0..off], b[0..off]);
d.t += 64;
d.round(d.buf[0..], false);
d.buf_len = 0;
@@ -125,16 +125,17 @@ pub fn Blake2s(comptime out_bits: usize) type {
}
// Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += @intCast(u8, b[off..].len);
+ const b_slice = b[off..];
+ @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
+ d.buf_len += @intCast(u8, b_slice.len);
}
pub fn final(d: *Self, out: *[digest_length]u8) void {
- mem.set(u8, d.buf[d.buf_len..], 0);
+ @memset(d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
for (&d.h) |*x| x.* = mem.nativeToLittle(u32, x.*);
- mem.copy(u8, out[0..], @ptrCast(*[digest_length]u8, &d.h));
+ out.* = @ptrCast(*[digest_length]u8, &d.h).*;
}
fn round(d: *Self, b: *const [64]u8, last: bool) void {
@@ -511,7 +512,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
comptime debug.assert(8 <= out_bits and out_bits <= 512);
var d: Self = undefined;
- mem.copy(u64, d.h[0..], iv[0..]);
+ d.h = iv;
const key_len = if (options.key) |key| key.len else 0;
// default parameters
@@ -528,7 +529,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
d.h[7] ^= mem.readIntLittle(u64, context[8..16]);
}
if (key_len > 0) {
- mem.set(u8, d.buf[key_len..], 0);
+ @memset(d.buf[key_len..], 0);
d.update(options.key.?);
d.buf_len = 128;
}
@@ -547,7 +548,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len > 128) {
off += 128 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ @memcpy(d.buf[d.buf_len..][0..off], b[0..off]);
d.t += 128;
d.round(d.buf[0..], false);
d.buf_len = 0;
@@ -560,16 +561,17 @@ pub fn Blake2b(comptime out_bits: usize) type {
}
// Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += @intCast(u8, b[off..].len);
+ const b_slice = b[off..];
+ @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
+ d.buf_len += @intCast(u8, b_slice.len);
}
pub fn final(d: *Self, out: *[digest_length]u8) void {
- mem.set(u8, d.buf[d.buf_len..], 0);
+ @memset(d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
for (&d.h) |*x| x.* = mem.nativeToLittle(u64, x.*);
- mem.copy(u8, out[0..], @ptrCast(*[digest_length]u8, &d.h));
+ out.* = @ptrCast(*[digest_length]u8, &d.h).*;
}
fn round(d: *Self, b: *const [128]u8, last: bool) void {
diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig
index 36d717387f..fb580fda13 100644
--- a/lib/std/crypto/blake3.zig
+++ b/lib/std/crypto/blake3.zig
@@ -253,7 +253,7 @@ const Output = struct {
while (out_word_it.next()) |out_word| {
var word_bytes: [4]u8 = undefined;
mem.writeIntLittle(u32, &word_bytes, words[word_counter]);
- mem.copy(u8, out_word, word_bytes[0..out_word.len]);
+ @memcpy(out_word, word_bytes[0..out_word.len]);
word_counter += 1;
}
output_block_counter += 1;
@@ -284,7 +284,7 @@ const ChunkState = struct {
fn fillBlockBuf(self: *ChunkState, input: []const u8) []const u8 {
const want = BLOCK_LEN - self.block_len;
const take = math.min(want, input.len);
- mem.copy(u8, self.block[self.block_len..][0..take], input[0..take]);
+ @memcpy(self.block[self.block_len..][0..take], input[0..take]);
self.block_len += @truncate(u8, take);
return input[take..];
}
@@ -336,8 +336,8 @@ fn parentOutput(
flags: u8,
) Output {
var block_words: [16]u32 align(16) = undefined;
- mem.copy(u32, block_words[0..8], left_child_cv[0..]);
- mem.copy(u32, block_words[8..], right_child_cv[0..]);
+ block_words[0..8].* = left_child_cv;
+ block_words[8..].* = right_child_cv;
return Output{
.input_chaining_value = key,
.block_words = block_words,
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index aa0f148be9..bffc70f500 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -211,7 +211,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize) type {
var buf: [64]u8 = undefined;
hashToBytes(buf[0..], x);
- mem.copy(u8, out[i..], buf[0 .. out.len - i]);
+ @memcpy(out[i..], buf[0 .. out.len - i]);
}
}
@@ -372,7 +372,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
var buf: [64]u8 = undefined;
hashToBytes(buf[0..], x);
- mem.copy(u8, out[i..], buf[0 .. out.len - i]);
+ @memcpy(out[i..], buf[0 .. out.len - i]);
}
}
@@ -413,8 +413,8 @@ fn keyToWords(key: [32]u8) [8]u32 {
fn extend(key: [32]u8, nonce: [24]u8, comptime rounds_nb: usize) struct { key: [32]u8, nonce: [12]u8 } {
var subnonce: [12]u8 = undefined;
- mem.set(u8, subnonce[0..4], 0);
- mem.copy(u8, subnonce[4..], nonce[16..24]);
+ @memset(subnonce[0..4], 0);
+ subnonce[4..].* = nonce[16..24].*;
return .{
.key = ChaChaImpl(rounds_nb).hchacha20(nonce[0..16].*, key),
.nonce = subnonce,
diff --git a/lib/std/crypto/ecdsa.zig b/lib/std/crypto/ecdsa.zig
index 37ae57a7e6..e552af2e26 100644
--- a/lib/std/crypto/ecdsa.zig
+++ b/lib/std/crypto/ecdsa.zig
@@ -102,8 +102,8 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
/// Return the raw signature (r, s) in big-endian format.
pub fn toBytes(self: Signature) [encoded_length]u8 {
var bytes: [encoded_length]u8 = undefined;
- mem.copy(u8, bytes[0 .. encoded_length / 2], &self.r);
- mem.copy(u8, bytes[encoded_length / 2 ..], &self.s);
+ @memcpy(bytes[0 .. encoded_length / 2], &self.r);
+ @memcpy(bytes[encoded_length / 2 ..], &self.s);
return bytes;
}
@@ -325,11 +325,11 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
fn reduceToScalar(comptime unreduced_len: usize, s: [unreduced_len]u8) Curve.scalar.Scalar {
if (unreduced_len >= 48) {
var xs = [_]u8{0} ** 64;
- mem.copy(u8, xs[xs.len - s.len ..], s[0..]);
+ @memcpy(xs[xs.len - s.len ..], s[0..]);
return Curve.scalar.Scalar.fromBytes64(xs, .Big);
}
var xs = [_]u8{0} ** 48;
- mem.copy(u8, xs[xs.len - s.len ..], s[0..]);
+ @memcpy(xs[xs.len - s.len ..], s[0..]);
return Curve.scalar.Scalar.fromBytes48(xs, .Big);
}
@@ -345,14 +345,13 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
const m_x = m[m_v.len + 1 + noise_length ..][0..secret_key.len];
const m_h = m[m.len - h.len ..];
- mem.set(u8, m_v, 0x01);
+ @memset(m_v, 0x01);
m_i.* = 0x00;
- if (noise) |n| mem.copy(u8, m_z, &n);
- mem.copy(u8, m_x, &secret_key);
- mem.copy(u8, m_h, &h);
+ if (noise) |n| @memcpy(m_z, &n);
+ @memcpy(m_x, &secret_key);
+ @memcpy(m_h, &h);
Hmac.create(&k, &m, &k);
Hmac.create(m_v, m_v, &k);
- mem.copy(u8, m_v, m_v);
m_i.* = 0x01;
Hmac.create(&k, &m, &k);
Hmac.create(m_v, m_v, &k);
@@ -361,10 +360,9 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
while (t_off < t.len) : (t_off += m_v.len) {
const t_end = @min(t_off + m_v.len, t.len);
Hmac.create(m_v, m_v, &k);
- std.mem.copy(u8, t[t_off..t_end], m_v[0 .. t_end - t_off]);
+ @memcpy(t[t_off..t_end], m_v[0 .. t_end - t_off]);
}
if (Curve.scalar.Scalar.fromBytes(t, .Big)) |s| return s else |_| {}
- mem.copy(u8, m_v, m_v);
m_i.* = 0x00;
Hmac.create(&k, m[0 .. m_v.len + 1], &k);
Hmac.create(m_v, m_v, &k);
diff --git a/lib/std/crypto/hkdf.zig b/lib/std/crypto/hkdf.zig
index 7102ffe780..9163ba9d15 100644
--- a/lib/std/crypto/hkdf.zig
+++ b/lib/std/crypto/hkdf.zig
@@ -63,7 +63,7 @@ pub fn Hkdf(comptime Hmac: type) type {
st.update(&counter);
var tmp: [prk_length]u8 = undefined;
st.final(tmp[0..prk_length]);
- mem.copy(u8, out[i..][0..left], tmp[0..left]);
+ @memcpy(out[i..][0..left], tmp[0..left]);
}
}
};
diff --git a/lib/std/crypto/hmac.zig b/lib/std/crypto/hmac.zig
index f279132ee8..8d0daa3032 100644
--- a/lib/std/crypto/hmac.zig
+++ b/lib/std/crypto/hmac.zig
@@ -38,12 +38,12 @@ pub fn Hmac(comptime Hash: type) type {
// Normalize key length to block size of hash
if (key.len > Hash.block_length) {
Hash.hash(key, scratch[0..mac_length], .{});
- mem.set(u8, scratch[mac_length..Hash.block_length], 0);
+ @memset(scratch[mac_length..Hash.block_length], 0);
} else if (key.len < Hash.block_length) {
- mem.copy(u8, scratch[0..key.len], key);
- mem.set(u8, scratch[key.len..Hash.block_length], 0);
+ @memcpy(scratch[0..key.len], key);
+ @memset(scratch[key.len..Hash.block_length], 0);
} else {
- mem.copy(u8, scratch[0..], key);
+ @memcpy(&scratch, key);
}
for (&ctx.o_key_pad, 0..) |*b, i| {
diff --git a/lib/std/crypto/isap.zig b/lib/std/crypto/isap.zig
index 0888cfa4dd..5b0da739de 100644
--- a/lib/std/crypto/isap.zig
+++ b/lib/std/crypto/isap.zig
@@ -43,7 +43,7 @@ pub const IsapA128A = struct {
}
} else {
var padded = [_]u8{0} ** 8;
- mem.copy(u8, padded[0..left], m[i..]);
+ @memcpy(padded[0..left], m[i..]);
padded[left] = 0x80;
isap.st.addBytes(&padded);
isap.st.permute();
diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig
index af7c12c8c2..9226f2f6d4 100644
--- a/lib/std/crypto/keccak_p.zig
+++ b/lib/std/crypto/keccak_p.zig
@@ -68,7 +68,7 @@ pub fn KeccakF(comptime f: u11) type {
}
if (i < bytes.len) {
var padded = [_]u8{0} ** @sizeOf(T);
- mem.copy(u8, padded[0 .. bytes.len - i], bytes[i..]);
+ @memcpy(padded[0 .. bytes.len - i], bytes[i..]);
self.st[i / @sizeOf(T)] = mem.readIntLittle(T, padded[0..]);
}
}
@@ -87,7 +87,7 @@ pub fn KeccakF(comptime f: u11) type {
}
if (i < bytes.len) {
var padded = [_]u8{0} ** @sizeOf(T);
- mem.copy(u8, padded[0 .. bytes.len - i], bytes[i..]);
+ @memcpy(padded[0 .. bytes.len - i], bytes[i..]);
self.st[i / @sizeOf(T)] ^= mem.readIntLittle(T, padded[0..]);
}
}
@@ -101,7 +101,7 @@ pub fn KeccakF(comptime f: u11) type {
if (i < out.len) {
var padded = [_]u8{0} ** @sizeOf(T);
mem.writeIntLittle(T, padded[0..], self.st[i / @sizeOf(T)]);
- mem.copy(u8, out[i..], padded[0 .. out.len - i]);
+ @memcpy(out[i..], padded[0 .. out.len - i]);
}
}
@@ -116,16 +116,16 @@ pub fn KeccakF(comptime f: u11) type {
}
if (i < in.len) {
var padded = [_]u8{0} ** @sizeOf(T);
- mem.copy(u8, padded[0 .. in.len - i], in[i..]);
+ @memcpy(padded[0 .. in.len - i], in[i..]);
const x = mem.readIntNative(T, &padded) ^ mem.nativeToLittle(T, self.st[i / @sizeOf(T)]);
mem.writeIntNative(T, &padded, x);
- mem.copy(u8, out[i..], padded[0 .. in.len - i]);
+ @memcpy(out[i..], padded[0 .. in.len - i]);
}
}
/// Set the words storing the bytes of a given range to zero.
pub fn clear(self: *Self, from: usize, to: usize) void {
- mem.set(T, self.st[from / @sizeOf(T) .. (to + @sizeOf(T) - 1) / @sizeOf(T)], 0);
+ @memset(self.st[from / @sizeOf(T) .. (to + @sizeOf(T) - 1) / @sizeOf(T)], 0);
}
/// Clear the entire state, disabling compiler optimizations.
@@ -215,7 +215,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti
var bytes = bytes_;
if (self.offset > 0) {
const left = math.min(rate - self.offset, bytes.len);
- mem.copy(u8, self.buf[self.offset..], bytes[0..left]);
+ @memcpy(self.buf[self.offset..][0..left], bytes[0..left]);
self.offset += left;
if (self.offset == rate) {
self.offset = 0;
@@ -231,7 +231,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti
bytes = bytes[rate..];
}
if (bytes.len > 0) {
- mem.copy(u8, &self.buf, bytes);
+ @memcpy(self.buf[0..bytes.len], bytes);
self.offset = bytes.len;
}
}
diff --git a/lib/std/crypto/kyber_d00.zig b/lib/std/crypto/kyber_d00.zig
index 21fdb6ff17..b52f9f475d 100644
--- a/lib/std/crypto/kyber_d00.zig
+++ b/lib/std/crypto/kyber_d00.zig
@@ -323,9 +323,9 @@ fn Kyber(comptime p: Params) type {
s += InnerSk.bytes_length;
ret.pk = InnerPk.fromBytes(buf[s .. s + InnerPk.bytes_length]);
s += InnerPk.bytes_length;
- mem.copy(u8, &ret.hpk, buf[s .. s + h_length]);
+ ret.hpk = buf[s..][0..h_length].*;
s += h_length;
- mem.copy(u8, &ret.z, buf[s .. s + shared_length]);
+ ret.z = buf[s..][0..shared_length].*;
return ret;
}
};
@@ -345,7 +345,7 @@ fn Kyber(comptime p: Params) type {
break :sk random_seed;
};
var ret: KeyPair = undefined;
- mem.copy(u8, &ret.secret_key.z, seed[inner_seed_length..seed_length]);
+ ret.secret_key.z = seed[inner_seed_length..seed_length].*;
// Generate inner key
innerKeyFromSeed(
@@ -356,7 +356,7 @@ fn Kyber(comptime p: Params) type {
ret.secret_key.pk = ret.public_key.pk;
// Copy over z from seed.
- mem.copy(u8, &ret.secret_key.z, seed[inner_seed_length..seed_length]);
+ ret.secret_key.z = seed[inner_seed_length..seed_length].*;
// Compute H(pk)
var h = sha3.Sha3_256.init(.{});
@@ -418,7 +418,7 @@ fn Kyber(comptime p: Params) type {
fn fromBytes(buf: *const [bytes_length]u8) InnerPk {
var ret: InnerPk = undefined;
ret.th = V.fromBytes(buf[0..V.bytes_length]).normalize();
- mem.copy(u8, &ret.rho, buf[V.bytes_length..bytes_length]);
+ ret.rho = buf[V.bytes_length..bytes_length].*;
ret.aT = M.uniform(ret.rho, true);
return ret;
}
@@ -459,7 +459,7 @@ fn Kyber(comptime p: Params) type {
var h = sha3.Sha3_512.init(.{});
h.update(&seed);
h.final(&expanded_seed);
- mem.copy(u8, &pk.rho, expanded_seed[0..32]);
+ pk.rho = expanded_seed[0..32].*;
const sigma = expanded_seed[32..64];
pk.aT = M.uniform(pk.rho, false); // Expand ρ to A; we'll transpose later on
@@ -1381,7 +1381,7 @@ fn Vec(comptime K: u8) type {
const cs = comptime Poly.compressedSize(d);
var ret: [compressedSize(d)]u8 = undefined;
inline for (0..K) |i| {
- mem.copy(u8, ret[i * cs .. (i + 1) * cs], &v.ps[i].compress(d));
+ ret[i * cs .. (i + 1) * cs].* = v.ps[i].compress(d);
}
return ret;
}
@@ -1399,11 +1399,7 @@ fn Vec(comptime K: u8) type {
fn toBytes(v: Self) [bytes_length]u8 {
var ret: [bytes_length]u8 = undefined;
inline for (0..K) |i| {
- mem.copy(
- u8,
- ret[i * Poly.bytes_length .. (i + 1) * Poly.bytes_length],
- &v.ps[i].toBytes(),
- );
+ ret[i * Poly.bytes_length .. (i + 1) * Poly.bytes_length].* = v.ps[i].toBytes();
}
return ret;
}
@@ -1479,7 +1475,7 @@ test "MulHat" {
const p2 = a.ntt().mulHat(b.ntt()).barrettReduce().invNTT().normalize();
var p: Poly = undefined;
- mem.set(i16, &p.cs, 0);
+ @memset(&p.cs, 0);
for (0..N) |i| {
for (0..N) |j| {
@@ -1742,15 +1738,15 @@ const NistDRBG = struct {
g.incV();
var block: [16]u8 = undefined;
ctx.encrypt(&block, &g.v);
- mem.copy(u8, buf[i * 16 .. (i + 1) * 16], &block);
+ buf[i * 16 ..][0..16].* = block;
}
if (pd) |p| {
for (&buf, p) |*b, x| {
b.* ^= x;
}
}
- mem.copy(u8, &g.key, buf[0..32]);
- mem.copy(u8, &g.v, buf[32..48]);
+ g.key = buf[0..32].*;
+ g.v = buf[32..48].*;
}
// randombytes.
@@ -1763,10 +1759,10 @@ const NistDRBG = struct {
g.incV();
ctx.encrypt(&block, &g.v);
if (dst.len < 16) {
- mem.copy(u8, dst, block[0..dst.len]);
+ @memcpy(dst, block[0..dst.len]);
break;
}
- mem.copy(u8, dst, &block);
+ dst[0..block.len].* = block;
dst = dst[16..dst.len];
}
g.update(null);
diff --git a/lib/std/crypto/md5.zig b/lib/std/crypto/md5.zig
index 6276fadb43..bd4a78c032 100644
--- a/lib/std/crypto/md5.zig
+++ b/lib/std/crypto/md5.zig
@@ -66,7 +66,7 @@ pub const Md5 = struct {
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
off += 64 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ @memcpy(d.buf[d.buf_len..][0..off], b[0..off]);
d.round(&d.buf);
d.buf_len = 0;
@@ -78,8 +78,9 @@ pub const Md5 = struct {
}
// Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
- d.buf_len += @intCast(u8, b[off..].len);
+ const b_slice = b[off..];
+ @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
+ d.buf_len += @intCast(u8, b_slice.len);
// Md5 uses the bottom 64-bits for length padding
d.total_len +%= b.len;
@@ -87,7 +88,7 @@ pub const Md5 = struct {
pub fn final(d: *Self, out: *[digest_length]u8) void {
// The buffer here will never be completely full.
- mem.set(u8, d.buf[d.buf_len..], 0);
+ @memset(d.buf[d.buf_len..], 0);
// Append padding bits.
d.buf[d.buf_len] = 0x80;
@@ -96,7 +97,7 @@ pub const Md5 = struct {
// > 448 mod 512 so need to add an extra round to wrap around.
if (64 - d.buf_len < 8) {
d.round(d.buf[0..]);
- mem.set(u8, d.buf[0..], 0);
+ @memset(d.buf[0..], 0);
}
// Append message length.
diff --git a/lib/std/crypto/modes.zig b/lib/std/crypto/modes.zig
index 325d8c0ceb..eed803a899 100644
--- a/lib/std/crypto/modes.zig
+++ b/lib/std/crypto/modes.zig
@@ -38,8 +38,10 @@ pub fn ctr(comptime BlockCipher: anytype, block_cipher: BlockCipher, dst: []u8,
if (i < src.len) {
mem.writeInt(u128, &counter, counterInt, endian);
var pad = [_]u8{0} ** block_length;
- mem.copy(u8, &pad, src[i..]);
+ const src_slice = src[i..];
+ @memcpy(pad[0..src_slice.len], src_slice);
block_cipher.xor(&pad, &pad, counter);
- mem.copy(u8, dst[i..], pad[0 .. src.len - i]);
+ const pad_slice = pad[0 .. src.len - i];
+ @memcpy(dst[i..][0..pad_slice.len], pad_slice);
}
}
diff --git a/lib/std/crypto/pbkdf2.zig b/lib/std/crypto/pbkdf2.zig
index 6f9783df72..7c6df5444d 100644
--- a/lib/std/crypto/pbkdf2.zig
+++ b/lib/std/crypto/pbkdf2.zig
@@ -129,13 +129,13 @@ pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, com
const offset = block * h_len;
const block_len = if (block != blocks_count - 1) h_len else r;
const dk_block: []u8 = dk[offset..][0..block_len];
- mem.copy(u8, dk_block, prev_block[0..dk_block.len]);
+ @memcpy(dk_block, prev_block[0..dk_block.len]);
var i: u32 = 1;
while (i < rounds) : (i += 1) {
// U_c = PRF (P, U_{c-1})
Prf.create(&new_block, prev_block[0..], password);
- mem.copy(u8, prev_block[0..], new_block[0..]);
+ prev_block = new_block;
// F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
for (dk_block, 0..) |_, j| {
diff --git a/lib/std/crypto/pcurves/common.zig b/lib/std/crypto/pcurves/common.zig
index 40f4a728c7..5d41bc190a 100644
--- a/lib/std/crypto/pcurves/common.zig
+++ b/lib/std/crypto/pcurves/common.zig
@@ -228,8 +228,8 @@ pub fn Field(comptime params: FieldParams) type {
}
if (iterations % 2 != 0) {
fiat.divstep(&out1, &out2, &out3, &out4, &out5, d, f, g, v, r);
- mem.copy(Word, &v, &out4);
- mem.copy(Word, &f, &out2);
+ v = out4;
+ f = out2;
}
var v_opp: Limbs = undefined;
fiat.opp(&v_opp, v);
diff --git a/lib/std/crypto/pcurves/p256.zig b/lib/std/crypto/pcurves/p256.zig
index a160d08016..0460515511 100644
--- a/lib/std/crypto/pcurves/p256.zig
+++ b/lib/std/crypto/pcurves/p256.zig
@@ -105,7 +105,7 @@ pub const P256 = struct {
var out: [33]u8 = undefined;
const xy = p.affineCoordinates();
out[0] = if (xy.y.isOdd()) 3 else 2;
- mem.copy(u8, out[1..], &xy.x.toBytes(.Big));
+ out[1..].* = xy.x.toBytes(.Big);
return out;
}
@@ -114,8 +114,8 @@ pub const P256 = struct {
var out: [65]u8 = undefined;
out[0] = 4;
const xy = p.affineCoordinates();
- mem.copy(u8, out[1..33], &xy.x.toBytes(.Big));
- mem.copy(u8, out[33..65], &xy.y.toBytes(.Big));
+ out[1..33].* = xy.x.toBytes(.Big);
+ out[33..65].* = xy.y.toBytes(.Big);
return out;
}
diff --git a/lib/std/crypto/pcurves/p256/scalar.zig b/lib/std/crypto/pcurves/p256/scalar.zig
index ce019082ef..4e88d1fee7 100644
--- a/lib/std/crypto/pcurves/p256/scalar.zig
+++ b/lib/std/crypto/pcurves/p256/scalar.zig
@@ -192,20 +192,20 @@ const ScalarDouble = struct {
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
{
var b = [_]u8{0} ** encoded_length;
- const len = math.min(s.len, 24);
- mem.copy(u8, b[0..len], s[0..len]);
+ const len = @min(s.len, 24);
+ b[0..len].* = s[0..len].*;
t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
}
if (s_.len >= 24) {
var b = [_]u8{0} ** encoded_length;
- const len = math.min(s.len - 24, 24);
- mem.copy(u8, b[0..len], s[24..][0..len]);
+ const len = @min(s.len - 24, 24);
+ b[0..len].* = s[24..][0..len].*;
t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
}
if (s_.len >= 48) {
var b = [_]u8{0} ** encoded_length;
const len = s.len - 48;
- mem.copy(u8, b[0..len], s[48..][0..len]);
+ b[0..len].* = s[48..][0..len].*;
t.x3 = Fe.fromBytes(b, .Little) catch unreachable;
}
return t;
diff --git a/lib/std/crypto/pcurves/p384.zig b/lib/std/crypto/pcurves/p384.zig
index 3aaf9e341f..6662fc0011 100644
--- a/lib/std/crypto/pcurves/p384.zig
+++ b/lib/std/crypto/pcurves/p384.zig
@@ -105,7 +105,7 @@ pub const P384 = struct {
var out: [49]u8 = undefined;
const xy = p.affineCoordinates();
out[0] = if (xy.y.isOdd()) 3 else 2;
- mem.copy(u8, out[1..], &xy.x.toBytes(.Big));
+ out[1..].* = xy.x.toBytes(.Big);
return out;
}
@@ -114,8 +114,8 @@ pub const P384 = struct {
var out: [97]u8 = undefined;
out[0] = 4;
const xy = p.affineCoordinates();
- mem.copy(u8, out[1..49], &xy.x.toBytes(.Big));
- mem.copy(u8, out[49..97], &xy.y.toBytes(.Big));
+ out[1..49].* = xy.x.toBytes(.Big);
+ out[49..97].* = xy.y.toBytes(.Big);
return out;
}
diff --git a/lib/std/crypto/pcurves/p384/scalar.zig b/lib/std/crypto/pcurves/p384/scalar.zig
index ec71a52efa..ef257ab7ce 100644
--- a/lib/std/crypto/pcurves/p384/scalar.zig
+++ b/lib/std/crypto/pcurves/p384/scalar.zig
@@ -180,14 +180,14 @@ const ScalarDouble = struct {
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero };
{
var b = [_]u8{0} ** encoded_length;
- const len = math.min(s.len, 32);
- mem.copy(u8, b[0..len], s[0..len]);
+ const len = @min(s.len, 32);
+ b[0..len].* = s[0..len].*;
t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
}
if (s_.len >= 32) {
var b = [_]u8{0} ** encoded_length;
- const len = math.min(s.len - 32, 32);
- mem.copy(u8, b[0..len], s[32..][0..len]);
+ const len = @min(s.len - 32, 32);
+ b[0..len].* = s[32..][0..len].*;
t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
}
return t;
diff --git a/lib/std/crypto/pcurves/secp256k1.zig b/lib/std/crypto/pcurves/secp256k1.zig
index 6998b0db82..753f929f44 100644
--- a/lib/std/crypto/pcurves/secp256k1.zig
+++ b/lib/std/crypto/pcurves/secp256k1.zig
@@ -158,7 +158,7 @@ pub const Secp256k1 = struct {
var out: [33]u8 = undefined;
const xy = p.affineCoordinates();
out[0] = if (xy.y.isOdd()) 3 else 2;
- mem.copy(u8, out[1..], &xy.x.toBytes(.Big));
+ out[1..].* = xy.x.toBytes(.Big);
return out;
}
@@ -167,8 +167,8 @@ pub const Secp256k1 = struct {
var out: [65]u8 = undefined;
out[0] = 4;
const xy = p.affineCoordinates();
- mem.copy(u8, out[1..33], &xy.x.toBytes(.Big));
- mem.copy(u8, out[33..65], &xy.y.toBytes(.Big));
+ out[1..33].* = xy.x.toBytes(.Big);
+ out[33..65].* = xy.y.toBytes(.Big);
return out;
}
diff --git a/lib/std/crypto/pcurves/secp256k1/scalar.zig b/lib/std/crypto/pcurves/secp256k1/scalar.zig
index 0b7d6e952d..e0b5e053e3 100644
--- a/lib/std/crypto/pcurves/secp256k1/scalar.zig
+++ b/lib/std/crypto/pcurves/secp256k1/scalar.zig
@@ -192,20 +192,20 @@ const ScalarDouble = struct {
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
{
var b = [_]u8{0} ** encoded_length;
- const len = math.min(s.len, 24);
- mem.copy(u8, b[0..len], s[0..len]);
+ const len = @min(s.len, 24);
+ b[0..len].* = s[0..len].*;
t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
}
if (s_.len >= 24) {
var b = [_]u8{0} ** encoded_length;
- const len = math.min(s.len - 24, 24);
- mem.copy(u8, b[0..len], s[24..][0..len]);
+ const len = @min(s.len - 24, 24);
+ b[0..len].* = s[24..][0..len].*;
t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
}
if (s_.len >= 48) {
var b = [_]u8{0} ** encoded_length;
const len = s.len - 48;
- mem.copy(u8, b[0..len], s[48..][0..len]);
+ b[0..len].* = s[48..][0..len].*;
t.x3 = Fe.fromBytes(b, .Little) catch unreachable;
}
return t;
diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig
index 4b6965d040..cc0f10e395 100644
--- a/lib/std/crypto/phc_encoding.zig
+++ b/lib/std/crypto/phc_encoding.zig
@@ -35,7 +35,7 @@ pub fn BinValue(comptime max_len: usize) type {
pub fn fromSlice(slice: []const u8) Error!Self {
if (slice.len > capacity) return Error.NoSpaceLeft;
var bin_value: Self = undefined;
- mem.copy(u8, &bin_value.buf, slice);
+ @memcpy(bin_value.buf[0..slice.len], slice);
bin_value.len = slice.len;
return bin_value;
}
diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig
index 492b8e9988..7f57e6cecb 100644
--- a/lib/std/crypto/salsa20.zig
+++ b/lib/std/crypto/salsa20.zig
@@ -383,10 +383,10 @@ pub const XSalsa20Poly1305 = struct {
debug.assert(c.len == m.len);
const extended = extend(rounds, k, npub);
var block0 = [_]u8{0} ** 64;
- const mlen0 = math.min(32, m.len);
- mem.copy(u8, block0[32..][0..mlen0], m[0..mlen0]);
+ const mlen0 = @min(32, m.len);
+ @memcpy(block0[32..][0..mlen0], m[0..mlen0]);
Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce);
- mem.copy(u8, c[0..mlen0], block0[32..][0..mlen0]);
+ @memcpy(c[0..mlen0], block0[32..][0..mlen0]);
Salsa20.xor(c[mlen0..], m[mlen0..], 1, extended.key, extended.nonce);
var mac = Poly1305.init(block0[0..32]);
mac.update(ad);
@@ -405,7 +405,7 @@ pub const XSalsa20Poly1305 = struct {
const extended = extend(rounds, k, npub);
var block0 = [_]u8{0} ** 64;
const mlen0 = math.min(32, c.len);
- mem.copy(u8, block0[32..][0..mlen0], c[0..mlen0]);
+ @memcpy(block0[32..][0..mlen0], c[0..mlen0]);
Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce);
var mac = Poly1305.init(block0[0..32]);
mac.update(ad);
@@ -420,7 +420,7 @@ pub const XSalsa20Poly1305 = struct {
utils.secureZero(u8, &computedTag);
return error.AuthenticationFailed;
}
- mem.copy(u8, m[0..mlen0], block0[32..][0..mlen0]);
+ @memcpy(m[0..mlen0], block0[32..][0..mlen0]);
Salsa20.xor(m[mlen0..], c[mlen0..], 1, extended.key, extended.nonce);
}
};
@@ -533,7 +533,7 @@ pub const SealedBox = struct {
debug.assert(c.len == m.len + seal_length);
var ekp = try KeyPair.create(null);
const nonce = createNonce(ekp.public_key, public_key);
- mem.copy(u8, c[0..public_length], ekp.public_key[0..]);
+ c[0..public_length].* = ekp.public_key;
try Box.seal(c[Box.public_length..], m, nonce, public_key, ekp.secret_key);
utils.secureZero(u8, ekp.secret_key[0..]);
}
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index dc73d974c7..077de3b510 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -27,7 +27,7 @@ const max_salt_len = 64;
const max_hash_len = 64;
fn blockCopy(dst: []align(16) u32, src: []align(16) const u32, n: usize) void {
- mem.copy(u32, dst, src[0 .. n * 16]);
+ @memcpy(dst[0 .. n * 16], src[0 .. n * 16]);
}
fn blockXor(dst: []align(16) u32, src: []align(16) const u32, n: usize) void {
@@ -242,7 +242,7 @@ const crypt_format = struct {
pub fn fromSlice(slice: []const u8) EncodingError!Self {
if (slice.len > capacity) return EncodingError.NoSpaceLeft;
var bin_value: Self = undefined;
- mem.copy(u8, &bin_value.buf, slice);
+ @memcpy(bin_value.buf[0..slice.len], slice);
bin_value.len = slice.len;
return bin_value;
}
@@ -314,7 +314,7 @@ const crypt_format = struct {
fn serializeTo(params: anytype, out: anytype) !void {
var header: [14]u8 = undefined;
- mem.copy(u8, header[0..3], prefix);
+ header[0..3].* = prefix.*;
Codec.intEncode(header[3..4], params.ln);
Codec.intEncode(header[4..9], params.r);
Codec.intEncode(header[9..14], params.p);
diff --git a/lib/std/crypto/sha1.zig b/lib/std/crypto/sha1.zig
index 4d11b04eb5..1f5f3eaae2 100644
--- a/lib/std/crypto/sha1.zig
+++ b/lib/std/crypto/sha1.zig
@@ -62,7 +62,7 @@ pub const Sha1 = struct {
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
off += 64 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ @memcpy(d.buf[d.buf_len..][0..off], b[0..off]);
d.round(d.buf[0..]);
d.buf_len = 0;
@@ -74,7 +74,7 @@ pub const Sha1 = struct {
}
// Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
+ @memcpy(d.buf[d.buf_len..][0 .. b.len - off], b[off..]);
d.buf_len += @intCast(u8, b[off..].len);
d.total_len += b.len;
@@ -82,7 +82,7 @@ pub const Sha1 = struct {
pub fn final(d: *Self, out: *[digest_length]u8) void {
// The buffer here will never be completely full.
- mem.set(u8, d.buf[d.buf_len..], 0);
+ @memset(d.buf[d.buf_len..], 0);
// Append padding bits.
d.buf[d.buf_len] = 0x80;
@@ -91,7 +91,7 @@ pub const Sha1 = struct {
// > 448 mod 512 so need to add an extra round to wrap around.
if (64 - d.buf_len < 8) {
d.round(d.buf[0..]);
- mem.set(u8, d.buf[0..], 0);
+ @memset(d.buf[0..], 0);
}
// Append message length.
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index ad99079852..bd5a7cc5d4 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -118,7 +118,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len >= 64) {
off += 64 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ @memcpy(d.buf[d.buf_len..][0..off], b[0..off]);
d.round(&d.buf);
d.buf_len = 0;
@@ -130,7 +130,8 @@ fn Sha2x32(comptime params: Sha2Params32) type {
}
// Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
+ const b_slice = b[off..];
+ @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
d.buf_len += @intCast(u8, b[off..].len);
d.total_len += b.len;
@@ -143,7 +144,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
pub fn final(d: *Self, out: *[digest_length]u8) void {
// The buffer here will never be completely full.
- mem.set(u8, d.buf[d.buf_len..], 0);
+ @memset(d.buf[d.buf_len..], 0);
// Append padding bits.
d.buf[d.buf_len] = 0x80;
@@ -152,7 +153,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
// > 448 mod 512 so need to add an extra round to wrap around.
if (64 - d.buf_len < 8) {
d.round(&d.buf);
- mem.set(u8, d.buf[0..], 0);
+ @memset(d.buf[0..], 0);
}
// Append message length.
@@ -609,7 +610,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
// Partial buffer exists from previous update. Copy into buffer then hash.
if (d.buf_len != 0 and d.buf_len + b.len >= 128) {
off += 128 - d.buf_len;
- mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
+ @memcpy(d.buf[d.buf_len..][0..off], b[0..off]);
d.round(&d.buf);
d.buf_len = 0;
@@ -621,7 +622,8 @@ fn Sha2x64(comptime params: Sha2Params64) type {
}
// Copy any remainder for next pass.
- mem.copy(u8, d.buf[d.buf_len..], b[off..]);
+ const b_slice = b[off..];
+ @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
d.buf_len += @intCast(u8, b[off..].len);
d.total_len += b.len;
@@ -634,7 +636,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
pub fn final(d: *Self, out: *[digest_length]u8) void {
// The buffer here will never be completely full.
- mem.set(u8, d.buf[d.buf_len..], 0);
+ @memset(d.buf[d.buf_len..], 0);
// Append padding bits.
d.buf[d.buf_len] = 0x80;
@@ -643,7 +645,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
// > 896 mod 1024 so need to add an extra round to wrap around.
if (128 - d.buf_len < 16) {
d.round(d.buf[0..]);
- mem.set(u8, d.buf[0..], 0);
+ @memset(d.buf[0..], 0);
}
// Append message length.
diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig
index 1f48f87c53..23f9e65534 100644
--- a/lib/std/crypto/sha3.zig
+++ b/lib/std/crypto/sha3.zig
@@ -149,7 +149,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds:
const left = self.buf.len - self.offset;
if (left > 0) {
const n = math.min(left, out.len);
- mem.copy(u8, out[0..n], self.buf[self.offset..][0..n]);
+ @memcpy(out[0..n], self.buf[self.offset..][0..n]);
out = out[n..];
self.offset += n;
if (out.len == 0) {
@@ -164,7 +164,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds:
}
if (out.len > 0) {
self.st.squeeze(self.buf[0..]);
- mem.copy(u8, out[0..], self.buf[0..out.len]);
+ @memcpy(out[0..], self.buf[0..out.len]);
self.offset = out.len;
}
}
diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig
index 16388439d1..d91485cdfa 100644
--- a/lib/std/crypto/siphash.zig
+++ b/lib/std/crypto/siphash.zig
@@ -98,7 +98,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
self.msg_len +%= @truncate(u8, b.len);
var buf = [_]u8{0} ** 8;
- mem.copy(u8, buf[0..], b[0..]);
+ @memcpy(buf[0..b.len], b);
buf[7] = self.msg_len;
self.round(buf);
@@ -203,7 +203,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
if (self.buf_len != 0 and self.buf_len + b.len >= 8) {
off += 8 - self.buf_len;
- mem.copy(u8, self.buf[self.buf_len..], b[0..off]);
+ @memcpy(self.buf[self.buf_len..][0..off], b[0..off]);
self.state.update(self.buf[0..]);
self.buf_len = 0;
}
@@ -212,8 +212,9 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
const aligned_len = remain_len - (remain_len % 8);
self.state.update(b[off .. off + aligned_len]);
- mem.copy(u8, self.buf[self.buf_len..], b[off + aligned_len ..]);
- self.buf_len += @intCast(u8, b[off + aligned_len ..].len);
+ const b_slice = b[off + aligned_len ..];
+ @memcpy(self.buf[self.buf_len..][0..b_slice.len], b_slice);
+ self.buf_len += @intCast(u8, b_slice.len);
}
pub fn peek(self: Self) [mac_length]u8 {
diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig
index 5b5d3dea67..8ae494edce 100644
--- a/lib/std/crypto/tls.zig
+++ b/lib/std/crypto/tls.zig
@@ -312,11 +312,11 @@ pub fn hkdfExpandLabel(
buf[2] = @intCast(u8, tls13.len + label.len);
buf[3..][0..tls13.len].* = tls13.*;
var i: usize = 3 + tls13.len;
- mem.copy(u8, buf[i..], label);
+ @memcpy(buf[i..][0..label.len], label);
i += label.len;
buf[i] = @intCast(u8, context.len);
i += 1;
- mem.copy(u8, buf[i..], context);
+ @memcpy(buf[i..][0..context.len], context);
i += context.len;
var result: [len]u8 = undefined;
diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig
index 1d94dfce31..c886bc7276 100644
--- a/lib/std/crypto/tls/Client.zig
+++ b/lib/std/crypto/tls/Client.zig
@@ -685,7 +685,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
.application_cipher = app_cipher,
.partially_read_buffer = undefined,
};
- mem.copy(u8, &client.partially_read_buffer, leftover);
+ @memcpy(client.partially_read_buffer[0..leftover.len], leftover);
return client;
},
else => {
@@ -809,7 +809,7 @@ fn prepareCiphertextRecord(
.overhead_len = overhead_len,
};
- mem.copy(u8, &cleartext_buf, bytes[bytes_i..][0..encrypted_content_len]);
+ @memcpy(cleartext_buf[0..encrypted_content_len], bytes[bytes_i..][0..encrypted_content_len]);
cleartext_buf[encrypted_content_len] = @enumToInt(inner_content_type);
bytes_i += encrypted_content_len;
const ciphertext_len = encrypted_content_len + 1;
@@ -1029,8 +1029,8 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
if (frag1.len < second_len)
return finishRead2(c, first, frag1, vp.total);
- mem.copy(u8, frag[0..in], first);
- mem.copy(u8, frag[first.len..], frag1[0..second_len]);
+ @memcpy(frag[0..in], first);
+ @memcpy(frag[first.len..][0..second_len], frag1[0..second_len]);
frag = frag[0..full_record_len];
frag1 = frag1[second_len..];
in = 0;
@@ -1059,8 +1059,8 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
if (frag1.len < second_len)
return finishRead2(c, first, frag1, vp.total);
- mem.copy(u8, frag[0..in], first);
- mem.copy(u8, frag[first.len..], frag1[0..second_len]);
+ @memcpy(frag[0..in], first);
+ @memcpy(frag[first.len..][0..second_len], frag1[0..second_len]);
frag = frag[0..full_record_len];
frag1 = frag1[second_len..];
in = 0;
@@ -1177,7 +1177,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
// We have already run out of room in iovecs. Continue
// appending to `partially_read_buffer`.
const dest = c.partially_read_buffer[c.partial_ciphertext_idx..];
- mem.copy(u8, dest, msg);
+ @memcpy(dest[0..msg.len], msg);
c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), c.partial_ciphertext_idx + msg.len);
} else {
const amt = vp.put(msg);
@@ -1185,7 +1185,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
const rest = msg[amt..];
c.partial_cleartext_idx = 0;
c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), rest.len);
- mem.copy(u8, &c.partially_read_buffer, rest);
+ @memcpy(c.partially_read_buffer[0..rest.len], rest);
}
}
} else {
@@ -1213,12 +1213,12 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize {
if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
// There is cleartext at the beginning already which we need to preserve.
c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + saved_buf.len);
- mem.copy(u8, c.partially_read_buffer[c.partial_ciphertext_idx..], saved_buf);
+ @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx..][0..saved_buf.len], saved_buf);
} else {
c.partial_cleartext_idx = 0;
c.partial_ciphertext_idx = 0;
c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), saved_buf.len);
- mem.copy(u8, &c.partially_read_buffer, saved_buf);
+ @memcpy(c.partially_read_buffer[0..saved_buf.len], saved_buf);
}
return out;
}
@@ -1227,14 +1227,14 @@ fn finishRead2(c: *Client, first: []const u8, frag1: []const u8, out: usize) usi
if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
// There is cleartext at the beginning already which we need to preserve.
c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + first.len + frag1.len);
- mem.copy(u8, c.partially_read_buffer[c.partial_ciphertext_idx..], first);
- mem.copy(u8, c.partially_read_buffer[c.partial_ciphertext_idx + first.len ..], frag1);
+ @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx..][0..first.len], first);
+ @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx + first.len ..][0..frag1.len], frag1);
} else {
c.partial_cleartext_idx = 0;
c.partial_ciphertext_idx = 0;
c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), first.len + frag1.len);
- mem.copy(u8, &c.partially_read_buffer, first);
- mem.copy(u8, c.partially_read_buffer[first.len..], frag1);
+ @memcpy(c.partially_read_buffer[0..first.len], first);
+ @memcpy(c.partially_read_buffer[first.len..][0..frag1.len], frag1);
}
return out;
}
@@ -1282,7 +1282,7 @@ const VecPut = struct {
const v = vp.iovecs[vp.idx];
const dest = v.iov_base[vp.off..v.iov_len];
const src = bytes[bytes_i..][0..@min(dest.len, bytes.len - bytes_i)];
- mem.copy(u8, dest, src);
+ @memcpy(dest[0..src.len], src);
bytes_i += src.len;
vp.off += src.len;
if (vp.off >= v.iov_len) {
diff --git a/lib/std/crypto/utils.zig b/lib/std/crypto/utils.zig
index 38dc236455..14a235e418 100644
--- a/lib/std/crypto/utils.zig
+++ b/lib/std/crypto/utils.zig
@@ -134,12 +134,8 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
/// Sets a slice to zeroes.
/// Prevents the store from being optimized out.
-pub fn secureZero(comptime T: type, s: []T) void {
- // TODO: implement `@memset` for non-byte-sized element type in the llvm backend
- //@memset(@as([]volatile T, s), 0);
- const ptr = @ptrCast([*]volatile u8, s.ptr);
- const length = s.len * @sizeOf(T);
- @memset(ptr[0..length], 0);
+pub inline fn secureZero(comptime T: type, s: []T) void {
+ @memset(@as([]volatile T, s), 0);
}
test "crypto.utils.timingSafeEql" {
@@ -148,7 +144,7 @@ test "crypto.utils.timingSafeEql" {
random.bytes(a[0..]);
random.bytes(b[0..]);
try testing.expect(!timingSafeEql([100]u8, a, b));
- mem.copy(u8, a[0..], b[0..]);
+ a = b;
try testing.expect(timingSafeEql([100]u8, a, b));
}
@@ -201,7 +197,7 @@ test "crypto.utils.secureZero" {
var a = [_]u8{0xfe} ** 8;
var b = [_]u8{0xfe} ** 8;
- mem.set(u8, a[0..], 0);
+ @memset(a[0..], 0);
secureZero(u8, b[0..]);
try testing.expectEqualSlices(u8, a[0..], b[0..]);
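
The utils.zig hunk above also simplifies secureZero: with @memset now accepting non-byte element types, the old pointer-cast workaround is dropped and the volatile slice is cleared directly. A small usage sketch, assuming only the public std.crypto.utils API shown in this diff; the buffer name is illustrative.

const std = @import("std");

test "zero a secret after use" {
    var session_key: [32]u8 = undefined;
    std.crypto.random.bytes(&session_key);
    // ... key material would be used here ...
    // The volatile slice inside secureZero keeps the zeroing stores from being optimized away.
    std.crypto.utils.secureZero(u8, &session_key);
    try std.testing.expectEqualSlices(u8, &([_]u8{0} ** 32), &session_key);
}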