author     Jacob Young <jacobly0@users.noreply.github.com>  2022-10-29 05:58:41 -0400
committer  Jacob Young <jacobly0@users.noreply.github.com>  2022-10-29 05:58:41 -0400
commit     48a2783969b0a43200514a5b4e9cce57be4e5b46 (patch)
tree       0f7cc577dd9090938d842250e1d1986d3d05aa0e /lib/std
parent     e20d2b3151607fe078b43331ea27d5b34f95360b (diff)
parent     20925b2f5c5c0ae20fdc0574e5d4e5740d17b4d6 (diff)
download   zig-48a2783969b0a43200514a5b4e9cce57be4e5b46.tar.gz
           zig-48a2783969b0a43200514a5b4e9cce57be4e5b46.zip
cbe: implement optional slice representation change
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/ascii.zig  53
-rw-r--r--  lib/std/build/CheckObjectStep.zig  17
-rw-r--r--  lib/std/crypto/25519/ed25519.zig  578
-rw-r--r--  lib/std/crypto/25519/x25519.zig  8
-rw-r--r--  lib/std/crypto/bcrypt.zig  80
-rw-r--r--  lib/std/crypto/benchmark.zig  22
-rw-r--r--  lib/std/crypto/ecdsa.zig  216
-rw-r--r--  lib/std/crypto/sha2.zig  260
-rw-r--r--  lib/std/fs/wasi.zig  2
-rw-r--r--  lib/std/math/big/int.zig  161
-rw-r--r--  lib/std/math/big/int_test.zig  102
-rw-r--r--  lib/std/mem.zig  480
-rw-r--r--  lib/std/mem/Allocator.zig  9
-rw-r--r--  lib/std/os.zig  50
-rw-r--r--  lib/std/os/linux.zig  72
-rw-r--r--  lib/std/target.zig  75
-rw-r--r--  lib/std/x/net/bpf.zig  4
-rw-r--r--  lib/std/zig/c_translation.zig  126
-rw-r--r--  lib/std/zig/system/NativeTargetInfo.zig  1
19 files changed, 1694 insertions, 622 deletions
diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig
index cd8b14e98f..eac3ba0565 100644
--- a/lib/std/ascii.zig
+++ b/lib/std/ascii.zig
@@ -555,22 +555,54 @@ test "ascii.endsWithIgnoreCase" {
try std.testing.expect(!endsWithIgnoreCase("BoB", "Bo"));
}
-/// Finds `substr` in `container`, ignoring case, starting at `start_index`.
-/// TODO boyer-moore algorithm
-pub fn indexOfIgnoreCasePos(container: []const u8, start_index: usize, substr: []const u8) ?usize {
- if (substr.len > container.len) return null;
+/// Finds `needle` in `haystack`, ignoring case, starting at index 0.
+pub fn indexOfIgnoreCase(haystack: []const u8, needle: []const u8) ?usize {
+ return indexOfIgnoreCasePos(haystack, 0, needle);
+}
+
+/// Finds `needle` in `haystack`, ignoring case, starting at `start_index`.
+/// Uses Boyer-Moore-Horspool algorithm on large inputs; `indexOfIgnoreCasePosLinear` on small inputs.
+pub fn indexOfIgnoreCasePos(haystack: []const u8, start_index: usize, needle: []const u8) ?usize {
+ if (needle.len > haystack.len) return null;
+ if (needle.len == 0) return start_index;
+
+ if (haystack.len < 52 or needle.len <= 4)
+ return indexOfIgnoreCasePosLinear(haystack, start_index, needle);
+
+ var skip_table: [256]usize = undefined;
+ boyerMooreHorspoolPreprocessIgnoreCase(needle, skip_table[0..]);
var i: usize = start_index;
- const end = container.len - substr.len;
+ while (i <= haystack.len - needle.len) {
+ if (eqlIgnoreCase(haystack[i .. i + needle.len], needle)) return i;
+ i += skip_table[toLower(haystack[i + needle.len - 1])];
+ }
+
+ return null;
+}
+
+/// Consider using `indexOfIgnoreCasePos` instead of this, which will automatically use a
+/// more sophisticated algorithm on larger inputs.
+pub fn indexOfIgnoreCasePosLinear(haystack: []const u8, start_index: usize, needle: []const u8) ?usize {
+ var i: usize = start_index;
+ const end = haystack.len - needle.len;
while (i <= end) : (i += 1) {
- if (eqlIgnoreCase(container[i .. i + substr.len], substr)) return i;
+ if (eqlIgnoreCase(haystack[i .. i + needle.len], needle)) return i;
}
return null;
}
-/// Finds `substr` in `container`, ignoring case, starting at index 0.
-pub fn indexOfIgnoreCase(container: []const u8, substr: []const u8) ?usize {
- return indexOfIgnoreCasePos(container, 0, substr);
+fn boyerMooreHorspoolPreprocessIgnoreCase(pattern: []const u8, table: *[256]usize) void {
+ for (table) |*c| {
+ c.* = pattern.len;
+ }
+
+ var i: usize = 0;
+ // The last item is intentionally ignored and the skip size will be pattern.len.
+ // This is the standard way Boyer-Moore-Horspool is implemented.
+ while (i < pattern.len - 1) : (i += 1) {
+ table[toLower(pattern[i])] = pattern.len - 1 - i;
+ }
}
test "indexOfIgnoreCase" {
@@ -579,6 +611,9 @@ test "indexOfIgnoreCase" {
try std.testing.expect(indexOfIgnoreCase("foO", "Foo").? == 0);
try std.testing.expect(indexOfIgnoreCase("foo", "fool") == null);
try std.testing.expect(indexOfIgnoreCase("FOO foo", "fOo").? == 0);
+
+ try std.testing.expect(indexOfIgnoreCase("one two three four five six seven eight nine ten eleven", "ThReE fOUr").? == 8);
+ try std.testing.expect(indexOfIgnoreCase("one two three four five six seven eight nine ten eleven", "Two tWo") == null);
}
/// Returns the lexicographical order of two slices. O(n).
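A minimal usage sketch of the case-insensitive search API added above (not part of the diff; haystack and needle are made up, only the std.ascii functions shown in the hunk are assumed):

const std = @import("std");
const ascii = std.ascii;

test "indexOfIgnoreCase usage sketch" {
    // Short needles and small haystacks take the linear path;
    // larger inputs use the Boyer-Moore-Horspool skip table.
    try std.testing.expect(ascii.indexOfIgnoreCase("abc NEEDLE xyz", "needle").? == 4);
    try std.testing.expect(ascii.indexOfIgnoreCasePos("abc NEEDLE xyz", 5, "needle") == null);
}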
diff --git a/lib/std/build/CheckObjectStep.zig b/lib/std/build/CheckObjectStep.zig
index 315bbd9b03..63b361473b 100644
--- a/lib/std/build/CheckObjectStep.zig
+++ b/lib/std/build/CheckObjectStep.zig
@@ -649,6 +649,8 @@ const WasmDumper = struct {
try parseDumpNames(reader, writer, data);
} else if (mem.eql(u8, name, "producers")) {
try parseDumpProducers(reader, writer, data);
+ } else if (mem.eql(u8, name, "target_features")) {
+ try parseDumpFeatures(reader, writer, data);
}
// TODO: Implement parsing and dumping other custom sections (such as relocations)
},
@@ -902,4 +904,19 @@ const WasmDumper = struct {
}
}
}
+
+ fn parseDumpFeatures(reader: anytype, writer: anytype, data: []const u8) !void {
+ const feature_count = try std.leb.readULEB128(u32, reader);
+ try writer.print("features {d}\n", .{feature_count});
+
+ var index: u32 = 0;
+ while (index < feature_count) : (index += 1) {
+ const prefix_byte = try std.leb.readULEB128(u8, reader);
+ const name_length = try std.leb.readULEB128(u32, reader);
+ const feature_name = data[reader.context.pos..][0..name_length];
+ reader.context.pos += name_length;
+
+ try writer.print("{c} {s}\n", .{ prefix_byte, feature_name });
+ }
+ }
};
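For reference, a standalone sketch of the LEB128 reading pattern that parseDumpFeatures uses (not part of the diff), run against a hypothetical hand-built target_features payload: count 1, '+' prefix, name "simd128". std.io.fixedBufferStream stands in for the step's reader.

const std = @import("std");

test "wasm target_features payload parsing sketch" {
    // Hypothetical payload: count = 1, prefix '+', name length 7, name "simd128".
    const data = "\x01+\x07simd128";
    var fbs = std.io.fixedBufferStream(data);
    const reader = fbs.reader();

    const feature_count = try std.leb.readULEB128(u32, reader);
    var index: u32 = 0;
    while (index < feature_count) : (index += 1) {
        const prefix_byte = try std.leb.readULEB128(u8, reader);
        const name_length = try std.leb.readULEB128(u32, reader);
        const feature_name = data[fbs.pos..][0..name_length];
        fbs.pos += name_length;

        try std.testing.expect(prefix_byte == '+');
        try std.testing.expectEqualStrings("simd128", feature_name);
    }
}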
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index 7066b1a154..0a9db10d15 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -16,27 +16,227 @@ const WeakPublicKeyError = crypto.errors.WeakPublicKeyError;
/// Ed25519 (EdDSA) signatures.
pub const Ed25519 = struct {
/// The underlying elliptic curve.
- pub const Curve = @import("edwards25519.zig").Edwards25519;
- /// Length (in bytes) of a seed required to create a key pair.
- pub const seed_length = 32;
- /// Length (in bytes) of a compressed secret key.
- pub const secret_length = 64;
- /// Length (in bytes) of a compressed public key.
- pub const public_length = 32;
- /// Length (in bytes) of a signature.
- pub const signature_length = 64;
+ pub const Curve = std.crypto.ecc.Edwards25519;
+
/// Length (in bytes) of optional random bytes, for non-deterministic signatures.
pub const noise_length = 32;
const CompressedScalar = Curve.scalar.CompressedScalar;
const Scalar = Curve.scalar.Scalar;
+ /// An Ed25519 secret key.
+ pub const SecretKey = struct {
+ /// Length (in bytes) of a raw secret key.
+ pub const encoded_length = 64;
+
+ bytes: [encoded_length]u8,
+
+ /// Return the seed used to generate this secret key.
+ pub fn seed(self: SecretKey) [KeyPair.seed_length]u8 {
+ return self.bytes[0..KeyPair.seed_length].*;
+ }
+
+ /// Return the raw public key bytes corresponding to this secret key.
+ pub fn publicKeyBytes(self: SecretKey) [PublicKey.encoded_length]u8 {
+ return self.bytes[KeyPair.seed_length..].*;
+ }
+
+ /// Create a secret key from raw bytes.
+ pub fn fromBytes(bytes: [encoded_length]u8) !SecretKey {
+ return SecretKey{ .bytes = bytes };
+ }
+
+ /// Return the secret key as raw bytes.
+ pub fn toBytes(sk: SecretKey) [encoded_length]u8 {
+ return sk.bytes;
+ }
+
+ // Return the clamped secret scalar and prefix for this secret key
+ fn scalarAndPrefix(self: SecretKey) struct { scalar: CompressedScalar, prefix: [32]u8 } {
+ var az: [Sha512.digest_length]u8 = undefined;
+ var h = Sha512.init(.{});
+ h.update(&self.seed());
+ h.final(&az);
+
+ var s = az[0..32].*;
+ Curve.scalar.clamp(&s);
+
+ return .{ .scalar = s, .prefix = az[32..].* };
+ }
+ };
+
+ /// A Signer is used to incrementally compute a signature.
+ /// It can be obtained from a `KeyPair`, using the `signer()` function.
+ pub const Signer = struct {
+ h: Sha512,
+ scalar: CompressedScalar,
+ nonce: CompressedScalar,
+ r_bytes: [Curve.encoded_length]u8,
+
+ fn init(scalar: CompressedScalar, nonce: CompressedScalar, public_key: PublicKey) (IdentityElementError || KeyMismatchError || NonCanonicalError || WeakPublicKeyError)!Signer {
+ const r = try Curve.basePoint.mul(nonce);
+ const r_bytes = r.toBytes();
+
+ var t: [64]u8 = undefined;
+ mem.copy(u8, t[0..32], &r_bytes);
+ mem.copy(u8, t[32..], &public_key.bytes);
+ var h = Sha512.init(.{});
+ h.update(&t);
+
+ return Signer{ .h = h, .scalar = scalar, .nonce = nonce, .r_bytes = r_bytes };
+ }
+
+ /// Add new data to the message being signed.
+ pub fn update(self: *Signer, data: []const u8) void {
+ self.h.update(data);
+ }
+
+ /// Compute a signature over the entire message.
+ pub fn finalize(self: *Signer) Signature {
+ var hram64: [Sha512.digest_length]u8 = undefined;
+ self.h.final(&hram64);
+ const hram = Curve.scalar.reduce64(hram64);
+
+ const s = Curve.scalar.mulAdd(hram, self.scalar, self.nonce);
+
+ return Signature{ .r = self.r_bytes, .s = s };
+ }
+ };
+
+ /// An Ed25519 public key.
+ pub const PublicKey = struct {
+ /// Length (in bytes) of a raw public key.
+ pub const encoded_length = 32;
+
+ bytes: [encoded_length]u8,
+
+ /// Create a public key from raw bytes.
+ pub fn fromBytes(bytes: [encoded_length]u8) NonCanonicalError!PublicKey {
+ try Curve.rejectNonCanonical(bytes);
+ return PublicKey{ .bytes = bytes };
+ }
+
+ /// Convert a public key to raw bytes.
+ pub fn toBytes(pk: PublicKey) [encoded_length]u8 {
+ return pk.bytes;
+ }
+
+ fn signWithNonce(public_key: PublicKey, msg: []const u8, scalar: CompressedScalar, nonce: CompressedScalar) (IdentityElementError || NonCanonicalError || KeyMismatchError || WeakPublicKeyError)!Signature {
+ var st = try Signer.init(scalar, nonce, public_key);
+ st.update(msg);
+ return st.finalize();
+ }
+
+ fn computeNonceAndSign(public_key: PublicKey, msg: []const u8, noise: ?[noise_length]u8, scalar: CompressedScalar, prefix: []const u8) (IdentityElementError || NonCanonicalError || KeyMismatchError || WeakPublicKeyError)!Signature {
+ var h = Sha512.init(.{});
+ if (noise) |*z| {
+ h.update(z);
+ }
+ h.update(prefix);
+ h.update(msg);
+ var nonce64: [64]u8 = undefined;
+ h.final(&nonce64);
+
+ const nonce = Curve.scalar.reduce64(nonce64);
+
+ return public_key.signWithNonce(msg, scalar, nonce);
+ }
+ };
+
+ /// A Verifier is used to incrementally verify a signature.
+ /// It can be obtained from a `Signature`, using the `verifier()` function.
+ pub const Verifier = struct {
+ h: Sha512,
+ s: CompressedScalar,
+ a: Curve,
+ expected_r: Curve,
+
+ fn init(sig: Signature, public_key: PublicKey) (NonCanonicalError || EncodingError || IdentityElementError)!Verifier {
+ const r = sig.r;
+ const s = sig.s;
+ try Curve.scalar.rejectNonCanonical(s);
+ const a = try Curve.fromBytes(public_key.bytes);
+ try a.rejectIdentity();
+ try Curve.rejectNonCanonical(r);
+ const expected_r = try Curve.fromBytes(r);
+ try expected_r.rejectIdentity();
+
+ var h = Sha512.init(.{});
+ h.update(&r);
+ h.update(&public_key.bytes);
+
+ return Verifier{ .h = h, .s = s, .a = a, .expected_r = expected_r };
+ }
+
+ /// Add new content to the message to be verified.
+ pub fn update(self: *Verifier, msg: []const u8) void {
+ self.h.update(msg);
+ }
+
+ /// Verify that the signature is valid for the entire message.
+ pub fn verify(self: *Verifier) (SignatureVerificationError || WeakPublicKeyError || IdentityElementError)!void {
+ var hram64: [Sha512.digest_length]u8 = undefined;
+ self.h.final(&hram64);
+ const hram = Curve.scalar.reduce64(hram64);
+
+ const sb_ah = try Curve.basePoint.mulDoubleBasePublic(self.s, self.a.neg(), hram);
+ if (self.expected_r.sub(sb_ah).clearCofactor().rejectIdentity()) |_| {
+ return error.SignatureVerificationFailed;
+ } else |_| {}
+ }
+ };
+
+ /// An Ed25519 signature.
+ pub const Signature = struct {
+ /// Length (in bytes) of a raw signature.
+ pub const encoded_length = Curve.encoded_length + @sizeOf(CompressedScalar);
+
+ /// The R component of an EdDSA signature.
+ r: [Curve.encoded_length]u8,
+ /// The S component of an EdDSA signature.
+ s: CompressedScalar,
+
+ /// Return the raw signature (r, s) in little-endian format.
+ pub fn toBytes(self: Signature) [encoded_length]u8 {
+ var bytes: [encoded_length]u8 = undefined;
+ mem.copy(u8, bytes[0 .. encoded_length / 2], &self.r);
+ mem.copy(u8, bytes[encoded_length / 2 ..], &self.s);
+ return bytes;
+ }
+
+ /// Create a signature from a raw encoding of (r, s).
+ /// EdDSA always assumes little-endian.
+ pub fn fromBytes(bytes: [encoded_length]u8) Signature {
+ return Signature{
+ .r = bytes[0 .. encoded_length / 2].*,
+ .s = bytes[encoded_length / 2 ..].*,
+ };
+ }
+
+ /// Create a Verifier for incremental verification of a signature.
+ pub fn verifier(self: Signature, public_key: PublicKey) (NonCanonicalError || EncodingError || IdentityElementError)!Verifier {
+ return Verifier.init(self, public_key);
+ }
+
+ /// Verify the signature against a message and public key.
+ /// Return IdentityElement or NonCanonical if the public key or signature are not in the expected range,
+ /// or SignatureVerificationError if the signature is invalid for the given message and key.
+ pub fn verify(self: Signature, msg: []const u8, public_key: PublicKey) (IdentityElementError || NonCanonicalError || SignatureVerificationError || EncodingError || WeakPublicKeyError)!void {
+ var st = try Verifier.init(self, public_key);
+ st.update(msg);
+ return st.verify();
+ }
+ };
+
/// An Ed25519 key pair.
pub const KeyPair = struct {
+ /// Length (in bytes) of a seed required to create a key pair.
+ pub const seed_length = noise_length;
+
/// Public part.
- public_key: [public_length]u8,
- /// Secret part. What we expose as a secret key is, under the hood, the concatenation of the seed and the public key.
- secret_key: [secret_length]u8,
+ public_key: PublicKey,
+ /// Secret scalar.
+ secret_key: SecretKey,
/// Derive a key pair from an optional secret seed.
///
@@ -56,120 +256,101 @@ pub const Ed25519 = struct {
var h = Sha512.init(.{});
h.update(&ss);
h.final(&az);
- const p = Curve.basePoint.clampedMul(az[0..32].*) catch return error.IdentityElement;
- var sk: [secret_length]u8 = undefined;
- mem.copy(u8, &sk, &ss);
- const pk = p.toBytes();
- mem.copy(u8, sk[seed_length..], &pk);
-
- return KeyPair{ .public_key = pk, .secret_key = sk };
+ const pk_p = Curve.basePoint.clampedMul(az[0..32].*) catch return error.IdentityElement;
+ const pk_bytes = pk_p.toBytes();
+ var sk_bytes: [SecretKey.encoded_length]u8 = undefined;
+ mem.copy(u8, &sk_bytes, &ss);
+ mem.copy(u8, sk_bytes[seed_length..], &pk_bytes);
+ return KeyPair{
+ .public_key = PublicKey.fromBytes(pk_bytes) catch unreachable,
+ .secret_key = try SecretKey.fromBytes(sk_bytes),
+ };
}
/// Create a KeyPair from a secret key.
- pub fn fromSecretKey(secret_key: [secret_length]u8) KeyPair {
+ pub fn fromSecretKey(secret_key: SecretKey) IdentityElementError!KeyPair {
+ const pk_p = try Curve.fromBytes(secret_key.publicKeyBytes());
+
+ // It is critical for EdDSA to use the correct public key.
+ // In order to enforce this, a SecretKey implicitly includes a copy of the public key.
+ // In Debug mode, we can still afford checking that the public key is correct for extra safety.
+ if (std.builtin.mode == .Debug) {
+ const recomputed_kp = try create(secret_key.seed());
+ debug.assert(mem.eql(u8, &recomputed_kp.public_key.toBytes(), &pk_p.toBytes()));
+ }
return KeyPair{
+ .public_key = PublicKey{ .bytes = pk_p.toBytes() },
.secret_key = secret_key,
- .public_key = secret_key[seed_length..].*,
};
}
- };
- /// Sign a message using a key pair, and optional random noise.
- /// Having noise creates non-standard, non-deterministic signatures,
- /// but has been proven to increase resilience against fault attacks.
- pub fn sign(msg: []const u8, key_pair: KeyPair, noise: ?[noise_length]u8) (IdentityElementError || WeakPublicKeyError || KeyMismatchError)![signature_length]u8 {
- const seed = key_pair.secret_key[0..seed_length];
- const public_key = key_pair.secret_key[seed_length..];
- if (!mem.eql(u8, public_key, &key_pair.public_key)) {
- return error.KeyMismatch;
- }
- var az: [Sha512.digest_length]u8 = undefined;
- var h = Sha512.init(.{});
- h.update(seed);
- h.final(&az);
-
- h = Sha512.init(.{});
- if (noise) |*z| {
- h.update(z);
+ /// Sign a message using the key pair.
+ /// The noise can be null in order to create deterministic signatures.
+ /// If deterministic signatures are not required, the noise should be randomly generated instead.
+ /// This helps defend against fault attacks.
+ pub fn sign(key_pair: KeyPair, msg: []const u8, noise: ?[noise_length]u8) (IdentityElementError || NonCanonicalError || KeyMismatchError || WeakPublicKeyError)!Signature {
+ if (!mem.eql(u8, &key_pair.secret_key.publicKeyBytes(), &key_pair.public_key.toBytes())) {
+ return error.KeyMismatch;
+ }
+ const scalar_and_prefix = key_pair.secret_key.scalarAndPrefix();
+ return key_pair.public_key.computeNonceAndSign(
+ msg,
+ noise,
+ scalar_and_prefix.scalar,
+ &scalar_and_prefix.prefix,
+ );
}
- h.update(az[32..]);
- h.update(msg);
- var nonce64: [64]u8 = undefined;
- h.final(&nonce64);
- const nonce = Curve.scalar.reduce64(nonce64);
- const r = try Curve.basePoint.mul(nonce);
-
- var sig: [signature_length]u8 = undefined;
- mem.copy(u8, sig[0..32], &r.toBytes());
- mem.copy(u8, sig[32..], public_key);
- h = Sha512.init(.{});
- h.update(&sig);
- h.update(msg);
- var hram64: [Sha512.digest_length]u8 = undefined;
- h.final(&hram64);
- const hram = Curve.scalar.reduce64(hram64);
-
- var x = az[0..32];
- Curve.scalar.clamp(x);
- const s = Curve.scalar.mulAdd(hram, x.*, nonce);
- mem.copy(u8, sig[32..], s[0..]);
- return sig;
- }
- /// Verify an Ed25519 signature given a message and a public key.
- /// Returns error.SignatureVerificationFailed is the signature verification failed.
- pub fn verify(sig: [signature_length]u8, msg: []const u8, public_key: [public_length]u8) (SignatureVerificationError || WeakPublicKeyError || EncodingError || NonCanonicalError || IdentityElementError)!void {
- const r = sig[0..32];
- const s = sig[32..64];
- try Curve.scalar.rejectNonCanonical(s.*);
- try Curve.rejectNonCanonical(public_key);
- const a = try Curve.fromBytes(public_key);
- try a.rejectIdentity();
- try Curve.rejectNonCanonical(r.*);
- const expected_r = try Curve.fromBytes(r.*);
- try expected_r.rejectIdentity();
-
- var h = Sha512.init(.{});
- h.update(r);
- h.update(&public_key);
- h.update(msg);
- var hram64: [Sha512.digest_length]u8 = undefined;
- h.final(&hram64);
- const hram = Curve.scalar.reduce64(hram64);
-
- const sb_ah = try Curve.basePoint.mulDoubleBasePublic(s.*, a.neg(), hram);
- if (expected_r.sub(sb_ah).clearCofactor().rejectIdentity()) |_| {
- return error.SignatureVerificationFailed;
- } else |_| {}
- }
+ /// Create a Signer, that can be used for incremental signing.
+ /// Note that the signature is not deterministic.
+ /// The noise parameter, if set, should be something unique for each message,
+ /// such as a random nonce, or a counter.
+ pub fn signer(key_pair: KeyPair, noise: ?[noise_length]u8) (IdentityElementError || KeyMismatchError || NonCanonicalError || WeakPublicKeyError)!Signer {
+ if (!mem.eql(u8, &key_pair.secret_key.publicKeyBytes(), &key_pair.public_key.toBytes())) {
+ return error.KeyMismatch;
+ }
+ const scalar_and_prefix = key_pair.secret_key.scalarAndPrefix();
+ var h = Sha512.init(.{});
+ h.update(&scalar_and_prefix.prefix);
+ var noise2: [noise_length]u8 = undefined;
+ crypto.random.bytes(&noise2);
+ h.update(&noise2);
+ if (noise) |*z| {
+ h.update(z);
+ }
+ var nonce64: [64]u8 = undefined;
+ h.final(&nonce64);
+ const nonce = Curve.scalar.reduce64(nonce64);
+
+ return Signer.init(scalar_and_prefix.scalar, nonce, key_pair.public_key);
+ }
+ };
/// A (signature, message, public_key) tuple for batch verification
pub const BatchElement = struct {
- sig: [signature_length]u8,
+ sig: Signature,
msg: []const u8,
- public_key: [public_length]u8,
+ public_key: PublicKey,
};
/// Verify several signatures in a single operation, much faster than verifying signatures one-by-one
pub fn verifyBatch(comptime count: usize, signature_batch: [count]BatchElement) (SignatureVerificationError || IdentityElementError || WeakPublicKeyError || EncodingError || NonCanonicalError)!void {
- var r_batch: [count][32]u8 = undefined;
- var s_batch: [count][32]u8 = undefined;
+ var r_batch: [count]CompressedScalar = undefined;
+ var s_batch: [count]CompressedScalar = undefined;
var a_batch: [count]Curve = undefined;
var expected_r_batch: [count]Curve = undefined;
for (signature_batch) |signature, i| {
- const r = signature.sig[0..32];
- const s = signature.sig[32..64];
- try Curve.scalar.rejectNonCanonical(s.*);
- try Curve.rejectNonCanonical(signature.public_key);
- const a = try Curve.fromBytes(signature.public_key);
+ const r = signature.sig.r;
+ const s = signature.sig.s;
+ try Curve.scalar.rejectNonCanonical(s);
+ const a = try Curve.fromBytes(signature.public_key.bytes);
try a.rejectIdentity();
- try Curve.rejectNonCanonical(r.*);
- const expected_r = try Curve.fromBytes(r.*);
+ try Curve.rejectNonCanonical(r);
+ const expected_r = try Curve.fromBytes(r);
try expected_r.rejectIdentity();
expected_r_batch[i] = expected_r;
- r_batch[i] = r.*;
- s_batch[i] = s.*;
+ r_batch[i] = r;
+ s_batch[i] = s;
a_batch[i] = a;
}
@@ -177,7 +358,7 @@ pub const Ed25519 = struct {
for (signature_batch) |signature, i| {
var h = Sha512.init(.{});
h.update(&r_batch[i]);
- h.update(&signature.public_key);
+ h.update(&signature.public_key.bytes);
h.update(signature.msg);
var hram64: [Sha512.digest_length]u8 = undefined;
h.final(&hram64);
@@ -212,7 +393,7 @@ pub const Ed25519 = struct {
}
/// Ed25519 signatures with key blinding.
- pub const BlindKeySignatures = struct {
+ pub const key_blinding = struct {
/// Length (in bytes) of a blinding seed.
pub const blind_seed_length = 32;
@@ -220,81 +401,69 @@ pub const Ed25519 = struct {
pub const BlindSecretKey = struct {
prefix: [64]u8,
blind_scalar: CompressedScalar,
- blind_public_key: CompressedScalar,
+ blind_public_key: BlindPublicKey,
+ };
+
+ /// A blind public key.
+ pub const BlindPublicKey = struct {
+ /// Public key equivalent, that can be used for signature verification.
+ key: PublicKey,
+
+ /// Recover a public key from a blind version of it.
+ pub fn unblind(blind_public_key: BlindPublicKey, blind_seed: [blind_seed_length]u8, ctx: []const u8) (IdentityElementError || NonCanonicalError || EncodingError || WeakPublicKeyError)!PublicKey {
+ const blind_h = blindCtx(blind_seed, ctx);
+ const inv_blind_factor = Scalar.fromBytes(blind_h[0..32].*).invert().toBytes();
+ const pk_p = try (try Curve.fromBytes(blind_public_key.key.bytes)).mul(inv_blind_factor);
+ return PublicKey.fromBytes(pk_p.toBytes());
+ }
};
/// A blind key pair.
pub const BlindKeyPair = struct {
- blind_public_key: [public_length]u8,
+ blind_public_key: BlindPublicKey,
blind_secret_key: BlindSecretKey,
- };
- /// Blind an existing key pair with a blinding seed and a context.
- pub fn blind(key_pair: Ed25519.KeyPair, blind_seed: [blind_seed_length]u8, ctx: []const u8) !BlindKeyPair {
- var h: [Sha512.digest_length]u8 = undefined;
- Sha512.hash(key_pair.secret_key[0..32], &h, .{});
- Curve.scalar.clamp(h[0..32]);
- const scalar = Curve.scalar.reduce(h[0..32].*);
-
- const blind_h = blindCtx(blind_seed, ctx);
- const blind_factor = Curve.scalar.reduce(blind_h[0..32].*);
-
- const blind_scalar = Curve.scalar.mul(scalar, blind_factor);
- const blind_public_key = (Curve.basePoint.mul(blind_scalar) catch return error.IdentityElement).toBytes();
-
- var prefix: [64]u8 = undefined;
- mem.copy(u8, prefix[0..32], h[32..64]);
- mem.copy(u8, prefix[32..64], blind_h[32..64]);
-
- const blind_secret_key = .{
- .prefix = prefix,
- .blind_scalar = blind_scalar,
- .blind_public_key = blind_public_key,
- };
- return BlindKeyPair{
- .blind_public_key = blind_public_key,
- .blind_secret_key = blind_secret_key,
- };
- }
-
- /// Recover a public key from a blind version of it.
- pub fn unblindPublicKey(blind_public_key: [public_length]u8, blind_seed: [blind_seed_length]u8, ctx: []const u8) ![public_length]u8 {
- const blind_h = blindCtx(blind_seed, ctx);
- const inv_blind_factor = Scalar.fromBytes(blind_h[0..32].*).invert().toBytes();
- const public_key = try (try Curve.fromBytes(blind_public_key)).mul(inv_blind_factor);
- return public_key.toBytes();
- }
-
- /// Sign a message using a blind key pair, and optional random noise.
- /// Having noise creates non-standard, non-deterministic signatures,
- /// but has been proven to increase resilience against fault attacks.
- pub fn sign(msg: []const u8, key_pair: BlindKeyPair, noise: ?[noise_length]u8) ![signature_length]u8 {
- var h = Sha512.init(.{});
- if (noise) |*z| {
- h.update(z);
+ /// Create a blind key pair from an existing key pair, a blinding seed and a context.
+ pub fn init(key_pair: Ed25519.KeyPair, blind_seed: [blind_seed_length]u8, ctx: []const u8) (NonCanonicalError || IdentityElementError)!BlindKeyPair {
+ var h: [Sha512.digest_length]u8 = undefined;
+ Sha512.hash(&key_pair.secret_key.seed(), &h, .{});
+ Curve.scalar.clamp(h[0..32]);
+ const scalar = Curve.scalar.reduce(h[0..32].*);
+
+ const blind_h = blindCtx(blind_seed, ctx);
+ const blind_factor = Curve.scalar.reduce(blind_h[0..32].*);
+
+ const blind_scalar = Curve.scalar.mul(scalar, blind_factor);
+ const blind_public_key = BlindPublicKey{
+ .key = try PublicKey.fromBytes((Curve.basePoint.mul(blind_scalar) catch return error.IdentityElement).toBytes()),
+ };
+
+ var prefix: [64]u8 = undefined;
+ mem.copy(u8, prefix[0..32], h[32..64]);
+ mem.copy(u8, prefix[32..64], blind_h[32..64]);
+
+ const blind_secret_key = BlindSecretKey{
+ .prefix = prefix,
+ .blind_scalar = blind_scalar,
+ .blind_public_key = blind_public_key,
+ };
+ return BlindKeyPair{
+ .blind_public_key = blind_public_key,
+ .blind_secret_key = blind_secret_key,
+ };
}
- h.update(&key_pair.blind_secret_key.prefix);
- h.update(msg);
- var nonce64: [64]u8 = undefined;
- h.final(&nonce64);
- const nonce = Curve.scalar.reduce64(nonce64);
- const r = try Curve.basePoint.mul(nonce);
+ /// Sign a message using a blind key pair, and optional random noise.
+ /// Having noise creates non-standard, non-deterministic signatures,
+ /// but has been proven to increase resilience against fault attacks.
+ pub fn sign(key_pair: BlindKeyPair, msg: []const u8, noise: ?[noise_length]u8) (IdentityElementError || KeyMismatchError || NonCanonicalError || WeakPublicKeyError)!Signature {
+ const scalar = key_pair.blind_secret_key.blind_scalar;
+ const prefix = key_pair.blind_secret_key.prefix;
- var sig: [signature_length]u8 = undefined;
- mem.copy(u8, sig[0..32], &r.toBytes());
- mem.copy(u8, sig[32..], &key_pair.blind_public_key);
- h = Sha512.init(.{});
- h.update(&sig);
- h.update(msg);
- var hram64: [Sha512.digest_length]u8 = undefined;
- h.final(&hram64);
- const hram = Curve.scalar.reduce64(hram64);
-
- const s = Curve.scalar.mulAdd(hram, key_pair.blind_secret_key.blind_scalar, nonce);
- mem.copy(u8, sig[32..], s[0..]);
- return sig;
- }
+ return (try PublicKey.fromBytes(key_pair.blind_public_key.key.bytes))
+ .computeNonceAndSign(msg, noise, scalar, &prefix);
+ }
+ };
/// Compute a blind context from a blinding seed and a context.
fn blindCtx(blind_seed: [blind_seed_length]u8, ctx: []const u8) [Sha512.digest_length]u8 {
@@ -306,7 +475,13 @@ pub const Ed25519 = struct {
hx.final(&blind_h);
return blind_h;
}
+
+ pub const sign = @compileError("deprecated; use BlindKeyPair.sign instead");
+ pub const unblindPublicKey = @compileError("deprecated; use BlindPublicKey.unblind instead");
};
+
+ pub const sign = @compileError("deprecated; use KeyPair.sign instead");
+ pub const verify = @compileError("deprecated; use PublicKey.verify instead");
};
test "ed25519 key pair creation" {
@@ -314,8 +489,8 @@ test "ed25519 key pair creation" {
_ = try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.KeyPair.create(seed);
var buf: [256]u8 = undefined;
- try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.secret_key)}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
- try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.public_key)}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.secret_key.toBytes())}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.public_key.toBytes())}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
}
test "ed25519 signature" {
@@ -323,11 +498,11 @@ test "ed25519 signature" {
_ = try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.KeyPair.create(seed);
- const sig = try Ed25519.sign("test", key_pair, null);
+ const sig = try key_pair.sign("test", null);
var buf: [128]u8 = undefined;
- try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&sig)}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
- try Ed25519.verify(sig, "test", key_pair.public_key);
- try std.testing.expectError(error.SignatureVerificationFailed, Ed25519.verify(sig, "TEST", key_pair.public_key));
+ try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&sig.toBytes())}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
+ try sig.verify("test", key_pair.public_key);
+ try std.testing.expectError(error.SignatureVerificationFailed, sig.verify("TEST", key_pair.public_key));
}
test "ed25519 batch verification" {
@@ -338,8 +513,8 @@ test "ed25519 batch verification" {
var msg2: [32]u8 = undefined;
crypto.random.bytes(&msg1);
crypto.random.bytes(&msg2);
- const sig1 = try Ed25519.sign(&msg1, key_pair, null);
- const sig2 = try Ed25519.sign(&msg2, key_pair, null);
+ const sig1 = try key_pair.sign(&msg1, null);
+ const sig2 = try key_pair.sign(&msg2, null);
var signature_batch = [_]Ed25519.BatchElement{
Ed25519.BatchElement{
.sig = sig1,
@@ -355,9 +530,7 @@ test "ed25519 batch verification" {
try Ed25519.verifyBatch(2, signature_batch);
signature_batch[1].sig = sig1;
- // TODO https://github.com/ziglang/zig/issues/12240
- const sig_len = signature_batch.len;
- try std.testing.expectError(error.SignatureVerificationFailed, Ed25519.verifyBatch(sig_len, signature_batch));
+ try std.testing.expectError(error.SignatureVerificationFailed, Ed25519.verifyBatch(signature_batch.len, signature_batch));
}
}
@@ -446,20 +619,25 @@ test "ed25519 test vectors" {
for (entries) |entry| {
var msg: [entry.msg_hex.len / 2]u8 = undefined;
_ = try fmt.hexToBytes(&msg, entry.msg_hex);
- var public_key: [32]u8 = undefined;
- _ = try fmt.hexToBytes(&public_key, entry.public_key_hex);
- var sig: [64]u8 = undefined;
- _ = try fmt.hexToBytes(&sig, entry.sig_hex);
+ var public_key_bytes: [32]u8 = undefined;
+ _ = try fmt.hexToBytes(&public_key_bytes, entry.public_key_hex);
+ const public_key = Ed25519.PublicKey.fromBytes(public_key_bytes) catch |err| {
+ try std.testing.expectEqual(entry.expected.?, err);
+ continue;
+ };
+ var sig_bytes: [64]u8 = undefined;
+ _ = try fmt.hexToBytes(&sig_bytes, entry.sig_hex);
+ const sig = Ed25519.Signature.fromBytes(sig_bytes);
if (entry.expected) |error_type| {
- try std.testing.expectError(error_type, Ed25519.verify(sig, &msg, public_key));
+ try std.testing.expectError(error_type, sig.verify(&msg, public_key));
} else {
- try Ed25519.verify(sig, &msg, public_key);
+ try sig.verify(&msg, public_key);
}
}
}
test "ed25519 with blind keys" {
- const BlindKeySignatures = Ed25519.BlindKeySignatures;
+ const BlindKeyPair = Ed25519.key_blinding.BlindKeyPair;
// Create a standard Ed25519 key pair
const kp = try Ed25519.KeyPair.create(null);
@@ -469,14 +647,30 @@ test "ed25519 with blind keys" {
crypto.random.bytes(&blind);
// Blind the key pair
- const blind_kp = try BlindKeySignatures.blind(kp, blind, "ctx");
+ const blind_kp = try BlindKeyPair.init(kp, blind, "ctx");
// Sign a message and check that it can be verified with the blind public key
const msg = "test";
- const sig = try BlindKeySignatures.sign(msg, blind_kp, null);
- try Ed25519.verify(sig, msg, blind_kp.blind_public_key);
+ const sig = try blind_kp.sign(msg, null);
+ try sig.verify(msg, blind_kp.blind_public_key.key);
// Unblind the public key
- const pk = try BlindKeySignatures.unblindPublicKey(blind_kp.blind_public_key, blind, "ctx");
- try std.testing.expectEqualSlices(u8, &pk, &kp.public_key);
+ const pk = try blind_kp.blind_public_key.unblind(blind, "ctx");
+ try std.testing.expectEqualSlices(u8, &pk.toBytes(), &kp.public_key.toBytes());
+}
+
+test "ed25519 signatures with streaming" {
+ const kp = try Ed25519.KeyPair.create(null);
+
+ var signer = try kp.signer(null);
+ signer.update("mes");
+ signer.update("sage");
+ const sig = signer.finalize();
+
+ try sig.verify("message", kp.public_key);
+
+ var verifier = try sig.verifier(kp.public_key);
+ verifier.update("mess");
+ verifier.update("age");
+ try verifier.verify();
}
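A compact sketch of the reworked Ed25519 API (not part of the diff): one-shot signing, raw-byte round-trips, and the streaming Signer/Verifier, assuming the type is reachable as std.crypto.sign.Ed25519 and mirroring the added tests above.

const std = @import("std");
const Ed25519 = std.crypto.sign.Ed25519;

test "Ed25519 API sketch" {
    const kp = try Ed25519.KeyPair.create(null);

    // One-shot: sign() hangs off the key pair and returns a Signature value.
    const sig = try kp.sign("message", null);
    try sig.verify("message", kp.public_key);

    // Raw 64-byte signatures round-trip through toBytes()/fromBytes().
    const restored = Ed25519.Signature.fromBytes(sig.toBytes());
    try restored.verify("message", kp.public_key);

    // Streaming: a Signer/Verifier pair accumulates the message incrementally.
    var signer = try kp.signer(null);
    signer.update("mes");
    signer.update("sage");
    const sig2 = signer.finalize();

    var verifier = try sig2.verifier(kp.public_key);
    verifier.update("message");
    try verifier.verify();
}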
diff --git a/lib/std/crypto/25519/x25519.zig b/lib/std/crypto/25519/x25519.zig
index d935513ab6..22bcf00136 100644
--- a/lib/std/crypto/25519/x25519.zig
+++ b/lib/std/crypto/25519/x25519.zig
@@ -44,9 +44,9 @@ pub const X25519 = struct {
/// Create a key pair from an Ed25519 key pair
pub fn fromEd25519(ed25519_key_pair: crypto.sign.Ed25519.KeyPair) (IdentityElementError || EncodingError)!KeyPair {
- const seed = ed25519_key_pair.secret_key[0..32];
+ const seed = ed25519_key_pair.secret_key.seed();
var az: [Sha512.digest_length]u8 = undefined;
- Sha512.hash(seed, &az, .{});
+ Sha512.hash(&seed, &az, .{});
var sk = az[0..32].*;
Curve.scalar.clamp(&sk);
const pk = try publicKeyFromEd25519(ed25519_key_pair.public_key);
@@ -64,8 +64,8 @@ pub const X25519 = struct {
}
/// Compute the X25519 equivalent to an Ed25519 public key.
- pub fn publicKeyFromEd25519(ed25519_public_key: [crypto.sign.Ed25519.public_length]u8) (IdentityElementError || EncodingError)![public_length]u8 {
- const pk_ed = try crypto.ecc.Edwards25519.fromBytes(ed25519_public_key);
+ pub fn publicKeyFromEd25519(ed25519_public_key: crypto.sign.Ed25519.PublicKey) (IdentityElementError || EncodingError)![public_length]u8 {
+ const pk_ed = try crypto.ecc.Edwards25519.fromBytes(ed25519_public_key.bytes);
const pk = try Curve.fromEdwards25519(pk_ed);
return pk.toBytes();
}
diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig
index d7a0e0fb80..f924ab681c 100644
--- a/lib/std/crypto/bcrypt.zig
+++ b/lib/std/crypto/bcrypt.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const base64 = std.base64;
const crypto = std.crypto;
const debug = std.debug;
const fmt = std.fmt;
@@ -533,71 +534,10 @@ const crypt_format = struct {
pub const prefix = "$2";
// bcrypt has its own variant of base64, with its own alphabet and no padding
- const Codec = struct {
- const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
-
- fn encode(b64: []u8, bin: []const u8) void {
- var i: usize = 0;
- var j: usize = 0;
- while (i < bin.len) {
- var c1 = bin[i];
- i += 1;
- b64[j] = alphabet[c1 >> 2];
- j += 1;
- c1 = (c1 & 3) << 4;
- if (i >= bin.len) {
- b64[j] = alphabet[c1];
- j += 1;
- break;
- }
- var c2 = bin[i];
- i += 1;
- c1 |= (c2 >> 4) & 0x0f;
- b64[j] = alphabet[c1];
- j += 1;
- c1 = (c2 & 0x0f) << 2;
- if (i >= bin.len) {
- b64[j] = alphabet[c1];
- j += 1;
- break;
- }
- c2 = bin[i];
- i += 1;
- c1 |= (c2 >> 6) & 3;
- b64[j] = alphabet[c1];
- b64[j + 1] = alphabet[c2 & 0x3f];
- j += 2;
- }
- debug.assert(j == b64.len);
- }
-
- fn decode(bin: []u8, b64: []const u8) EncodingError!void {
- var i: usize = 0;
- var j: usize = 0;
- while (j < bin.len) {
- const c1 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i]) orelse
- return EncodingError.InvalidEncoding);
- const c2 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i + 1]) orelse
- return EncodingError.InvalidEncoding);
- bin[j] = (c1 << 2) | ((c2 & 0x30) >> 4);
- j += 1;
- if (j >= bin.len) {
- break;
- }
- const c3 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i + 2]) orelse
- return EncodingError.InvalidEncoding);
- bin[j] = ((c2 & 0x0f) << 4) | ((c3 & 0x3c) >> 2);
- j += 1;
- if (j >= bin.len) {
- break;
- }
- const c4 = @intCast(u8, mem.indexOfScalar(u8, alphabet, b64[i + 3]) orelse
- return EncodingError.InvalidEncoding);
- bin[j] = ((c3 & 0x03) << 6) | c4;
- j += 1;
- i += 4;
- }
- }
+ const bcrypt_alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789".*;
+ const Codec = struct { Encoder: base64.Base64Encoder, Decoder: base64.Base64Decoder }{
+ .Encoder = base64.Base64Encoder.init(bcrypt_alphabet, null),
+ .Decoder = base64.Base64Decoder.init(bcrypt_alphabet, null),
};
fn strHashInternal(
@@ -608,10 +548,10 @@ const crypt_format = struct {
var dk = bcrypt(password, salt, params);
var salt_str: [salt_str_length]u8 = undefined;
- Codec.encode(salt_str[0..], salt[0..]);
+ _ = Codec.Encoder.encode(salt_str[0..], salt[0..]);
var ct_str: [ct_str_length]u8 = undefined;
- Codec.encode(ct_str[0..], dk[0..]);
+ _ = Codec.Encoder.encode(ct_str[0..], dk[0..]);
var s_buf: [hash_length]u8 = undefined;
const s = fmt.bufPrint(
@@ -709,7 +649,7 @@ const CryptFormatHasher = struct {
const salt_str = str[7..][0..salt_str_length];
var salt: [salt_length]u8 = undefined;
- try crypt_format.Codec.decode(salt[0..], salt_str[0..]);
+ crypt_format.Codec.Decoder.decode(salt[0..], salt_str[0..]) catch return HasherError.InvalidEncoding;
const wanted_s = crypt_format.strHashInternal(password, salt, .{ .rounds_log = rounds_log });
if (!mem.eql(u8, wanted_s[0..], str[0..])) return HasherError.PasswordVerificationFailed;
@@ -764,9 +704,9 @@ test "bcrypt codec" {
var salt: [salt_length]u8 = undefined;
crypto.random.bytes(&salt);
var salt_str: [salt_str_length]u8 = undefined;
- crypt_format.Codec.encode(salt_str[0..], salt[0..]);
+ _ = crypt_format.Codec.Encoder.encode(salt_str[0..], salt[0..]);
var salt2: [salt_length]u8 = undefined;
- try crypt_format.Codec.decode(salt2[0..], salt_str[0..]);
+ try crypt_format.Codec.Decoder.decode(salt2[0..], salt_str[0..]);
try testing.expectEqualSlices(u8, salt[0..], salt2[0..]);
}
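The bcrypt change above swaps the hand-rolled codec for std.base64 configured with a custom alphabet and no padding; a minimal sketch of that construction on a hypothetical 16-byte input (not part of the diff), mirroring the salt round-trip test:

const std = @import("std");

const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789".*;
const Encoder = std.base64.Base64Encoder.init(alphabet, null);
const Decoder = std.base64.Base64Decoder.init(alphabet, null);

test "custom-alphabet base64 round trip sketch" {
    const raw = "0123456789abcdef"; // 16 bytes, like a bcrypt salt
    var buf: [32]u8 = undefined;
    const encoded = Encoder.encode(&buf, raw); // 22 characters, no padding

    var decoded: [raw.len]u8 = undefined;
    try Decoder.decode(&decoded, encoded);
    try std.testing.expectEqualSlices(u8, raw, &decoded);
}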
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index 28d283048a..d4bf3d2633 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -130,7 +130,7 @@ pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count
{
var i: usize = 0;
while (i < signatures_count) : (i += 1) {
- const sig = try Signature.sign(&msg, key_pair, null);
+ const sig = try key_pair.sign(&msg, null);
mem.doNotOptimizeAway(&sig);
}
}
@@ -147,14 +147,14 @@ const signature_verifications = [_]Crypto{Crypto{ .ty = crypto.sign.Ed25519, .na
pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
const msg = [_]u8{0} ** 64;
const key_pair = try Signature.KeyPair.create(null);
- const sig = try Signature.sign(&msg, key_pair, null);
+ const sig = try key_pair.sign(&msg, null);
var timer = try Timer.start();
const start = timer.lap();
{
var i: usize = 0;
while (i < signatures_count) : (i += 1) {
- try Signature.verify(sig, &msg, key_pair.public_key);
+ try sig.verify(&msg, key_pair.public_key);
mem.doNotOptimizeAway(&sig);
}
}
@@ -171,7 +171,7 @@ const batch_signature_verifications = [_]Crypto{Crypto{ .ty = crypto.sign.Ed2551
pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
const msg = [_]u8{0} ** 64;
const key_pair = try Signature.KeyPair.create(null);
- const sig = try Signature.sign(&msg, key_pair, null);
+ const sig = try key_pair.sign(&msg, null);
var batch: [64]Signature.BatchElement = undefined;
for (batch) |*element| {
@@ -301,9 +301,13 @@ const CryptoPwhash = struct {
params: *const anyopaque,
name: []const u8,
};
-const bcrypt_params = crypto.pwhash.bcrypt.Params{ .rounds_log = 12 };
+const bcrypt_params = crypto.pwhash.bcrypt.Params{ .rounds_log = 8 };
const pwhashes = [_]CryptoPwhash{
- .{ .ty = crypto.pwhash.bcrypt, .params = &bcrypt_params, .name = "bcrypt" },
+ .{
+ .ty = crypto.pwhash.bcrypt,
+ .params = &bcrypt_params,
+ .name = "bcrypt",
+ },
.{
.ty = crypto.pwhash.scrypt,
.params = &crypto.pwhash.scrypt.Params.interactive,
@@ -323,7 +327,11 @@ fn benchmarkPwhash(
comptime count: comptime_int,
) !f64 {
const password = "testpass" ** 2;
- const opts = .{ .allocator = allocator, .params = @ptrCast(*const ty.Params, params).*, .encoding = .phc };
+ const opts = .{
+ .allocator = allocator,
+ .params = @ptrCast(*const ty.Params, @alignCast(std.meta.alignment(ty.Params), params)).*,
+ .encoding = .phc,
+ };
var buf: [256]u8 = undefined;
var timer = try Timer.start();
diff --git a/lib/std/crypto/ecdsa.zig b/lib/std/crypto/ecdsa.zig
index 3360d7bb87..a0faa3da54 100644
--- a/lib/std/crypto/ecdsa.zig
+++ b/lib/std/crypto/ecdsa.zig
@@ -84,33 +84,18 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
/// The S component of an ECDSA signature.
s: Curve.scalar.CompressedScalar,
+ /// Create a Verifier for incremental verification of a signature.
+ pub fn verifier(self: Signature, public_key: PublicKey) (NonCanonicalError || EncodingError || IdentityElementError)!Verifier {
+ return Verifier.init(self, public_key);
+ }
+
/// Verify the signature against a message and public key.
/// Return IdentityElement or NonCanonical if the public key or signature are not in the expected range,
/// or SignatureVerificationError if the signature is invalid for the given message and key.
pub fn verify(self: Signature, msg: []const u8, public_key: PublicKey) (IdentityElementError || NonCanonicalError || SignatureVerificationError)!void {
- const r = try Curve.scalar.Scalar.fromBytes(self.r, .Big);
- const s = try Curve.scalar.Scalar.fromBytes(self.s, .Big);
- if (r.isZero() or s.isZero()) return error.IdentityElement;
-
- var h: [Hash.digest_length]u8 = undefined;
- Hash.hash(msg, &h, .{});
-
- const ht = Curve.scalar.encoded_length;
- const z = reduceToScalar(ht, h[0..ht].*);
- if (z.isZero()) {
- return error.SignatureVerificationFailed;
- }
-
- const s_inv = s.invert();
- const v1 = z.mul(s_inv).toBytes(.Little);
- const v2 = r.mul(s_inv).toBytes(.Little);
- const v1g = try Curve.basePoint.mulPublic(v1, .Little);
- const v2pk = try public_key.p.mulPublic(v2, .Little);
- const vxs = v1g.add(v2pk).affineCoordinates().x.toBytes(.Big);
- const vr = reduceToScalar(Curve.Fe.encoded_length, vxs);
- if (!r.equivalent(vr)) {
- return error.SignatureVerificationFailed;
- }
+ var st = try Verifier.init(self, public_key);
+ st.update(msg);
+ return st.verify();
}
/// Return the raw signature (r, s) in big-endian format.
@@ -190,6 +175,104 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
}
};
+ /// A Signer is used to incrementally compute a signature.
+ /// It can be obtained from a `KeyPair`, using the `signer()` function.
+ pub const Signer = struct {
+ h: Hash,
+ secret_key: SecretKey,
+ noise: ?[noise_length]u8,
+
+ fn init(secret_key: SecretKey, noise: ?[noise_length]u8) !Signer {
+ return Signer{
+ .h = Hash.init(.{}),
+ .secret_key = secret_key,
+ .noise = noise,
+ };
+ }
+
+ /// Add new data to the message being signed.
+ pub fn update(self: *Signer, data: []const u8) void {
+ self.h.update(data);
+ }
+
+ /// Compute a signature over the entire message.
+ pub fn finalize(self: *Signer) (IdentityElementError || NonCanonicalError)!Signature {
+ const scalar_encoded_length = Curve.scalar.encoded_length;
+ const h_len = @max(Hash.digest_length, scalar_encoded_length);
+ var h: [h_len]u8 = [_]u8{0} ** h_len;
+ var h_slice = h[h_len - Hash.digest_length .. h_len];
+ self.h.final(h_slice);
+
+ std.debug.assert(h.len >= scalar_encoded_length);
+ const z = reduceToScalar(scalar_encoded_length, h[0..scalar_encoded_length].*);
+
+ const k = deterministicScalar(h_slice.*, self.secret_key.bytes, self.noise);
+
+ const p = try Curve.basePoint.mul(k.toBytes(.Big), .Big);
+ const xs = p.affineCoordinates().x.toBytes(.Big);
+ const r = reduceToScalar(Curve.Fe.encoded_length, xs);
+ if (r.isZero()) return error.IdentityElement;
+
+ const k_inv = k.invert();
+ const zrs = z.add(r.mul(try Curve.scalar.Scalar.fromBytes(self.secret_key.bytes, .Big)));
+ const s = k_inv.mul(zrs);
+ if (s.isZero()) return error.IdentityElement;
+
+ return Signature{ .r = r.toBytes(.Big), .s = s.toBytes(.Big) };
+ }
+ };
+
+ /// A Verifier is used to incrementally verify a signature.
+ /// It can be obtained from a `Signature`, using the `verifier()` function.
+ pub const Verifier = struct {
+ h: Hash,
+ r: Curve.scalar.Scalar,
+ s: Curve.scalar.Scalar,
+ public_key: PublicKey,
+
+ fn init(sig: Signature, public_key: PublicKey) (IdentityElementError || NonCanonicalError)!Verifier {
+ const r = try Curve.scalar.Scalar.fromBytes(sig.r, .Big);
+ const s = try Curve.scalar.Scalar.fromBytes(sig.s, .Big);
+ if (r.isZero() or s.isZero()) return error.IdentityElement;
+
+ return Verifier{
+ .h = Hash.init(.{}),
+ .r = r,
+ .s = s,
+ .public_key = public_key,
+ };
+ }
+
+ /// Add new content to the message to be verified.
+ pub fn update(self: *Verifier, data: []const u8) void {
+ self.h.update(data);
+ }
+
+ /// Verify that the signature is valid for the entire message.
+ pub fn verify(self: *Verifier) (IdentityElementError || SignatureVerificationError)!void {
+ const ht = Curve.scalar.encoded_length;
+ const h_len = @max(Hash.digest_length, ht);
+ var h: [h_len]u8 = [_]u8{0} ** h_len;
+ self.h.final(h[h_len - Hash.digest_length .. h_len]);
+
+ const z = reduceToScalar(ht, h[0..ht].*);
+ if (z.isZero()) {
+ return error.SignatureVerificationFailed;
+ }
+
+ const s_inv = self.s.invert();
+ const v1 = z.mul(s_inv).toBytes(.Little);
+ const v2 = self.r.mul(s_inv).toBytes(.Little);
+ const v1g = try Curve.basePoint.mulPublic(v1, .Little);
+ const v2pk = try self.public_key.p.mulPublic(v2, .Little);
+ const vxs = v1g.add(v2pk).affineCoordinates().x.toBytes(.Big);
+ const vr = reduceToScalar(Curve.Fe.encoded_length, vxs);
+ if (!self.r.equivalent(vr)) {
+ return error.SignatureVerificationFailed;
+ }
+ }
+ };
+
/// An ECDSA key pair.
pub const KeyPair = struct {
/// Length (in bytes) of a seed required to create a key pair.
@@ -226,28 +309,14 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
/// If deterministic signatures are not required, the noise should be randomly generated instead.
/// This helps defend against fault attacks.
pub fn sign(key_pair: KeyPair, msg: []const u8, noise: ?[noise_length]u8) (IdentityElementError || NonCanonicalError)!Signature {
- const secret_key = key_pair.secret_key;
-
- var h: [Hash.digest_length]u8 = undefined;
- Hash.hash(msg, &h, .{});
-
- const scalar_encoded_length = Curve.scalar.encoded_length;
- std.debug.assert(h.len >= scalar_encoded_length);
- const z = reduceToScalar(scalar_encoded_length, h[0..scalar_encoded_length].*);
-
- const k = deterministicScalar(h, secret_key.bytes, noise);
-
- const p = try Curve.basePoint.mul(k.toBytes(.Big), .Big);
- const xs = p.affineCoordinates().x.toBytes(.Big);
- const r = reduceToScalar(Curve.Fe.encoded_length, xs);
- if (r.isZero()) return error.IdentityElement;
-
- const k_inv = k.invert();
- const zrs = z.add(r.mul(try Curve.scalar.Scalar.fromBytes(secret_key.bytes, .Big)));
- const s = k_inv.mul(zrs);
- if (s.isZero()) return error.IdentityElement;
+ var st = try key_pair.signer(noise);
+ st.update(msg);
+ return st.finalize();
+ }
- return Signature{ .r = r.toBytes(.Big), .s = s.toBytes(.Big) };
+ /// Create a Signer, that can be used for incremental signing.
+ pub fn signer(key_pair: KeyPair, noise: ?[noise_length]u8) !Signer {
+ return Signer.init(key_pair.secret_key, noise);
}
};
@@ -268,6 +337,7 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
fn deterministicScalar(h: [Hash.digest_length]u8, secret_key: Curve.scalar.CompressedScalar, noise: ?[noise_length]u8) Curve.scalar.Scalar {
var k = [_]u8{0x00} ** h.len;
var m = [_]u8{0x00} ** (h.len + 1 + noise_length + secret_key.len + h.len);
+ var t = [_]u8{0x00} ** Curve.scalar.encoded_length;
const m_v = m[0..h.len];
const m_i = &m[m_v.len];
const m_z = m[m_v.len + 1 ..][0..noise_length];
@@ -286,8 +356,13 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
Hmac.create(&k, &m, &k);
Hmac.create(m_v, m_v, &k);
while (true) {
- Hmac.create(m_v, m_v, &k);
- if (Curve.scalar.Scalar.fromBytes(m_v[0..Curve.scalar.encoded_length].*, .Big)) |s| return s else |_| {}
+ var t_off: usize = 0;
+ while (t_off < t.len) : (t_off += m_v.len) {
+ const t_end = @min(t_off + m_v.len, t.len);
+ Hmac.create(m_v, m_v, &k);
+ std.mem.copy(u8, t[t_off..t_end], m_v[0 .. t_end - t_off]);
+ }
+ if (Curve.scalar.Scalar.fromBytes(t, .Big)) |s| return s else |_| {}
mem.copy(u8, m_v, m_v);
m_i.* = 0x00;
Hmac.create(&k, m[0 .. m_v.len + 1], &k);
@@ -325,6 +400,55 @@ test "ECDSA - Basic operations over Secp256k1" {
try sig2.verify(msg, kp.public_key);
}
+test "ECDSA - Basic operations over EcdsaP384Sha256" {
+ const Scheme = Ecdsa(crypto.ecc.P384, crypto.hash.sha2.Sha256);
+ const kp = try Scheme.KeyPair.create(null);
+ const msg = "test";
+
+ var noise: [Scheme.noise_length]u8 = undefined;
+ crypto.random.bytes(&noise);
+ const sig = try kp.sign(msg, noise);
+ try sig.verify(msg, kp.public_key);
+
+ const sig2 = try kp.sign(msg, null);
+ try sig2.verify(msg, kp.public_key);
+}
+
+test "ECDSA - Verifying a existing signature with EcdsaP384Sha256" {
+ const Scheme = Ecdsa(crypto.ecc.P384, crypto.hash.sha2.Sha256);
+ // zig fmt: off
+ const sk_bytes = [_]u8{
+ 0x6a, 0x53, 0x9c, 0x83, 0x0f, 0x06, 0x86, 0xd9, 0xef, 0xf1, 0xe7, 0x5c, 0xae,
+ 0x93, 0xd9, 0x5b, 0x16, 0x1e, 0x96, 0x7c, 0xb0, 0x86, 0x35, 0xc9, 0xea, 0x20,
+ 0xdc, 0x2b, 0x02, 0x37, 0x6d, 0xd2, 0x89, 0x72, 0x0a, 0x37, 0xf6, 0x5d, 0x4f,
+ 0x4d, 0xf7, 0x97, 0xcb, 0x8b, 0x03, 0x63, 0xc3, 0x2d
+ };
+ const msg = [_]u8{
+ 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x73, 0x69, 0x67, 0x6e,
+ 0x69, 0x6e, 0x67, 0x0a
+ };
+ const sig_ans_bytes = [_]u8{
+ 0x30, 0x64, 0x02, 0x30, 0x7a, 0x31, 0xd8, 0xe0, 0xf8, 0x40, 0x7d, 0x6a, 0xf3,
+ 0x1a, 0x5d, 0x02, 0xe5, 0xcb, 0x24, 0x29, 0x1a, 0xac, 0x15, 0x94, 0xd1, 0x5b,
+ 0xcd, 0x75, 0x2f, 0x45, 0x79, 0x98, 0xf7, 0x60, 0x9a, 0xd5, 0xca, 0x80, 0x15,
+ 0x87, 0x9b, 0x0c, 0x27, 0xe3, 0x01, 0x8b, 0x73, 0x4e, 0x57, 0xa3, 0xd2, 0x9a,
+ 0x02, 0x30, 0x33, 0xe0, 0x04, 0x5e, 0x76, 0x1f, 0xc8, 0xcf, 0xda, 0xbe, 0x64,
+ 0x95, 0x0a, 0xd4, 0x85, 0x34, 0x33, 0x08, 0x7a, 0x81, 0xf2, 0xf6, 0xb6, 0x94,
+ 0x68, 0xc3, 0x8c, 0x5f, 0x88, 0x92, 0x27, 0x5e, 0x4e, 0x84, 0x96, 0x48, 0x42,
+ 0x84, 0x28, 0xac, 0x37, 0x93, 0x07, 0xd3, 0x50, 0x32, 0x71, 0xb0
+ };
+ // zig fmt: on
+
+ const sk = try Scheme.SecretKey.fromBytes(sk_bytes);
+ const kp = try Scheme.KeyPair.fromSecretKey(sk);
+
+ const sig_ans = try Scheme.Signature.fromDer(&sig_ans_bytes);
+ try sig_ans.verify(&msg, kp.public_key);
+
+ const sig = try kp.sign(&msg, null);
+ try sig.verify(&msg, kp.public_key);
+}
+
const TestVector = struct {
key: []const u8,
msg: []const u8,
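Following the ecdsa.zig changes, a sketch of the new incremental Signer/Verifier flow (not part of the diff), assuming the generic constructor is reachable as std.crypto.sign.ecdsa.Ecdsa and reusing the Ecdsa(P384, Sha256) instantiation from the added tests:

const std = @import("std");
const crypto = std.crypto;

test "ECDSA streaming sign and verify sketch" {
    const Scheme = crypto.sign.ecdsa.Ecdsa(crypto.ecc.P384, crypto.hash.sha2.Sha256);
    const kp = try Scheme.KeyPair.create(null);

    // Feed the message in chunks, then finalize into a Signature.
    var signer = try kp.signer(null);
    signer.update("mes");
    signer.update("sage");
    const sig = try signer.finalize();

    // Verify incrementally against the matching public key.
    var verifier = try sig.verifier(kp.public_key);
    verifier.update("message");
    try verifier.verify();
}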
diff --git a/lib/std/crypto/sha2.zig b/lib/std/crypto/sha2.zig
index b7a78c4b44..9cdf8edcf1 100644
--- a/lib/std/crypto/sha2.zig
+++ b/lib/std/crypto/sha2.zig
@@ -1,4 +1,5 @@
const std = @import("../std.zig");
+const builtin = @import("builtin");
const mem = std.mem;
const math = std.math;
const htest = @import("test.zig");
@@ -16,10 +17,9 @@ const RoundParam256 = struct {
g: usize,
h: usize,
i: usize,
- k: u32,
};
-fn roundParam256(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, i: usize, k: u32) RoundParam256 {
+fn roundParam256(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, i: usize) RoundParam256 {
return RoundParam256{
.a = a,
.b = b,
@@ -30,7 +30,6 @@ fn roundParam256(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g:
.g = g,
.h = h,
.i = i,
- .k = k,
};
}
@@ -70,6 +69,14 @@ const Sha256Params = Sha2Params32{
.digest_bits = 256,
};
+const v4u32 = @Vector(4, u32);
+
+// TODO: Remove once https://github.com/ziglang/zig/issues/868 is resolved.
+fn isComptime() bool {
+ var a: u8 = 0;
+ return @typeInfo(@TypeOf(.{a})).Struct.fields[0].is_comptime;
+}
+
/// SHA-224
pub const Sha224 = Sha2x32(Sha224Params);
@@ -83,7 +90,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
pub const digest_length = params.digest_bits / 8;
pub const Options = struct {};
- s: [8]u32,
+ s: [8]u32 align(16),
// Streaming Cache
buf: [64]u8 = undefined,
buf_len: u8 = 0,
@@ -168,17 +175,116 @@ fn Sha2x32(comptime params: Sha2Params32) type {
}
}
+ const W = [64]u32{
+ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
+ 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
+ 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
+ 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
+ 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
+ 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
+ 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
+ 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
+ };
+
fn round(d: *Self, b: *const [64]u8) void {
- var s: [64]u32 = undefined;
+ var s: [64]u32 align(16) = undefined;
+ for (@ptrCast(*align(1) const [16]u32, b)) |*elem, i| {
+ s[i] = mem.readIntBig(u32, mem.asBytes(elem));
+ }
- var i: usize = 0;
- while (i < 16) : (i += 1) {
- s[i] = 0;
- s[i] |= @as(u32, b[i * 4 + 0]) << 24;
- s[i] |= @as(u32, b[i * 4 + 1]) << 16;
- s[i] |= @as(u32, b[i * 4 + 2]) << 8;
- s[i] |= @as(u32, b[i * 4 + 3]) << 0;
+ if (!isComptime()) {
+ switch (builtin.cpu.arch) {
+ .aarch64 => if (comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) {
+ var x: v4u32 = d.s[0..4].*;
+ var y: v4u32 = d.s[4..8].*;
+ const s_v = @ptrCast(*[16]v4u32, &s);
+
+ comptime var k: u8 = 0;
+ inline while (k < 16) : (k += 1) {
+ if (k > 3) {
+ s_v[k] = asm (
+ \\sha256su0.4s %[w0_3], %[w4_7]
+ \\sha256su1.4s %[w0_3], %[w8_11], %[w12_15]
+ : [w0_3] "=w" (-> v4u32),
+ : [_] "0" (s_v[k - 4]),
+ [w4_7] "w" (s_v[k - 3]),
+ [w8_11] "w" (s_v[k - 2]),
+ [w12_15] "w" (s_v[k - 1]),
+ );
+ }
+
+ const w: v4u32 = s_v[k] +% @as(v4u32, W[4 * k ..][0..4].*);
+ asm volatile (
+ \\mov.4s v0, %[x]
+ \\sha256h.4s %[x], %[y], %[w]
+ \\sha256h2.4s %[y], v0, %[w]
+ : [x] "=w" (x),
+ [y] "=w" (y),
+ : [_] "0" (x),
+ [_] "1" (y),
+ [w] "w" (w),
+ : "v0"
+ );
+ }
+
+ d.s[0..4].* = x +% @as(v4u32, d.s[0..4].*);
+ d.s[4..8].* = y +% @as(v4u32, d.s[4..8].*);
+ return;
+ },
+ .x86_64 => if (comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sha)) {
+ var x: v4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] };
+ var y: v4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] };
+ const s_v = @ptrCast(*[16]v4u32, &s);
+
+ comptime var k: u8 = 0;
+ inline while (k < 16) : (k += 1) {
+ if (k < 12) {
+ var tmp = s_v[k];
+ s_v[k + 4] = asm (
+ \\ sha256msg1 %[w4_7], %[tmp]
+ \\ vpalignr $0x4, %[w8_11], %[w12_15], %[result]
+ \\ paddd %[tmp], %[result]
+ \\ sha256msg2 %[w12_15], %[result]
+ : [tmp] "=&x" (tmp),
+ [result] "=&x" (-> v4u32),
+ : [_] "0" (tmp),
+ [w4_7] "x" (s_v[k + 1]),
+ [w8_11] "x" (s_v[k + 2]),
+ [w12_15] "x" (s_v[k + 3]),
+ );
+ }
+
+ const w: v4u32 = s_v[k] +% @as(v4u32, W[4 * k ..][0..4].*);
+ y = asm ("sha256rnds2 %[x], %[y]"
+ : [y] "=x" (-> v4u32),
+ : [_] "0" (y),
+ [x] "x" (x),
+ [_] "{xmm0}" (w),
+ );
+
+ x = asm ("sha256rnds2 %[y], %[x]"
+ : [x] "=x" (-> v4u32),
+ : [_] "0" (x),
+ [y] "x" (y),
+ [_] "{xmm0}" (@bitCast(v4u32, @bitCast(u128, w) >> 64)),
+ );
+ }
+
+ d.s[0] +%= x[3];
+ d.s[1] +%= x[2];
+ d.s[4] +%= x[1];
+ d.s[5] +%= x[0];
+ d.s[2] +%= y[3];
+ d.s[3] +%= y[2];
+ d.s[6] +%= y[1];
+ d.s[7] +%= y[0];
+ return;
+ },
+ else => {},
+ }
}
+
+ var i: usize = 16;
while (i < 64) : (i += 1) {
s[i] = s[i - 16] +% s[i - 7] +% (math.rotr(u32, s[i - 15], @as(u32, 7)) ^ math.rotr(u32, s[i - 15], @as(u32, 18)) ^ (s[i - 15] >> 3)) +% (math.rotr(u32, s[i - 2], @as(u32, 17)) ^ math.rotr(u32, s[i - 2], @as(u32, 19)) ^ (s[i - 2] >> 10));
}
@@ -195,73 +301,73 @@ fn Sha2x32(comptime params: Sha2Params32) type {
};
const round0 = comptime [_]RoundParam256{
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 0, 0x428A2F98),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 1, 0x71374491),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 2, 0xB5C0FBCF),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 3, 0xE9B5DBA5),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 4, 0x3956C25B),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 5, 0x59F111F1),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 6, 0x923F82A4),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 7, 0xAB1C5ED5),
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 8, 0xD807AA98),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 9, 0x12835B01),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 10, 0x243185BE),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 11, 0x550C7DC3),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 12, 0x72BE5D74),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 13, 0x80DEB1FE),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 14, 0x9BDC06A7),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 15, 0xC19BF174),
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 16, 0xE49B69C1),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 17, 0xEFBE4786),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 18, 0x0FC19DC6),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 19, 0x240CA1CC),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 20, 0x2DE92C6F),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 21, 0x4A7484AA),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 22, 0x5CB0A9DC),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 23, 0x76F988DA),
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 24, 0x983E5152),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 25, 0xA831C66D),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 26, 0xB00327C8),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 27, 0xBF597FC7),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 28, 0xC6E00BF3),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 29, 0xD5A79147),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 30, 0x06CA6351),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 31, 0x14292967),
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 32, 0x27B70A85),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 33, 0x2E1B2138),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 34, 0x4D2C6DFC),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 35, 0x53380D13),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 36, 0x650A7354),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 37, 0x766A0ABB),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 38, 0x81C2C92E),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 39, 0x92722C85),
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 40, 0xA2BFE8A1),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 41, 0xA81A664B),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 42, 0xC24B8B70),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 43, 0xC76C51A3),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 44, 0xD192E819),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 45, 0xD6990624),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 46, 0xF40E3585),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 47, 0x106AA070),
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 48, 0x19A4C116),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 49, 0x1E376C08),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 50, 0x2748774C),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 51, 0x34B0BCB5),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 52, 0x391C0CB3),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 53, 0x4ED8AA4A),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 54, 0x5B9CCA4F),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 55, 0x682E6FF3),
- roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 56, 0x748F82EE),
- roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 57, 0x78A5636F),
- roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 58, 0x84C87814),
- roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 59, 0x8CC70208),
- roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 60, 0x90BEFFFA),
- roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 61, 0xA4506CEB),
- roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 62, 0xBEF9A3F7),
- roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 0),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 1),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 2),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 3),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 4),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 5),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 6),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 7),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 8),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 9),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 10),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 11),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 12),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 13),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 14),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 15),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 16),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 17),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 18),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 19),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 20),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 21),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 22),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 23),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 24),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 25),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 26),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 27),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 28),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 29),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 30),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 31),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 32),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 33),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 34),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 35),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 36),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 37),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 38),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 39),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 40),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 41),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 42),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 43),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 44),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 45),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 46),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 47),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 48),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 49),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 50),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 51),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 52),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 53),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 54),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 55),
+ roundParam256(0, 1, 2, 3, 4, 5, 6, 7, 56),
+ roundParam256(7, 0, 1, 2, 3, 4, 5, 6, 57),
+ roundParam256(6, 7, 0, 1, 2, 3, 4, 5, 58),
+ roundParam256(5, 6, 7, 0, 1, 2, 3, 4, 59),
+ roundParam256(4, 5, 6, 7, 0, 1, 2, 3, 60),
+ roundParam256(3, 4, 5, 6, 7, 0, 1, 2, 61),
+ roundParam256(2, 3, 4, 5, 6, 7, 0, 1, 62),
+ roundParam256(1, 2, 3, 4, 5, 6, 7, 0, 63),
};
inline for (round0) |r| {
- v[r.h] = v[r.h] +% (math.rotr(u32, v[r.e], @as(u32, 6)) ^ math.rotr(u32, v[r.e], @as(u32, 11)) ^ math.rotr(u32, v[r.e], @as(u32, 25))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% r.k +% s[r.i];
+ v[r.h] = v[r.h] +% (math.rotr(u32, v[r.e], @as(u32, 6)) ^ math.rotr(u32, v[r.e], @as(u32, 11)) ^ math.rotr(u32, v[r.e], @as(u32, 25))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% W[r.i] +% s[r.i];
v[r.d] = v[r.d] +% v[r.h];
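
A note on the hunk above: the message schedule and compression rounds now dispatch to the aarch64 SHA2 and x86_64 SHA-NI instructions when the CPU reports them, while the public streaming interface is untouched. A minimal usage sketch against the std API of this era (one-shot vs. streaming; nothing here is new to this commit):

    const std = @import("std");
    const Sha256 = std.crypto.hash.sha2.Sha256;

    test "sha256 one-shot and streaming agree" {
        var one_shot: [Sha256.digest_length]u8 = undefined;
        Sha256.hash("hello world", &one_shot, .{});

        var h = Sha256.init(.{});
        h.update("hello ");
        h.update("world");
        var streamed: [Sha256.digest_length]u8 = undefined;
        h.final(&streamed);

        try std.testing.expectEqualSlices(u8, &one_shot, &streamed);
    }
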
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index 81a43062dc..522731ef02 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -201,7 +201,7 @@ pub const PreopenList = struct {
// If we were provided a CWD root to resolve against, we try to treat Preopen dirs as
// POSIX paths, relative to "/" or `cwd_root` depending on whether they start with "."
const path = if (cwd_root) |cwd| blk: {
- const resolve_paths: [][]const u8 = if (raw_path[0] == '.') &.{ cwd, raw_path } else &.{ "/", raw_path };
+ const resolve_paths: []const []const u8 = if (raw_path[0] == '.') &.{ cwd, raw_path } else &.{ "/", raw_path };
break :blk fs.path.resolve(self.buffer.allocator, resolve_paths) catch |err| switch (err) {
error.CurrentWorkingDirectoryUnlinked => unreachable, // root is absolute, so CWD not queried
else => |e| return e,
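
The type change above matters because `&.{ cwd, raw_path }` is a pointer to a constant anonymous array, which only coerces to a slice with const elements. A small sketch of the coercion rule (names are illustrative, not taken from the patch):

    const std = @import("std");

    test "address-of list literal coerces to a const slice only" {
        const cwd: []const u8 = "/home/user";
        const raw_path: []const u8 = "./dir";
        // OK: pointer-to-const-array coerces to `[]const []const u8`.
        const paths: []const []const u8 = &.{ cwd, raw_path };
        try std.testing.expect(paths.len == 2);
        // A `[][]const u8` target would need mutable backing memory and is a compile error:
        // const bad: [][]const u8 = &.{ cwd, raw_path };
    }
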
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index b875f73b2e..ac2f089ea1 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -1762,16 +1762,32 @@ pub const Mutable = struct {
}
/// Read the value of `x` from `buffer`
- /// Asserts that `buffer`, `abi_size`, and `bit_count` are large enough to store the value.
+ /// Asserts that `buffer` is large enough to contain a value of bit-size `bit_count`.
///
/// The contents of `buffer` are interpreted as if they were the contents of
- /// @ptrCast(*[abi_size]const u8, &x). Byte ordering is determined by `endian`
+ /// @ptrCast(*[buffer.len]const u8, &x). Byte ordering is determined by `endian`
/// and any required padding bits are expected on the MSB end.
pub fn readTwosComplement(
x: *Mutable,
buffer: []const u8,
bit_count: usize,
- abi_size: usize,
+ endian: Endian,
+ signedness: Signedness,
+ ) void {
+ return readPackedTwosComplement(x, buffer, 0, bit_count, endian, signedness);
+ }
+
+ /// Read the value of `x` from a packed memory `buffer`.
+ /// Asserts that `buffer` is large enough to contain a value of bit-size `bit_count`
+ /// at offset `bit_offset`.
+ ///
+ /// This is equivalent to loading the value of an integer with `bit_count` bits as
+ /// if it were a field in packed memory at the provided bit offset.
+ pub fn readPackedTwosComplement(
+ x: *Mutable,
+ bytes: []const u8,
+ bit_offset: usize,
+ bit_count: usize,
endian: Endian,
signedness: Signedness,
) void {
@@ -1782,75 +1798,54 @@ pub const Mutable = struct {
return;
}
- // byte_count is our total read size: it cannot exceed abi_size,
- // but may be less as long as it includes the required bits
- const limb_count = calcTwosCompLimbCount(bit_count);
- const byte_count = std.math.min(abi_size, @sizeOf(Limb) * limb_count);
- assert(8 * byte_count >= bit_count);
-
// Check whether the input is negative
var positive = true;
if (signedness == .signed) {
+ const total_bits = bit_offset + bit_count;
var last_byte = switch (endian) {
- .Little => ((bit_count + 7) / 8) - 1,
- .Big => abi_size - ((bit_count + 7) / 8),
+ .Little => ((total_bits + 7) / 8) - 1,
+ .Big => bytes.len - ((total_bits + 7) / 8),
};
- const sign_bit = @as(u8, 1) << @intCast(u3, (bit_count - 1) % 8);
- positive = ((buffer[last_byte] & sign_bit) == 0);
+ const sign_bit = @as(u8, 1) << @intCast(u3, (total_bits - 1) % 8);
+ positive = ((bytes[last_byte] & sign_bit) == 0);
}
// Copy all complete limbs
- var carry: u1 = if (positive) 0 else 1;
+ var carry: u1 = 1;
var limb_index: usize = 0;
+ var bit_index: usize = 0;
while (limb_index < bit_count / @bitSizeOf(Limb)) : (limb_index += 1) {
- var buf_index = switch (endian) {
- .Little => @sizeOf(Limb) * limb_index,
- .Big => abi_size - (limb_index + 1) * @sizeOf(Limb),
- };
-
- const limb_buf = @ptrCast(*const [@sizeOf(Limb)]u8, buffer[buf_index..]);
- var limb = mem.readInt(Limb, limb_buf, endian);
+ // Read one Limb of bits
+ var limb = mem.readPackedInt(Limb, bytes, bit_index + bit_offset, endian);
+ bit_index += @bitSizeOf(Limb);
// 2's complement (bitwise not, then add carry bit)
if (!positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb));
x.limbs[limb_index] = limb;
}
- // Copy the remaining N bytes (N <= @sizeOf(Limb))
- var bytes_read = limb_index * @sizeOf(Limb);
- if (bytes_read != byte_count) {
- var limb: Limb = 0;
-
- while (bytes_read != byte_count) {
- const read_size = std.math.floorPowerOfTwo(usize, byte_count - bytes_read);
- var int_buffer = switch (endian) {
- .Little => buffer[bytes_read..],
- .Big => buffer[(abi_size - bytes_read - read_size)..],
- };
- limb |= @intCast(Limb, switch (read_size) {
- 1 => mem.readInt(u8, int_buffer[0..1], endian),
- 2 => mem.readInt(u16, int_buffer[0..2], endian),
- 4 => mem.readInt(u32, int_buffer[0..4], endian),
- 8 => mem.readInt(u64, int_buffer[0..8], endian),
- 16 => mem.readInt(u128, int_buffer[0..16], endian),
- else => unreachable,
- }) << @intCast(Log2Limb, 8 * (bytes_read % @sizeOf(Limb)));
- bytes_read += read_size;
- }
+ // Copy the remaining bits
+ if (bit_count != bit_index) {
+ // Read all remaining bits
+ var limb = switch (signedness) {
+ .unsigned => mem.readVarPackedInt(Limb, bytes, bit_index + bit_offset, bit_count - bit_index, endian, .unsigned),
+ .signed => b: {
+ const SLimb = std.meta.Int(.signed, @bitSizeOf(Limb));
+ const limb = mem.readVarPackedInt(SLimb, bytes, bit_index + bit_offset, bit_count - bit_index, endian, .signed);
+ break :b @bitCast(Limb, limb);
+ },
+ };
// 2's complement (bitwise not, then add carry bit)
- if (!positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb);
-
- // Mask off any unused bits
- const valid_bits = @intCast(Log2Limb, bit_count % @bitSizeOf(Limb));
- const mask = (@as(Limb, 1) << valid_bits) -% 1; // 0b0..01..1 with (valid_bits_in_limb) trailing ones
- limb &= mask;
+ if (!positive) assert(!@addWithOverflow(Limb, ~limb, carry, &limb));
+ x.limbs[limb_index] = limb;
- x.limbs[limb_count - 1] = limb;
+ limb_index += 1;
}
+
x.positive = positive;
- x.len = limb_count;
+ x.len = limb_index;
x.normalize(x.len);
}
@@ -2212,66 +2207,48 @@ pub const Const = struct {
}
/// Write the value of `x` into `buffer`
- /// Asserts that `buffer`, `abi_size`, and `bit_count` are large enough to store the value.
+ /// Asserts that `buffer` is large enough to store the value.
///
/// `buffer` is filled so that its contents match what would be observed via
- /// @ptrCast(*[abi_size]const u8, &x). Byte ordering is determined by `endian`,
+ /// @ptrCast(*[buffer.len]const u8, &x). Byte ordering is determined by `endian`,
/// and any required padding bits are added on the MSB end.
- pub fn writeTwosComplement(x: Const, buffer: []u8, bit_count: usize, abi_size: usize, endian: Endian) void {
+ pub fn writeTwosComplement(x: Const, buffer: []u8, endian: Endian) void {
+ return writePackedTwosComplement(x, buffer, 0, 8 * buffer.len, endian);
+ }
- // byte_count is our total write size
- const byte_count = abi_size;
- assert(8 * byte_count >= bit_count);
- assert(buffer.len >= byte_count);
+ /// Write the value of `x` to a packed memory `buffer`.
+ /// Asserts that `buffer` is large enough to contain a value of bit-size `bit_count`
+ /// at offset `bit_offset`.
+ ///
+ /// This is equivalent to storing the value of an integer with `bit_count` bits as
+ /// if it were a field in packed memory at the provided bit offset.
+ pub fn writePackedTwosComplement(x: Const, bytes: []u8, bit_offset: usize, bit_count: usize, endian: Endian) void {
assert(x.fitsInTwosComp(if (x.positive) .unsigned else .signed, bit_count));
// Copy all complete limbs
- var carry: u1 = if (x.positive) 0 else 1;
+ var carry: u1 = 1;
var limb_index: usize = 0;
- while (limb_index < byte_count / @sizeOf(Limb)) : (limb_index += 1) {
- var buf_index = switch (endian) {
- .Little => @sizeOf(Limb) * limb_index,
- .Big => abi_size - (limb_index + 1) * @sizeOf(Limb),
- };
-
+ var bit_index: usize = 0;
+ while (limb_index < bit_count / @bitSizeOf(Limb)) : (limb_index += 1) {
var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
+
// 2's complement (bitwise not, then add carry bit)
if (!x.positive) carry = @boolToInt(@addWithOverflow(Limb, ~limb, carry, &limb));
- var limb_buf = @ptrCast(*[@sizeOf(Limb)]u8, buffer[buf_index..]);
- mem.writeInt(Limb, limb_buf, limb, endian);
+ // Write one Limb of bits
+ mem.writePackedInt(Limb, bytes, bit_index + bit_offset, limb, endian);
+ bit_index += @bitSizeOf(Limb);
}
- // Copy the remaining N bytes (N < @sizeOf(Limb))
- var bytes_written = limb_index * @sizeOf(Limb);
- if (bytes_written != byte_count) {
+ // Copy the remaining bits
+ if (bit_count != bit_index) {
var limb: Limb = if (limb_index < x.limbs.len) x.limbs[limb_index] else 0;
+
// 2's complement (bitwise not, then add carry bit)
if (!x.positive) _ = @addWithOverflow(Limb, ~limb, carry, &limb);
- while (bytes_written != byte_count) {
- const write_size = std.math.floorPowerOfTwo(usize, byte_count - bytes_written);
- var int_buffer = switch (endian) {
- .Little => buffer[bytes_written..],
- .Big => buffer[(abi_size - bytes_written - write_size)..],
- };
-
- if (write_size == 1) {
- mem.writeInt(u8, int_buffer[0..1], @truncate(u8, limb), endian);
- } else if (@sizeOf(Limb) >= 2 and write_size == 2) {
- mem.writeInt(u16, int_buffer[0..2], @truncate(u16, limb), endian);
- } else if (@sizeOf(Limb) >= 4 and write_size == 4) {
- mem.writeInt(u32, int_buffer[0..4], @truncate(u32, limb), endian);
- } else if (@sizeOf(Limb) >= 8 and write_size == 8) {
- mem.writeInt(u64, int_buffer[0..8], @truncate(u64, limb), endian);
- } else if (@sizeOf(Limb) >= 16 and write_size == 16) {
- mem.writeInt(u128, int_buffer[0..16], @truncate(u128, limb), endian);
- } else if (@sizeOf(Limb) >= 32) {
- @compileError("@sizeOf(Limb) exceeded supported range");
- } else unreachable;
- limb >>= @intCast(Log2Limb, 8 * write_size);
- bytes_written += write_size;
- }
+ // Write all remaining bits
+ mem.writeVarPackedInt(bytes, bit_index + bit_offset, bit_count - bit_index, limb, endian);
}
}
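
With the new signatures the byte width comes from the slice itself, and the packed variants accept a bit offset into the buffer. A round-trip sketch under the new API (limb count and value chosen arbitrarily):

    const std = @import("std");
    const big = std.math.big;

    test "twos-complement round trip with the slice-based signatures" {
        var a = try big.int.Managed.initSet(std.testing.allocator, -0x0102030405060708);
        defer a.deinit();

        var buf = [_]u8{0} ** 16;
        // 16 bytes == 128 bits; the negative value is sign-extended to fill the slice.
        a.toConst().writeTwosComplement(buf[0..], .Little);

        var limbs: [4]big.Limb = undefined;
        var m = big.int.Mutable.init(&limbs, 0);
        m.readTwosComplement(buf[0..], 128, .Little, .signed);
        try std.testing.expect(m.toConst().orderAgainstScalar(-0x0102030405060708) == .eq);
    }
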
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 5685a38d41..97de06bfcc 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -2603,13 +2603,13 @@ test "big int conversion read/write twos complement" {
for (endians) |endian| {
// Writing to buffer and back should not change anything
- a.toConst().writeTwosComplement(buffer1, 493, abi_size, endian);
- m.readTwosComplement(buffer1, 493, abi_size, endian, .unsigned);
+ a.toConst().writeTwosComplement(buffer1[0..abi_size], endian);
+ m.readTwosComplement(buffer1[0..abi_size], 493, endian, .unsigned);
try testing.expect(m.toConst().order(a.toConst()) == .eq);
        // Equivalent to @bitCast(i493, @as(u493, maxInt(u493)))
- a.toConst().writeTwosComplement(buffer1, 493, abi_size, endian);
- m.readTwosComplement(buffer1, 493, abi_size, endian, .signed);
+ a.toConst().writeTwosComplement(buffer1[0..abi_size], endian);
+ m.readTwosComplement(buffer1[0..abi_size], 493, endian, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-1) == .eq);
}
}
@@ -2628,26 +2628,26 @@ test "big int conversion read twos complement with padding" {
// (3) should sign-extend any bits from bit_count to 8 * abi_size
var bit_count: usize = 12 * 8 + 1;
- a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ a.toConst().writeTwosComplement(buffer1[0..13], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0xaa, 0xaa, 0xaa }));
- a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ a.toConst().writeTwosComplement(buffer1[0..13], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xaa, 0xaa, 0xaa }));
- a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ a.toConst().writeTwosComplement(buffer1[0..16], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0, 0x0, 0x0 }));
- a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ a.toConst().writeTwosComplement(buffer1[0..16], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }));
@memset(buffer1.ptr, 0xaa, buffer1.len);
try a.set(-0x01_02030405_06070809_0a0b0c0d);
bit_count = 12 * 8 + 2;
- a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ a.toConst().writeTwosComplement(buffer1[0..13], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xaa, 0xaa, 0xaa }));
- a.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ a.toConst().writeTwosComplement(buffer1[0..13], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3, 0xaa, 0xaa, 0xaa }));
- a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ a.toConst().writeTwosComplement(buffer1[0..16], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff }));
- a.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ a.toConst().writeTwosComplement(buffer1[0..16], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 }));
}
@@ -2660,17 +2660,15 @@ test "big int write twos complement +/- zero" {
defer testing.allocator.free(buffer1);
@memset(buffer1.ptr, 0xaa, buffer1.len);
- var bit_count: usize = 0;
-
// Test zero
- m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ m.toConst().writeTwosComplement(buffer1[0..13], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
- m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ m.toConst().writeTwosComplement(buffer1[0..13], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
- m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ m.toConst().writeTwosComplement(buffer1[0..16], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
- m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ m.toConst().writeTwosComplement(buffer1[0..16], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
@memset(buffer1.ptr, 0xaa, buffer1.len);
@@ -2678,13 +2676,13 @@ test "big int write twos complement +/- zero" {
// Test negative zero
- m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Little);
+ m.toConst().writeTwosComplement(buffer1[0..13], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
- m.toConst().writeTwosComplement(buffer1, bit_count, 13, .Big);
+ m.toConst().writeTwosComplement(buffer1[0..13], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 13) ++ ([_]u8{0xaa} ** 3))));
- m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Little);
+ m.toConst().writeTwosComplement(buffer1[0..16], .Little);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
- m.toConst().writeTwosComplement(buffer1, bit_count, 16, .Big);
+ m.toConst().writeTwosComplement(buffer1[0..16], .Big);
try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
}
@@ -2705,62 +2703,82 @@ test "big int conversion write twos complement with padding" {
// Test 0x01_02030405_06070809_0a0b0c0d
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xb };
- m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xb, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
- m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xab, 0xaa, 0xaa, 0xaa };
- m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xab, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
- m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x01_02030405_06070809_0a0b0c0d) == .eq);
+ bit_count = @sizeOf(Limb) * 8;
+
+ // Test 0x0a0a0a0a_02030405_06070809_0a0b0c0d
+
+ buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa };
+ m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq);
+
+ buffer = &[_]u8{ 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
+ m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq);
+
+ buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa, 0xaa, 0xaa, 0xaa };
+ m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq);
+
+ buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
+ m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
+ try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq);
+
bit_count = 12 * 8 + 2;
// Test -0x01_02030405_06070809_0a0b0c0d
buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02 };
- m.readTwosComplement(buffer, bit_count, 13, .Little, .signed);
+ m.readTwosComplement(buffer[0..13], bit_count, .Little, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 };
- m.readTwosComplement(buffer, bit_count, 13, .Big, .signed);
+ m.readTwosComplement(buffer[0..13], bit_count, .Big, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xf3, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0x02, 0xaa, 0xaa, 0xaa };
- m.readTwosComplement(buffer, bit_count, 16, .Little, .signed);
+ m.readTwosComplement(buffer[0..16], bit_count, .Little, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0x02, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf3 };
- m.readTwosComplement(buffer, bit_count, 16, .Big, .signed);
+ m.readTwosComplement(buffer[0..16], bit_count, .Big, .signed);
try testing.expect(m.toConst().orderAgainstScalar(-0x01_02030405_06070809_0a0b0c0d) == .eq);
// Test 0
buffer = &([_]u8{0} ** 16);
- m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
bit_count = 0;
buffer = &([_]u8{0xaa} ** 16);
- m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
}
@@ -2779,15 +2797,15 @@ test "big int conversion write twos complement zero" {
var buffer: []const u8 = undefined;
buffer = &([_]u8{0} ** 13);
- m.readTwosComplement(buffer, bit_count, 13, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 13, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
buffer = &([_]u8{0} ** 16);
- m.readTwosComplement(buffer, bit_count, 16, .Little, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
- m.readTwosComplement(buffer, bit_count, 16, .Big, .unsigned);
+ m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
try testing.expect(m.toConst().orderAgainstScalar(0x0) == .eq);
}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 4000030fc0..93dd54bdb5 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -1083,7 +1083,7 @@ fn boyerMooreHorspoolPreprocessReverse(pattern: []const u8, table: *[256]usize)
var i: usize = pattern.len - 1;
// The first item is intentionally ignored and the skip size will be pattern.len.
- // This is the standard way boyer-moore-horspool is implemented.
+ // This is the standard way Boyer-Moore-Horspool is implemented.
while (i > 0) : (i -= 1) {
table[pattern[i]] = i;
}
@@ -1096,14 +1096,15 @@ fn boyerMooreHorspoolPreprocess(pattern: []const u8, table: *[256]usize) void {
var i: usize = 0;
// The last item is intentionally ignored and the skip size will be pattern.len.
- // This is the standard way boyer-moore-horspool is implemented.
+ // This is the standard way Boyer-Moore-Horspool is implemented.
while (i < pattern.len - 1) : (i += 1) {
table[pattern[i]] = pattern.len - 1 - i;
}
}
+
/// Find the index in a slice of a sub-slice, searching from the end backwards.
/// To start looking at a different index, slice the haystack first.
-/// Uses the Reverse boyer-moore-horspool algorithm on large inputs;
+/// Uses the Reverse Boyer-Moore-Horspool algorithm on large inputs;
/// `lastIndexOfLinear` on small inputs.
pub fn lastIndexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize {
if (needle.len > haystack.len) return null;
@@ -1131,7 +1132,7 @@ pub fn lastIndexOf(comptime T: type, haystack: []const T, needle: []const T) ?us
return null;
}
-/// Uses Boyer-moore-horspool algorithm on large inputs; `indexOfPosLinear` on small inputs.
+/// Uses Boyer-Moore-Horspool algorithm on large inputs; `indexOfPosLinear` on small inputs.
pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize {
if (needle.len > haystack.len) return null;
if (needle.len == 0) return start_index;
@@ -1183,7 +1184,7 @@ test "indexOf" {
test "indexOf multibyte" {
{
- // make haystack and needle long enough to trigger boyer-moore-horspool algorithm
+ // make haystack and needle long enough to trigger Boyer-Moore-Horspool algorithm
const haystack = [1]u16{0} ** 100 ++ [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff };
const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee };
try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needle), 100);
@@ -1196,7 +1197,7 @@ test "indexOf multibyte" {
}
{
- // make haystack and needle long enough to trigger boyer-moore-horspool algorithm
+ // make haystack and needle long enough to trigger Boyer-Moore-Horspool algorithm
const haystack = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff } ++ [1]u16{0} ** 100;
const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee };
try testing.expectEqual(lastIndexOf(u16, &haystack, &needle), 0);
@@ -1298,6 +1299,76 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian)
return result;
}
+/// Loads an integer from packed memory with provided bit_count, bit_offset, and signedness.
+/// Asserts that T is large enough to store the read value.
+///
+/// Example:
+/// const T = packed struct(u16){ a: u3, b: u7, c: u6 };
+/// var st = T{ .a = 1, .b = 2, .c = 4 };
+/// const b_field = readVarPackedInt(u64, std.mem.asBytes(&st), @bitOffsetOf(T, "b"), 7, builtin.cpu.arch.endian(), .unsigned);
+///
+pub fn readVarPackedInt(
+ comptime T: type,
+ bytes: []const u8,
+ bit_offset: usize,
+ bit_count: usize,
+ endian: std.builtin.Endian,
+ signedness: std.builtin.Signedness,
+) T {
+ const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
+ const iN = std.meta.Int(.signed, @bitSizeOf(T));
+ const Log2N = std.math.Log2Int(T);
+
+ const read_size = (bit_count + (bit_offset % 8) + 7) / 8;
+ const bit_shift = @intCast(u3, bit_offset % 8);
+ const pad = @intCast(Log2N, @bitSizeOf(T) - bit_count);
+
+ const lowest_byte = switch (endian) {
+ .Big => bytes.len - (bit_offset / 8) - read_size,
+ .Little => bit_offset / 8,
+ };
+ const read_bytes = bytes[lowest_byte..][0..read_size];
+
+ if (@bitSizeOf(T) <= 8) {
+ // These are the same shifts/masks we perform below, but adds `@truncate`/`@intCast`
+ // where needed since int is smaller than a byte.
+ const value = if (read_size == 1) b: {
+ break :b @truncate(uN, read_bytes[0] >> bit_shift);
+ } else b: {
+ const i: u1 = @boolToInt(endian == .Big);
+ const head = @truncate(uN, read_bytes[i] >> bit_shift);
+ const tail_shift = @intCast(Log2N, @as(u4, 8) - bit_shift);
+ const tail = @truncate(uN, read_bytes[1 - i]);
+ break :b (tail << tail_shift) | head;
+ };
+ switch (signedness) {
+ .signed => return @intCast(T, (@bitCast(iN, value) << pad) >> pad),
+ .unsigned => return @intCast(T, (@bitCast(uN, value) << pad) >> pad),
+ }
+ }
+
+ // Copy the value out (respecting endianness), accounting for bit_shift
+ var int: uN = 0;
+ switch (endian) {
+ .Big => {
+ for (read_bytes[0 .. read_size - 1]) |elem| {
+ int = elem | (int << 8);
+ }
+ int = (read_bytes[read_size - 1] >> bit_shift) | (int << (@as(u4, 8) - bit_shift));
+ },
+ .Little => {
+ int = read_bytes[0] >> bit_shift;
+ for (read_bytes[1..]) |elem, i| {
+ int |= (@as(uN, elem) << @intCast(Log2N, (8 * (i + 1) - bit_shift)));
+ }
+ },
+ }
+ switch (signedness) {
+ .signed => return @intCast(T, (@bitCast(iN, int) << pad) >> pad),
+ .unsigned => return @intCast(T, (@bitCast(uN, int) << pad) >> pad),
+ }
+}
+
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
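
readVarPackedInt is the runtime-width counterpart of readPackedInt: the field width is an ordinary value rather than part of the result type. A sketch in the spirit of the doc comment (struct layout borrowed from the example above):

    const std = @import("std");
    const builtin = @import("builtin");

    test "readVarPackedInt with a runtime bit width" {
        const S = packed struct(u16) { a: u3, b: u7, c: u6 };
        var st = S{ .a = 1, .b = 0x55, .c = 4 };

        var width: usize = 7; // runtime-known, so the fixed-width readPackedInt cannot be used
        const b = std.mem.readVarPackedInt(u64, std.mem.asBytes(&st), @bitOffsetOf(S, "b"), width, builtin.cpu.arch.endian(), .unsigned);
        try std.testing.expect(b == 0x55);
    }
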
@@ -1365,6 +1436,84 @@ pub fn readInt(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits,
}
}
+fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T {
+ const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
+ const Log2N = std.math.Log2Int(T);
+
+ const bit_count = @as(usize, @bitSizeOf(T));
+ const bit_shift = @intCast(u3, bit_offset % 8);
+
+ const load_size = (bit_count + 7) / 8;
+ const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count);
+ const LoadInt = std.meta.Int(.unsigned, load_size * 8);
+
+ if (bit_count == 0)
+ return 0;
+
+ // Read by loading a LoadInt, and then follow it up with a 1-byte read
+ // of the tail if bit_offset pushed us over a byte boundary.
+ const read_bytes = bytes[bit_offset / 8 ..];
+ const val = @truncate(uN, readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift);
+ if (bit_shift > load_tail_bits) {
+ const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits);
+ const tail_byte = read_bytes[load_size];
+ const tail_truncated = if (bit_count < 8) @truncate(uN, tail_byte) else @as(uN, tail_byte);
+ return @bitCast(T, val | (tail_truncated << (@truncate(Log2N, bit_count) -% tail_bits)));
+ } else return @bitCast(T, val);
+}
+
+fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
+ const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
+ const Log2N = std.math.Log2Int(T);
+
+ const bit_count = @as(usize, @bitSizeOf(T));
+ const bit_shift = @intCast(u3, bit_offset % 8);
+ const byte_count = (@as(usize, bit_shift) + bit_count + 7) / 8;
+
+ const load_size = (bit_count + 7) / 8;
+ const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count);
+ const LoadInt = std.meta.Int(.unsigned, load_size * 8);
+
+ if (bit_count == 0)
+ return 0;
+
+ // Read by loading a LoadInt, and then follow it up with a 1-byte read
+ // of the tail if bit_offset pushed us over a byte boundary.
+ const end = bytes.len - (bit_offset / 8);
+ const read_bytes = bytes[(end - byte_count)..end];
+ const val = @truncate(uN, readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift);
+ if (bit_shift > load_tail_bits) {
+ const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits);
+ const tail_byte = if (bit_count < 8) @truncate(uN, read_bytes[0]) else @as(uN, read_bytes[0]);
+ return @bitCast(T, val | (tail_byte << (@truncate(Log2N, bit_count) -% tail_bits)));
+ } else return @bitCast(T, val);
+}
+
+pub const readPackedIntNative = switch (native_endian) {
+ .Little => readPackedIntLittle,
+ .Big => readPackedIntBig,
+};
+
+pub const readPackedIntForeign = switch (native_endian) {
+ .Little => readPackedIntBig,
+ .Big => readPackedIntLittle,
+};
+
+/// Loads an integer from packed memory.
+/// Asserts that buffer contains at least bit_offset + @bitSizeOf(T) bits.
+///
+/// Example:
+/// const T = packed struct(u16){ a: u3, b: u7, c: u6 };
+/// var st = T{ .a = 1, .b = 2, .c = 4 };
+/// const b_field = readPackedInt(u7, std.mem.asBytes(&st), @bitOffsetOf(T, "b"), builtin.cpu.arch.endian());
+///
+pub fn readPackedInt(comptime T: type, bytes: []const u8, bit_offset: usize, endian: Endian) T {
+ switch (endian) {
+ .Little => return readPackedIntLittle(T, bytes, bit_offset),
+ .Big => return readPackedIntBig(T, bytes, bit_offset),
+ }
+}
+
/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
@@ -1447,6 +1596,100 @@ pub fn writeInt(comptime T: type, buffer: *[@divExact(@typeInfo(T).Int.bits, 8)]
}
}
+pub fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value: T) void {
+ const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
+ const Log2N = std.math.Log2Int(T);
+
+ const bit_count = @as(usize, @bitSizeOf(T));
+ const bit_shift = @intCast(u3, bit_offset % 8);
+
+ const store_size = (@bitSizeOf(T) + 7) / 8;
+ const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count);
+ const StoreInt = std.meta.Int(.unsigned, store_size * 8);
+
+ if (bit_count == 0)
+ return;
+
+ // Write by storing a StoreInt, and then follow it up with a 1-byte tail
+ // if bit_offset pushed us over a byte boundary.
+ const write_bytes = bytes[bit_offset / 8 ..];
+ const head = write_bytes[0] & ((@as(u8, 1) << bit_shift) - 1);
+
+ var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head);
+ if (bit_shift > store_tail_bits) {
+ const tail_len = @intCast(Log2N, bit_shift - store_tail_bits);
+ write_bytes[store_size] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1);
+ write_bytes[store_size] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len)));
+ } else if (bit_shift < store_tail_bits) {
+ const tail_len = store_tail_bits - bit_shift;
+ const tail = write_bytes[store_size - 1] & (@as(u8, 0xfe) << (7 - tail_len));
+ write_value |= @as(StoreInt, tail) << (8 * (store_size - 1));
+ }
+
+ writeIntLittle(StoreInt, write_bytes[0..store_size], write_value);
+}
+
+pub fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T) void {
+ const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
+ const Log2N = std.math.Log2Int(T);
+
+ const bit_count = @as(usize, @bitSizeOf(T));
+ const bit_shift = @intCast(u3, bit_offset % 8);
+ const byte_count = (bit_shift + bit_count + 7) / 8;
+
+ const store_size = (@bitSizeOf(T) + 7) / 8;
+ const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count);
+ const StoreInt = std.meta.Int(.unsigned, store_size * 8);
+
+ if (bit_count == 0)
+ return;
+
+ // Write by storing a StoreInt, and then follow it up with a 1-byte tail
+ // if bit_offset pushed us over a byte boundary.
+ const end = bytes.len - (bit_offset / 8);
+ const write_bytes = bytes[(end - byte_count)..end];
+ const head = write_bytes[byte_count - 1] & ((@as(u8, 1) << bit_shift) - 1);
+
+ var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head);
+ if (bit_shift > store_tail_bits) {
+ const tail_len = @intCast(Log2N, bit_shift - store_tail_bits);
+ write_bytes[0] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1);
+ write_bytes[0] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len)));
+ } else if (bit_shift < store_tail_bits) {
+ const tail_len = store_tail_bits - bit_shift;
+ const tail = write_bytes[0] & (@as(u8, 0xfe) << (7 - tail_len));
+ write_value |= @as(StoreInt, tail) << (8 * (store_size - 1));
+ }
+
+ writeIntBig(StoreInt, write_bytes[(byte_count - store_size)..][0..store_size], write_value);
+}
+
+pub const writePackedIntNative = switch (native_endian) {
+ .Little => writePackedIntLittle,
+ .Big => writePackedIntBig,
+};
+
+pub const writePackedIntForeign = switch (native_endian) {
+ .Little => writePackedIntBig,
+ .Big => writePackedIntLittle,
+};
+
+/// Stores an integer to packed memory.
+/// Asserts that buffer contains at least bit_offset + @bitSizeOf(T) bits.
+///
+/// Example:
+/// const T = packed struct(u16){ a: u3, b: u7, c: u6 };
+/// var st = T{ .a = 1, .b = 2, .c = 4 };
+/// // st.b = 0x7f;
+/// writePackedInt(u7, std.mem.asBytes(&st), @bitOffsetOf(T, "b"), 0x7f, builtin.cpu.arch.endian());
+///
+pub fn writePackedInt(comptime T: type, bytes: []u8, bit_offset: usize, value: T, endian: Endian) void {
+ switch (endian) {
+ .Little => writePackedIntLittle(T, bytes, bit_offset, value),
+ .Big => writePackedIntBig(T, bytes, bit_offset, value),
+ }
+}
+
/// Writes a twos-complement little-endian integer to memory.
/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
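
writePackedInt rewrites only the bits covered by the field, leaving its neighbours intact, which is what the head/tail masking above is for. A sketch:

    const std = @import("std");
    const builtin = @import("builtin");

    test "writePackedInt leaves neighbouring fields untouched" {
        const S = packed struct(u16) { a: u3, b: u7, c: u6 };
        var st = S{ .a = 1, .b = 2, .c = 4 };

        std.mem.writePackedInt(u7, std.mem.asBytes(&st), @bitOffsetOf(S, "b"), 0x7f, builtin.cpu.arch.endian());
        try std.testing.expect(st.b == 0x7f);
        try std.testing.expect(st.a == 1 and st.c == 4);
    }
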
@@ -1523,6 +1766,69 @@ pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: Endian) v
};
}
+/// Stores an integer to packed memory with provided bit_count, bit_offset, and signedness.
+/// If negative, the written value is sign-extended.
+///
+/// Example:
+/// const T = packed struct(u16){ a: u3, b: u7, c: u6 };
+/// var st = T{ .a = 1, .b = 2, .c = 4 };
+/// // st.b = 0x7f;
+/// var value: u64 = 0x7f;
+/// writeVarPackedInt(std.mem.asBytes(&st), @bitOffsetOf(T, "b"), 7, value, builtin.cpu.arch.endian());
+///
+pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value: anytype, endian: std.builtin.Endian) void {
+ const T = @TypeOf(value);
+ const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
+ const Log2N = std.math.Log2Int(T);
+
+ const bit_shift = @intCast(u3, bit_offset % 8);
+ const write_size = (bit_count + bit_shift + 7) / 8;
+ const lowest_byte = switch (endian) {
+ .Big => bytes.len - (bit_offset / 8) - write_size,
+ .Little => bit_offset / 8,
+ };
+ const write_bytes = bytes[lowest_byte..][0..write_size];
+
+ if (write_size == 1) {
+ // Single byte writes are handled specially, since we need to mask bits
+ // on both ends of the byte.
+ const mask = (@as(u8, 0xff) >> @intCast(u3, 8 - bit_count));
+ const new_bits = @intCast(u8, @bitCast(uN, value) & mask) << bit_shift;
+ write_bytes[0] = (write_bytes[0] & ~(mask << bit_shift)) | new_bits;
+ return;
+ }
+
+ var remaining: T = value;
+
+ // Iterate bytes forward for Little-endian, backward for Big-endian
+ const delta: i2 = if (endian == .Big) -1 else 1;
+ const start = if (endian == .Big) @intCast(isize, write_bytes.len - 1) else 0;
+
+ var i: isize = start; // isize for signed index arithmetic
+
+    // Write first byte, using a mask to protect bits preceding bit_offset
+ const head_mask = @as(u8, 0xff) >> bit_shift;
+ write_bytes[@intCast(usize, i)] &= ~(head_mask << bit_shift);
+ write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & head_mask) << bit_shift;
+ remaining >>= @intCast(Log2N, @as(u4, 8) - bit_shift);
+ i += delta;
+
+ // Write bytes[1..bytes.len - 1]
+ if (@bitSizeOf(T) > 8) {
+ const loop_end = start + delta * (@intCast(isize, write_size) - 1);
+ while (i != loop_end) : (i += delta) {
+ write_bytes[@intCast(usize, i)] = @truncate(u8, @bitCast(uN, remaining));
+ remaining >>= 8;
+ }
+ }
+
+ // Write last byte, using a mask to protect bits following bit_offset + bit_count
+ const following_bits = -%@truncate(u3, bit_shift + bit_count);
+ const tail_mask = (@as(u8, 0xff) << following_bits) >> following_bits;
+ write_bytes[@intCast(usize, i)] &= ~tail_mask;
+ write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & tail_mask);
+}
+
test "writeIntBig and writeIntLittle" {
var buf0: [0]u8 = undefined;
var buf1: [1]u8 = undefined;
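
writeVarPackedInt is the runtime-width store: `value` may be wider than the field and is truncated to `bit_count` bits before being written. A sketch with a runtime width (values are arbitrary):

    const std = @import("std");
    const builtin = @import("builtin");

    test "writeVarPackedInt with a runtime bit width" {
        const S = packed struct(u16) { a: u3, b: u7, c: u6 };
        var st = S{ .a = 1, .b = 2, .c = 4 };

        var width: usize = 7;
        std.mem.writeVarPackedInt(std.mem.asBytes(&st), @bitOffsetOf(S, "b"), width, @as(u64, 0x2a), builtin.cpu.arch.endian());
        try std.testing.expect(st.b == 0x2a);
    }
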
@@ -3393,3 +3699,165 @@ pub fn alignInSlice(slice: anytype, comptime new_alignment: usize) ?AlignedSlice
const aligned_slice = bytesAsSlice(Element, aligned_bytes[0..slice_length_bytes]);
return @alignCast(new_alignment, aligned_slice);
}
+
+test "read/write(Var)PackedInt" {
+ switch (builtin.cpu.arch) {
+ // This test generates too much code to execute on WASI.
+ // LLVM backend fails with "too many locals: locals exceed maximum"
+ .wasm32, .wasm64 => return error.SkipZigTest,
+ else => {},
+ }
+
+ const foreign_endian: Endian = if (native_endian == .Big) .Little else .Big;
+ const expect = std.testing.expect;
+ var prng = std.rand.DefaultPrng.init(1234);
+ const random = prng.random();
+
+ @setEvalBranchQuota(10_000);
+ inline for ([_]type{ u8, u16, u32, u128 }) |BackingType| {
+ for ([_]BackingType{
+ @as(BackingType, 0), // all zeros
+ -%@as(BackingType, 1), // all ones
+ random.int(BackingType), // random
+ random.int(BackingType), // random
+ random.int(BackingType), // random
+ }) |init_value| {
+ const uTs = [_]type{ u1, u3, u7, u8, u9, u10, u15, u16, u86 };
+ const iTs = [_]type{ i1, i3, i7, i8, i9, i10, i15, i16, i86 };
+ inline for (uTs ++ iTs) |PackedType| {
+ if (@bitSizeOf(PackedType) > @bitSizeOf(BackingType))
+ continue;
+
+ const iPackedType = std.meta.Int(.signed, @bitSizeOf(PackedType));
+ const uPackedType = std.meta.Int(.unsigned, @bitSizeOf(PackedType));
+ const Log2T = std.math.Log2Int(BackingType);
+
+ const offset_at_end = @bitSizeOf(BackingType) - @bitSizeOf(PackedType);
+ for ([_]usize{ 0, 1, 7, 8, 9, 10, 15, 16, 86, offset_at_end }) |offset| {
+ if (offset > offset_at_end or offset == @bitSizeOf(BackingType))
+ continue;
+
+ for ([_]PackedType{
+ ~@as(PackedType, 0), // all ones: -1 iN / maxInt uN
+ @as(PackedType, 0), // all zeros: 0 iN / 0 uN
+ @bitCast(PackedType, @as(iPackedType, math.maxInt(iPackedType))), // maxInt iN
+                        @bitCast(PackedType, @as(iPackedType, math.minInt(iPackedType))), // minInt iN
+ random.int(PackedType), // random
+ random.int(PackedType), // random
+ }) |write_value| {
+ { // Fixed-size Read/Write (Native-endian)
+
+ // Initialize Value
+ var value: BackingType = init_value;
+
+ // Read
+ const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, native_endian);
+ try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+
+ // Write
+ writePackedInt(PackedType, asBytes(&value), offset, write_value, native_endian);
+ try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+
+ // Read again
+ const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, native_endian);
+ try expect(read_value2 == write_value);
+
+ // Verify bits outside of the target integer are unmodified
+ const diff_bits = init_value ^ value;
+ if (offset != offset_at_end)
+ try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ if (offset != 0)
+ try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ }
+
+ { // Fixed-size Read/Write (Foreign-endian)
+
+ // Initialize Value
+ var value: BackingType = @byteSwap(init_value);
+
+ // Read
+ const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian);
+ try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+
+ // Write
+ writePackedInt(PackedType, asBytes(&value), offset, write_value, foreign_endian);
+ try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+
+ // Read again
+ const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian);
+ try expect(read_value2 == write_value);
+
+ // Verify bits outside of the target integer are unmodified
+ const diff_bits = init_value ^ @byteSwap(value);
+ if (offset != offset_at_end)
+ try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ if (offset != 0)
+ try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ }
+
+ const signedness = @typeInfo(PackedType).Int.signedness;
+ const NextPowerOfTwoInt = std.meta.Int(signedness, comptime try std.math.ceilPowerOfTwo(u16, @bitSizeOf(PackedType)));
+ const ui64 = std.meta.Int(signedness, 64);
+ inline for ([_]type{ PackedType, NextPowerOfTwoInt, ui64 }) |U| {
+ { // Variable-size Read/Write (Native-endian)
+
+ if (@bitSizeOf(U) < @bitSizeOf(PackedType))
+ continue;
+
+ // Initialize Value
+ var value: BackingType = init_value;
+
+ // Read
+ const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness);
+ try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+
+ // Write
+ writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), native_endian);
+ try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+
+ // Read again
+ const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness);
+ try expect(read_value2 == write_value);
+
+ // Verify bits outside of the target integer are unmodified
+ const diff_bits = init_value ^ value;
+ if (offset != offset_at_end)
+ try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ if (offset != 0)
+ try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ }
+
+ { // Variable-size Read/Write (Foreign-endian)
+
+ if (@bitSizeOf(U) < @bitSizeOf(PackedType))
+ continue;
+
+ // Initialize Value
+ var value: BackingType = @byteSwap(init_value);
+
+ // Read
+ const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness);
+ try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+
+ // Write
+ writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), foreign_endian);
+ try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+
+ // Read again
+ const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness);
+ try expect(read_value2 == write_value);
+
+ // Verify bits outside of the target integer are unmodified
+ const diff_bits = init_value ^ @byteSwap(value);
+ if (offset != offset_at_end)
+ try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+ if (offset != 0)
+ try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index 5997a06b9f..a574f8f37d 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -286,7 +286,8 @@ pub fn allocAdvancedWithRetAddr(
} else @alignOf(T);
if (n == 0) {
- return @as([*]align(a) T, undefined)[0..0];
+ const ptr = comptime std.mem.alignBackward(std.math.maxInt(usize), a);
+ return @intToPtr([*]align(a) T, ptr)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
@@ -383,7 +384,8 @@ pub fn reallocAdvancedWithRetAddr(
}
if (new_n == 0) {
self.free(old_mem);
- return @as([*]align(new_alignment) T, undefined)[0..0];
+ const ptr = comptime std.mem.alignBackward(std.math.maxInt(usize), new_alignment);
+ return @intToPtr([*]align(new_alignment) T, ptr)[0..0];
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
@@ -462,7 +464,8 @@ pub fn alignedShrinkWithRetAddr(
return old_mem;
if (new_n == 0) {
self.free(old_mem);
- return @as([*]align(new_alignment) T, undefined)[0..0];
+ const ptr = comptime std.mem.alignBackward(std.math.maxInt(usize), new_alignment);
+ return @intToPtr([*]align(new_alignment) T, ptr)[0..0];
}
assert(new_n < old_mem.len);
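
The three hunks above replace the `undefined` pointer in the zero-length case with a fixed, maximally aligned address (alignBackward of maxInt(usize)), so the returned empty slice carries a pointer that is defined to inspect. A sketch of the observable behaviour:

    const std = @import("std");

    test "zero-length allocations carry a defined, aligned pointer" {
        const empty = try std.testing.allocator.alloc(u64, 0);
        defer std.testing.allocator.free(empty);

        try std.testing.expect(empty.len == 0);
        // No longer `undefined`: comparing or printing the pointer is well-defined.
        try std.testing.expect(@ptrToInt(empty.ptr) % @alignOf(u64) == 0);
    }
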
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 74ffd4de12..e69a2a7943 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -366,6 +366,56 @@ pub fn fchown(fd: fd_t, owner: ?uid_t, group: ?gid_t) FChownError!void {
}
}
+pub const RebootError = error{
+ PermissionDenied,
+} || UnexpectedError;
+
+pub const RebootCommand = switch (builtin.os.tag) {
+ .linux => union(linux.LINUX_REBOOT.CMD) {
+ RESTART: void,
+ HALT: void,
+ CAD_ON: void,
+ CAD_OFF: void,
+ POWER_OFF: void,
+ RESTART2: [*:0]const u8,
+ SW_SUSPEND: void,
+ KEXEC: void,
+ },
+ else => @compileError("Unsupported OS"),
+};
+
+pub fn reboot(cmd: RebootCommand) RebootError!void {
+ switch (builtin.os.tag) {
+ .linux => {
+ switch (system.getErrno(linux.reboot(
+ .MAGIC1,
+ .MAGIC2,
+ @as(linux.LINUX_REBOOT.CMD, cmd),
+ switch (cmd) {
+ .RESTART2 => |s| s,
+ else => null,
+ },
+ ))) {
+ .SUCCESS => {},
+ .PERM => return error.PermissionDenied,
+ else => |err| return std.os.unexpectedErrno(err),
+ }
+ switch (cmd) {
+ .CAD_OFF => {},
+ .CAD_ON => {},
+ .SW_SUSPEND => {},
+
+ .HALT => unreachable,
+ .KEXEC => unreachable,
+ .POWER_OFF => unreachable,
+ .RESTART => unreachable,
+ .RESTART2 => unreachable,
+ }
+ },
+ else => @compileError("Unsupported OS"),
+ }
+}
+
pub const GetRandomError = OpenError;
/// Obtain a series of random bytes. These bytes can be used to seed user-space
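
The reboot wrapper above models the command as a tagged union so that RESTART2 can carry its command-line string, and maps errno to a Zig error set. A hedged sketch of how a caller might use it (the helper names are made up; the calls require CAP_SYS_BOOT on Linux):

    const std = @import("std");

    /// Hypothetical helper: stop Ctrl-Alt-Del from restarting the machine.
    fn disableCtrlAltDel() !void {
        // CAD_ON/CAD_OFF return normally on success.
        try std.os.reboot(.CAD_OFF);
    }

    /// Hypothetical helper: restart with an explicit command string.
    fn restartWith(cmdline: [*:0]const u8) !void {
        try std.os.reboot(.{ .RESTART2 = cmdline });
        unreachable; // RESTART2 does not return on success.
    }
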
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 8ca20bc330..da9ea74327 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -804,6 +804,63 @@ pub fn exit_group(status: i32) noreturn {
unreachable;
}
+/// Flags for the `reboot` system call.
+pub const LINUX_REBOOT = struct {
+ /// First magic value required to use _reboot() system call.
+ pub const MAGIC1 = enum(u32) {
+ MAGIC1 = 0xfee1dead,
+ _,
+ };
+
+ /// Second magic value required to use _reboot() system call.
+ pub const MAGIC2 = enum(u32) {
+ MAGIC2 = 672274793,
+ MAGIC2A = 85072278,
+ MAGIC2B = 369367448,
+ MAGIC2C = 537993216,
+ _,
+ };
+
+ /// Commands accepted by the _reboot() system call.
+ pub const CMD = enum(u32) {
+ /// Restart system using default command and mode.
+ RESTART = 0x01234567,
+
+ /// Stop OS and give system control to ROM monitor, if any.
+ HALT = 0xCDEF0123,
+
+ /// Ctrl-Alt-Del sequence causes RESTART command.
+ CAD_ON = 0x89ABCDEF,
+
+ /// Ctrl-Alt-Del sequence sends SIGINT to init task.
+ CAD_OFF = 0x00000000,
+
+ /// Stop OS and remove all power from system, if possible.
+ POWER_OFF = 0x4321FEDC,
+
+ /// Restart system using given command string.
+ RESTART2 = 0xA1B2C3D4,
+
+ /// Suspend system using software suspend if compiled in.
+ SW_SUSPEND = 0xD000FCE2,
+
+        /// Restart system using a previously loaded Linux kernel.
+ KEXEC = 0x45584543,
+
+ _,
+ };
+};
+
+pub fn reboot(magic: LINUX_REBOOT.MAGIC1, magic2: LINUX_REBOOT.MAGIC2, cmd: LINUX_REBOOT.CMD, arg: ?*const anyopaque) usize {
+ return std.os.linux.syscall4(
+ .reboot,
+ @enumToInt(magic),
+ @enumToInt(magic2),
+ @enumToInt(cmd),
+ @ptrToInt(arg),
+ );
+}
+
pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize {
return syscall3(.getrandom, @ptrToInt(buf), count, flags);
}
@@ -3208,6 +3265,21 @@ pub const sockaddr = extern struct {
queue_id: u32,
shared_umem_fd: u32,
};
+
+ /// Address structure for vSockets
+ pub const vm = extern struct {
+ family: sa_family_t = AF.VSOCK,
+ reserved1: u16 = 0,
+ port: u32,
+ cid: u32,
+ flags: u8,
+
+ /// The total size of this structure should be exactly the same as that of struct sockaddr.
+ zero: [3]u8 = [_]u8{0} ** 3,
+ comptime {
+ std.debug.assert(@sizeOf(vm) == @sizeOf(sockaddr));
+ }
+ };
};
pub const mmsghdr = extern struct {
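
A minimal construction sketch for the new vsock address type (assumptions: CID 2 is the well-known host CID per vsock(7), and the port number here is made up):

    const std = @import("std");
    const linux = std.os.linux;

    test "fill in a sockaddr.vm" {
        const addr = linux.sockaddr.vm{
            .port = 1234, // vsock ports are 32-bit and unrelated to TCP/UDP ports
            .cid = 2, // host CID per vsock(7)
            .flags = 0,
        };
        // The family defaults to AF.VSOCK and the comptime assert above
        // guarantees the layout matches `sockaddr` in size.
        try std.testing.expectEqual(@as(u16, linux.AF.VSOCK), addr.family);
    }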
diff --git a/lib/std/target.zig b/lib/std/target.zig
index d791e3b035..342e535c27 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -276,14 +276,14 @@ pub const Target = struct {
.macos => return switch (arch) {
.aarch64 => VersionRange{
.semver = .{
- .min = .{ .major = 11, .minor = 6, .patch = 6 },
- .max = .{ .major = 12, .minor = 4 },
+ .min = .{ .major = 11, .minor = 7, .patch = 1 },
+ .max = .{ .major = 13, .minor = 0 },
},
},
.x86_64 => VersionRange{
.semver = .{
- .min = .{ .major = 10, .minor = 15, .patch = 7 },
- .max = .{ .major = 12, .minor = 4 },
+ .min = .{ .major = 11, .minor = 7, .patch = 1 },
+ .max = .{ .major = 13, .minor = 0 },
},
},
else => unreachable,
@@ -1780,71 +1780,6 @@ pub const Target = struct {
};
}
- pub inline fn longDoubleIs(target: Target, comptime F: type) bool {
- if (target.abi == .msvc or (target.abi == .android and target.cpu.arch == .i386)) {
- return F == f64;
- }
- return switch (F) {
- f128 => switch (target.cpu.arch) {
- .aarch64 => {
- // According to Apple's official guide:
- // > The long double type is a double precision IEEE754 binary floating-point type,
- // > which makes it identical to the double type. This behavior contrasts to the
- // > standard specification, in which a long double is a quad-precision, IEEE754
- // > binary, floating-point type.
- // https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms
- return !target.isDarwin();
- },
-
- .riscv64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .mips64,
- .mips64el,
- .sparc,
- .sparc64,
- .sparcel,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- .wasm32,
- .wasm64,
- => true,
-
- else => false,
- },
- f80 => switch (target.cpu.arch) {
- .x86_64, .i386 => true,
- else => false,
- },
- f64 => switch (target.cpu.arch) {
- .aarch64 => target.isDarwin(),
-
- .x86_64,
- .i386,
- .riscv64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .mips64,
- .mips64el,
- .sparc,
- .sparc64,
- .sparcel,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- => false,
-
- else => true,
- },
- else => false,
- };
- }
-
pub inline fn maxIntAlignment(target: Target) u16 {
return switch (target.cpu.arch) {
.avr => 1,
@@ -1872,7 +1807,7 @@ pub const Target = struct {
=> 8,
.i386 => return switch (target.os.tag) {
- .windows => 8,
+ .windows, .uefi => 8,
else => 4,
},
diff --git a/lib/std/x/net/bpf.zig b/lib/std/x/net/bpf.zig
index e8db9a3e0e..8fd318b03b 100644
--- a/lib/std/x/net/bpf.zig
+++ b/lib/std/x/net/bpf.zig
@@ -691,14 +691,14 @@ test "tcpdump filter" {
);
}
-fn expectPass(data: anytype, filter: []Insn) !void {
+fn expectPass(data: anytype, filter: []const Insn) !void {
try expectEqual(
@as(u32, 0),
try simulate(mem.asBytes(data), filter, .Big),
);
}
-fn expectFail(expected_error: anyerror, data: anytype, filter: []Insn) !void {
+fn expectFail(expected_error: anyerror, data: anytype, filter: []const Insn) !void {
try expectError(
expected_error,
simulate(mem.asBytes(data), filter, native_endian),
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index 6bc664f04c..664cb09ae4 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -40,6 +40,17 @@ pub fn cast(comptime DestType: type, target: anytype) DestType {
.Fn => {
return castInt(DestType, @ptrToInt(&target));
},
+ .Bool => {
+ return @boolToInt(target);
+ },
+ else => {},
+ }
+ },
+ .Float => {
+ switch (@typeInfo(SourceType)) {
+ .Int => return @intToFloat(DestType, target),
+ .Float => return @floatCast(DestType, target),
+ .Bool => return @intToFloat(DestType, @boolToInt(target)),
else => {},
}
},
@@ -446,6 +457,121 @@ pub const Macros = struct {
}
};
+/// Integer promotion described in C11 6.3.1.1.2
+fn PromotedIntType(comptime T: type) type {
+ return switch (T) {
+ bool, u8, i8, c_short => c_int,
+ c_ushort => if (@sizeOf(c_ushort) == @sizeOf(c_int)) c_uint else c_int,
+ c_int, c_uint, c_long, c_ulong, c_longlong, c_ulonglong => T,
+ else => if (T == comptime_int) {
+ @compileError("Cannot promote `" ++ @typeName(T) ++ "`; a fixed-size number type is required");
+ } else if (@typeInfo(T) == .Int) {
+ @compileError("Cannot promote `" ++ @typeName(T) ++ "`; a C ABI type is required");
+ } else {
+ @compileError("Attempted to promote invalid type `" ++ @typeName(T) ++ "`");
+ },
+ };
+}
+
+/// C11 6.3.1.1.1
+fn integerRank(comptime T: type) u8 {
+ return switch (T) {
+ bool => 0,
+ u8, i8 => 1,
+ c_short, c_ushort => 2,
+ c_int, c_uint => 3,
+ c_long, c_ulong => 4,
+ c_longlong, c_ulonglong => 5,
+ else => @compileError("integer rank not supported for `" ++ @typeName(T) ++ "`"),
+ };
+}
+
+fn ToUnsigned(comptime T: type) type {
+ return switch (T) {
+ c_int => c_uint,
+ c_long => c_ulong,
+ c_longlong => c_ulonglong,
+ else => @compileError("Cannot convert `" ++ @typeName(T) ++ "` to unsigned"),
+ };
+}
+
+/// "Usual arithmetic conversions" from C11 standard 6.3.1.8
+fn ArithmeticConversion(comptime A: type, comptime B: type) type {
+ if (A == c_longdouble or B == c_longdouble) return c_longdouble;
+ if (A == f80 or B == f80) return f80;
+ if (A == f64 or B == f64) return f64;
+ if (A == f32 or B == f32) return f32;
+
+ const A_Promoted = PromotedIntType(A);
+ const B_Promoted = PromotedIntType(B);
+ comptime {
+ std.debug.assert(integerRank(A_Promoted) >= integerRank(c_int));
+ std.debug.assert(integerRank(B_Promoted) >= integerRank(c_int));
+ }
+
+ if (A_Promoted == B_Promoted) return A_Promoted;
+
+ const a_signed = @typeInfo(A_Promoted).Int.signedness == .signed;
+ const b_signed = @typeInfo(B_Promoted).Int.signedness == .signed;
+
+ if (a_signed == b_signed) {
+ return if (integerRank(A_Promoted) > integerRank(B_Promoted)) A_Promoted else B_Promoted;
+ }
+
+ const SignedType = if (a_signed) A_Promoted else B_Promoted;
+ const UnsignedType = if (!a_signed) A_Promoted else B_Promoted;
+
+ if (integerRank(UnsignedType) >= integerRank(SignedType)) return UnsignedType;
+
+ if (std.math.maxInt(SignedType) >= std.math.maxInt(UnsignedType)) return SignedType;
+
+ return ToUnsigned(SignedType);
+}
+
+test "ArithmeticConversion" {
+ // Promotions are not necessarily the same on other platforms.
+ if (builtin.target.cpu.arch != .x86_64 or builtin.target.os.tag != .linux) return error.SkipZigTest;
+
+ const Test = struct {
+ /// Order of operands should not matter for arithmetic conversions
+ fn checkPromotion(comptime A: type, comptime B: type, comptime Expected: type) !void {
+ try std.testing.expect(ArithmeticConversion(A, B) == Expected);
+ try std.testing.expect(ArithmeticConversion(B, A) == Expected);
+ }
+ };
+
+ try Test.checkPromotion(c_longdouble, c_int, c_longdouble);
+ try Test.checkPromotion(c_int, f64, f64);
+ try Test.checkPromotion(f32, bool, f32);
+
+ try Test.checkPromotion(bool, c_short, c_int);
+ try Test.checkPromotion(c_int, c_int, c_int);
+ try Test.checkPromotion(c_short, c_int, c_int);
+
+ try Test.checkPromotion(c_int, c_long, c_long);
+
+ try Test.checkPromotion(c_ulonglong, c_uint, c_ulonglong);
+
+ try Test.checkPromotion(c_uint, c_int, c_uint);
+
+ try Test.checkPromotion(c_uint, c_long, c_long);
+
+ try Test.checkPromotion(c_ulong, c_longlong, c_ulonglong);
+}
+
+pub const MacroArithmetic = struct {
+ pub fn div(a: anytype, b: anytype) ArithmeticConversion(@TypeOf(a), @TypeOf(b)) {
+ const ResType = ArithmeticConversion(@TypeOf(a), @TypeOf(b));
+ const a_casted = cast(ResType, a);
+ const b_casted = cast(ResType, b);
+ switch (@typeInfo(ResType)) {
+ .Float => return a_casted / b_casted,
+ .Int => return @divTrunc(a_casted, b_casted),
+ else => unreachable,
+ }
+ }
+};
+
test "Macro suffix functions" {
try testing.expect(@TypeOf(Macros.F_SUFFIX(1)) == f32);
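
A small usage sketch for the new `MacroArithmetic.div` helper (these particular operands behave the same on common targets, so the x86_64-linux caveat from the test above does not apply): integer operands go through the usual arithmetic conversions and divide with truncation toward zero, while a float operand forces float division.

    const std = @import("std");
    const c_translation = std.zig.c_translation;

    test "MacroArithmetic.div sketch" {
        // c_int / c_int: truncating integer division, as in C.
        try std.testing.expectEqual(@as(c_int, -3), c_translation.MacroArithmetic.div(@as(c_int, -7), @as(c_int, 2)));
        // Any float operand converts both sides and divides exactly.
        try std.testing.expectEqual(@as(f64, 3.5), c_translation.MacroArithmetic.div(@as(f64, 7), @as(c_int, 2)));
    }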
diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig
index 7a31dfa44f..c7b3f73f89 100644
--- a/lib/std/zig/system/NativeTargetInfo.zig
+++ b/lib/std/zig/system/NativeTargetInfo.zig
@@ -20,7 +20,6 @@ dynamic_linker: DynamicLinker = DynamicLinker{},
pub const DynamicLinker = Target.DynamicLinker;
pub const DetectError = error{
- OutOfMemory,
FileSystem,
SystemResources,
SymLinkLoop,