author    Andrew Kelley <andrew@ziglang.org>    2021-03-04 18:23:11 -0700
committer Andrew Kelley <andrew@ziglang.org>    2021-03-04 18:23:11 -0700
commit    bccef441963434b452a623abbb9315fd92c1e255 (patch)
tree      4c0ea89b8fa895b3d34749ec089e45438d79466b /lib
parent    0c06a1885fad9a9bb85342632a0b7c8a3a0733e9 (diff)
parent    041212a41cfaf029dc3eb9740467b721c76f406c (diff)
Merge remote-tracking branch 'origin/master' into llvm12
Syncing with master branch because I want to re-run update_clang_options.zig in the llvm12 branch.
Diffstat (limited to 'lib')
-rw-r--r--  lib/std/bit_set.zig                    1255
-rw-r--r--  lib/std/build/emit_raw.zig                6
-rw-r--r--  lib/std/c/tokenizer.zig                  32
-rw-r--r--  lib/std/crypto/25519/curve25519.zig       4
-rw-r--r--  lib/std/crypto/25519/ed25519.zig          6
-rw-r--r--  lib/std/crypto/25519/edwards25519.zig     2
-rw-r--r--  lib/std/crypto/25519/ristretto255.zig     8
-rw-r--r--  lib/std/crypto/25519/scalar.zig           6
-rw-r--r--  lib/std/crypto/chacha20.zig               4
-rw-r--r--  lib/std/debug.zig                        20
-rw-r--r--  lib/std/elf.zig                         355
-rw-r--r--  lib/std/fmt.zig                         130
-rw-r--r--  lib/std/fs/get_app_data_dir.zig           2
-rw-r--r--  lib/std/io/writer.zig                     7
-rw-r--r--  lib/std/math.zig                         56
-rw-r--r--  lib/std/mem.zig                           8
-rw-r--r--  lib/std/multi_array_list.zig              2
-rw-r--r--  lib/std/os/bits/haiku.zig                13
-rw-r--r--  lib/std/os/bits/linux.zig                 5
-rw-r--r--  lib/std/os/linux/io_uring.zig            10
-rw-r--r--  lib/std/std.zig                           4
-rw-r--r--  lib/std/zig/parse.zig                    32
-rw-r--r--  lib/std/zig/parser_test.zig              31
-rw-r--r--  lib/std/zig/render.zig                   28
24 files changed, 1750 insertions(+), 276 deletions(-)
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
new file mode 100644
index 0000000000..29ad0d7963
--- /dev/null
+++ b/lib/std/bit_set.zig
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2021 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
+//! This file defines several variants of bit sets. A bit set
+//! is a densely stored set of integers with a known maximum,
+//! in which each integer gets a single bit. Bit sets have very
+//! fast presence checks, update operations, and union and intersection
+//! operations. However, if the number of possible items is very
+//! large and the number of actual items in a given set is usually
+//! small, they may be less memory efficient than an array set.
+//!
+//! There are five variants defined here:
+//!
+//! IntegerBitSet:
+//! A bit set with static size, which is backed by a single integer.
+//! This set is good for sets with a small size, but may generate
+//! inefficient code for larger sets, especially in debug mode.
+//!
+//! ArrayBitSet:
+//! A bit set with static size, which is backed by an array of usize.
+//! This set is good for sets with a larger size, but may use
+//! more bytes than necessary if your set is small.
+//!
+//! StaticBitSet:
+//! Picks either IntegerBitSet or ArrayBitSet depending on the requested
+//! size. The interfaces of these two types match exactly, except for fields.
+//!
+//! DynamicBitSet:
+//! A bit set with runtime known size, backed by an allocated slice
+//! of usize.
+//!
+//! DynamicBitSetUnmanaged:
+//! A variant of DynamicBitSet which does not store a pointer to its
+//! allocator, in order to save space.
+
+const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+/// Returns the optimal static bit set type for the specified number
+/// of elements. The returned type will perform no allocations,
+/// can be copied by value, and does not require deinitialization.
+/// Both possible implementations fulfill the same interface.
+pub fn StaticBitSet(comptime size: usize) type {
+ if (size <= @bitSizeOf(usize)) {
+ return IntegerBitSet(size);
+ } else {
+ return ArrayBitSet(usize, size);
+ }
+}
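
// Editorial sketch, not part of this commit: basic StaticBitSet usage,
// assuming it sits in this file where StaticBitSet is in scope.
test "StaticBitSet usage sketch" {
    // At or below @bitSizeOf(usize) bits this resolves to IntegerBitSet...
    var small = StaticBitSet(8).initEmpty();
    small.set(3);
    std.testing.expect(small.isSet(3));
    std.testing.expectEqual(@as(usize, 1), small.count());

    // ...above that it resolves to ArrayBitSet(usize, ...) with the same interface.
    var large = StaticBitSet(200).initFull();
    large.unset(150);
    std.testing.expectEqual(@as(usize, 199), large.count());
}
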
+
+/// A bit set with static size, which is backed by a single integer.
+/// This set is good for sets with a small size, but may generate
+/// inefficient code for larger sets, especially in debug mode.
+pub fn IntegerBitSet(comptime size: u16) type {
+ return struct {
+ const Self = @This();
+
+ // TODO: Make this a comptime field once those are fixed
+ /// The number of items in this bit set
+ pub const bit_length: usize = size;
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = std.meta.Int(.unsigned, size);
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ /// The bit mask, as a single integer
+ mask: MaskInt,
+
+ /// Creates a bit set with no elements present.
+ pub fn initEmpty() Self {
+ return .{ .mask = 0 };
+ }
+
+ /// Creates a bit set with all elements present.
+ pub fn initFull() Self {
+ return .{ .mask = ~@as(MaskInt, 0) };
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return bit_length;
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ assert(index < bit_length);
+ return (self.mask & maskBit(index)) != 0;
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ return @popCount(MaskInt, self.mask);
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ assert(index < bit_length);
+ if (MaskInt == u0) return;
+ const bit = maskBit(index);
+ const new_bit = bit & std.math.boolMask(MaskInt, value);
+ self.mask = (self.mask & ~bit) | new_bit;
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ self.mask |= maskBit(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ // Workaround for #7953
+ if (MaskInt == u0) return;
+ self.mask &= ~maskBit(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ self.mask ^= maskBit(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ self.mask ^= toggles.mask;
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ self.mask = ~self.mask;
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ pub fn setUnion(self: *Self, other: Self) void {
+ self.mask |= other.mask;
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ self.mask &= other.mask;
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ const mask = self.mask;
+ if (mask == 0) return null;
+ return @ctz(MaskInt, mask);
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ const mask = self.mask;
+ if (mask == 0) return null;
+ const index = @ctz(MaskInt, mask);
+ self.mask = mask & (mask - 1);
+ return index;
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator.
+ pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options.direction) {
+ return .{
+ .bits_remain = switch (options.kind) {
+ .set => self.mask,
+ .unset => ~self.mask,
+ },
+ };
+ }
+
+ fn Iterator(comptime direction: IteratorOptions.Direction) type {
+ return struct {
+ const IterSelf = @This();
+ // all bits which have not yet been iterated over
+ bits_remain: MaskInt,
+
+ /// Returns the index of the next unvisited set bit
+ /// in the bit set, in ascending order.
+ pub fn next(self: *IterSelf) ?usize {
+ if (self.bits_remain == 0) return null;
+
+ switch (direction) {
+ .forward => {
+ const next_index = @ctz(MaskInt, self.bits_remain);
+ self.bits_remain &= self.bits_remain - 1;
+ return next_index;
+ },
+ .reverse => {
+ const leading_zeroes = @clz(MaskInt, self.bits_remain);
+ const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
+ self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+ return top_bit;
+ },
+ }
+ }
+ };
+ }
+
+ fn maskBit(index: usize) MaskInt {
+ if (MaskInt == u0) return 0;
+ return @as(MaskInt, 1) << @intCast(ShiftInt, index);
+ }
+ fn boolMaskBit(index: usize, value: bool) MaskInt {
+ if (MaskInt == u0) return 0;
+ return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index);
+ }
+ };
+}
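
// Editorial sketch, not part of this commit: finding and clearing set bits,
// assuming it sits in this file where IntegerBitSet is in scope.
test "IntegerBitSet findFirstSet sketch" {
    var bits = IntegerBitSet(16).initEmpty();
    bits.set(5);
    bits.set(11);

    std.testing.expectEqual(@as(?usize, 5), bits.findFirstSet());
    std.testing.expectEqual(@as(?usize, 5), bits.toggleFirstSet()); // also clears bit 5
    std.testing.expectEqual(@as(?usize, 11), bits.findFirstSet());
}
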
+
+/// A bit set with static size, which is backed by an array of usize.
+/// This set is good for sets with a larger size, but may use
+/// more bytes than necessary if your set is small.
+pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
+ const mask_info: std.builtin.TypeInfo = @typeInfo(MaskIntType);
+
+ // Make sure the mask int is indeed an int
+ if (mask_info != .Int) @compileError("ArrayBitSet can only operate on integer masks, but was passed " ++ @typeName(MaskIntType));
+
+ // It must also be unsigned.
+ if (mask_info.Int.signedness != .unsigned) @compileError("ArrayBitSet requires an unsigned integer mask type, but was passed " ++ @typeName(MaskIntType));
+
+ // And it must not be empty.
+ if (MaskIntType == u0)
+ @compileError("ArrayBitSet requires a sized integer for its mask int. u0 does not work.");
+
+ const byte_size = std.mem.byte_size_in_bits;
+
+ // We use shift and truncate to decompose indices into mask indices and bit indices.
+ // This operation requires that the mask has an exact power of two number of bits.
+ if (!std.math.isPowerOfTwo(@bitSizeOf(MaskIntType))) {
+ var desired_bits = std.math.ceilPowerOfTwoAssert(usize, @bitSizeOf(MaskIntType));
+ if (desired_bits < byte_size) desired_bits = byte_size;
+ const FixedMaskType = std.meta.Int(.unsigned, desired_bits);
+ @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++
+ ", which is not a power of two. Please round this up to a power of two integer size (i.e. " ++ @typeName(FixedMaskType) ++ ").");
+ }
+
+ // Make sure the integer has no padding bits.
+ // Those would be wasteful here and are probably a mistake by the user.
+ // This case may be hit with small powers of two, like u4.
+ if (@bitSizeOf(MaskIntType) != @sizeOf(MaskIntType) * byte_size) {
+ var desired_bits = @sizeOf(MaskIntType) * byte_size;
+ desired_bits = std.math.ceilPowerOfTwoAssert(usize, desired_bits);
+ const FixedMaskType = std.meta.Int(.unsigned, desired_bits);
+ @compileError("ArrayBitSet was passed integer type " ++ @typeName(MaskIntType) ++
+ ", which contains padding bits. Please round this up to an unpadded integer size (i.e. " ++ @typeName(FixedMaskType) ++ ").");
+ }
+
+ return struct {
+ const Self = @This();
+
+ // TODO: Make this a comptime field once those are fixed
+ /// The number of items in this bit set
+ pub const bit_length: usize = size;
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = MaskIntType;
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ // bits in one mask
+ const mask_len = @bitSizeOf(MaskInt);
+ // total number of masks
+ const num_masks = (size + mask_len - 1) / mask_len;
+ // padding bits in the last mask (may be 0)
+ const last_pad_bits = mask_len * num_masks - size;
+ // Mask of valid bits in the last mask.
+ // All functions will ensure that the invalid
+ // bits in the last mask are zero.
+ pub const last_item_mask = ~@as(MaskInt, 0) >> last_pad_bits;
+
+ /// The bit masks, ordered with lower indices first.
+ /// Padding bits at the end are undefined.
+ masks: [num_masks]MaskInt,
+
+ /// Creates a bit set with no elements present.
+ pub fn initEmpty() Self {
+ return .{ .masks = [_]MaskInt{0} ** num_masks };
+ }
+
+ /// Creates a bit set with all elements present.
+ pub fn initFull() Self {
+ if (num_masks == 0) {
+ return .{ .masks = .{} };
+ } else {
+ return .{ .masks = [_]MaskInt{~@as(MaskInt, 0)} ** (num_masks - 1) ++ [_]MaskInt{last_item_mask} };
+ }
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return bit_length;
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ assert(index < bit_length);
+ if (num_masks == 0) return false; // doesn't compile in this case
+ return (self.masks[maskIndex(index)] & maskBit(index)) != 0;
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ var total: usize = 0;
+ for (self.masks) |mask| {
+ total += @popCount(MaskInt, mask);
+ }
+ return total;
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ const bit = maskBit(index);
+ const mask_index = maskIndex(index);
+ const new_bit = bit & std.math.boolMask(MaskInt, value);
+ self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit;
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ self.masks[maskIndex(index)] |= maskBit(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ self.masks[maskIndex(index)] &= ~maskBit(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ assert(index < bit_length);
+ if (num_masks == 0) return; // doesn't compile in this case
+ self.masks[maskIndex(index)] ^= maskBit(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* ^= toggles.masks[i];
+ }
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* = ~mask.*;
+ }
+
+ // Zero the padding bits
+ if (num_masks > 0) {
+ self.masks[num_masks - 1] &= last_item_mask;
+ }
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ pub fn setUnion(self: *Self, other: Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* |= other.masks[i];
+ }
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ for (self.masks) |*mask, i| {
+ mask.* &= other.masks[i];
+ }
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ var offset: usize = 0;
+ const mask = for (self.masks) |mask| {
+ if (mask != 0) break mask;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ return offset + @ctz(MaskInt, mask);
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ var offset: usize = 0;
+ const mask = for (self.masks) |*mask| {
+ if (mask.* != 0) break mask;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ const index = @ctz(MaskInt, mask.*);
+ mask.* &= (mask.* - 1);
+ return offset + index;
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator.
+ pub fn iterator(self: *const Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
+ return BitSetIterator(MaskInt, options).init(&self.masks, last_item_mask);
+ }
+
+ fn maskBit(index: usize) MaskInt {
+ return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+ }
+ fn maskIndex(index: usize) usize {
+ return index >> @bitSizeOf(ShiftInt);
+ }
+ fn boolMaskBit(index: usize, value: bool) MaskInt {
+ return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index);
+ }
+ };
+}
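
// Editorial sketch, not part of this commit: union and intersection on
// ArrayBitSet, assuming it sits in this file where ArrayBitSet is in scope.
test "ArrayBitSet set algebra sketch" {
    const Set = ArrayBitSet(usize, 100);
    var a = Set.initEmpty();
    var b = Set.initEmpty();
    a.set(1);
    a.set(70);
    b.set(70);
    b.set(99);

    a.setUnion(b); // a is now {1, 70, 99}
    std.testing.expectEqual(@as(usize, 3), a.count());

    a.setIntersection(b); // a is now {70, 99}
    std.testing.expectEqual(@as(usize, 2), a.count());
    std.testing.expect(!a.isSet(1));
}
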
+
+/// A bit set with runtime known size, backed by an allocated slice
+/// of usize. The allocator must be tracked externally by the user.
+pub const DynamicBitSetUnmanaged = struct {
+ const Self = @This();
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = usize;
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ /// The number of valid items in this bit set
+ bit_length: usize = 0,
+
+ /// The bit masks, ordered with lower indices first.
+ /// Padding bits at the end must be zeroed.
+ masks: [*]MaskInt = empty_masks_ptr,
+ // This pointer is one usize after the actual allocation.
+ // That slot holds the size of the true allocation, which
+ // is needed by Zig's allocator interface in case a shrink
+ // fails.
+
+ // Don't modify this value. Ideally it would go in const data so
+ // modifications would cause a bus error, but the only way
+ // to discard a const qualifier is through ptrToInt, which
+ // cannot currently round trip at comptime.
+ var empty_masks_data = [_]MaskInt{ 0, undefined };
+ const empty_masks_ptr = empty_masks_data[1..2];
+
+ /// Creates a bit set with no elements present.
+ /// If bit_length is not zero, deinit must eventually be called.
+ pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ var self = Self{};
+ try self.resize(bit_length, false, allocator);
+ return self;
+ }
+
+ /// Creates a bit set with all elements present.
+ /// If bit_length is not zero, deinit must eventually be called.
+ pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ var self = Self{};
+ try self.resize(bit_length, true, allocator);
+ return self;
+ }
+
+ /// Resizes to a new bit_length. If the new length is larger
+ /// than the old length, fills any added bits with `fill`.
+ /// If new_len is not zero, deinit must eventually be called.
+ pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void {
+ const old_len = self.bit_length;
+
+ const old_masks = numMasks(old_len);
+ const new_masks = numMasks(new_len);
+
+ const old_allocation = (self.masks - 1)[0..(self.masks - 1)[0]];
+
+ if (new_masks == 0) {
+ assert(new_len == 0);
+ allocator.free(old_allocation);
+ self.masks = empty_masks_ptr;
+ self.bit_length = 0;
+ return;
+ }
+
+ if (old_allocation.len != new_masks + 1) realloc: {
+ // If realloc fails, it may mean one of two things.
+ // If we are growing, it means we are out of memory.
+ // If we are shrinking, it means the allocator doesn't
+ // want to move the allocation. This means we need to
+ // hold on to the extra 8 bytes required to be able to free
+ // this allocation properly.
+ const new_allocation = allocator.realloc(old_allocation, new_masks + 1) catch |err| {
+ if (new_masks + 1 > old_allocation.len) return err;
+ break :realloc;
+ };
+
+ new_allocation[0] = new_allocation.len;
+ self.masks = new_allocation.ptr + 1;
+ }
+
+ // If we increased in size, we need to set any new bits
+ // to the fill value.
+ if (new_len > old_len) {
+ // set the padding bits in the old last item to 1
+ if (fill and old_masks > 0) {
+ const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len;
+ const old_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, old_padding_bits);
+ self.masks[old_masks - 1] |= ~old_mask;
+ }
+
+ // fill in any new masks
+ if (new_masks > old_masks) {
+ const fill_value = std.math.boolMask(MaskInt, fill);
+ std.mem.set(MaskInt, self.masks[old_masks..new_masks], fill_value);
+ }
+ }
+
+ // Zero out the padding bits
+ if (new_len > 0) {
+ const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len;
+ const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ self.masks[new_masks - 1] &= last_item_mask;
+ }
+
+ // And finally, save the new length.
+ self.bit_length = new_len;
+ }
+
+ /// deinitializes the array and releases its memory.
+ /// The passed allocator must be the same one used for
+ /// init* or resize in the past.
+ pub fn deinit(self: *Self, allocator: *Allocator) void {
+ self.resize(0, false, allocator) catch unreachable;
+ }
+
+ /// Creates a duplicate of this bit set, using the new allocator.
+ pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ const num_masks = numMasks(self.bit_length);
+ var copy = Self{};
+ try copy.resize(self.bit_length, false, new_allocator);
+ std.mem.copy(MaskInt, copy.masks[0..num_masks], self.masks[0..num_masks]);
+ return copy;
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return self.bit_length;
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ assert(index < self.bit_length);
+ return (self.masks[maskIndex(index)] & maskBit(index)) != 0;
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ const num_masks = (self.bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt);
+ var total: usize = 0;
+ for (self.masks[0..num_masks]) |mask| {
+ // Note: This is where we depend on padding bits being zero
+ total += @popCount(MaskInt, mask);
+ }
+ return total;
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ assert(index < self.bit_length);
+ const bit = maskBit(index);
+ const mask_index = maskIndex(index);
+ const new_bit = bit & std.math.boolMask(MaskInt, value);
+ self.masks[mask_index] = (self.masks[mask_index] & ~bit) | new_bit;
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ assert(index < self.bit_length);
+ self.masks[maskIndex(index)] |= maskBit(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ assert(index < self.bit_length);
+ self.masks[maskIndex(index)] &= ~maskBit(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ assert(index < self.bit_length);
+ self.masks[maskIndex(index)] ^= maskBit(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set. Both sets must have the
+ /// same bit_length.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ assert(toggles.bit_length == self.bit_length);
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* ^= toggles.masks[i];
+ }
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ const bit_length = self.bit_length;
+ // avoid underflow if bit_length is zero
+ if (bit_length == 0) return;
+
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* = ~mask.*;
+ }
+
+ const padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length;
+ const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ self.masks[num_masks - 1] &= last_item_mask;
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ /// The two sets must both be the same bit_length.
+ pub fn setUnion(self: *Self, other: Self) void {
+ assert(other.bit_length == self.bit_length);
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* |= other.masks[i];
+ }
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ /// The two sets must both be the same bit_length.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ assert(other.bit_length == self.bit_length);
+ const num_masks = numMasks(self.bit_length);
+ for (self.masks[0..num_masks]) |*mask, i| {
+ mask.* &= other.masks[i];
+ }
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ var offset: usize = 0;
+ var mask = self.masks;
+ while (offset < self.bit_length) {
+ if (mask[0] != 0) break;
+ mask += 1;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ return offset + @ctz(MaskInt, mask[0]);
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ var offset: usize = 0;
+ var mask = self.masks;
+ while (offset < self.bit_length) {
+ if (mask[0] != 0) break;
+ mask += 1;
+ offset += @bitSizeOf(MaskInt);
+ } else return null;
+ const index = @ctz(MaskInt, mask[0]);
+ mask[0] &= (mask[0] - 1);
+ return offset + index;
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator. Resizing the underlying
+ /// bit set invalidates the iterator.
+ pub fn iterator(self: *const Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
+ const num_masks = numMasks(self.bit_length);
+ const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length;
+ const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+ return BitSetIterator(MaskInt, options).init(self.masks[0..num_masks], last_item_mask);
+ }
+
+ fn maskBit(index: usize) MaskInt {
+ return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+ }
+ fn maskIndex(index: usize) usize {
+ return index >> @bitSizeOf(ShiftInt);
+ }
+ fn boolMaskBit(index: usize, value: bool) MaskInt {
+ return @as(MaskInt, @boolToInt(value)) << @intCast(ShiftInt, index);
+ }
+ fn numMasks(bit_length: usize) usize {
+ return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt);
+ }
+};
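
// Editorial sketch, not part of this commit: DynamicBitSetUnmanaged with an
// externally tracked allocator, assuming it sits in this file.
test "DynamicBitSetUnmanaged usage sketch" {
    const allocator = std.testing.allocator;
    var set = try DynamicBitSetUnmanaged.initEmpty(1000, allocator);
    defer set.deinit(allocator); // the caller supplies the allocator again

    set.set(999);
    std.testing.expect(set.isSet(999));

    try set.resize(2000, false, allocator); // grow; the new bits are filled with `false`
    std.testing.expectEqual(@as(usize, 1), set.count());
}
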
+
+/// A bit set with runtime known size, backed by an allocated slice
+/// of usize. Thin wrapper around DynamicBitSetUnmanaged which keeps
+/// track of the allocator instance.
+pub const DynamicBitSet = struct {
+ const Self = @This();
+
+ /// The integer type used to represent a mask in this bit set
+ pub const MaskInt = usize;
+
+ /// The integer type used to shift a mask in this bit set
+ pub const ShiftInt = std.math.Log2Int(MaskInt);
+
+ /// The allocator used by this bit set
+ allocator: *Allocator,
+
+ /// The unmanaged bit set holding the actual data
+ unmanaged: DynamicBitSetUnmanaged = .{},
+
+ /// Creates a bit set with no elements present.
+ pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ return Self{
+ .unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator),
+ .allocator = allocator,
+ };
+ }
+
+ /// Creates a bit set with all elements present.
+ pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ return Self{
+ .unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator),
+ .allocator = allocator,
+ };
+ }
+
+ /// Resizes to a new length. If the new length is larger
+ /// than the old length, fills any added bits with `fill`.
+ pub fn resize(self: *@This(), new_len: usize, fill: bool) !void {
+ try self.unmanaged.resize(new_len, fill, self.allocator);
+ }
+
+ /// deinitializes the array and releases its memory.
+ /// The passed allocator must be the same one used for
+ /// init* or resize in the past.
+ pub fn deinit(self: *Self) void {
+ self.unmanaged.deinit(self.allocator);
+ }
+
+ /// Creates a duplicate of this bit set, using the new allocator.
+ pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ return Self{
+ .unmanaged = try self.unmanaged.clone(new_allocator),
+ .allocator = new_allocator,
+ };
+ }
+
+ /// Returns the number of bits in this bit set
+ pub fn capacity(self: Self) callconv(.Inline) usize {
+ return self.unmanaged.capacity();
+ }
+
+ /// Returns true if the bit at the specified index
+ /// is present in the set, false otherwise.
+ pub fn isSet(self: Self, index: usize) bool {
+ return self.unmanaged.isSet(index);
+ }
+
+ /// Returns the total number of set bits in this bit set.
+ pub fn count(self: Self) usize {
+ return self.unmanaged.count();
+ }
+
+ /// Changes the value of the specified bit of the bit
+ /// set to match the passed boolean.
+ pub fn setValue(self: *Self, index: usize, value: bool) void {
+ self.unmanaged.setValue(index, value);
+ }
+
+ /// Adds a specific bit to the bit set
+ pub fn set(self: *Self, index: usize) void {
+ self.unmanaged.set(index);
+ }
+
+ /// Removes a specific bit from the bit set
+ pub fn unset(self: *Self, index: usize) void {
+ self.unmanaged.unset(index);
+ }
+
+ /// Flips a specific bit in the bit set
+ pub fn toggle(self: *Self, index: usize) void {
+ self.unmanaged.toggle(index);
+ }
+
+ /// Flips all bits in this bit set which are present
+ /// in the toggles bit set. Both sets must have the
+ /// same bit_length.
+ pub fn toggleSet(self: *Self, toggles: Self) void {
+ self.unmanaged.toggleSet(toggles.unmanaged);
+ }
+
+ /// Flips every bit in the bit set.
+ pub fn toggleAll(self: *Self) void {
+ self.unmanaged.toggleAll();
+ }
+
+ /// Performs a union of two bit sets, and stores the
+ /// result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in either input.
+ /// The two sets must both be the same bit_length.
+ pub fn setUnion(self: *Self, other: Self) void {
+ self.unmanaged.setUnion(other.unmanaged);
+ }
+
+ /// Performs an intersection of two bit sets, and stores
+ /// the result in the first one. Bits in the result are
+ /// set if the corresponding bits were set in both inputs.
+ /// The two sets must both be the same bit_length.
+ pub fn setIntersection(self: *Self, other: Self) void {
+ self.unmanaged.setIntersection(other.unmanaged);
+ }
+
+ /// Finds the index of the first set bit.
+ /// If no bits are set, returns null.
+ pub fn findFirstSet(self: Self) ?usize {
+ return self.unmanaged.findFirstSet();
+ }
+
+ /// Finds the index of the first set bit, and unsets it.
+ /// If no bits are set, returns null.
+ pub fn toggleFirstSet(self: *Self) ?usize {
+ return self.unmanaged.toggleFirstSet();
+ }
+
+ /// Iterates through the items in the set, according to the options.
+ /// The default options (.{}) will iterate indices of set bits in
+ /// ascending order. Modifications to the underlying bit set may
+ /// or may not be observed by the iterator. Resizing the underlying
+ /// bit set invalidates the iterator.
+ pub fn iterator(self: *Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
+ return self.unmanaged.iterator(options);
+ }
+};
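
// Editorial sketch, not part of this commit: the managed DynamicBitSet stores
// its allocator, so deinit and resize take no allocator argument.
test "DynamicBitSet usage sketch" {
    var set = try DynamicBitSet.initFull(64, std.testing.allocator);
    defer set.deinit();

    set.unset(0);
    std.testing.expectEqual(@as(usize, 63), set.count());
    std.testing.expectEqual(@as(?usize, 1), set.findFirstSet());

    try set.resize(128, false); // grow; the new bits are unset
    std.testing.expectEqual(@as(usize, 63), set.count());
}
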
+
+/// Options for configuring an iterator over a bit set
+pub const IteratorOptions = struct {
+ /// determines which bits should be visited
+ kind: Type = .set,
+ /// determines the order in which bit indices should be visited
+ direction: Direction = .forward,
+
+ pub const Type = enum {
+ /// visit indexes of set bits
+ set,
+ /// visit indexes of unset bits
+ unset,
+ };
+
+ pub const Direction = enum {
+ /// visit indices in ascending order
+ forward,
+ /// visit indices in descending order.
+ /// Note that this may be slightly more expensive than forward iteration.
+ reverse,
+ };
+};
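
// Editorial sketch, not part of this commit: non-default iterator options,
// assuming it sits in this file where IntegerBitSet is in scope.
test "IteratorOptions sketch" {
    var bits = IntegerBitSet(4).initEmpty();
    bits.set(1);
    bits.set(3);

    // Visit set bits in descending index order.
    var rev = bits.iterator(.{ .direction = .reverse });
    std.testing.expectEqual(@as(?usize, 3), rev.next());
    std.testing.expectEqual(@as(?usize, 1), rev.next());
    std.testing.expectEqual(@as(?usize, null), rev.next());

    // Visit the unset bits instead of the set ones.
    var unset_it = bits.iterator(.{ .kind = .unset });
    std.testing.expectEqual(@as(?usize, 0), unset_it.next());
    std.testing.expectEqual(@as(?usize, 2), unset_it.next());
    std.testing.expectEqual(@as(?usize, null), unset_it.next());
}
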
+
+// The iterator is reusable between several bit set types
+fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) type {
+ const ShiftInt = std.math.Log2Int(MaskInt);
+ const kind = options.kind;
+ const direction = options.direction;
+ return struct {
+ const Self = @This();
+
+ // all bits which have not yet been iterated over
+ bits_remain: MaskInt,
+ // all words which have not yet been iterated over
+ words_remain: []const MaskInt,
+ // the offset of the current word
+ bit_offset: usize,
+ // the mask of the last word
+ last_word_mask: MaskInt,
+
+ fn init(masks: []const MaskInt, last_word_mask: MaskInt) Self {
+ if (masks.len == 0) {
+ return Self{
+ .bits_remain = 0,
+ .words_remain = &[_]MaskInt{},
+ .last_word_mask = last_word_mask,
+ .bit_offset = 0,
+ };
+ } else {
+ var result = Self{
+ .bits_remain = 0,
+ .words_remain = masks,
+ .last_word_mask = last_word_mask,
+ .bit_offset = if (direction == .forward) 0 else (masks.len - 1) * @bitSizeOf(MaskInt),
+ };
+ result.nextWord(true);
+ return result;
+ }
+ }
+
+ /// Returns the index of the next unvisited set bit
+ /// in the bit set, in ascending order.
+ pub fn next(self: *Self) ?usize {
+ while (self.bits_remain == 0) {
+ if (self.words_remain.len == 0) return null;
+ self.nextWord(false);
+ switch (direction) {
+ .forward => self.bit_offset += @bitSizeOf(MaskInt),
+ .reverse => self.bit_offset -= @bitSizeOf(MaskInt),
+ }
+ }
+
+ switch (direction) {
+ .forward => {
+ const next_index = @ctz(MaskInt, self.bits_remain) + self.bit_offset;
+ self.bits_remain &= self.bits_remain - 1;
+ return next_index;
+ },
+ .reverse => {
+ const leading_zeroes = @clz(MaskInt, self.bits_remain);
+ const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
+ const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+ self.bits_remain &= no_top_bit_mask;
+ return top_bit + self.bit_offset;
+ },
+ }
+ }
+
+ // Load the next word. Don't call this if there
+ // isn't a next word. If the next word is the
+ // last word, mask off the padding bits so we
+ // don't visit them.
+ fn nextWord(self: *Self, comptime is_first_word: bool) callconv(.Inline) void {
+ var word = switch (direction) {
+ .forward => self.words_remain[0],
+ .reverse => self.words_remain[self.words_remain.len - 1],
+ };
+ switch (kind) {
+ .set => {},
+ .unset => {
+ word = ~word;
+ if ((direction == .reverse and is_first_word) or
+ (direction == .forward and self.words_remain.len == 1))
+ {
+ word &= self.last_word_mask;
+ }
+ },
+ }
+ switch (direction) {
+ .forward => self.words_remain = self.words_remain[1..],
+ .reverse => self.words_remain.len -= 1,
+ }
+ self.bits_remain = word;
+ }
+ };
+}
+
+// ---------------- Tests -----------------
+
+const testing = std.testing;
+
+fn testBitSet(a: anytype, b: anytype, len: usize) void {
+ testing.expectEqual(len, a.capacity());
+ testing.expectEqual(len, b.capacity());
+
+ {
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ a.setValue(i, i & 1 == 0);
+ b.setValue(i, i & 2 == 0);
+ }
+ }
+
+ testing.expectEqual((len + 1) / 2, a.count());
+ testing.expectEqual((len + 3) / 4 + (len + 2) / 4, b.count());
+
+ {
+ var iter = a.iterator(.{});
+ var i: usize = 0;
+ while (i < len) : (i += 2) {
+ testing.expectEqual(@as(?usize, i), iter.next());
+ }
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ }
+ a.toggleAll();
+ {
+ var iter = a.iterator(.{});
+ var i: usize = 1;
+ while (i < len) : (i += 2) {
+ testing.expectEqual(@as(?usize, i), iter.next());
+ }
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ }
+
+ {
+ var iter = b.iterator(.{ .kind = .unset });
+ var i: usize = 2;
+ while (i < len) : (i += 4) {
+ testing.expectEqual(@as(?usize, i), iter.next());
+ if (i + 1 < len) {
+ testing.expectEqual(@as(?usize, i + 1), iter.next());
+ }
+ }
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ }
+
+ {
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 != 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ }
+ }
+
+ a.setUnion(b.*);
+ {
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 != 0 or i & 2 == 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ }
+
+ i = len;
+ var set = a.iterator(.{ .direction = .reverse });
+ var unset = a.iterator(.{ .kind = .unset, .direction = .reverse });
+ while (i > 0) {
+ i -= 1;
+ if (i & 1 != 0 or i & 2 == 0) {
+ testing.expectEqual(@as(?usize, i), set.next());
+ } else {
+ testing.expectEqual(@as(?usize, i), unset.next());
+ }
+ }
+ testing.expectEqual(@as(?usize, null), set.next());
+ testing.expectEqual(@as(?usize, null), set.next());
+ testing.expectEqual(@as(?usize, null), set.next());
+ testing.expectEqual(@as(?usize, null), unset.next());
+ testing.expectEqual(@as(?usize, null), unset.next());
+ testing.expectEqual(@as(?usize, null), unset.next());
+ }
+
+ a.toggleSet(b.*);
+ {
+ testing.expectEqual(len / 4, a.count());
+
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 != 0 and i & 2 != 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ if (i & 1 == 0) {
+ a.set(i);
+ } else {
+ a.unset(i);
+ }
+ }
+ }
+
+ a.setIntersection(b.*);
+ {
+ testing.expectEqual((len + 3) / 4, a.count());
+
+ var i: usize = 0;
+ while (i < len) : (i += 1) {
+ testing.expectEqual(i & 1 == 0 and i & 2 == 0, a.isSet(i));
+ testing.expectEqual(i & 2 == 0, b.isSet(i));
+ }
+ }
+
+ a.toggleSet(a.*);
+ {
+ var iter = a.iterator(.{});
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+ {
+ var iter = a.iterator(.{ .direction = .reverse });
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(?usize, null), iter.next());
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+
+ const test_bits = [_]usize{
+ 0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 22, 31, 32, 63, 64,
+ 66, 95, 127, 160, 192, 1000,
+ };
+ for (test_bits) |i| {
+ if (i < a.capacity()) {
+ a.set(i);
+ }
+ }
+
+ for (test_bits) |i| {
+ if (i < a.capacity()) {
+ testing.expectEqual(@as(?usize, i), a.findFirstSet());
+ testing.expectEqual(@as(?usize, i), a.toggleFirstSet());
+ }
+ }
+ testing.expectEqual(@as(?usize, null), a.findFirstSet());
+ testing.expectEqual(@as(?usize, null), a.toggleFirstSet());
+ testing.expectEqual(@as(?usize, null), a.findFirstSet());
+ testing.expectEqual(@as(?usize, null), a.toggleFirstSet());
+ testing.expectEqual(@as(usize, 0), a.count());
+}
+
+fn testStaticBitSet(comptime Set: type) void {
+ var a = Set.initEmpty();
+ var b = Set.initFull();
+ testing.expectEqual(@as(usize, 0), a.count());
+ testing.expectEqual(@as(usize, Set.bit_length), b.count());
+
+ testBitSet(&a, &b, Set.bit_length);
+}
+
+test "IntegerBitSet" {
+ testStaticBitSet(IntegerBitSet(0));
+ testStaticBitSet(IntegerBitSet(1));
+ testStaticBitSet(IntegerBitSet(2));
+ testStaticBitSet(IntegerBitSet(5));
+ testStaticBitSet(IntegerBitSet(8));
+ testStaticBitSet(IntegerBitSet(32));
+ testStaticBitSet(IntegerBitSet(64));
+ testStaticBitSet(IntegerBitSet(127));
+}
+
+test "ArrayBitSet" {
+ inline for (.{ 0, 1, 2, 31, 32, 33, 63, 64, 65, 254, 500, 3000 }) |size| {
+ testStaticBitSet(ArrayBitSet(u8, size));
+ testStaticBitSet(ArrayBitSet(u16, size));
+ testStaticBitSet(ArrayBitSet(u32, size));
+ testStaticBitSet(ArrayBitSet(u64, size));
+ testStaticBitSet(ArrayBitSet(u128, size));
+ }
+}
+
+test "DynamicBitSetUnmanaged" {
+ const allocator = std.testing.allocator;
+ var a = try DynamicBitSetUnmanaged.initEmpty(300, allocator);
+ testing.expectEqual(@as(usize, 0), a.count());
+ a.deinit(allocator);
+
+ a = try DynamicBitSetUnmanaged.initEmpty(0, allocator);
+ defer a.deinit(allocator);
+ for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| {
+ const old_len = a.capacity();
+
+ var tmp = try a.clone(allocator);
+ defer tmp.deinit(allocator);
+ testing.expectEqual(old_len, tmp.capacity());
+ var i: usize = 0;
+ while (i < old_len) : (i += 1) {
+ testing.expectEqual(a.isSet(i), tmp.isSet(i));
+ }
+
+ a.toggleSet(a); // zero a
+ tmp.toggleSet(tmp);
+
+ try a.resize(size, true, allocator);
+ try tmp.resize(size, false, allocator);
+
+ if (size > old_len) {
+ testing.expectEqual(size - old_len, a.count());
+ } else {
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+ testing.expectEqual(@as(usize, 0), tmp.count());
+
+ var b = try DynamicBitSetUnmanaged.initFull(size, allocator);
+ defer b.deinit(allocator);
+ testing.expectEqual(@as(usize, size), b.count());
+
+ testBitSet(&a, &b, size);
+ }
+}
+
+test "DynamicBitSet" {
+ const allocator = std.testing.allocator;
+ var a = try DynamicBitSet.initEmpty(300, allocator);
+ testing.expectEqual(@as(usize, 0), a.count());
+ a.deinit();
+
+ a = try DynamicBitSet.initEmpty(0, allocator);
+ defer a.deinit();
+ for ([_]usize{ 1, 2, 31, 32, 33, 0, 65, 64, 63, 500, 254, 3000 }) |size| {
+ const old_len = a.capacity();
+
+ var tmp = try a.clone(allocator);
+ defer tmp.deinit();
+ testing.expectEqual(old_len, tmp.capacity());
+ var i: usize = 0;
+ while (i < old_len) : (i += 1) {
+ testing.expectEqual(a.isSet(i), tmp.isSet(i));
+ }
+
+ a.toggleSet(a); // zero a
+ tmp.toggleSet(tmp); // zero tmp
+
+ try a.resize(size, true);
+ try tmp.resize(size, false);
+
+ if (size > old_len) {
+ testing.expectEqual(size - old_len, a.count());
+ } else {
+ testing.expectEqual(@as(usize, 0), a.count());
+ }
+ testing.expectEqual(@as(usize, 0), tmp.count());
+
+ var b = try DynamicBitSet.initFull(size, allocator);
+ defer b.deinit();
+ testing.expectEqual(@as(usize, size), b.count());
+
+ testBitSet(&a, &b, size);
+ }
+}
+
+test "StaticBitSet" {
+ testing.expectEqual(IntegerBitSet(0), StaticBitSet(0));
+ testing.expectEqual(IntegerBitSet(5), StaticBitSet(5));
+ testing.expectEqual(IntegerBitSet(@bitSizeOf(usize)), StaticBitSet(@bitSizeOf(usize)));
+ testing.expectEqual(ArrayBitSet(usize, @bitSizeOf(usize) + 1), StaticBitSet(@bitSizeOf(usize) + 1));
+ testing.expectEqual(ArrayBitSet(usize, 500), StaticBitSet(500));
+}
diff --git a/lib/std/build/emit_raw.zig b/lib/std/build/emit_raw.zig
index 721b38b7a2..0932e117fe 100644
--- a/lib/std/build/emit_raw.zig
+++ b/lib/std/build/emit_raw.zig
@@ -51,9 +51,9 @@ const BinaryElfOutput = struct {
.segments = ArrayList(*BinaryElfSegment).init(allocator),
.sections = ArrayList(*BinaryElfSection).init(allocator),
};
- const elf_hdr = try std.elf.readHeader(elf_file);
+ const elf_hdr = try std.elf.Header.read(&elf_file);
- var section_headers = elf_hdr.section_header_iterator(elf_file);
+ var section_headers = elf_hdr.section_header_iterator(&elf_file);
while (try section_headers.next()) |section| {
if (sectionValidForOutput(section)) {
const newSection = try allocator.create(BinaryElfSection);
@@ -67,7 +67,7 @@ const BinaryElfOutput = struct {
}
}
- var program_headers = elf_hdr.program_header_iterator(elf_file);
+ var program_headers = elf_hdr.program_header_iterator(&elf_file);
while (try program_headers.next()) |phdr| {
if (phdr.p_type == elf.PT_LOAD) {
const newSegment = try allocator.create(BinaryElfSegment);
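
Editorial note, not part of this commit: the updated std.elf API used above takes any "parse source" that provides reader() and seekableStream(), typically a std.fs.File passed by pointer. A minimal sketch of the new call pattern, with a hypothetical file path:

const std = @import("std");

fn listLoadSegments() !void {
    // Hypothetical path, for illustration only.
    var file = try std.fs.cwd().openFile("zig-cache/bin/example", .{});
    defer file.close();

    const header = try std.elf.Header.read(&file);
    std.debug.print("entry point: 0x{x}\n", .{header.entry});

    var phdrs = header.program_header_iterator(&file);
    while (try phdrs.next()) |phdr| {
        if (phdr.p_type == std.elf.PT_LOAD) {
            std.debug.print("PT_LOAD at 0x{x}\n", .{phdr.p_vaddr});
        }
    }
}
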
diff --git a/lib/std/c/tokenizer.zig b/lib/std/c/tokenizer.zig
index 2e1969e269..4399d3dc6c 100644
--- a/lib/std/c/tokenizer.zig
+++ b/lib/std/c/tokenizer.zig
@@ -401,7 +401,9 @@ pub const Tokenizer = struct {
Zero,
IntegerLiteralOct,
IntegerLiteralBinary,
+ IntegerLiteralBinaryFirst,
IntegerLiteralHex,
+ IntegerLiteralHexFirst,
IntegerLiteral,
IntegerSuffix,
IntegerSuffixU,
@@ -1046,10 +1048,10 @@ pub const Tokenizer = struct {
state = .IntegerLiteralOct;
},
'b', 'B' => {
- state = .IntegerLiteralBinary;
+ state = .IntegerLiteralBinaryFirst;
},
'x', 'X' => {
- state = .IntegerLiteralHex;
+ state = .IntegerLiteralHexFirst;
},
'.' => {
state = .FloatFraction;
@@ -1066,6 +1068,13 @@ pub const Tokenizer = struct {
self.index -= 1;
},
},
+ .IntegerLiteralBinaryFirst => switch (c) {
+ '0'...'7' => state = .IntegerLiteralBinary,
+ else => {
+ result.id = .Invalid;
+ break;
+ },
+ },
.IntegerLiteralBinary => switch (c) {
'0', '1' => {},
else => {
@@ -1073,6 +1082,19 @@ pub const Tokenizer = struct {
self.index -= 1;
},
},
+ .IntegerLiteralHexFirst => switch (c) {
+ '0'...'9', 'a'...'f', 'A'...'F' => state = .IntegerLiteralHex,
+ '.' => {
+ state = .FloatFractionHex;
+ },
+ 'p', 'P' => {
+ state = .FloatExponent;
+ },
+ else => {
+ result.id = .Invalid;
+ break;
+ },
+ },
.IntegerLiteralHex => switch (c) {
'0'...'9', 'a'...'f', 'A'...'F' => {},
'.' => {
@@ -1238,6 +1260,8 @@ pub const Tokenizer = struct {
.MultiLineCommentAsterisk,
.FloatExponent,
.MacroString,
+ .IntegerLiteralBinaryFirst,
+ .IntegerLiteralHexFirst,
=> result.id = .Invalid,
.FloatExponentDigits => result.id = if (counter == 0) .Invalid else .{ .FloatLiteral = .none },
@@ -1523,6 +1547,7 @@ test "num suffixes" {
\\ 1.0f 1.0L 1.0 .0 1.
\\ 0l 0lu 0ll 0llu 0
\\ 1u 1ul 1ull 1
+ \\ 0x 0b
\\
, &[_]Token.Id{
.{ .FloatLiteral = .f },
@@ -1542,6 +1567,9 @@ test "num suffixes" {
.{ .IntegerLiteral = .llu },
.{ .IntegerLiteral = .none },
.Nl,
+ .Invalid,
+ .Invalid,
+ .Nl,
});
}
diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig
index 765ffa1629..e01b024360 100644
--- a/lib/std/crypto/25519/curve25519.zig
+++ b/lib/std/crypto/25519/curve25519.zig
@@ -115,9 +115,9 @@ test "curve25519" {
const p = try Curve25519.basePoint.clampedMul(s);
try p.rejectIdentity();
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&p.toBytes())}), "E6F2A4D1C28EE5C7AD0329268255A468AD407D2672824C0C0EB30EA6EF450145");
const q = try p.clampedMul(s);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{q.toBytes()}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&q.toBytes())}), "3614E119FFE55EC55B87D6B19971A9F4CBC78EFE80BEC55B96392BABCC712537");
try Curve25519.rejectNonCanonical(s);
s[31] |= 0x80;
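
Editorial note, not part of this commit: the test updates above (and in the other crypto files below) replace `{X}` formatting of byte arrays with `{s}` plus std.fmt.fmtSliceHexUpper, which wraps a byte slice in an uppercase-hex formatter. A minimal sketch of the pattern:

const std = @import("std");

test "uppercase hex formatting sketch" {
    var buf: [8]u8 = undefined;
    const bytes = [_]u8{ 0xde, 0xad, 0xbe, 0xef };
    const out = try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&bytes)});
    std.testing.expectEqualStrings("DEADBEEF", out);
}
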
diff --git a/lib/std/crypto/25519/ed25519.zig b/lib/std/crypto/25519/ed25519.zig
index 5c7ec0cdac..06a4826f58 100644
--- a/lib/std/crypto/25519/ed25519.zig
+++ b/lib/std/crypto/25519/ed25519.zig
@@ -210,8 +210,8 @@ test "ed25519 key pair creation" {
_ = try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.KeyPair.create(seed);
var buf: [256]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair.secret_key}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{key_pair.public_key}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.secret_key)}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.public_key)}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
}
test "ed25519 signature" {
@@ -221,7 +221,7 @@ test "ed25519 signature" {
const sig = try Ed25519.sign("test", key_pair, null);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{sig}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&sig)}), "10A442B4A80CC4225B154F43BEF28D2472CA80221951262EB8E0DF9091575E2687CC486E77263C3418C757522D54F84B0359236ABBBD4ACD20DC297FDCA66808");
try Ed25519.verify(sig, "test", key_pair.public_key);
std.testing.expectError(error.InvalidSignature, Ed25519.verify(sig, "TEST", key_pair.public_key));
}
diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig
index d4238f87bb..8d9922d80c 100644
--- a/lib/std/crypto/25519/edwards25519.zig
+++ b/lib/std/crypto/25519/edwards25519.zig
@@ -450,7 +450,7 @@ test "edwards25519 packing/unpacking" {
var b = Edwards25519.basePoint;
const pk = try b.mul(s);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{pk.toBytes()}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&pk.toBytes())}), "074BC7E0FCBD587FDBC0969444245FADC562809C8F6E97E949AF62484B5B81A6");
const small_order_ss: [7][32]u8 = .{
.{
diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig
index df85422f65..46bb9697e2 100644
--- a/lib/std/crypto/25519/ristretto255.zig
+++ b/lib/std/crypto/25519/ristretto255.zig
@@ -170,21 +170,21 @@ pub const Ristretto255 = struct {
test "ristretto255" {
const p = Ristretto255.basePoint;
var buf: [256]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{p.toBytes()}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&p.toBytes())}), "E2F2AE0A6ABC4E71A884A961C500515F58E30B6AA582DD8DB6A65945E08D2D76");
var r: [Ristretto255.encoded_length]u8 = undefined;
_ = try fmt.hexToBytes(r[0..], "6a493210f7499cd17fecb510ae0cea23a110e8d5b901f8acadd3095c73a3b919");
var q = try Ristretto255.fromBytes(r);
q = q.dbl().add(p);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{q.toBytes()}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&q.toBytes())}), "E882B131016B52C1D3337080187CF768423EFCCBB517BB495AB812C4160FF44E");
const s = [_]u8{15} ++ [_]u8{0} ** 31;
const w = try p.mul(s);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{w.toBytes()}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&w.toBytes())}), "E0C418F7C8D9C4CDD7395B93EA124F3AD99021BB681DFC3302A9D99A2E53E64E");
std.testing.expect(p.dbl().dbl().dbl().dbl().equivalent(w.add(p)));
const h = [_]u8{69} ** 32 ++ [_]u8{42} ** 32;
const ph = Ristretto255.fromUniform(h);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ph.toBytes()}), "DCCA54E037A4311EFBEEF413ACD21D35276518970B7A61DC88F8587B493D5E19");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&ph.toBytes())}), "DCCA54E037A4311EFBEEF413ACD21D35276518970B7A61DC88F8587B493D5E19");
}
diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig
index ceff153bff..e4fb277807 100644
--- a/lib/std/crypto/25519/scalar.zig
+++ b/lib/std/crypto/25519/scalar.zig
@@ -771,10 +771,10 @@ test "scalar25519" {
var y = x.toBytes();
try rejectNonCanonical(y);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{y}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&y)}), "1E979B917937F3DE71D18077F961F6CEFF01030405060708010203040506070F");
const reduced = reduce(field_size);
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{reduced}), "0000000000000000000000000000000000000000000000000000000000000000");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&reduced)}), "0000000000000000000000000000000000000000000000000000000000000000");
}
test "non-canonical scalar25519" {
@@ -788,5 +788,5 @@ test "mulAdd overflow check" {
const c: [32]u8 = [_]u8{0xff} ** 32;
const x = mulAdd(a, b, c);
var buf: [128]u8 = undefined;
- std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{x}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
+ std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&x)}), "D14DF91389432C25AD60FF9791B9FD1D67BEF517D273ECCE3D9A307C1B419903");
}
diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig
index 0f79707279..e01888e793 100644
--- a/lib/std/crypto/chacha20.zig
+++ b/lib/std/crypto/chacha20.zig
@@ -876,7 +876,7 @@ test "crypto.xchacha20" {
var ciphertext: [input.len]u8 = undefined;
XChaCha20IETF.xor(ciphertext[0..], input[0..], 0, key, nonce);
var buf: [2 * ciphertext.len]u8 = undefined;
- testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ciphertext}), "E0A1BCF939654AFDBDC1746EC49832647C19D891F0D1A81FC0C1703B4514BDEA584B512F6908C2C5E9DD18D5CBC1805DE5803FE3B9CA5F193FB8359E91FAB0C3BB40309A292EB1CF49685C65C4A3ADF4F11DB0CD2B6B67FBC174BC2E860E8F769FD3565BBFAD1C845E05A0FED9BE167C240D");
+ testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&ciphertext)}), "E0A1BCF939654AFDBDC1746EC49832647C19D891F0D1A81FC0C1703B4514BDEA584B512F6908C2C5E9DD18D5CBC1805DE5803FE3B9CA5F193FB8359E91FAB0C3BB40309A292EB1CF49685C65C4A3ADF4F11DB0CD2B6B67FBC174BC2E860E8F769FD3565BBFAD1C845E05A0FED9BE167C240D");
}
{
const data = "Additional data";
@@ -885,7 +885,7 @@ test "crypto.xchacha20" {
var out: [input.len]u8 = undefined;
try xchacha20poly1305Open(out[0..], ciphertext[0..], data, key, nonce);
var buf: [2 * ciphertext.len]u8 = undefined;
- testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{X}", .{ciphertext}), "994D2DD32333F48E53650C02C7A2ABB8E018B0836D7175AEC779F52E961780768F815C58F1AA52D211498DB89B9216763F569C9433A6BBFCEFB4D4A49387A4C5207FBB3B5A92B5941294DF30588C6740D39DC16FA1F0E634F7246CF7CDCB978E44347D89381B7A74EB7084F754B90BDE9AAF5A94B8F2A85EFD0B50692AE2D425E234");
+ testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&ciphertext)}), "994D2DD32333F48E53650C02C7A2ABB8E018B0836D7175AEC779F52E961780768F815C58F1AA52D211498DB89B9216763F569C9433A6BBFCEFB4D4A49387A4C5207FBB3B5A92B5941294DF30588C6740D39DC16FA1F0E634F7246CF7CDCB978E44347D89381B7A74EB7084F754B90BDE9AAF5A94B8F2A85EFD0B50692AE2D425E234");
testing.expectEqualSlices(u8, out[0..], input);
ciphertext[0] += 1;
testing.expectError(error.AuthenticationFailed, xchacha20poly1305Open(out[0..], ciphertext[0..], data, key, nonce));
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index f32c1a6156..74fb95ffa8 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -360,14 +360,24 @@ pub const StackIterator = struct {
};
}
- // Negative offset of the saved BP wrt the frame pointer.
+ // Offset of the saved BP wrt the frame pointer.
const fp_offset = if (builtin.arch.isRISCV())
// On RISC-V the frame pointer points to the top of the saved register
// area, on pretty much every other architecture it points to the stack
// slot where the previous frame pointer is saved.
2 * @sizeOf(usize)
+ else if (builtin.arch.isSPARC())
+ // On SPARC the previous frame pointer is stored at 14 slots past %fp+BIAS.
+ 14 * @sizeOf(usize)
else
0;
+
+ const fp_bias = if (builtin.arch.isSPARC())
+ // On SPARC frame pointers are biased by a constant.
+ 2047
+ else
+ 0;
+
// Positive offset of the saved PC wrt the frame pointer.
const pc_offset = if (builtin.arch == .powerpc64le)
2 * @sizeOf(usize)
@@ -388,13 +398,17 @@ pub const StackIterator = struct {
}
fn next_internal(self: *StackIterator) ?usize {
- const fp = math.sub(usize, self.fp, fp_offset) catch return null;
+ const fp = if (builtin.arch.isSPARC())
+ // On SPARC the offset is positive. (!)
+ math.add(usize, self.fp, fp_offset) catch return null
+ else
+ math.sub(usize, self.fp, fp_offset) catch return null;
// Sanity check.
if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)))
return null;
- const new_fp = @intToPtr(*const usize, fp).*;
+ const new_fp = math.add(usize, @intToPtr(*const usize, fp).*, fp_bias) catch return null;
    // Sanity check: the stack grows down, thus all the parent frames must be
    // at addresses that are greater than (or equal to) the previous one.
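The StackIterator change above only alters how the next frame pointer is derived on SPARC (a positive offset plus the constant bias); the iteration API itself is unchanged. A minimal sketch of walking the current call stack with it, not taken from this commit (the `dumpStack` name is illustrative):

    const std = @import("std");

    fn dumpStack() void {
        // Passing null for the frame pointer makes the iterator start from the current frame.
        var it = std.debug.StackIterator.init(@returnAddress(), null);
        while (it.next()) |return_address| {
            std.debug.print("0x{x}\n", .{return_address});
        }
    }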
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index cfb6b448c0..e644c6631a 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -335,7 +335,7 @@ pub const ET = extern enum(u16) {
};
/// All integers are native endian.
-const Header = struct {
+pub const Header = struct {
endian: builtin.Endian,
is_64: bool,
entry: u64,
@@ -347,187 +347,200 @@ const Header = struct {
shnum: u16,
shstrndx: u16,
- pub fn program_header_iterator(self: Header, file: File) ProgramHeaderIterator {
- return .{
+ pub fn program_header_iterator(self: Header, parse_source: anytype) ProgramHeaderIterator(@TypeOf(parse_source)) {
+ return ProgramHeaderIterator(@TypeOf(parse_source)){
.elf_header = self,
- .file = file,
+ .parse_source = parse_source,
};
}
- pub fn section_header_iterator(self: Header, file: File) SectionHeaderIterator {
- return .{
+ pub fn section_header_iterator(self: Header, parse_source: anytype) SectionHeaderIterator(@TypeOf(parse_source)) {
+ return SectionHeaderIterator(@TypeOf(parse_source)){
.elf_header = self,
- .file = file,
+ .parse_source = parse_source,
};
}
-};
-pub fn readHeader(file: File) !Header {
- var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
- try preadNoEof(file, &hdr_buf, 0);
- const hdr32 = @ptrCast(*Elf32_Ehdr, &hdr_buf);
- const hdr64 = @ptrCast(*Elf64_Ehdr, &hdr_buf);
- if (!mem.eql(u8, hdr32.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
- if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
-
- const endian: std.builtin.Endian = switch (hdr32.e_ident[EI_DATA]) {
- ELFDATA2LSB => .Little,
- ELFDATA2MSB => .Big,
- else => return error.InvalidElfEndian,
- };
- const need_bswap = endian != std.builtin.endian;
+ pub fn read(parse_source: anytype) !Header {
+ var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
+ try parse_source.seekableStream().seekTo(0);
+ try parse_source.reader().readNoEof(&hdr_buf);
+ return Header.parse(&hdr_buf);
+ }
- const is_64 = switch (hdr32.e_ident[EI_CLASS]) {
- ELFCLASS32 => false,
- ELFCLASS64 => true,
- else => return error.InvalidElfClass,
- };
+ pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header {
+ const hdr32 = @ptrCast(*const Elf32_Ehdr, hdr_buf);
+ const hdr64 = @ptrCast(*const Elf64_Ehdr, hdr_buf);
+ if (!mem.eql(u8, hdr32.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
+ if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
- return @as(Header, .{
- .endian = endian,
- .is_64 = is_64,
- .entry = int(is_64, need_bswap, hdr32.e_entry, hdr64.e_entry),
- .phoff = int(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff),
- .shoff = int(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff),
- .phentsize = int(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize),
- .phnum = int(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum),
- .shentsize = int(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize),
- .shnum = int(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum),
- .shstrndx = int(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx),
- });
-}
+ const endian: std.builtin.Endian = switch (hdr32.e_ident[EI_DATA]) {
+ ELFDATA2LSB => .Little,
+ ELFDATA2MSB => .Big,
+ else => return error.InvalidElfEndian,
+ };
+ const need_bswap = endian != std.builtin.endian;
-pub const ProgramHeaderIterator = struct {
- elf_header: Header,
- file: File,
- index: usize = 0,
+ const is_64 = switch (hdr32.e_ident[EI_CLASS]) {
+ ELFCLASS32 => false,
+ ELFCLASS64 => true,
+ else => return error.InvalidElfClass,
+ };
- pub fn next(self: *ProgramHeaderIterator) !?Elf64_Phdr {
- if (self.index >= self.elf_header.phnum) return null;
- defer self.index += 1;
+ return @as(Header, .{
+ .endian = endian,
+ .is_64 = is_64,
+ .entry = int(is_64, need_bswap, hdr32.e_entry, hdr64.e_entry),
+ .phoff = int(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff),
+ .shoff = int(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff),
+ .phentsize = int(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize),
+ .phnum = int(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum),
+ .shentsize = int(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize),
+ .shnum = int(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum),
+ .shstrndx = int(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx),
+ });
+ }
+};
- if (self.elf_header.is_64) {
- var phdr: Elf64_Phdr = undefined;
+pub fn ProgramHeaderIterator(ParseSource: anytype) type {
+ return struct {
+ elf_header: Header,
+ parse_source: ParseSource,
+ index: usize = 0,
+
+ pub fn next(self: *@This()) !?Elf64_Phdr {
+ if (self.index >= self.elf_header.phnum) return null;
+ defer self.index += 1;
+
+ if (self.elf_header.is_64) {
+ var phdr: Elf64_Phdr = undefined;
+ const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&phdr));
+
+ // ELF endianness matches native endianness.
+ if (self.elf_header.endian == std.builtin.endian) return phdr;
+
+ // Convert fields to native endianness.
+ return Elf64_Phdr{
+ .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
+ .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
+ .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
+ .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
+ .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
+ .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
+ .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
+ .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
+ };
+ }
+
+ var phdr: Elf32_Phdr = undefined;
const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&phdr), offset);
-
- // ELF endianness matches native endianness.
- if (self.elf_header.endian == std.builtin.endian) return phdr;
-
- // Convert fields to native endianness.
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&phdr));
+
+ // ELF endianness does NOT match native endianness.
+ if (self.elf_header.endian != std.builtin.endian) {
+ // Convert fields to native endianness.
+ phdr = .{
+ .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
+ .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
+ .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
+ .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
+ .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
+ .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
+ .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
+ .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
+ };
+ }
+
+ // Convert 32-bit header to 64-bit.
return Elf64_Phdr{
- .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
- .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
- .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
- .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
- .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
- .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
- .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
- .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
+ .p_type = phdr.p_type,
+ .p_offset = phdr.p_offset,
+ .p_vaddr = phdr.p_vaddr,
+ .p_paddr = phdr.p_paddr,
+ .p_filesz = phdr.p_filesz,
+ .p_memsz = phdr.p_memsz,
+ .p_flags = phdr.p_flags,
+ .p_align = phdr.p_align,
};
}
+ };
+}
- var phdr: Elf32_Phdr = undefined;
- const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&phdr), offset);
-
- // ELF endianness does NOT match native endianness.
- if (self.elf_header.endian != std.builtin.endian) {
- // Convert fields to native endianness.
- phdr = .{
- .p_type = @byteSwap(@TypeOf(phdr.p_type), phdr.p_type),
- .p_offset = @byteSwap(@TypeOf(phdr.p_offset), phdr.p_offset),
- .p_vaddr = @byteSwap(@TypeOf(phdr.p_vaddr), phdr.p_vaddr),
- .p_paddr = @byteSwap(@TypeOf(phdr.p_paddr), phdr.p_paddr),
- .p_filesz = @byteSwap(@TypeOf(phdr.p_filesz), phdr.p_filesz),
- .p_memsz = @byteSwap(@TypeOf(phdr.p_memsz), phdr.p_memsz),
- .p_flags = @byteSwap(@TypeOf(phdr.p_flags), phdr.p_flags),
- .p_align = @byteSwap(@TypeOf(phdr.p_align), phdr.p_align),
- };
- }
-
- // Convert 32-bit header to 64-bit.
- return Elf64_Phdr{
- .p_type = phdr.p_type,
- .p_offset = phdr.p_offset,
- .p_vaddr = phdr.p_vaddr,
- .p_paddr = phdr.p_paddr,
- .p_filesz = phdr.p_filesz,
- .p_memsz = phdr.p_memsz,
- .p_flags = phdr.p_flags,
- .p_align = phdr.p_align,
- };
- }
-};
-
-pub const SectionHeaderIterator = struct {
- elf_header: Header,
- file: File,
- index: usize = 0,
-
- pub fn next(self: *SectionHeaderIterator) !?Elf64_Shdr {
- if (self.index >= self.elf_header.shnum) return null;
- defer self.index += 1;
-
- if (self.elf_header.is_64) {
- var shdr: Elf64_Shdr = undefined;
+pub fn SectionHeaderIterator(ParseSource: anytype) type {
+ return struct {
+ elf_header: Header,
+ parse_source: ParseSource,
+ index: usize = 0,
+
+ pub fn next(self: *@This()) !?Elf64_Shdr {
+ if (self.index >= self.elf_header.shnum) return null;
+ defer self.index += 1;
+
+ if (self.elf_header.is_64) {
+ var shdr: Elf64_Shdr = undefined;
+ const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&shdr));
+
+ // ELF endianness matches native endianness.
+ if (self.elf_header.endian == std.builtin.endian) return shdr;
+
+ // Convert fields to native endianness.
+ return Elf64_Shdr{
+ .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
+ .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
+ .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
+ .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
+ .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
+ .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
+ .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
+ .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
+ .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
+ .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
+ };
+ }
+
+ var shdr: Elf32_Shdr = undefined;
const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&shdr), offset);
-
- // ELF endianness matches native endianness.
- if (self.elf_header.endian == std.builtin.endian) return shdr;
-
- // Convert fields to native endianness.
+ try self.parse_source.seekableStream().seekTo(offset);
+ try self.parse_source.reader().readNoEof(mem.asBytes(&shdr));
+
+ // ELF endianness does NOT match native endianness.
+ if (self.elf_header.endian != std.builtin.endian) {
+ // Convert fields to native endianness.
+ shdr = .{
+ .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
+ .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
+ .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
+ .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
+ .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
+ .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
+ .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
+ .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
+ .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
+ .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
+ };
+ }
+
+ // Convert 32-bit header to 64-bit.
return Elf64_Shdr{
- .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
- .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
- .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
- .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
- .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
- .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
- .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
- .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
- .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
- .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
+ .sh_name = shdr.sh_name,
+ .sh_type = shdr.sh_type,
+ .sh_flags = shdr.sh_flags,
+ .sh_addr = shdr.sh_addr,
+ .sh_offset = shdr.sh_offset,
+ .sh_size = shdr.sh_size,
+ .sh_link = shdr.sh_link,
+ .sh_info = shdr.sh_info,
+ .sh_addralign = shdr.sh_addralign,
+ .sh_entsize = shdr.sh_entsize,
};
}
-
- var shdr: Elf32_Shdr = undefined;
- const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
- try preadNoEof(self.file, mem.asBytes(&shdr), offset);
-
- // ELF endianness does NOT match native endianness.
- if (self.elf_header.endian != std.builtin.endian) {
- // Convert fields to native endianness.
- shdr = .{
- .sh_name = @byteSwap(@TypeOf(shdr.sh_name), shdr.sh_name),
- .sh_type = @byteSwap(@TypeOf(shdr.sh_type), shdr.sh_type),
- .sh_flags = @byteSwap(@TypeOf(shdr.sh_flags), shdr.sh_flags),
- .sh_addr = @byteSwap(@TypeOf(shdr.sh_addr), shdr.sh_addr),
- .sh_offset = @byteSwap(@TypeOf(shdr.sh_offset), shdr.sh_offset),
- .sh_size = @byteSwap(@TypeOf(shdr.sh_size), shdr.sh_size),
- .sh_link = @byteSwap(@TypeOf(shdr.sh_link), shdr.sh_link),
- .sh_info = @byteSwap(@TypeOf(shdr.sh_info), shdr.sh_info),
- .sh_addralign = @byteSwap(@TypeOf(shdr.sh_addralign), shdr.sh_addralign),
- .sh_entsize = @byteSwap(@TypeOf(shdr.sh_entsize), shdr.sh_entsize),
- };
- }
-
- // Convert 32-bit header to 64-bit.
- return Elf64_Shdr{
- .sh_name = shdr.sh_name,
- .sh_type = shdr.sh_type,
- .sh_flags = shdr.sh_flags,
- .sh_addr = shdr.sh_addr,
- .sh_offset = shdr.sh_offset,
- .sh_size = shdr.sh_size,
- .sh_link = shdr.sh_link,
- .sh_info = shdr.sh_info,
- .sh_addralign = shdr.sh_addralign,
- .sh_entsize = shdr.sh_entsize,
- };
- }
-};
+ };
+}
pub fn int(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
@@ -549,28 +562,6 @@ pub fn int32(need_bswap: bool, int_32: anytype, comptime Int64: anytype) Int64 {
}
}
-fn preadNoEof(file: std.fs.File, buf: []u8, offset: u64) !void {
- var i: usize = 0;
- while (i < buf.len) {
- const len = file.pread(buf[i .. buf.len - i], offset + i) catch |err| switch (err) {
- error.SystemResources => return error.SystemResources,
- error.IsDir => return error.UnableToReadElfFile,
- error.OperationAborted => return error.UnableToReadElfFile,
- error.BrokenPipe => return error.UnableToReadElfFile,
- error.Unseekable => return error.UnableToReadElfFile,
- error.ConnectionResetByPeer => return error.UnableToReadElfFile,
- error.ConnectionTimedOut => return error.UnableToReadElfFile,
- error.InputOutput => return error.FileSystem,
- error.Unexpected => return error.Unexpected,
- error.WouldBlock => return error.Unexpected,
- error.NotOpenForReading => return error.Unexpected,
- error.AccessDenied => return error.Unexpected,
- };
- if (len == 0) return error.UnexpectedEndOfFile;
- i += len;
- }
-}
-
pub const EI_NIDENT = 16;
pub const EI_CLASS = 4;
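Since Header.read and the two iterators are now generic over any parse source that provides reader() and seekableStream(), a std.fs.File can be passed directly, and so can a std.io.FixedBufferStream over an in-memory image. A minimal usage sketch, assuming `file` is an already opened std.fs.File (not part of this commit):

    const header = try std.elf.Header.read(file);
    var it = header.program_header_iterator(file);
    while (try it.next()) |phdr| {
        // Headers are always returned as Elf64_Phdr, converted to native endianness.
        std.debug.print("type=0x{x} vaddr=0x{x} memsz=0x{x}\n", .{ phdr.p_type, phdr.p_vaddr, phdr.p_memsz });
    }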
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index fca21000cf..1f924bf00c 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -709,6 +709,87 @@ fn formatFloatValue(
return formatBuf(buf_stream.getWritten(), options, writer);
}
+fn formatSliceHexImpl(comptime uppercase: bool) type {
+ const charset = "0123456789" ++ if (uppercase) "ABCDEF" else "abcdef";
+
+ return struct {
+ pub fn f(
+ bytes: []const u8,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ var buf: [2]u8 = undefined;
+
+ for (bytes) |c| {
+ buf[0] = charset[c >> 4];
+ buf[1] = charset[c & 15];
+ try writer.writeAll(&buf);
+ }
+ }
+ };
+}
+
+const formatSliceHexLower = formatSliceHexImpl(false).f;
+const formatSliceHexUpper = formatSliceHexImpl(true).f;
+
+/// Return a Formatter for a []const u8 where every byte is formatted as a pair
+/// of lowercase hexadecimal digits.
+pub fn fmtSliceHexLower(bytes: []const u8) std.fmt.Formatter(formatSliceHexLower) {
+ return .{ .data = bytes };
+}
+
+/// Return a Formatter for a []const u8 where every byte is formatted as a pair
+/// of uppercase hexadecimal digits.
+pub fn fmtSliceHexUpper(bytes: []const u8) std.fmt.Formatter(formatSliceHexUpper) {
+ return .{ .data = bytes };
+}
+
+fn formatSliceEscapeImpl(comptime uppercase: bool) type {
+ const charset = "0123456789" ++ if (uppercase) "ABCDEF" else "abcdef";
+
+ return struct {
+ pub fn f(
+ bytes: []const u8,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) !void {
+ var buf: [4]u8 = undefined;
+
+ buf[0] = '\\';
+ buf[1] = 'x';
+
+ for (bytes) |c| {
+ if (std.ascii.isPrint(c)) {
+ try writer.writeByte(c);
+ } else {
+ buf[2] = charset[c >> 4];
+ buf[3] = charset[c & 15];
+ try writer.writeAll(&buf);
+ }
+ }
+ }
+ };
+}
+
+const formatSliceEscapeLower = formatSliceEscapeImpl(false).f;
+const formatSliceEscapeUpper = formatSliceEscapeImpl(true).f;
+
+/// Return a Formatter for a []const u8 where every non-printable ASCII
+/// character is escaped as \xNN, where NN is the character in lowercase
+/// hexadecimal notation.
+pub fn fmtSliceEscapeLower(bytes: []const u8) std.fmt.Formatter(formatSliceEscapeLower) {
+ return .{ .data = bytes };
+}
+
+/// Return a Formatter for a []const u8 where every non-printable ASCII
+/// character is escaped as \xNN, where NN is the character in uppercase
+/// hexadecimal notation.
+pub fn fmtSliceEscapeUpper(bytes: []const u8) std.fmt.Formatter(formatSliceEscapeUpper) {
+ return .{ .data = bytes };
+}
+
pub fn formatText(
bytes: []const u8,
comptime fmt: []const u8,
@@ -717,21 +798,18 @@ pub fn formatText(
) !void {
if (comptime std.mem.eql(u8, fmt, "s")) {
return formatBuf(bytes, options, writer);
- } else if (comptime (std.mem.eql(u8, fmt, "x") or std.mem.eql(u8, fmt, "X"))) {
- for (bytes) |c| {
- try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, writer);
- }
- return;
- } else if (comptime (std.mem.eql(u8, fmt, "e") or std.mem.eql(u8, fmt, "E"))) {
- for (bytes) |c| {
- if (std.ascii.isPrint(c)) {
- try writer.writeByte(c);
- } else {
- try writer.writeAll("\\x");
- try formatInt(c, 16, fmt[0] == 'E', FormatOptions{ .width = 2, .fill = '0' }, writer);
- }
- }
- return;
+ } else if (comptime (std.mem.eql(u8, fmt, "x"))) {
+ @compileError("specifier 'x' has been deprecated, wrap your argument in std.fmt.fmtSliceHexLower instead");
+ } else if (comptime (std.mem.eql(u8, fmt, "X"))) {
+ @compileError("specifier 'X' has been deprecated, wrap your argument in std.fmt.fmtSliceHexUpper instead");
+ } else if (comptime (std.mem.eql(u8, fmt, "e"))) {
+ @compileError("specifier 'e' has been deprecated, wrap your argument in std.fmt.fmtSliceEscapeLower instead");
+ } else if (comptime (std.mem.eql(u8, fmt, "E"))) {
+ @compileError("specifier 'X' has been deprecated, wrap your argument in std.fmt.fmtSliceEscapeUpper instead");
+ } else if (comptime std.mem.eql(u8, fmt, "z")) {
+ @compileError("specifier 'z' has been deprecated, wrap your argument in std.zig.fmtId instead");
+ } else if (comptime std.mem.eql(u8, fmt, "Z")) {
+ @compileError("specifier 'Z' has been deprecated, wrap your argument in std.zig.fmtEscapes instead");
} else {
@compileError("Unsupported format string '" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'");
}
@@ -1693,9 +1771,9 @@ test "slice" {
}
test "escape non-printable" {
- try expectFmt("abc", "{e}", .{"abc"});
- try expectFmt("ab\\xffc", "{e}", .{"ab\xffc"});
- try expectFmt("ab\\xFFc", "{E}", .{"ab\xffc"});
+ try expectFmt("abc", "{s}", .{fmtSliceEscapeLower("abc")});
+ try expectFmt("ab\\xffc", "{s}", .{fmtSliceEscapeLower("ab\xffc")});
+ try expectFmt("ab\\xFFc", "{s}", .{fmtSliceEscapeUpper("ab\xffc")});
}
test "pointer" {
@@ -1968,13 +2046,13 @@ test "struct.zero-size" {
test "bytes.hex" {
const some_bytes = "\xCA\xFE\xBA\xBE";
- try expectFmt("lowercase: cafebabe\n", "lowercase: {x}\n", .{some_bytes});
- try expectFmt("uppercase: CAFEBABE\n", "uppercase: {X}\n", .{some_bytes});
+ try expectFmt("lowercase: cafebabe\n", "lowercase: {x}\n", .{fmtSliceHexLower(some_bytes)});
+ try expectFmt("uppercase: CAFEBABE\n", "uppercase: {X}\n", .{fmtSliceHexUpper(some_bytes)});
//Test Slices
- try expectFmt("uppercase: CAFE\n", "uppercase: {X}\n", .{some_bytes[0..2]});
- try expectFmt("lowercase: babe\n", "lowercase: {x}\n", .{some_bytes[2..]});
+ try expectFmt("uppercase: CAFE\n", "uppercase: {X}\n", .{fmtSliceHexUpper(some_bytes[0..2])});
+ try expectFmt("lowercase: babe\n", "lowercase: {x}\n", .{fmtSliceHexLower(some_bytes[2..])});
const bytes_with_zeros = "\x00\x0E\xBA\xBE";
- try expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{bytes_with_zeros});
+ try expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{fmtSliceHexLower(bytes_with_zeros)});
}
pub const trim = @compileError("deprecated; use std.mem.trim with std.ascii.spaces instead");
@@ -2002,9 +2080,9 @@ pub fn hexToBytes(out: []u8, input: []const u8) ![]u8 {
test "hexToBytes" {
var buf: [32]u8 = undefined;
- try expectFmt("90" ** 32, "{X}", .{try hexToBytes(&buf, "90" ** 32)});
- try expectFmt("ABCD", "{X}", .{try hexToBytes(&buf, "ABCD")});
- try expectFmt("", "{X}", .{try hexToBytes(&buf, "")});
+ try expectFmt("90" ** 32, "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, "90" ** 32))});
+ try expectFmt("ABCD", "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, "ABCD"))});
+ try expectFmt("", "{s}", .{fmtSliceHexUpper(try hexToBytes(&buf, ""))});
std.testing.expectError(error.InvalidCharacter, hexToBytes(&buf, "012Z"));
std.testing.expectError(error.InvalidLength, hexToBytes(&buf, "AAA"));
std.testing.expectError(error.NoSpaceLeft, hexToBytes(buf[0..1], "ABAB"));
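In short, the per-byte {x}/{X}/{e}/{E} specifiers on byte slices are gone; callers wrap the slice in one of the new Formatter helpers instead. A small sketch of the replacement spelling (illustrative only; `digest` is a made-up value):

    const digest = "\xCA\xFE\xBA\xBE";
    // Previously: std.debug.print("{x}\n", .{digest});
    std.debug.print("{s}\n", .{std.fmt.fmtSliceHexLower(digest)}); // cafebabe
    std.debug.print("{s}\n", .{std.fmt.fmtSliceEscapeUpper(digest)}); // \xCA\xFE\xBA\xBE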
diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig
index 18f8458eb2..02c36f736a 100644
--- a/lib/std/fs/get_app_data_dir.zig
+++ b/lib/std/fs/get_app_data_dir.zig
@@ -60,7 +60,7 @@ pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataD
var dir_path_ptr: [*:0]u8 = undefined;
// TODO look into directory_which
const be_user_settings = 0xbbe;
- const rc = os.system.find_directory(be_user_settings, -1, true, dir_path_ptr, 1) ;
+ const rc = os.system.find_directory(be_user_settings, -1, true, dir_path_ptr, 1);
const settings_dir = try allocator.dupeZ(u8, mem.spanZ(dir_path_ptr));
defer allocator.free(settings_dir);
switch (rc) {
diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig
index 0a9edb425a..6f9386b8de 100644
--- a/lib/std/io/writer.zig
+++ b/lib/std/io/writer.zig
@@ -4,6 +4,7 @@
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("../std.zig");
+const assert = std.debug.assert;
const builtin = std.builtin;
const mem = std.mem;
@@ -86,5 +87,11 @@ pub fn Writer(
mem.writeInt(T, &bytes, value, endian);
return self.writeAll(&bytes);
}
+
+ pub fn writeStruct(self: Self, value: anytype) Error!void {
+ // Only extern and packed structs have defined in-memory layout.
+ comptime assert(@typeInfo(@TypeOf(value)).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
+ return self.writeAll(mem.asBytes(&value));
+ }
};
}
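writeStruct deliberately rejects structs with automatic layout, since only extern and packed structs have a defined byte representation. A minimal sketch using a hypothetical Point struct and a fixed buffer stream (not from this commit):

    const Point = extern struct { x: i32, y: i32 };
    var buf: [@sizeOf(Point)]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    // Writes the raw in-memory bytes of the value, in native endianness.
    try fbs.writer().writeStruct(Point{ .x = 1, .y = 2 });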
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 6e7c5c0915..d71cafe5ef 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -1330,3 +1330,59 @@ test "math.comptime" {
comptime const v = sin(@as(f32, 1)) + ln(@as(f32, 5));
testing.expect(v == sin(@as(f32, 1)) + ln(@as(f32, 5)));
}
+
+/// Returns a mask of all ones if value is true,
+/// and a mask of all zeroes if value is false.
+/// Compiles to one instruction for register sized integers.
+pub fn boolMask(comptime MaskInt: type, value: bool) callconv(.Inline) MaskInt {
+ if (@typeInfo(MaskInt) != .Int)
+ @compileError("boolMask requires an integer mask type.");
+
+ if (MaskInt == u0 or MaskInt == i0)
+ @compileError("boolMask cannot convert to u0 or i0, they are too small.");
+
+ // The u1 and i1 cases tend to overflow,
+ // so we special case them here.
+ if (MaskInt == u1) return @boolToInt(value);
+ if (MaskInt == i1) {
+ // The @as here is a workaround for #7950
+ return @bitCast(i1, @as(u1, @boolToInt(value)));
+ }
+
+ // At comptime, -% is disallowed on unsigned values.
+ // So we need to jump through some hoops in that case.
+ // This is a workaround for #7951
+ if (@typeInfo(@TypeOf(.{value})).Struct.fields[0].is_comptime) {
+ // Since it's comptime, we don't need this to generate nice code.
+ // We can just do a branch here.
+ return if (value) ~@as(MaskInt, 0) else 0;
+ }
+
+ return -%@intCast(MaskInt, @boolToInt(value));
+}
+
+test "boolMask" {
+ const runTest = struct {
+ fn runTest() void {
+ testing.expectEqual(@as(u1, 0), boolMask(u1, false));
+ testing.expectEqual(@as(u1, 1), boolMask(u1, true));
+
+ testing.expectEqual(@as(i1, 0), boolMask(i1, false));
+ testing.expectEqual(@as(i1, -1), boolMask(i1, true));
+
+ testing.expectEqual(@as(u13, 0), boolMask(u13, false));
+ testing.expectEqual(@as(u13, 0x1FFF), boolMask(u13, true));
+
+ testing.expectEqual(@as(i13, 0), boolMask(i13, false));
+ testing.expectEqual(@as(i13, -1), boolMask(i13, true));
+
+ testing.expectEqual(@as(u32, 0), boolMask(u32, false));
+ testing.expectEqual(@as(u32, 0xFFFF_FFFF), boolMask(u32, true));
+
+ testing.expectEqual(@as(i32, 0), boolMask(i32, false));
+ testing.expectEqual(@as(i32, -1), boolMask(i32, true));
+ }
+ }.runTest;
+ runTest();
+ comptime runTest();
+}
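Because boolMask compiles down to a single operation for register-sized integers, it is handy for branchless selection. A small sketch built on top of it (`select` is illustrative, not a std function):

    fn select(cond: bool, a: u32, b: u32) u32 {
        // All ones when cond is true, all zeroes otherwise.
        const mask = std.math.boolMask(u32, cond);
        return (a & mask) | (b & ~mask);
    }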
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 5f23a10401..581fb16e6c 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -25,6 +25,14 @@ pub const page_size = switch (builtin.arch) {
else => 4 * 1024,
};
+/// The standard library currently thoroughly depends on byte size
+/// being 8 bits. (see the use of u8 throughout allocation code as
+/// the "byte" type.) Code which depends on this can reference this
+/// declaration. If we ever try to port the standard library to a
+/// non-8-bit-byte platform, this will allow us to search for things
+/// which need to be updated.
+pub const byte_size_in_bits = 8;
+
pub const Allocator = @import("mem/Allocator.zig");
/// Detects and asserts if the std.mem.Allocator interface is violated by the caller
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 3306fd3ef0..99a9fff7f0 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -136,7 +136,7 @@ pub fn MultiArrayList(comptime S: type) type {
const slices = self.slice();
var result: S = undefined;
inline for (fields) |field_info, i| {
- @field(elem, field_info.name) = slices.items(@intToEnum(Field, i))[index];
+ @field(result, field_info.name) = slices.items(@intToEnum(Field, i))[index];
}
return result;
}
diff --git a/lib/std/os/bits/haiku.zig b/lib/std/os/bits/haiku.zig
index 59631fd40e..32093570d7 100644
--- a/lib/std/os/bits/haiku.zig
+++ b/lib/std/os/bits/haiku.zig
@@ -180,8 +180,8 @@ pub const dirent = extern struct {
};
pub const image_info = extern struct {
- id: u32, //image_id
- type: u32, // image_type
+ id: u32,
+ type: u32,
sequence: i32,
init_order: i32,
init_routine: *c_void,
@@ -806,17 +806,16 @@ pub const Sigaction = extern struct {
pub const _SIG_WORDS = 4;
pub const _SIG_MAXSIG = 128;
-
-pub inline fn _SIG_IDX(sig: usize) usize {
+pub fn _SIG_IDX(sig: usize) callconv(.Inline) usize {
return sig - 1;
}
-pub inline fn _SIG_WORD(sig: usize) usize {
+pub fn _SIG_WORD(sig: usize) callconv(.Inline) usize {
return _SIG_IDX(sig) >> 5;
}
-pub inline fn _SIG_BIT(sig: usize) usize {
+pub fn _SIG_BIT(sig: usize) callconv(.Inline) usize {
return 1 << (_SIG_IDX(sig) & 31);
}
-pub inline fn _SIG_VALID(sig: usize) usize {
+pub fn _SIG_VALID(sig: usize) callconv(.Inline) bool {
return sig <= _SIG_MAXSIG and sig > 0;
}
diff --git a/lib/std/os/bits/linux.zig b/lib/std/os/bits/linux.zig
index 8d3d5c49a3..21fa058aef 100644
--- a/lib/std/os/bits/linux.zig
+++ b/lib/std/os/bits/linux.zig
@@ -2244,3 +2244,8 @@ pub const MADV_COLD = 20;
pub const MADV_PAGEOUT = 21;
pub const MADV_HWPOISON = 100;
pub const MADV_SOFT_OFFLINE = 101;
+
+pub const __kernel_timespec = extern struct {
+ tv_sec: i64,
+ tv_nsec: i64,
+};
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index b47d4c7b32..340020cf9b 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -526,7 +526,7 @@ pub const IO_Uring = struct {
pub fn timeout(
self: *IO_Uring,
user_data: u64,
- ts: *const os.timespec,
+ ts: *const os.__kernel_timespec,
count: u32,
flags: u32,
) !*io_uring_sqe {
@@ -884,7 +884,7 @@ pub fn io_uring_prep_close(sqe: *io_uring_sqe, fd: os.fd_t) void {
pub fn io_uring_prep_timeout(
sqe: *io_uring_sqe,
- ts: *const os.timespec,
+ ts: *const os.__kernel_timespec,
count: u32,
flags: u32,
) void {
@@ -1339,7 +1339,7 @@ test "timeout (after a relative time)" {
const ms = 10;
const margin = 5;
- const ts = os.timespec{ .tv_sec = 0, .tv_nsec = ms * 1000000 };
+ const ts = os.__kernel_timespec{ .tv_sec = 0, .tv_nsec = ms * 1000000 };
const started = std.time.milliTimestamp();
const sqe = try ring.timeout(0x55555555, &ts, 0, 0);
@@ -1366,7 +1366,7 @@ test "timeout (after a number of completions)" {
};
defer ring.deinit();
- const ts = os.timespec{ .tv_sec = 3, .tv_nsec = 0 };
+ const ts = os.__kernel_timespec{ .tv_sec = 3, .tv_nsec = 0 };
const count_completions: u64 = 1;
const sqe_timeout = try ring.timeout(0x66666666, &ts, count_completions, 0);
testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
@@ -1399,7 +1399,7 @@ test "timeout_remove" {
};
defer ring.deinit();
- const ts = os.timespec{ .tv_sec = 3, .tv_nsec = 0 };
+ const ts = os.__kernel_timespec{ .tv_sec = 3, .tv_nsec = 0 };
const sqe_timeout = try ring.timeout(0x88888888, &ts, 0, 0);
testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe_timeout.opcode);
testing.expectEqual(@as(u64, 0x88888888), sqe_timeout.user_data);
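The timeout submission helpers now take os.__kernel_timespec, whose fields are i64 on every target, which is the layout io_uring expects regardless of the C timespec definition. A minimal sketch of queueing a 10 ms relative timeout, assuming `ring` is an initialized IO_Uring (not from this commit):

    const ts = os.__kernel_timespec{ .tv_sec = 0, .tv_nsec = 10 * std.time.ns_per_ms };
    _ = try ring.timeout(0x1, &ts, 0, 0);
    _ = try ring.submit();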
diff --git a/lib/std/std.zig b/lib/std/std.zig
index c0d97a9d9c..a7e5bcb682 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -18,6 +18,8 @@ pub const BufSet = @import("buf_set.zig").BufSet;
pub const ChildProcess = @import("child_process.zig").ChildProcess;
pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringMap;
pub const DynLib = @import("dynamic_library.zig").DynLib;
+pub const DynamicBitSet = bit_set.DynamicBitSet;
+pub const DynamicBitSetUnmanaged = bit_set.DynamicBitSetUnmanaged;
pub const HashMap = hash_map.HashMap;
pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
pub const MultiArrayList = @import("multi_array_list.zig").MultiArrayList;
@@ -29,6 +31,7 @@ pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue;
pub const Progress = @import("Progress.zig");
pub const SemanticVersion = @import("SemanticVersion.zig");
pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
+pub const StaticBitSet = bit_set.StaticBitSet;
pub const StringHashMap = hash_map.StringHashMap;
pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged;
pub const StringArrayHashMap = array_hash_map.StringArrayHashMap;
@@ -40,6 +43,7 @@ pub const Thread = @import("Thread.zig");
pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig");
pub const base64 = @import("base64.zig");
+pub const bit_set = @import("bit_set.zig");
pub const build = @import("build.zig");
pub const builtin = @import("builtin.zig");
pub const c = @import("c.zig");
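With bit_set wired into std.zig, the new containers are reachable as std.StaticBitSet, std.DynamicBitSet, and std.DynamicBitSetUnmanaged. A tiny sketch using the static variant (illustrative only):

    var flags = std.StaticBitSet(8).initEmpty();
    flags.set(3);
    std.debug.assert(flags.isSet(3));
    std.debug.assert(flags.count() == 1);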
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 7a6404fbb2..9b755c2033 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -3714,7 +3714,6 @@ const Parser = struct {
if (p.eatToken(.r_paren)) |_| {
return SmallSpan{ .zero_or_one = 0 };
}
- continue;
},
.r_paren => return SmallSpan{ .zero_or_one = 0 },
else => {
@@ -3728,14 +3727,7 @@ const Parser = struct {
const param_two = while (true) {
switch (p.token_tags[p.nextToken()]) {
- .comma => {
- if (p.eatToken(.r_paren)) |_| {
- return SmallSpan{ .zero_or_one = param_one };
- }
- const param = try p.expectParamDecl();
- if (param != 0) break param;
- continue;
- },
+ .comma => {},
.r_paren => return SmallSpan{ .zero_or_one = param_one },
.colon, .r_brace, .r_bracket => {
p.tok_i -= 1;
@@ -3748,6 +3740,11 @@ const Parser = struct {
try p.warnExpected(.comma);
},
}
+ if (p.eatToken(.r_paren)) |_| {
+ return SmallSpan{ .zero_or_one = param_one };
+ }
+ const param = try p.expectParamDecl();
+ if (param != 0) break param;
} else unreachable;
var list = std.ArrayList(Node.Index).init(p.gpa);
@@ -3757,17 +3754,7 @@ const Parser = struct {
while (true) {
switch (p.token_tags[p.nextToken()]) {
- .comma => {
- if (p.token_tags[p.tok_i] == .r_paren) {
- p.tok_i += 1;
- return SmallSpan{ .multi = list.toOwnedSlice() };
- }
- const param = try p.expectParamDecl();
- if (param != 0) {
- try list.append(param);
- }
- continue;
- },
+ .comma => {},
.r_paren => return SmallSpan{ .multi = list.toOwnedSlice() },
.colon, .r_brace, .r_bracket => {
p.tok_i -= 1;
@@ -3780,6 +3767,11 @@ const Parser = struct {
try p.warnExpected(.comma);
},
}
+ if (p.eatToken(.r_paren)) |_| {
+ return SmallSpan{ .multi = list.toOwnedSlice() };
+ }
+ const param = try p.expectParamDecl();
+ if (param != 0) try list.append(param);
}
}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 2b9e3fb03c..c083d23932 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1108,6 +1108,25 @@ test "zig fmt: comment to disable/enable zig fmt first" {
);
}
+test "zig fmt: 'zig fmt: (off|on)' can be surrounded by arbitrary whitespace" {
+ try testTransform(
+ \\// Test trailing comma syntax
+ \\// zig fmt: off
+ \\
+ \\const struct_trailing_comma = struct { x: i32, y: i32, };
+ \\
+ \\// zig fmt: on
+ ,
+ \\// Test trailing comma syntax
+ \\// zig fmt: off
+ \\
+ \\const struct_trailing_comma = struct { x: i32, y: i32, };
+ \\
+ \\// zig fmt: on
+ \\
+ );
+}
+
test "zig fmt: comment to disable/enable zig fmt" {
try testTransform(
\\const a = b;
@@ -4549,6 +4568,18 @@ test "recovery: missing for payload" {
});
}
+test "recovery: missing comma in params" {
+ try testError(
+ \\fn foo(comptime bool what what) void { }
+ \\fn bar(a: i32, b: i32 c) void { }
+ \\
+ , &[_]Error{
+ .expected_token,
+ .expected_token,
+ .expected_token,
+ });
+}
+
const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index e12f7bc733..069b62af79 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -2352,18 +2352,24 @@ fn renderComments(ais: *Ais, tree: ast.Tree, start: usize, end: usize) Error!boo
}
}
- try ais.writer().print("{s}\n", .{trimmed_comment});
- index = 1 + (newline orelse return true);
-
- if (ais.disabled_offset) |disabled_offset| {
- if (mem.eql(u8, trimmed_comment, "// zig fmt: on")) {
- // write the source for which formatting was disabled directly
- // to the underlying writer, fixing up invaild whitespace
- try writeFixingWhitespace(ais.underlying_writer, tree.source[disabled_offset..index]);
- ais.disabled_offset = null;
- }
- } else if (mem.eql(u8, trimmed_comment, "// zig fmt: off")) {
+ index = 1 + (newline orelse end - 1);
+
+ const comment_content = mem.trimLeft(u8, trimmed_comment["//".len..], &std.ascii.spaces);
+ if (ais.disabled_offset != null and mem.eql(u8, comment_content, "zig fmt: on")) {
+ // Write the source for which formatting was disabled directly
+ // to the underlying writer, fixing up invalid whitespace.
+ const disabled_source = tree.source[ais.disabled_offset.?..comment_start];
+ try writeFixingWhitespace(ais.underlying_writer, disabled_source);
+ ais.disabled_offset = null;
+ // Write with the canonical single space.
+ try ais.writer().writeAll("// zig fmt: on\n");
+ } else if (ais.disabled_offset == null and mem.eql(u8, comment_content, "zig fmt: off")) {
+ // Write with the canonical single space.
+ try ais.writer().writeAll("// zig fmt: off\n");
ais.disabled_offset = index;
+ } else {
+ // Write the comment minus trailing whitespace.
+ try ais.writer().print("{s}\n", .{trimmed_comment});
}
}