diff options
| author | Andrew Kelley <andrew@ziglang.org> | 2023-06-24 16:58:19 -0700 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-06-24 16:58:19 -0700 |
| commit | 146b79af153bbd5dafda0ba12a040385c7fc58f8 (patch) | |
| tree | 67e3db8b444d65c667e314770fc983a7fc8ba293 /lib/std/hash_map.zig | |
| parent | 13853bef0df3c90633021850cc6d6abaeea03282 (diff) | |
| parent | 21ac0beb436f49fe49c6982a872f2dc48e4bea5e (diff) | |
| download | zig-146b79af153bbd5dafda0ba12a040385c7fc58f8.tar.gz zig-146b79af153bbd5dafda0ba12a040385c7fc58f8.zip | |
Merge pull request #16163 from mlugg/feat/builtins-infer-dest-ty
Infer destination type of cast builtins using result type
Diffstat (limited to 'lib/std/hash_map.zig')
| -rw-r--r-- | lib/std/hash_map.zig | 44 |
1 file changed, 22 insertions, 22 deletions
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 4f1639cd60..0afe6f9643 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -101,7 +101,7 @@ pub const StringIndexContext = struct { } pub fn hash(self: @This(), x: u32) u64 { - const x_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + x, 0); + const x_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + x, 0); return hashString(x_slice); } }; @@ -110,7 +110,7 @@ pub const StringIndexAdapter = struct { bytes: *std.ArrayListUnmanaged(u8), pub fn eql(self: @This(), a_slice: []const u8, b: u32) bool { - const b_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + b, 0); + const b_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + b, 0); return mem.eql(u8, a_slice, b_slice); } @@ -777,25 +777,25 @@ pub fn HashMapUnmanaged( fingerprint: FingerPrint = free, used: u1 = 0, - const slot_free = @bitCast(u8, Metadata{ .fingerprint = free }); - const slot_tombstone = @bitCast(u8, Metadata{ .fingerprint = tombstone }); + const slot_free = @as(u8, @bitCast(Metadata{ .fingerprint = free })); + const slot_tombstone = @as(u8, @bitCast(Metadata{ .fingerprint = tombstone })); pub fn isUsed(self: Metadata) bool { return self.used == 1; } pub fn isTombstone(self: Metadata) bool { - return @bitCast(u8, self) == slot_tombstone; + return @as(u8, @bitCast(self)) == slot_tombstone; } pub fn isFree(self: Metadata) bool { - return @bitCast(u8, self) == slot_free; + return @as(u8, @bitCast(self)) == slot_free; } pub fn takeFingerprint(hash: Hash) FingerPrint { const hash_bits = @typeInfo(Hash).Int.bits; const fp_bits = @typeInfo(FingerPrint).Int.bits; - return @truncate(FingerPrint, hash >> (hash_bits - fp_bits)); + return @as(FingerPrint, @truncate(hash >> (hash_bits - fp_bits))); } pub fn fill(self: *Metadata, fp: FingerPrint) void { @@ -899,7 +899,7 @@ pub fn HashMapUnmanaged( } fn capacityForSize(size: Size) Size { - var new_cap = 
@truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1); + var new_cap = @as(u32, @truncate((@as(u64, size) * 100) / max_load_percentage + 1)); new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable; return new_cap; } @@ -927,7 +927,7 @@ pub fn HashMapUnmanaged( if (self.metadata) |_| { self.initMetadatas(); self.size = 0; - self.available = @truncate(u32, (self.capacity() * max_load_percentage) / 100); + self.available = @as(u32, @truncate((self.capacity() * max_load_percentage) / 100)); } } @@ -942,7 +942,7 @@ pub fn HashMapUnmanaged( } fn header(self: *const Self) *Header { - return @ptrCast(*Header, @ptrCast([*]Header, @alignCast(@alignOf(Header), self.metadata.?)) - 1); + return @ptrCast(@as([*]Header, @ptrCast(@alignCast(self.metadata.?))) - 1); } fn keys(self: *const Self) [*]K { @@ -1033,7 +1033,7 @@ pub fn HashMapUnmanaged( const hash = ctx.hash(key); const mask = self.capacity() - 1; - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (metadata[0].isUsed()) { @@ -1147,7 +1147,7 @@ pub fn HashMapUnmanaged( const fingerprint = Metadata.takeFingerprint(hash); // Don't loop indefinitely when there are no empty slots. var limit = self.capacity(); - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var metadata = self.metadata.? + idx; while (!metadata[0].isFree() and limit != 0) { @@ -1325,7 +1325,7 @@ pub fn HashMapUnmanaged( const mask = self.capacity() - 1; const fingerprint = Metadata.takeFingerprint(hash); var limit = self.capacity(); - var idx = @truncate(usize, hash & mask); + var idx = @as(usize, @truncate(hash & mask)); var first_tombstone_idx: usize = self.capacity(); // invalid index var metadata = self.metadata.? + idx; @@ -1450,7 +1450,7 @@ pub fn HashMapUnmanaged( } fn initMetadatas(self: *Self) void { - @memset(@ptrCast([*]u8, self.metadata.?)[0 .. 
@sizeOf(Metadata) * self.capacity()], 0); + @memset(@as([*]u8, @ptrCast(self.metadata.?))[0 .. @sizeOf(Metadata) * self.capacity()], 0); } // This counts the number of occupied slots (not counting tombstones), which is @@ -1458,7 +1458,7 @@ pub fn HashMapUnmanaged( fn load(self: *const Self) Size { const max_load = (self.capacity() * max_load_percentage) / 100; assert(max_load >= self.available); - return @truncate(Size, max_load - self.available); + return @as(Size, @truncate(max_load - self.available)); } fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) Allocator.Error!void { @@ -1480,7 +1480,7 @@ pub fn HashMapUnmanaged( const new_cap = capacityForSize(self.size); try other.allocate(allocator, new_cap); other.initMetadatas(); - other.available = @truncate(u32, (new_cap * max_load_percentage) / 100); + other.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); var i: Size = 0; var metadata = self.metadata.?; @@ -1515,7 +1515,7 @@ pub fn HashMapUnmanaged( defer map.deinit(allocator); try map.allocate(allocator, new_cap); map.initMetadatas(); - map.available = @truncate(u32, (new_cap * max_load_percentage) / 100); + map.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100)); if (self.size != 0) { const old_capacity = self.capacity(); @@ -1558,15 +1558,15 @@ pub fn HashMapUnmanaged( const metadata = ptr + @sizeOf(Header); - const hdr = @ptrFromInt(*Header, ptr); + const hdr = @as(*Header, @ptrFromInt(ptr)); if (@sizeOf([*]V) != 0) { - hdr.values = @ptrFromInt([*]V, ptr + vals_start); + hdr.values = @as([*]V, @ptrFromInt(ptr + vals_start)); } if (@sizeOf([*]K) != 0) { - hdr.keys = @ptrFromInt([*]K, ptr + keys_start); + hdr.keys = @as([*]K, @ptrFromInt(ptr + keys_start)); } hdr.capacity = new_capacity; - self.metadata = @ptrFromInt([*]Metadata, metadata); + self.metadata = @as([*]Metadata, @ptrFromInt(metadata)); } fn deallocate(self: *Self, allocator: Allocator) void { @@ -1589,7 +1589,7 @@ pub fn 
HashMapUnmanaged( const total_size = std.mem.alignForward(usize, vals_end, max_align); - const slice = @ptrFromInt([*]align(max_align) u8, @intFromPtr(self.header()))[0..total_size]; + const slice = @as([*]align(max_align) u8, @ptrFromInt(@intFromPtr(self.header())))[0..total_size]; allocator.free(slice); self.metadata = null; |
