From 0fe3fd01ddc2cd49c6a2b939577d16b9d2c65ea9 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Wed, 28 Aug 2024 02:35:53 +0100
Subject: std: update `std.builtin.Type` fields to follow naming conventions

The compiler actually doesn't need any functional changes for this:
Sema does reification based on the tag indices of `std.builtin.Type`
already! So, no zig1.wasm update is necessary.

This change is necessary to disallow name clashes between fields and
decls on a type, which is a prerequisite of #9938.
---
 src/codegen/c/Type.zig | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

(limited to 'src/codegen/c/Type.zig')

diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
index 018b0586d0..1e0c23a96b 100644
--- a/src/codegen/c/Type.zig
+++ b/src/codegen/c/Type.zig
@@ -669,7 +669,7 @@ const Index = enum(u32) {
 
     _,
 
-    const first_pool_index: u32 = @typeInfo(CType.Index).Enum.fields.len;
+    const first_pool_index: u32 = @typeInfo(CType.Index).@"enum".fields.len;
     const basic_hashes = init: {
         @setEvalBranchQuota(1_600);
         var basic_hashes_init: [first_pool_index]Pool.Map.Hash = undefined;
@@ -740,7 +740,7 @@ pub const Info = union(enum) {
     aggregate: Aggregate,
     function: Function,
 
-    const Tag = @typeInfo(Info).Union.tag_type.?;
+    const Tag = @typeInfo(Info).@"union".tag_type.?;
 
     pub const Pointer = struct {
         elem_ctype: CType,
@@ -783,7 +783,7 @@ pub const Info = union(enum) {
             pub fn at(slice: Field.Slice, index: usize, pool: *const Pool) Field {
                 assert(index < slice.len);
                 const extra = pool.getExtra(Pool.Field, @intCast(slice.extra_index +
-                    index * @typeInfo(Pool.Field).Struct.fields.len));
+                    index * @typeInfo(Pool.Field).@"struct".fields.len));
                 return .{
                     .name = .{ .index = extra.name },
                     .ctype = .{ .index = extra.ctype },
@@ -991,7 +991,7 @@ pub const Pool = struct {
             _,
 
             const first_named_index: u32 = 1 << 31;
-            const first_pool_index: u32 = first_named_index + @typeInfo(String.Index).Enum.fields.len;
+            const first_pool_index: u32 = first_named_index + @typeInfo(String.Index).@"enum".fields.len;
         };
 
         const Adapter = struct {
@@ -1127,7 +1127,7 @@ pub const Pool = struct {
             allocator,
             FwdDeclAnon,
             extra,
-            fields.len * @typeInfo(Field).Struct.fields.len,
+            fields.len * @typeInfo(Field).@"struct".fields.len,
         );
         for (fields, field_ctypes) |field, field_ctype| pool.addHashedExtraAssumeCapacity(
             &hasher,
@@ -1184,7 +1184,7 @@ pub const Pool = struct {
                     allocator,
                     AggregateAnon,
                     extra,
-                    aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len,
+                    aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len,
                 );
                 for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
                     .name = field.name.index,
@@ -1213,7 +1213,7 @@ pub const Pool = struct {
                     allocator,
                     Aggregate,
                     extra,
-                    aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len,
+                    aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len,
                 );
                 for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
                     .name = field.name.index,
@@ -1672,7 +1672,7 @@ pub const Pool = struct {
                 defer scratch.shrinkRetainingCapacity(scratch_top);
                 try scratch.ensureUnusedCapacity(
                     allocator,
-                    loaded_struct.field_types.len * @typeInfo(Field).Struct.fields.len,
+                    loaded_struct.field_types.len * @typeInfo(Field).@"struct".fields.len,
                 );
                 var hasher = Hasher.init;
                 var tag: Pool.Tag = .aggregate_struct;
@@ -1709,14 +1709,14 @@ pub const Pool = struct {
                 }
                 const fields_len: u32 = @intCast(@divExact(
                     scratch.items.len - scratch_top,
-                    @typeInfo(Field).Struct.fields.len,
+                    @typeInfo(Field).@"struct".fields.len,
                 ));
                 if (fields_len == 0) return CType.void;
                 try pool.ensureUnusedCapacity(allocator, 1);
                 const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
                     .fwd_decl = fwd_decl.index,
                     .fields_len = fields_len,
-                }, fields_len * @typeInfo(Field).Struct.fields.len);
+                }, fields_len * @typeInfo(Field).@"struct".fields.len);
                 pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
                 return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index);
             },
@@ -1734,7 +1734,7 @@ pub const Pool = struct {
                 const scratch_top = scratch.items.len;
                 defer scratch.shrinkRetainingCapacity(scratch_top);
                 try scratch.ensureUnusedCapacity(allocator, anon_struct_info.types.len *
-                    @typeInfo(Field).Struct.fields.len);
+                    @typeInfo(Field).@"struct".fields.len);
                 var hasher = Hasher.init;
                 for (0..anon_struct_info.types.len) |field_index| {
                     if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
@@ -1765,7 +1765,7 @@ pub const Pool = struct {
                 }
                 const fields_len: u32 = @intCast(@divExact(
                     scratch.items.len - scratch_top,
-                    @typeInfo(Field).Struct.fields.len,
+                    @typeInfo(Field).@"struct".fields.len,
                 ));
                 if (fields_len == 0) return CType.void;
                 if (kind.isForward()) {
@@ -1775,7 +1775,7 @@ pub const Pool = struct {
                         &hasher,
                         FwdDeclAnon,
                         .{ .fields_len = fields_len },
-                        fields_len * @typeInfo(Field).Struct.fields.len,
+                        fields_len * @typeInfo(Field).@"struct".fields.len,
                     );
                     pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
                     return pool.tagTrailingExtra(
@@ -1790,7 +1790,7 @@ pub const Pool = struct {
                 const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
                     .fwd_decl = fwd_decl.index,
                     .fields_len = fields_len,
-                }, fields_len * @typeInfo(Field).Struct.fields.len);
+                }, fields_len * @typeInfo(Field).@"struct".fields.len);
                 pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
                 return pool.tagTrailingExtraAssumeCapacity(hasher, .aggregate_struct, extra_index);
             },
@@ -1812,7 +1812,7 @@ pub const Pool = struct {
                 defer scratch.shrinkRetainingCapacity(scratch_top);
                 try scratch.ensureUnusedCapacity(
                     allocator,
-                    loaded_union.field_types.len * @typeInfo(Field).Struct.fields.len,
+                    loaded_union.field_types.len * @typeInfo(Field).@"struct".fields.len,
                 );
                 var hasher = Hasher.init;
                 var tag: Pool.Tag = .aggregate_union;
@@ -1850,7 +1850,7 @@ pub const Pool = struct {
                 }
                 const fields_len: u32 = @intCast(@divExact(
                     scratch.items.len - scratch_top,
-                    @typeInfo(Field).Struct.fields.len,
+                    @typeInfo(Field).@"struct".fields.len,
                 ));
                 if (!has_tag) {
                     if (fields_len == 0) return CType.void;
@@ -1860,7 +1860,7 @@ pub const Pool = struct {
                         &hasher,
                         Aggregate,
                         .{ .fwd_decl = fwd_decl.index, .fields_len = fields_len },
-                        fields_len * @typeInfo(Field).Struct.fields.len,
+                        fields_len * @typeInfo(Field).@"struct".fields.len,
                     );
                     pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
                     return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index);
@@ -1898,7 +1898,7 @@ pub const Pool = struct {
                         .id = 0,
                         .fields_len = fields_len,
                     },
-                    fields_len * @typeInfo(Field).Struct.fields.len,
+                    fields_len * @typeInfo(Field).@"struct".fields.len,
                 );
                 pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
                 break :payload_ctype pool.tagTrailingExtraAssumeCapacity(
@@ -2087,7 +2087,7 @@ pub const Pool = struct {
                 .tag = tag,
                 .data = try pool.addExtra(allocator, FwdDeclAnon, .{
                     .fields_len = fields.len,
-                }, fields.len * @typeInfo(Field).Struct.fields.len),
+                }, fields.len * @typeInfo(Field).@"struct".fields.len),
             });
             for (0..fields.len) |field_index| {
                 const field = fields.at(field_index, source_pool);
@@ -2115,11 +2115,11 @@ pub const Pool = struct {
                         .index = anon.index,
                         .id = anon.id,
                         .fields_len = aggregate_info.fields.len,
-                    }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+                    }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len),
                     .fwd_decl => |fwd_decl| try pool.addExtra(allocator, Aggregate, .{
                         .fwd_decl = pool_adapter.copy(fwd_decl).index,
                         .fields_len = aggregate_info.fields.len,
-                    }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+                    }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len),
                 },
             });
             for (0..aggregate_info.fields.len) |field_index| {
@@ -2182,7 +2182,7 @@ pub const Pool = struct {
         const init: Hasher = .{ .impl = Impl.init(0) };
 
         fn updateExtra(hasher: *Hasher, comptime Extra: type, extra: Extra, pool: *const Pool) void {
-            inline for (@typeInfo(Extra).Struct.fields) |field| {
+            inline for (@typeInfo(Extra).@"struct".fields) |field| {
                 const value = @field(extra, field.name);
                 switch (field.type) {
                     Pool.Tag, String, CType => unreachable,
@@ -2429,7 +2429,7 @@ pub const Pool = struct {
     ) !ExtraIndex {
         try pool.extra.ensureUnusedCapacity(
             allocator,
-            @typeInfo(Extra).Struct.fields.len + trailing_len,
+            @typeInfo(Extra).@"struct".fields.len + trailing_len,
         );
         defer pool.addExtraAssumeCapacity(Extra, extra);
         return @intCast(pool.extra.items.len);
@@ -2442,7 +2442,7 @@ pub const Pool = struct {
         comptime Extra: type,
         extra: Extra,
     ) void {
-        inline for (@typeInfo(Extra).Struct.fields) |field| {
+        inline for (@typeInfo(Extra).@"struct".fields) |field| {
             const value = @field(extra, field.name);
             array.appendAssumeCapacity(switch (field.type) {
                 u32 => value,
@@ -2505,7 +2505,7 @@ pub const Pool = struct {
         extra_index: ExtraIndex,
     ) struct { extra: Extra, trail: ExtraTrail } {
         var extra: Extra = undefined;
-        const fields = @typeInfo(Extra).Struct.fields;
+        const fields = @typeInfo(Extra).@"struct".fields;
         inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value|
             @field(extra, field.name) = switch (field.type) {
                 u32 => value,
--
cgit v1.2.3