|           |                                               |                           |
|-----------|-----------------------------------------------|---------------------------|
| author    | mlugg <mlugg@mlugg.co.uk>                     | 2024-08-28 02:35:53 +0100 |
| committer | mlugg <mlugg@mlugg.co.uk>                     | 2024-08-28 08:39:59 +0100 |
| commit    | 0fe3fd01ddc2cd49c6a2b939577d16b9d2c65ea9      |                           |
| tree      | 2c07fddf2b6230360fe618c4de192bc2d24eeaf7 /src/codegen |                   |
| parent    | 1a178d499537b922ff05c5d0186ed5a00dbb1a9b      |                           |
std: update `std.builtin.Type` fields to follow naming conventions
The compiler actually doesn't need any functional changes for this: Sema
does reification based on the tag indices of `std.builtin.Type` already!
So, no zig1.wasm update is necessary.
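
A minimal sketch (not part of the commit) of what "reification by tag index" means in practice, assuming a compiler with the renamed fields: only the spelling in user code changes, because Sema selects the `std.builtin.Type` variant by its declaration order, not by its name.

```zig
const std = @import("std");

// After the rename, reification spells the union field in lowercase:
const U8 = @Type(.{ .int = .{ .signedness = .unsigned, .bits = 8 } });
// Previously this read `.{ .Int = ... }`. Sema only inspects the tag
// *index* of the active field, so no zig1.wasm update is needed.

test "reified type round-trips" {
    try std.testing.expect(U8 == u8);
}
```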
This change is necessary to disallow name clashes between fields and
decls on a type, which is a prerequisite of #9938.
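
An illustrative sketch of the user-facing effect, not taken from the commit itself; the helper `isUnsignedInt` is hypothetical. With lowercase field names, the union field `.int` no longer shares a name with the payload type declaration `std.builtin.Type.Int`, which is the clash #9938 needs to rule out.

```zig
const std = @import("std");

// Switching on @typeInfo now uses the renamed, lowercase tags.
fn isUnsignedInt(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .int => |info| info.signedness == .unsigned, // was `.Int` before this commit
        else => false,
    };
}

comptime {
    std.debug.assert(isUnsignedInt(u32));
    std.debug.assert(!isUnsignedInt(i32));
}
```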
Diffstat (limited to 'src/codegen')
| mode       | file                               | lines changed |
|------------|------------------------------------|---------------|
| -rw-r--r-- | src/codegen/c.zig                  | 58            |
| -rw-r--r-- | src/codegen/c/Type.zig             | 50            |
| -rw-r--r-- | src/codegen/llvm.zig               | 226           |
| -rw-r--r-- | src/codegen/llvm/BitcodeReader.zig | 6             |
| -rw-r--r-- | src/codegen/llvm/Builder.zig       | 94            |
| -rw-r--r-- | src/codegen/llvm/bitcode_writer.zig | 18           |
| -rw-r--r-- | src/codegen/spirv.zig              | 130           |
| -rw-r--r-- | src/codegen/spirv/Section.zig      | 52            |
8 files changed, 317 insertions(+), 317 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 8703e9b124..f3b8c7e72a 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1401,7 +1401,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderCType(writer, ctype); try writer.writeByte(')'); - } else if (field_ty.zigTypeTag(zcu) == .Float) { + } else if (field_ty.zigTypeTag(zcu) == .float) { try writer.writeByte('('); try dg.renderCType(writer, ctype); try writer.writeByte(')'); @@ -4473,8 +4473,8 @@ fn airCall( const callee_ty = f.typeOf(pl_op.operand); const fn_info = zcu.typeToFunc(switch (callee_ty.zigTypeTag(zcu)) { - .Fn => callee_ty, - .Pointer => callee_ty.childType(zcu), + .@"fn" => callee_ty, + .pointer => callee_ty.childType(zcu), else => unreachable, }).?; const ret_ty = Type.fromInterned(fn_info.return_type); @@ -5848,7 +5848,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.typeOf(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_is_ptr = operand_ty.zigTypeTag(zcu) == .Pointer; + const operand_is_ptr = operand_ty.zigTypeTag(zcu) == .pointer; const error_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty; const error_ty = error_union_ty.errorUnionSet(zcu); const payload_ty = error_union_ty.errorUnionPayload(zcu); @@ -7011,23 +7011,23 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } }, .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } }, .Min => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => if (use_operator) .{ .ternary = " < " } else .{ .builtin = .{ .operation = "min" } }, - .Float => .{ .builtin = .{ .operation = "min" } }, + .int => if (use_operator) .{ .ternary = " < " } else .{ .builtin = .{ .operation = "min" } }, + .float => .{ .builtin = .{ .operation = "min" } }, else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => if (use_operator) .{ .ternary = " > " } else .{ .builtin = .{ .operation = "max" } }, - .Float => .{ .builtin = .{ .operation = "max" } }, + .int => if (use_operator) .{ .ternary = " > " } else .{ .builtin = .{ .operation = "max" } }, + .float => .{ .builtin = .{ .operation = "max" } }, else => unreachable, }, .Add => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => if (use_operator) .{ .infix = " += " } else .{ .builtin = .{ .operation = "addw", .info = .bits } }, - .Float => .{ .builtin = .{ .operation = "add" } }, + .int => if (use_operator) .{ .infix = " += " } else .{ .builtin = .{ .operation = "addw", .info = .bits } }, + .float => .{ .builtin = .{ .operation = "add" } }, else => unreachable, }, .Mul => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => if (use_operator) .{ .infix = " *= " } else .{ .builtin = .{ .operation = "mulw", .info = .bits } }, - .Float => .{ .builtin = .{ .operation = "mul" } }, + .int => if (use_operator) .{ .infix = " *= " } else .{ .builtin = .{ .operation = "mulw", .info = .bits } }, + .float => .{ .builtin = .{ .operation = "mul" } }, else => unreachable, }, }; @@ -7050,38 +7050,38 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderValue(writer, switch (reduce.operation) { .Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) { - .Bool => Value.false, - .Int => try pt.intValue(scalar_ty, 0), + .bool => Value.false, + .int => try pt.intValue(scalar_ty, 0), else => unreachable, }, .And => switch (scalar_ty.zigTypeTag(zcu)) { - .Bool => Value.true, - .Int => switch 
(scalar_ty.intInfo(zcu).signedness) { + .bool => Value.true, + .int => switch (scalar_ty.intInfo(zcu).signedness) { .unsigned => try scalar_ty.maxIntScalar(pt, scalar_ty), .signed => try pt.intValue(scalar_ty, -1), }, else => unreachable, }, .Add => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => try pt.intValue(scalar_ty, 0), - .Float => try pt.floatValue(scalar_ty, 0.0), + .int => try pt.intValue(scalar_ty, 0), + .float => try pt.floatValue(scalar_ty, 0.0), else => unreachable, }, .Mul => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => try pt.intValue(scalar_ty, 1), - .Float => try pt.floatValue(scalar_ty, 1.0), + .int => try pt.intValue(scalar_ty, 1), + .float => try pt.floatValue(scalar_ty, 1.0), else => unreachable, }, .Min => switch (scalar_ty.zigTypeTag(zcu)) { - .Bool => Value.true, - .Int => try scalar_ty.maxIntScalar(pt, scalar_ty), - .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)), + .bool => Value.true, + .int => try scalar_ty.maxIntScalar(pt, scalar_ty), + .float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(zcu)) { - .Bool => Value.false, - .Int => try scalar_ty.minIntScalar(pt, scalar_ty), - .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)), + .bool => Value.false, + .int => try scalar_ty.minIntScalar(pt, scalar_ty), + .float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, }, .Initializer); @@ -7765,7 +7765,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri } fn undefPattern(comptime IntType: type) IntType { - const int_info = @typeInfo(IntType).Int; + const int_info = @typeInfo(IntType).int; const UnsignedType = std.meta.Int(.unsigned, int_info.bits); return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3))); } @@ -8027,7 +8027,7 @@ const Vectorize = struct { pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { const pt = f.object.dg.pt; const zcu = pt.zcu; - return if (ty.zigTypeTag(zcu) == .Vector) index: { + return if (ty.zigTypeTag(zcu) == .vector) index: { const local = try f.allocLocal(inst, Type.usize); try writer.writeAll("for ("); @@ -8063,7 +8063,7 @@ const Vectorize = struct { fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool { const zcu = pt.zcu; return switch (ty.zigTypeTag(zcu)) { - .Array, .Vector => return true, + .array, .vector => return true, else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null, }; } diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index 018b0586d0..1e0c23a96b 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -669,7 +669,7 @@ const Index = enum(u32) { _, - const first_pool_index: u32 = @typeInfo(CType.Index).Enum.fields.len; + const first_pool_index: u32 = @typeInfo(CType.Index).@"enum".fields.len; const basic_hashes = init: { @setEvalBranchQuota(1_600); var basic_hashes_init: [first_pool_index]Pool.Map.Hash = undefined; @@ -740,7 +740,7 @@ pub const Info = union(enum) { aggregate: Aggregate, function: Function, - const Tag = @typeInfo(Info).Union.tag_type.?; + const Tag = @typeInfo(Info).@"union".tag_type.?; pub const Pointer = struct { elem_ctype: CType, @@ -783,7 +783,7 @@ pub const Info = union(enum) { pub fn at(slice: Field.Slice, index: usize, pool: *const Pool) Field { assert(index < slice.len); const extra = pool.getExtra(Pool.Field, @intCast(slice.extra_index + - index * @typeInfo(Pool.Field).Struct.fields.len)); + index * 
@typeInfo(Pool.Field).@"struct".fields.len)); return .{ .name = .{ .index = extra.name }, .ctype = .{ .index = extra.ctype }, @@ -991,7 +991,7 @@ pub const Pool = struct { _, const first_named_index: u32 = 1 << 31; - const first_pool_index: u32 = first_named_index + @typeInfo(String.Index).Enum.fields.len; + const first_pool_index: u32 = first_named_index + @typeInfo(String.Index).@"enum".fields.len; }; const Adapter = struct { @@ -1127,7 +1127,7 @@ pub const Pool = struct { allocator, FwdDeclAnon, extra, - fields.len * @typeInfo(Field).Struct.fields.len, + fields.len * @typeInfo(Field).@"struct".fields.len, ); for (fields, field_ctypes) |field, field_ctype| pool.addHashedExtraAssumeCapacity( &hasher, @@ -1184,7 +1184,7 @@ pub const Pool = struct { allocator, AggregateAnon, extra, - aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len, + aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len, ); for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{ .name = field.name.index, @@ -1213,7 +1213,7 @@ pub const Pool = struct { allocator, Aggregate, extra, - aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len, + aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len, ); for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{ .name = field.name.index, @@ -1672,7 +1672,7 @@ pub const Pool = struct { defer scratch.shrinkRetainingCapacity(scratch_top); try scratch.ensureUnusedCapacity( allocator, - loaded_struct.field_types.len * @typeInfo(Field).Struct.fields.len, + loaded_struct.field_types.len * @typeInfo(Field).@"struct".fields.len, ); var hasher = Hasher.init; var tag: Pool.Tag = .aggregate_struct; @@ -1709,14 +1709,14 @@ pub const Pool = struct { } const fields_len: u32 = @intCast(@divExact( scratch.items.len - scratch_top, - @typeInfo(Field).Struct.fields.len, + @typeInfo(Field).@"struct".fields.len, )); if (fields_len == 0) return CType.void; try pool.ensureUnusedCapacity(allocator, 1); const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{ .fwd_decl = fwd_decl.index, .fields_len = fields_len, - }, fields_len * @typeInfo(Field).Struct.fields.len); + }, fields_len * @typeInfo(Field).@"struct".fields.len); pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); }, @@ -1734,7 +1734,7 @@ pub const Pool = struct { const scratch_top = scratch.items.len; defer scratch.shrinkRetainingCapacity(scratch_top); try scratch.ensureUnusedCapacity(allocator, anon_struct_info.types.len * - @typeInfo(Field).Struct.fields.len); + @typeInfo(Field).@"struct".fields.len); var hasher = Hasher.init; for (0..anon_struct_info.types.len) |field_index| { if (anon_struct_info.values.get(ip)[field_index] != .none) continue; @@ -1765,7 +1765,7 @@ pub const Pool = struct { } const fields_len: u32 = @intCast(@divExact( scratch.items.len - scratch_top, - @typeInfo(Field).Struct.fields.len, + @typeInfo(Field).@"struct".fields.len, )); if (fields_len == 0) return CType.void; if (kind.isForward()) { @@ -1775,7 +1775,7 @@ pub const Pool = struct { &hasher, FwdDeclAnon, .{ .fields_len = fields_len }, - fields_len * @typeInfo(Field).Struct.fields.len, + fields_len * @typeInfo(Field).@"struct".fields.len, ); pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); return pool.tagTrailingExtra( @@ -1790,7 +1790,7 @@ pub const Pool = struct { const extra_index = try pool.addHashedExtra(allocator, &hasher, 
Aggregate, .{ .fwd_decl = fwd_decl.index, .fields_len = fields_len, - }, fields_len * @typeInfo(Field).Struct.fields.len); + }, fields_len * @typeInfo(Field).@"struct".fields.len); pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); return pool.tagTrailingExtraAssumeCapacity(hasher, .aggregate_struct, extra_index); }, @@ -1812,7 +1812,7 @@ pub const Pool = struct { defer scratch.shrinkRetainingCapacity(scratch_top); try scratch.ensureUnusedCapacity( allocator, - loaded_union.field_types.len * @typeInfo(Field).Struct.fields.len, + loaded_union.field_types.len * @typeInfo(Field).@"struct".fields.len, ); var hasher = Hasher.init; var tag: Pool.Tag = .aggregate_union; @@ -1850,7 +1850,7 @@ pub const Pool = struct { } const fields_len: u32 = @intCast(@divExact( scratch.items.len - scratch_top, - @typeInfo(Field).Struct.fields.len, + @typeInfo(Field).@"struct".fields.len, )); if (!has_tag) { if (fields_len == 0) return CType.void; @@ -1860,7 +1860,7 @@ pub const Pool = struct { &hasher, Aggregate, .{ .fwd_decl = fwd_decl.index, .fields_len = fields_len }, - fields_len * @typeInfo(Field).Struct.fields.len, + fields_len * @typeInfo(Field).@"struct".fields.len, ); pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); @@ -1898,7 +1898,7 @@ pub const Pool = struct { .id = 0, .fields_len = fields_len, }, - fields_len * @typeInfo(Field).Struct.fields.len, + fields_len * @typeInfo(Field).@"struct".fields.len, ); pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); break :payload_ctype pool.tagTrailingExtraAssumeCapacity( @@ -2087,7 +2087,7 @@ pub const Pool = struct { .tag = tag, .data = try pool.addExtra(allocator, FwdDeclAnon, .{ .fields_len = fields.len, - }, fields.len * @typeInfo(Field).Struct.fields.len), + }, fields.len * @typeInfo(Field).@"struct".fields.len), }); for (0..fields.len) |field_index| { const field = fields.at(field_index, source_pool); @@ -2115,11 +2115,11 @@ pub const Pool = struct { .index = anon.index, .id = anon.id, .fields_len = aggregate_info.fields.len, - }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len), + }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len), .fwd_decl => |fwd_decl| try pool.addExtra(allocator, Aggregate, .{ .fwd_decl = pool_adapter.copy(fwd_decl).index, .fields_len = aggregate_info.fields.len, - }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len), + }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len), }, }); for (0..aggregate_info.fields.len) |field_index| { @@ -2182,7 +2182,7 @@ pub const Pool = struct { const init: Hasher = .{ .impl = Impl.init(0) }; fn updateExtra(hasher: *Hasher, comptime Extra: type, extra: Extra, pool: *const Pool) void { - inline for (@typeInfo(Extra).Struct.fields) |field| { + inline for (@typeInfo(Extra).@"struct".fields) |field| { const value = @field(extra, field.name); switch (field.type) { Pool.Tag, String, CType => unreachable, @@ -2429,7 +2429,7 @@ pub const Pool = struct { ) !ExtraIndex { try pool.extra.ensureUnusedCapacity( allocator, - @typeInfo(Extra).Struct.fields.len + trailing_len, + @typeInfo(Extra).@"struct".fields.len + trailing_len, ); defer pool.addExtraAssumeCapacity(Extra, extra); return @intCast(pool.extra.items.len); @@ -2442,7 +2442,7 @@ pub const Pool = struct { comptime Extra: type, extra: Extra, ) void { - inline for (@typeInfo(Extra).Struct.fields) |field| { + inline for (@typeInfo(Extra).@"struct".fields) |field| { const 
value = @field(extra, field.name); array.appendAssumeCapacity(switch (field.type) { u32 => value, @@ -2505,7 +2505,7 @@ pub const Pool = struct { extra_index: ExtraIndex, ) struct { extra: Extra, trail: ExtraTrail } { var extra: Extra = undefined; - const fields = @typeInfo(Extra).Struct.fields; + const fields = @typeInfo(Extra).@"struct".fields; inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value| @field(extra, field.name) = switch (field.type) { u32 => value, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1352e6e3a1..e7cb57d76e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1538,7 +1538,7 @@ pub const Object = struct { try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder); } } - if (param_ty.zigTypeTag(zcu) != .Optional) { + if (param_ty.zigTypeTag(zcu) != .optional) { try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); } if (ptr_info.flags.is_const) { @@ -1907,8 +1907,8 @@ pub const Object = struct { if (o.debug_type_map.get(ty)) |debug_type| return debug_type; switch (ty.zigTypeTag(zcu)) { - .Void, - .NoReturn, + .void, + .noreturn, => { const debug_void_type = try o.builder.debugSignedType( try o.builder.metadataString("void"), @@ -1917,7 +1917,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_void_type); return debug_void_type; }, - .Int => { + .int => { const info = ty.intInfo(zcu); assert(info.bits != 0); const name = try o.allocTypeName(ty); @@ -1931,7 +1931,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_int_type); return debug_int_type; }, - .Enum => { + .@"enum" => { if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) { const debug_enum_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_enum_type); @@ -1985,7 +1985,7 @@ pub const Object = struct { try o.debug_enums.append(gpa, debug_enum_type); return debug_enum_type; }, - .Float => { + .float => { const bits = ty.floatBits(target); const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -1996,7 +1996,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_float_type); return debug_float_type; }, - .Bool => { + .bool => { const debug_bool_type = try o.builder.debugBoolType( try o.builder.metadataString("bool"), 8, // lldb cannot handle non-byte sized types @@ -2004,7 +2004,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_bool_type); return debug_bool_type; }, - .Pointer => { + .pointer => { // Normalize everything that the debug info does not represent. const ptr_info = ty.ptrInfo(zcu); @@ -2126,7 +2126,7 @@ pub const Object = struct { return debug_ptr_type; }, - .Opaque => { + .@"opaque" => { if (ty.toIntern() == .anyopaque_type) { const debug_opaque_type = try o.builder.debugSignedType( try o.builder.metadataString("anyopaque"), @@ -2158,7 +2158,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_opaque_type); return debug_opaque_type; }, - .Array => { + .array => { const debug_array_type = try o.builder.debugArrayType( .none, // Name .none, // File @@ -2177,14 +2177,14 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_array_type); return debug_array_type; }, - .Vector => { + .vector => { const elem_ty = ty.elemType2(zcu); // Vector elements cannot be padded since that would make // @bitSizOf(elem) * len > @bitSizOf(vec). // Neither gdb nor lldb seem to be able to display non-byte sized // vectors properly. 
const debug_elem_type = switch (elem_ty.zigTypeTag(zcu)) { - .Int => blk: { + .int => blk: { const info = elem_ty.intInfo(zcu); assert(info.bits != 0); const name = try o.allocTypeName(ty); @@ -2195,7 +2195,7 @@ pub const Object = struct { .unsigned => try o.builder.debugUnsignedType(builder_name, info.bits), }; }, - .Bool => try o.builder.debugBoolType( + .bool => try o.builder.debugBoolType( try o.builder.metadataString("bool"), 1, ), @@ -2221,7 +2221,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_vector_type); return debug_vector_type; }, - .Optional => { + .optional => { const name = try o.allocTypeName(ty); defer gpa.free(name); const child_ty = ty.optionalChild(zcu); @@ -2302,7 +2302,7 @@ pub const Object = struct { return debug_optional_type; }, - .ErrorUnion => { + .error_union => { const payload_ty = ty.errorUnionPayload(zcu); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { // TODO: Maybe remove? @@ -2375,7 +2375,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_error_union_type); return debug_error_union_type; }, - .ErrorSet => { + .error_set => { const debug_error_set = try o.builder.debugUnsignedType( try o.builder.metadataString("anyerror"), 16, @@ -2383,7 +2383,7 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_error_set); return debug_error_set; }, - .Struct => { + .@"struct" => { const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -2531,7 +2531,7 @@ pub const Object = struct { return debug_struct_type; }, - .Union => { + .@"union" => { const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -2693,7 +2693,7 @@ pub const Object = struct { return debug_tagged_union_type; }, - .Fn => { + .@"fn" => { const fn_info = zcu.typeToFunc(ty).?; var debug_param_types = std.ArrayList(Builder.Metadata).init(gpa); @@ -2741,15 +2741,15 @@ pub const Object = struct { try o.debug_type_map.put(gpa, ty, debug_function_type); return debug_function_type; }, - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .EnumLiteral => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .type => unreachable, + .undefined => unreachable, + .null => unreachable, + .enum_literal => unreachable, - .Frame => @panic("TODO implement lowerDebugType for Frame types"), - .AnyFrame => @panic("TODO implement lowerDebugType for AnyFrame types"), + .frame => @panic("TODO implement lowerDebugType for Frame types"), + .@"anyframe" => @panic("TODO implement lowerDebugType for AnyFrame types"), } } @@ -3539,9 +3539,9 @@ pub const Object = struct { const pt = o.pt; const zcu = pt.zcu; const lower_elem_ty = switch (elem_ty.zigTypeTag(zcu)) { - .Opaque => true, - .Fn => !zcu.typeToFunc(elem_ty).?.is_generic, - .Array => elem_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu), + .@"opaque" => true, + .@"fn" => !zcu.typeToFunc(elem_ty).?.is_generic, + .array => elem_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu), else => elem_ty.hasRuntimeBitsIgnoreComptime(zcu), }; return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8; @@ -3883,7 +3883,7 @@ pub const Object = struct { }, else => |payload| try o.lowerValue(payload), }; - assert(payload_ty.zigTypeTag(zcu) != .Fn); + assert(payload_ty.zigTypeTag(zcu) != .@"fn"); var fields: [3]Builder.Type = undefined; var vals: [3]Builder.Constant = undefined; @@ -4303,7 +4303,7 @@ pub const Object = struct { .field => |field| { const agg_ty = 
Value.fromInterned(field.base).typeOf(zcu).childType(zcu); const field_off: u64 = switch (agg_ty.zigTypeTag(zcu)) { - .Pointer => off: { + .pointer => off: { assert(agg_ty.isSlice(zcu)); break :off switch (field.index) { Value.slice_ptr_index => 0, @@ -4311,7 +4311,7 @@ pub const Object = struct { else => unreachable, }; }, - .Struct, .Union => switch (agg_ty.containerLayout(zcu)) { + .@"struct", .@"union" => switch (agg_ty.containerLayout(zcu)) { .auto => agg_ty.structFieldOffset(@intCast(field.index), zcu), .@"extern", .@"packed" => unreachable, }, @@ -4344,7 +4344,7 @@ pub const Object = struct { const ptr_ty = Type.fromInterned(uav.orig_ty); - const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn; + const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn"; if ((!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) or (is_fn_body and zcu.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); @@ -4383,7 +4383,7 @@ pub const Object = struct { const nav_ty = Type.fromInterned(owner_nav.typeOf(ip)); const ptr_ty = try pt.navPtrType(owner_nav_index); - const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn; + const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn"; if ((!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) or (is_fn_body and zcu.typeToFunc(nav_ty).?.is_generic)) { @@ -4435,13 +4435,13 @@ pub const Object = struct { const pt = o.pt; const zcu = pt.zcu; const int_ty = switch (ty.zigTypeTag(zcu)) { - .Int => ty, - .Enum => ty.intTagType(zcu), - .Float => { + .int => ty, + .@"enum" => ty.intTagType(zcu), + .float => { if (!is_rmw_xchg) return .none; return o.builder.intType(@intCast(ty.abiSize(zcu) * 8)); }, - .Bool => return .i8, + .bool => return .i8, else => return .none, }; const bit_count = int_ty.intInfo(zcu).bits; @@ -4693,7 +4693,7 @@ pub const NavGen = struct { const global_index = o.nav_map.get(nav_index).?; const decl_name = decl_name: { - if (zcu.getTarget().isWasm() and ty.zigTypeTag(zcu) == .Fn) { + if (zcu.getTarget().isWasm() and ty.zigTypeTag(zcu) == .@"fn") { if (lib_name.toSlice(ip)) |lib_name_slice| { if (!std.mem.eql(u8, lib_name_slice, "c")) { break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ nav.name.fmt(ip), lib_name_slice }); @@ -5192,8 +5192,8 @@ pub const FuncGen = struct { const ip = &zcu.intern_pool; const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) { - .Fn => callee_ty, - .Pointer => callee_ty.childType(zcu), + .@"fn" => callee_ty, + .pointer => callee_ty.childType(zcu), else => unreachable, }; const fn_info = zcu.typeToFunc(zig_fn_ty).?; @@ -5410,7 +5410,7 @@ pub const FuncGen = struct { try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder); } } - if (param_ty.zigTypeTag(zcu) != .Optional) { + if (param_ty.zigTypeTag(zcu) != .optional) { try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder); } if (ptr_info.flags.is_const) { @@ -5773,9 +5773,9 @@ pub const FuncGen = struct { const zcu = pt.zcu; const scalar_ty = operand_ty.scalarType(zcu); const int_ty = switch (scalar_ty.zigTypeTag(zcu)) { - .Enum => scalar_ty.intTagType(zcu), - .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, - .Optional => blk: { + .@"enum" => scalar_ty.intTagType(zcu), + .int, .bool, .pointer, .error_set => scalar_ty, + .optional => blk: { const payload_ty = operand_ty.optionalChild(zcu); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or operand_ty.optionalReprIsPayload(zcu)) @@ -5848,7 +5848,7 @@ pub const FuncGen = struct { ); return phi.toValue(); }, - .Float => return self.buildFloatCmp(fast, op, operand_ty, .{ lhs, rhs 
}), + .float => return self.buildFloatCmp(fast, op, operand_ty, .{ lhs, rhs }), else => unreachable, }; const is_signed = int_ty.isSignedInt(zcu); @@ -5909,7 +5909,7 @@ pub const FuncGen = struct { // a pointer to it. LLVM IR allows the call instruction to use function bodies instead // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. - if (inst_ty.zigTypeTag(zcu) == .Fn or isByRef(inst_ty, zcu)) { + if (inst_ty.zigTypeTag(zcu) == .@"fn" or isByRef(inst_ty, zcu)) { break :ty .ptr; } break :ty raw_llvm_ty; @@ -6605,7 +6605,7 @@ pub const FuncGen = struct { if (!isByRef(struct_ty, zcu)) { assert(!isByRef(field_ty, zcu)); switch (struct_ty.zigTypeTag(zcu)) { - .Struct => switch (struct_ty.containerLayout(zcu)) { + .@"struct" => switch (struct_ty.containerLayout(zcu)) { .@"packed" => { const struct_type = zcu.typeToStruct(struct_ty).?; const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index); @@ -6614,7 +6614,7 @@ pub const FuncGen = struct { try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset); const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, ""); const elem_llvm_ty = try o.lowerType(field_ty); - if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) { + if (field_ty.zigTypeTag(zcu) == .float or field_ty.zigTypeTag(zcu) == .vector) { const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu))); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); @@ -6632,11 +6632,11 @@ pub const FuncGen = struct { return self.wip.extractValue(struct_llvm_val, &.{llvm_field_index}, ""); }, }, - .Union => { + .@"union" => { assert(struct_ty.containerLayout(zcu) == .@"packed"); const containing_int = struct_llvm_val; const elem_llvm_ty = try o.lowerType(field_ty); - if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) { + if (field_ty.zigTypeTag(zcu) == .float or field_ty.zigTypeTag(zcu) == .vector) { const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu))); const truncated_int = try self.wip.cast(.trunc, containing_int, same_size_int, ""); @@ -6654,7 +6654,7 @@ pub const FuncGen = struct { } switch (struct_ty.zigTypeTag(zcu)) { - .Struct => { + .@"struct" => { const layout = struct_ty.containerLayout(zcu); assert(layout != .@"packed"); const struct_llvm_ty = try o.lowerType(struct_ty); @@ -6677,7 +6677,7 @@ pub const FuncGen = struct { return self.load(field_ptr, field_ptr_ty); } }, - .Union => { + .@"union" => { const union_llvm_ty = try o.lowerType(struct_ty); const layout = struct_ty.unionGetLayout(zcu); const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); @@ -6934,7 +6934,7 @@ pub const FuncGen = struct { if (output != .none) { const output_inst = try self.resolveInst(output); const output_ty = self.typeOf(output); - assert(output_ty.zigTypeTag(zcu) == .Pointer); + assert(output_ty.zigTypeTag(zcu) == .pointer); const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(zcu)); switch (constraint[0]) { @@ -8280,7 +8280,7 @@ pub const FuncGen = struct { .gte => .sge, }; - if (ty.zigTypeTag(zcu) == .Vector) { + if (ty.zigTypeTag(zcu) == .vector) { const vec_len = ty.vectorLen(zcu); const vector_result_ty = try o.builder.vectorType(.normal, vec_len, .i32); @@ -8457,7 +8457,7 @@ pub const FuncGen = struct { ([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len], scalar_llvm_ty, ); - if (ty.zigTypeTag(zcu) == .Vector) { + if 
(ty.zigTypeTag(zcu) == .vector) { const result = try o.builder.poisonValue(llvm_ty); return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen(zcu)); } @@ -8658,7 +8658,7 @@ pub const FuncGen = struct { const scalar_ty = operand_ty.scalarType(zcu); switch (scalar_ty.zigTypeTag(zcu)) { - .Int => return self.wip.callIntrinsic( + .int => return self.wip.callIntrinsic( .normal, .none, .abs, @@ -8666,7 +8666,7 @@ pub const FuncGen = struct { &.{ operand, try o.builder.intValue(.i1, 0) }, "", ), - .Float => return self.buildFloatOp(.fabs, .normal, operand_ty, 1, .{operand}), + .float => return self.buildFloatOp(.fabs, .normal, operand_ty, 1, .{operand}), else => unreachable, } } @@ -8806,11 +8806,11 @@ pub const FuncGen = struct { return self.wip.conv(.unsigned, operand, llvm_dest_ty, ""); } - if (operand_ty.zigTypeTag(zcu) == .Int and inst_ty.isPtrAtRuntime(zcu)) { + if (operand_ty.zigTypeTag(zcu) == .int and inst_ty.isPtrAtRuntime(zcu)) { return self.wip.cast(.inttoptr, operand, llvm_dest_ty, ""); } - if (operand_ty.zigTypeTag(zcu) == .Vector and inst_ty.zigTypeTag(zcu) == .Array) { + if (operand_ty.zigTypeTag(zcu) == .vector and inst_ty.zigTypeTag(zcu) == .array) { const elem_ty = operand_ty.childType(zcu); if (!result_is_ref) { return self.ng.todo("implement bitcast vector to non-ref array", .{}); @@ -8837,7 +8837,7 @@ pub const FuncGen = struct { } } return array_ptr; - } else if (operand_ty.zigTypeTag(zcu) == .Array and inst_ty.zigTypeTag(zcu) == .Vector) { + } else if (operand_ty.zigTypeTag(zcu) == .array and inst_ty.zigTypeTag(zcu) == .vector) { const elem_ty = operand_ty.childType(zcu); const llvm_vector_ty = try o.lowerType(inst_ty); if (!operand_is_ref) return self.ng.todo("implement bitcast non-ref array to vector", .{}); @@ -8883,7 +8883,7 @@ pub const FuncGen = struct { } if (llvm_dest_ty.isStruct(&o.builder) or - ((operand_ty.zigTypeTag(zcu) == .Vector or inst_ty.zigTypeTag(zcu) == .Vector) and + ((operand_ty.zigTypeTag(zcu) == .vector or inst_ty.zigTypeTag(zcu) == .vector) and operand_ty.bitSize(zcu) != inst_ty.bitSize(zcu))) { // Both our operand and our result are values, not pointers, @@ -9687,7 +9687,7 @@ pub const FuncGen = struct { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte // The truncated result at the end will be the correct bswap const scalar_ty = try o.builder.intType(@intCast(bits + 8)); - if (operand_ty.zigTypeTag(zcu) == .Vector) { + if (operand_ty.zigTypeTag(zcu) == .vector) { const vec_len = operand_ty.vectorLen(zcu); llvm_operand_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty); } else llvm_operand_ty = scalar_ty; @@ -9993,7 +9993,7 @@ pub const FuncGen = struct { else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), .Min, .Max => switch (scalar_ty.zigTypeTag(zcu)) { - .Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) { + .int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) { .Min => if (scalar_ty.isSignedInt(zcu)) .@"vector.reduce.smin" else @@ -10004,7 +10004,7 @@ pub const FuncGen = struct { .@"vector.reduce.umax", else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), - .Float => if (intrinsicsAllowed(scalar_ty, target)) + .float => if (intrinsicsAllowed(scalar_ty, target)) return self.wip.callIntrinsic(fast, .none, switch (reduce.operation) { .Min => .@"vector.reduce.fmin", .Max => .@"vector.reduce.fmax", @@ -10013,12 +10013,12 @@ pub const FuncGen = struct { else => unreachable, }, .Add, .Mul => switch (scalar_ty.zigTypeTag(zcu)) { - 
.Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) { + .int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) { .Add => .@"vector.reduce.add", .Mul => .@"vector.reduce.mul", else => unreachable, }, &.{llvm_operand_ty}, &.{operand}, ""), - .Float => if (intrinsicsAllowed(scalar_ty, target)) + .float => if (intrinsicsAllowed(scalar_ty, target)) return self.wip.callIntrinsic(fast, .none, switch (reduce.operation) { .Add => .@"vector.reduce.fadd", .Mul => .@"vector.reduce.fmul", @@ -10095,7 +10095,7 @@ pub const FuncGen = struct { const llvm_result_ty = try o.lowerType(result_ty); switch (result_ty.zigTypeTag(zcu)) { - .Vector => { + .vector => { var vector = try o.builder.poisonValue(llvm_result_ty); for (elements, 0..) |elem, i| { const index_u32 = try o.builder.intValue(.i32, i); @@ -10104,7 +10104,7 @@ pub const FuncGen = struct { } return vector; }, - .Struct => { + .@"struct" => { if (zcu.typeToPackedStruct(result_ty)) |struct_type| { const backing_int_ty = struct_type.backingIntTypeUnordered(ip); assert(backing_int_ty != .none); @@ -10170,7 +10170,7 @@ pub const FuncGen = struct { return result; } }, - .Array => { + .array => { assert(isByRef(result_ty, zcu)); const llvm_usize = try o.lowerType(Type.usize); @@ -10577,7 +10577,7 @@ pub const FuncGen = struct { const zcu = pt.zcu; const struct_ty = struct_ptr_ty.childType(zcu); switch (struct_ty.zigTypeTag(zcu)) { - .Struct => switch (struct_ty.containerLayout(zcu)) { + .@"struct" => switch (struct_ty.containerLayout(zcu)) { .@"packed" => { const result_ty = self.typeOfIndex(inst); const result_ty_info = result_ty.ptrInfo(zcu); @@ -10618,7 +10618,7 @@ pub const FuncGen = struct { } }, }, - .Union => { + .@"union" => { const layout = struct_ty.unionGetLayout(zcu); if (layout.payload_size == 0 or struct_ty.containerLayout(zcu) == .@"packed") return struct_ptr; const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align)); @@ -10761,7 +10761,7 @@ pub const FuncGen = struct { return result_ptr; } - if (elem_ty.zigTypeTag(zcu) == .Float or elem_ty.zigTypeTag(zcu) == .Vector) { + if (elem_ty.zigTypeTag(zcu) == .float or elem_ty.zigTypeTag(zcu) == .vector) { const same_size_int = try o.builder.intType(@intCast(elem_bits)); const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, ""); return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, ""); @@ -11432,7 +11432,7 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; if (ty.isSlice(zcu) or - (ty.zigTypeTag(zcu) == .Optional and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu))) + (ty.zigTypeTag(zcu) == .optional and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu))) { it.llvm_index += 1; return .slice; @@ -11707,8 +11707,8 @@ fn ccAbiPromoteInt( else => {}, } const int_info = switch (ty.zigTypeTag(zcu)) { - .Bool => Type.u1.intInfo(zcu), - .Int, .Enum, .ErrorSet => ty.intInfo(zcu), + .bool => Type.u1.intInfo(zcu), + .int, .@"enum", .error_set => ty.intInfo(zcu), else => return null, }; return switch (target.os.tag) { @@ -11753,30 +11753,30 @@ fn isByRef(ty: Type, zcu: *Zcu) bool { const ip = &zcu.intern_pool; switch (ty.zigTypeTag(zcu)) { - .Type, - .ComptimeInt, - .ComptimeFloat, - .EnumLiteral, - .Undefined, - .Null, - .Opaque, + .type, + .comptime_int, + .comptime_float, + .enum_literal, + .undefined, + .null, + .@"opaque", => unreachable, - .NoReturn, - .Void, - .Bool, - .Int, - .Float, - .Pointer, - .ErrorSet, - .Fn, - .Enum, - .Vector, - 
.AnyFrame, + .noreturn, + .void, + .bool, + .int, + .float, + .pointer, + .error_set, + .@"fn", + .@"enum", + .vector, + .@"anyframe", => return false, - .Array, .Frame => return ty.hasRuntimeBits(zcu), - .Struct => { + .array, .frame => return ty.hasRuntimeBits(zcu), + .@"struct" => { const struct_type = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var count: usize = 0; @@ -11807,18 +11807,18 @@ fn isByRef(ty: Type, zcu: *Zcu) bool { } return false; }, - .Union => switch (ty.containerLayout(zcu)) { + .@"union" => switch (ty.containerLayout(zcu)) { .@"packed" => return false, else => return ty.hasRuntimeBits(zcu), }, - .ErrorUnion => { + .error_union => { const payload_ty = ty.errorUnionPayload(zcu); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { return false; } return true; }, - .Optional => { + .optional => { const payload_ty = ty.optionalChild(zcu); if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { return false; @@ -11833,21 +11833,21 @@ fn isByRef(ty: Type, zcu: *Zcu) bool { fn isScalar(zcu: *Zcu, ty: Type) bool { return switch (ty.zigTypeTag(zcu)) { - .Void, - .Bool, - .NoReturn, - .Int, - .Float, - .Pointer, - .Optional, - .ErrorSet, - .Enum, - .AnyFrame, - .Vector, + .void, + .bool, + .noreturn, + .int, + .float, + .pointer, + .optional, + .error_set, + .@"enum", + .@"anyframe", + .vector, => true, - .Struct => ty.containerLayout(zcu) == .@"packed", - .Union => ty.containerLayout(zcu) == .@"packed", + .@"struct" => ty.containerLayout(zcu) == .@"packed", + .@"union" => ty.containerLayout(zcu) == .@"packed", else => false, }; } diff --git a/src/codegen/llvm/BitcodeReader.zig b/src/codegen/llvm/BitcodeReader.zig index 668e610a69..940b965340 100644 --- a/src/codegen/llvm/BitcodeReader.zig +++ b/src/codegen/llvm/BitcodeReader.zig @@ -273,7 +273,7 @@ fn startBlock(bc: *BitcodeReader, block_id: ?u32, new_abbrev_len: u6) !void { }; try state.abbrevs.abbrevs.ensureTotalCapacity( bc.allocator, - @typeInfo(Abbrev.Builtin).Enum.fields.len + abbrevs.len, + @typeInfo(Abbrev.Builtin).@"enum".fields.len + abbrevs.len, ); assert(state.abbrevs.abbrevs.items.len == @intFromEnum(Abbrev.Builtin.end_block)); @@ -309,7 +309,7 @@ fn startBlock(bc: *BitcodeReader, block_id: ?u32, new_abbrev_len: u6) !void { .{ .encoding = .{ .vbr = 6 } }, // ops }, }); - assert(state.abbrevs.abbrevs.items.len == @typeInfo(Abbrev.Builtin).Enum.fields.len); + assert(state.abbrevs.abbrevs.items.len == @typeInfo(Abbrev.Builtin).@"enum".fields.len); for (abbrevs) |abbrev| try state.abbrevs.addAbbrevAssumeCapacity(bc.allocator, abbrev); } @@ -448,7 +448,7 @@ const Abbrev = struct { define_abbrev, unabbrev_record, - const first_record_id: u32 = std.math.maxInt(u32) - @typeInfo(Builtin).Enum.fields.len + 1; + const first_record_id: u32 = std.math.maxInt(u32) - @typeInfo(Builtin).@"enum".fields.len + 1; fn toRecordId(builtin: Builtin) u32 { return first_record_id + @intFromEnum(builtin); } diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 9ada51acad..50b43319da 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -1115,7 +1115,7 @@ pub const Attribute = union(Kind) { => |kind| { const field = comptime blk: { @setEvalBranchQuota(10_000); - for (@typeInfo(Attribute).Union.fields) |field| { + for (@typeInfo(Attribute).@"union".fields) |field| { if (std.mem.eql(u8, field.name, @tagName(kind))) break :blk field; } unreachable; @@ -1232,11 +1232,11 @@ pub const Attribute = union(Kind) { .dereferenceable_or_null, => |size| try writer.print(" 
{s}({d})", .{ @tagName(attribute), size }), .nofpclass => |fpclass| { - const Int = @typeInfo(FpClass).Struct.backing_integer.?; + const Int = @typeInfo(FpClass).@"struct".backing_integer.?; try writer.print(" {s}(", .{@tagName(attribute)}); var any = false; var remaining: Int = @bitCast(fpclass); - inline for (@typeInfo(FpClass).Struct.decls) |decl| { + inline for (@typeInfo(FpClass).@"struct".decls) |decl| { const pattern: Int = @bitCast(@field(FpClass, decl.name)); if (remaining & pattern == pattern) { if (!any) { @@ -1259,7 +1259,7 @@ pub const Attribute = union(Kind) { .allockind => |allockind| { try writer.print(" {s}(\"", .{@tagName(attribute)}); var any = false; - inline for (@typeInfo(AllocKind).Struct.fields) |field| { + inline for (@typeInfo(AllocKind).@"struct".fields) |field| { if (comptime std.mem.eql(u8, field.name, "_")) continue; if (@field(allockind, field.name)) { if (!any) { @@ -1418,7 +1418,7 @@ pub const Attribute = union(Kind) { none = std.math.maxInt(u32), _, - pub const len = @typeInfo(Kind).Enum.fields.len - 2; + pub const len = @typeInfo(Kind).@"enum".fields.len - 2; pub fn fromString(str: String) Kind { assert(!str.isAnon()); @@ -5037,7 +5037,7 @@ pub const Function = struct { index: Instruction.ExtraIndex, ) struct { data: T, trail: ExtraDataTrail } { var result: T = undefined; - const fields = @typeInfo(T).Struct.fields; + const fields = @typeInfo(T).@"struct".fields; inline for (fields, self.extra[index..][0..fields.len]) |field, value| @field(result, field.name) = switch (field.type) { u32 => value, @@ -6151,7 +6151,7 @@ pub const WipFunction = struct { fn addExtra(wip_extra: *@This(), extra: anytype) Instruction.ExtraIndex { const result = wip_extra.index; - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| { const value = @field(extra, field.name); wip_extra.items[wip_extra.index] = switch (field.type) { u32 => value, @@ -6175,7 +6175,7 @@ pub const WipFunction = struct { } fn appendSlice(wip_extra: *@This(), slice: anytype) void { - if (@typeInfo(@TypeOf(slice)).Pointer.child == Value) + if (@typeInfo(@TypeOf(slice)).pointer.child == Value) @compileError("use appendMappedValues"); const data: []const u32 = @ptrCast(slice); @memcpy(wip_extra.items[wip_extra.index..][0..data.len], data); @@ -6760,7 +6760,7 @@ pub const WipFunction = struct { ) Allocator.Error!void { try self.extra.ensureUnusedCapacity( self.builder.gpa, - count * (@typeInfo(Extra).Struct.fields.len + trail_len), + count * (@typeInfo(Extra).@"struct".fields.len + trail_len), ); } @@ -6799,7 +6799,7 @@ pub const WipFunction = struct { fn addExtraAssumeCapacity(self: *WipFunction, extra: anytype) Instruction.ExtraIndex { const result: Instruction.ExtraIndex = @intCast(self.extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| { const value = @field(extra, field.name); self.extra.appendAssumeCapacity(switch (field.type) { u32 => value, @@ -6848,7 +6848,7 @@ pub const WipFunction = struct { index: Instruction.ExtraIndex, ) struct { data: T, trail: ExtraDataTrail } { var result: T = undefined; - const fields = @typeInfo(T).Struct.fields; + const fields = @typeInfo(T).@"struct".fields; inline for (fields, self.extra.items[index..][0..fields.len]) |field, value| @field(result, field.name) = switch (field.type) { u32 => value, @@ -7926,17 +7926,17 @@ pub const Metadata = enum(u32) { writer: anytype, ) @TypeOf(writer).Error!void 
{ var need_pipe = false; - inline for (@typeInfo(DIFlags).Struct.fields) |field| { + inline for (@typeInfo(DIFlags).@"struct".fields) |field| { switch (@typeInfo(field.type)) { - .Bool => if (@field(self, field.name)) { + .bool => if (@field(self, field.name)) { if (need_pipe) try writer.writeAll(" | ") else need_pipe = true; try writer.print("DIFlag{s}", .{field.name}); }, - .Enum => if (@field(self, field.name) != .Zero) { + .@"enum" => if (@field(self, field.name) != .Zero) { if (need_pipe) try writer.writeAll(" | ") else need_pipe = true; try writer.print("DIFlag{s}", .{@tagName(@field(self, field.name))}); }, - .Int => assert(@field(self, field.name) == 0), + .int => assert(@field(self, field.name) == 0), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), } @@ -7988,17 +7988,17 @@ pub const Metadata = enum(u32) { writer: anytype, ) @TypeOf(writer).Error!void { var need_pipe = false; - inline for (@typeInfo(DISPFlags).Struct.fields) |field| { + inline for (@typeInfo(DISPFlags).@"struct".fields) |field| { switch (@typeInfo(field.type)) { - .Bool => if (@field(self, field.name)) { + .bool => if (@field(self, field.name)) { if (need_pipe) try writer.writeAll(" | ") else need_pipe = true; try writer.print("DISPFlag{s}", .{field.name}); }, - .Enum => if (@field(self, field.name) != .Zero) { + .@"enum" => if (@field(self, field.name) != .Zero) { if (need_pipe) try writer.writeAll(" | ") else need_pipe = true; try writer.print("DISPFlag{s}", .{@tagName(@field(self, field.name))}); }, - .Int => assert(@field(self, field.name) == 0), + .int => assert(@field(self, field.name) == 0), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), } @@ -8281,16 +8281,16 @@ pub const Metadata = enum(u32) { }!std.fmt.Formatter(format) { const Node = @TypeOf(node); const MaybeNode = switch (@typeInfo(Node)) { - .Optional => Node, - .Null => ?noreturn, + .optional => Node, + .null => ?noreturn, else => ?Node, }; - const Some = @typeInfo(MaybeNode).Optional.child; + const Some = @typeInfo(MaybeNode).optional.child; return .{ .data = .{ .formatter = formatter, .prefix = prefix, .node = if (@as(MaybeNode, node)) |some| switch (@typeInfo(Some)) { - .Enum => |enum_info| switch (Some) { + .@"enum" => |enum_info| switch (Some) { Metadata => switch (some) { .none => .none, else => try formatter.refUnwrapped(some.unwrap(formatter.builder)), @@ -8301,18 +8301,18 @@ pub const Metadata = enum(u32) { else @compileError("unknown type to format: " ++ @typeName(Node)), }, - .EnumLiteral => .{ .raw = @tagName(some) }, - .Bool => .{ .bool = some }, - .Struct => switch (Some) { + .enum_literal => .{ .raw = @tagName(some) }, + .bool => .{ .bool = some }, + .@"struct" => switch (Some) { DIFlags => .{ .di_flags = some }, Subprogram.DISPFlags => .{ .sp_flags = some }, else => @compileError("unknown type to format: " ++ @typeName(Node)), }, - .Int, .ComptimeInt => .{ .u64 = some }, - .Pointer => .{ .raw = some }, + .int, .comptime_int => .{ .u64 = some }, + .pointer => .{ .raw = some }, else => @compileError("unknown type to format: " ++ @typeName(Node)), } else switch (@typeInfo(Node)) { - .Optional, .Null => .none, + .optional, .null => .none, else => unreachable, }, } }; @@ -8414,7 +8414,7 @@ pub const Metadata = enum(u32) { } fmt_str = fmt_str ++ ")\n"; - var fmt_args: @Type(.{ .Struct = .{ + var fmt_args: @Type(.{ .@"struct" = .{ .layout = .auto, .fields = &fields, .decls = &.{}, @@ -8501,10 +8501,10 @@ pub fn init(options: Options) Allocator.Error!Builder 
{ } { - const static_len = @typeInfo(Type).Enum.fields.len - 1; + const static_len = @typeInfo(Type).@"enum".fields.len - 1; try self.type_map.ensureTotalCapacity(self.gpa, static_len); try self.type_items.ensureTotalCapacity(self.gpa, static_len); - inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| { + inline for (@typeInfo(Type.Simple).@"enum".fields) |simple_field| { const result = self.getOrPutTypeNoExtraAssumeCapacity( .{ .tag = .simple, .data = simple_field.value }, ); @@ -9031,14 +9031,14 @@ pub fn getIntrinsic( pub fn intConst(self: *Builder, ty: Type, value: anytype) Allocator.Error!Constant { const int_value = switch (@typeInfo(@TypeOf(value))) { - .Int, .ComptimeInt => value, - .Enum => @intFromEnum(value), + .int, .comptime_int => value, + .@"enum" => @intFromEnum(value), else => @compileError("intConst expected an integral value, got " ++ @typeName(@TypeOf(value))), }; var limbs: [ switch (@typeInfo(@TypeOf(int_value))) { - .Int => |info| std.math.big.int.calcTwosCompLimbCount(info.bits), - .ComptimeInt => std.math.big.int.calcLimbLen(int_value), + .int => |info| std.math.big.int.calcTwosCompLimbCount(info.bits), + .comptime_int => std.math.big.int.calcLimbLen(int_value), else => unreachable, } ]std.math.big.Limb = undefined; @@ -10759,7 +10759,7 @@ fn ensureUnusedTypeCapacity( try self.type_items.ensureUnusedCapacity(self.gpa, count); try self.type_extra.ensureUnusedCapacity( self.gpa, - count * (@typeInfo(Extra).Struct.fields.len + trail_len), + count * (@typeInfo(Extra).@"struct".fields.len + trail_len), ); } @@ -10789,7 +10789,7 @@ fn getOrPutTypeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { n fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.Item.ExtraIndex { const result: Type.Item.ExtraIndex = @intCast(self.type_extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| { const value = @field(extra, field.name); self.type_extra.appendAssumeCapacity(switch (field.type) { u32 => value, @@ -10827,7 +10827,7 @@ fn typeExtraDataTrail( index: Type.Item.ExtraIndex, ) struct { data: T, trail: TypeExtraDataTrail } { var result: T = undefined; - const fields = @typeInfo(T).Struct.fields; + const fields = @typeInfo(T).@"struct".fields; inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, value| @field(result, field.name) = switch (field.type) { u32 => value, @@ -11642,7 +11642,7 @@ fn ensureUnusedConstantCapacity( try self.constant_items.ensureUnusedCapacity(self.gpa, count); try self.constant_extra.ensureUnusedCapacity( self.gpa, - count * (@typeInfo(Extra).Struct.fields.len + trail_len), + count * (@typeInfo(Extra).@"struct".fields.len + trail_len), ); } @@ -11717,7 +11717,7 @@ fn getOrPutConstantAggregateAssumeCapacity( fn addConstantExtraAssumeCapacity(self: *Builder, extra: anytype) Constant.Item.ExtraIndex { const result: Constant.Item.ExtraIndex = @intCast(self.constant_extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| { const value = @field(extra, field.name); self.constant_extra.appendAssumeCapacity(switch (field.type) { u32 => value, @@ -11756,7 +11756,7 @@ fn constantExtraDataTrail( index: Constant.Item.ExtraIndex, ) struct { data: T, trail: ConstantExtraDataTrail } { var result: T = undefined; - const fields = @typeInfo(T).Struct.fields; + const fields = @typeInfo(T).@"struct".fields; inline for (fields, 
self.constant_extra.items[index..][0..fields.len]) |field, value| @field(result, field.name) = switch (field.type) { u32 => value, @@ -11784,13 +11784,13 @@ fn ensureUnusedMetadataCapacity( try self.metadata_items.ensureUnusedCapacity(self.gpa, count); try self.metadata_extra.ensureUnusedCapacity( self.gpa, - count * (@typeInfo(Extra).Struct.fields.len + trail_len), + count * (@typeInfo(Extra).@"struct".fields.len + trail_len), ); } fn addMetadataExtraAssumeCapacity(self: *Builder, extra: anytype) Metadata.Item.ExtraIndex { const result: Metadata.Item.ExtraIndex = @intCast(self.metadata_extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| { const value = @field(extra, field.name); self.metadata_extra.appendAssumeCapacity(switch (field.type) { u32 => value, @@ -11829,7 +11829,7 @@ fn metadataExtraDataTrail( index: Metadata.Item.ExtraIndex, ) struct { data: T, trail: MetadataExtraDataTrail } { var result: T = undefined; - const fields = @typeInfo(T).Struct.fields; + const fields = @typeInfo(T).@"struct".fields; inline for (fields, self.metadata_extra.items[index..][0..fields.len]) |field, value| @field(result, field.name) = switch (field.type) { u32 => value, @@ -13921,7 +13921,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co const MetadataKindBlock = ir.MetadataKindBlock; var metadata_kind_block = try module_block.enterSubBlock(MetadataKindBlock, true); - inline for (@typeInfo(ir.FixedMetadataKind).Enum.fields) |field| { + inline for (@typeInfo(ir.FixedMetadataKind).@"enum".fields) |field| { // don't include `dbg` in stripped functions if (!(self.strip and std.mem.eql(u8, field.name, "dbg"))) { try metadata_kind_block.writeAbbrev(MetadataKindBlock.Kind{ @@ -14197,7 +14197,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co const limbs_len = std.math.divCeil(u32, extra.bit_width, 64) catch unreachable; try record.ensureTotalCapacity(self.gpa, 3 + limbs_len); record.appendAssumeCapacity(@as( - @typeInfo(MetadataBlock.Enumerator.Flags).Struct.backing_integer.?, + @typeInfo(MetadataBlock.Enumerator.Flags).@"struct".backing_integer.?, @bitCast(flags), )); record.appendAssumeCapacity(extra.bit_width); diff --git a/src/codegen/llvm/bitcode_writer.zig b/src/codegen/llvm/bitcode_writer.zig index 0b821a32e7..049a15fe17 100644 --- a/src/codegen/llvm/bitcode_writer.zig +++ b/src/codegen/llvm/bitcode_writer.zig @@ -407,14 +407,14 @@ fn charTo6Bit(c: u8) u8 { fn BufType(comptime T: type, comptime min_len: usize) type { return std.meta.Int(.unsigned, @max(min_len, @bitSizeOf(switch (@typeInfo(T)) { - .ComptimeInt => u32, - .Int => |info| if (info.signedness == .unsigned) + .comptime_int => u32, + .int => |info| if (info.signedness == .unsigned) T else @compileError("Unsupported type: " ++ @typeName(T)), - .Enum => |info| info.tag_type, - .Bool => u1, - .Struct => |info| switch (info.layout) { + .@"enum" => |info| info.tag_type, + .bool => u1, + .@"struct" => |info| switch (info.layout) { .auto, .@"extern" => @compileError("Unsupported type: " ++ @typeName(T)), .@"packed" => std.meta.Int(.unsigned, @bitSizeOf(T)), }, @@ -424,10 +424,10 @@ fn BufType(comptime T: type, comptime min_len: usize) type { fn bufValue(value: anytype, comptime min_len: usize) BufType(@TypeOf(value), min_len) { return switch (@typeInfo(@TypeOf(value))) { - .ComptimeInt, .Int => @intCast(value), - .Enum => @intFromEnum(value), - .Bool => @intFromBool(value), - .Struct 
=> @intCast(@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(value))), @bitCast(value))), + .comptime_int, .int => @intCast(value), + .@"enum" => @intFromEnum(value), + .bool => @intFromBool(value), + .@"struct" => @intCast(@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(value))), @bitCast(value))), else => unreachable, }; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index d1df1ba77e..345e80a23c 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -439,7 +439,7 @@ const NavGen = struct { const zcu = pt.zcu; if (try self.air.value(inst, pt)) |val| { const ty = self.typeOf(inst); - if (ty.zigTypeTag(zcu) == .Fn) { + if (ty.zigTypeTag(zcu) == .@"fn") { const fn_nav = switch (zcu.intern_pool.indexToKey(val.ip_index)) { .@"extern" => |@"extern"| @"extern".owner_nav, .func => |func| func.owner_nav, @@ -641,16 +641,16 @@ const NavGen = struct { fn isSpvVector(self: *NavGen, ty: Type) bool { const zcu = self.pt.zcu; const target = self.getTarget(); - if (ty.zigTypeTag(zcu) != .Vector) return false; + if (ty.zigTypeTag(zcu) != .vector) return false; // TODO: This check must be expanded for types that can be represented // as integers (enums / packed structs?) and types that are represented // by multiple SPIR-V values. const scalar_ty = ty.scalarType(zcu); switch (scalar_ty.zigTypeTag(zcu)) { - .Bool, - .Int, - .Float, + .bool, + .int, + .float, => {}, else => return false, } @@ -668,26 +668,26 @@ const NavGen = struct { const zcu = self.pt.zcu; const target = self.getTarget(); var scalar_ty = ty.scalarType(zcu); - if (scalar_ty.zigTypeTag(zcu) == .Enum) { + if (scalar_ty.zigTypeTag(zcu) == .@"enum") { scalar_ty = scalar_ty.intTagType(zcu); } const vector_len = if (ty.isVector(zcu)) ty.vectorLen(zcu) else null; return switch (scalar_ty.zigTypeTag(zcu)) { - .Bool => ArithmeticTypeInfo{ + .bool => ArithmeticTypeInfo{ .bits = 1, // Doesn't matter for this class. .backing_bits = self.backingIntBits(1).?, .vector_len = vector_len, .signedness = .unsigned, // Technically, but doesn't matter for this class. .class = .bool, }, - .Float => ArithmeticTypeInfo{ + .float => ArithmeticTypeInfo{ .bits = scalar_ty.floatBits(target), .backing_bits = scalar_ty.floatBits(target), // TODO: F80? .vector_len = vector_len, .signedness = .signed, // Technically, but doesn't matter for this class. .class = .float, }, - .Int => blk: { + .int => blk: { const int_info = scalar_ty.intInfo(zcu); // TODO: Maybe it's useful to also return this value. 
                const maybe_backing_bits = self.backingIntBits(int_info.bits);
@@ -705,8 +705,8 @@ const NavGen = struct {
                     .composite_integer,
                 };
             },
-            .Enum => unreachable,
-            .Vector => unreachable,
+            .@"enum" => unreachable,
+            .vector => unreachable,
             else => unreachable, // Unhandled arithmetic type
         };
     }
@@ -748,8 +748,8 @@ const NavGen = struct {
         const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
         const signedness: Signedness = switch (@typeInfo(@TypeOf(value))) {
-            .Int => |int| int.signedness,
-            .ComptimeInt => if (value < 0) .signed else .unsigned,
+            .int => |int| int.signedness,
+            .comptime_int => if (value < 0) .signed else .unsigned,
             else => unreachable,
         };
@@ -1243,7 +1243,7 @@ const NavGen = struct {
             else => {},
         }
-        // const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn;
+        // const is_fn_body = decl_ty.zigTypeTag(zcu) == .@"fn";
         if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
             // Pointer to nothing - return undefined
             return self.spv.constUndef(ty_id);
@@ -1539,11 +1539,11 @@ const NavGen = struct {
        const section = &self.spv.sections.types_globals_constants;
        switch (ty.zigTypeTag(zcu)) {
-            .NoReturn => {
+            .noreturn => {
                 assert(repr == .direct);
                 return try self.spv.voidType();
             },
-            .Void => switch (repr) {
+            .void => switch (repr) {
                 .direct => {
                     return try self.spv.voidType();
                 },
@@ -1557,11 +1557,11 @@ const NavGen = struct {
                     return result_id;
                 },
             },
-            .Bool => switch (repr) {
+            .bool => switch (repr) {
                 .direct => return try self.spv.boolType(),
                 .indirect => return try self.resolveType(Type.u1, .indirect),
             },
-            .Int => {
+            .int => {
                 const int_info = ty.intInfo(zcu);
                 if (int_info.bits == 0) {
                     // Some times, the backend will be asked to generate a pointer to i0. OpTypeInt
@@ -1576,11 +1576,11 @@ const NavGen = struct {
                 }
                 return try self.intType(int_info.signedness, int_info.bits);
             },
-            .Enum => {
+            .@"enum" => {
                 const tag_ty = ty.intTagType(zcu);
                 return try self.resolveType(tag_ty, repr);
             },
-            .Float => {
+            .float => {
                 // We can (and want) not really emulate floating points with other floating point types like with the integer types,
                 // so if the float is not supported, just return an error.
                const bits = ty.floatBits(target);
@@ -1598,7 +1598,7 @@ const NavGen = struct {
                 return try self.spv.floatType(bits);
             },
-            .Array => {
+            .array => {
                 const elem_ty = ty.childType(zcu);
                 const elem_ty_id = try self.resolveType(elem_ty, .indirect);
                 const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(zcu)) orelse {
@@ -1633,7 +1633,7 @@ const NavGen = struct {
                     return try self.arrayType(total_len, elem_ty_id);
                 }
             },
-            .Fn => switch (repr) {
+            .@"fn" => switch (repr) {
                 .direct => {
                     const fn_info = zcu.typeToFunc(ty).?;
@@ -1676,7 +1676,7 @@ const NavGen = struct {
                     return try self.resolveType(Type.usize, .indirect);
                 },
             },
-            .Pointer => {
+            .pointer => {
                 const ptr_info = ty.ptrInfo(zcu);
                 const storage_class = self.spvStorageClass(ptr_info.flags.address_space);
@@ -1692,7 +1692,7 @@ const NavGen = struct {
                     &.{ "ptr", "len" },
                 );
             },
-            .Vector => {
+            .vector => {
                 const elem_ty = ty.childType(zcu);
                 const elem_ty_id = try self.resolveType(elem_ty, repr);
                 const len = ty.vectorLen(zcu);
@@ -1703,7 +1703,7 @@ const NavGen = struct {
                     return try self.arrayType(len, elem_ty_id);
                 }
             },
-            .Struct => {
+            .@"struct" => {
                 const struct_type = switch (ip.indexToKey(ty.toIntern())) {
                     .anon_struct_type => |tuple| {
                         const member_types = try self.gpa.alloc(IdRef, tuple.values.len);
@@ -1757,7 +1757,7 @@ const NavGen = struct {
                 try self.spv.debugName(result_id, type_name);
                 return result_id;
             },
-            .Optional => {
+            .optional => {
                 const payload_ty = ty.optionalChild(zcu);
                 if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
                     // Just use a bool.
@@ -1779,9 +1779,9 @@ const NavGen = struct {
                     &.{ "payload", "valid" },
                 );
             },
-            .Union => return try self.resolveUnionType(ty),
-            .ErrorSet => return try self.resolveType(Type.u16, repr),
-            .ErrorUnion => {
+            .@"union" => return try self.resolveUnionType(ty),
+            .error_set => return try self.resolveType(Type.u16, repr),
+            .error_union => {
                 const payload_ty = ty.errorUnionPayload(zcu);
                 const error_ty_id = try self.resolveType(Type.anyerror, .indirect);
@@ -1808,7 +1808,7 @@ const NavGen = struct {
                 return try self.spv.structType(&member_types, &member_names);
             },
-            .Opaque => {
+            .@"opaque" => {
                 const type_name = try self.resolveTypeName(ty);
                 defer self.gpa.free(type_name);
@@ -1820,15 +1820,15 @@ const NavGen = struct {
                 return result_id;
             },
-            .Null,
-            .Undefined,
-            .EnumLiteral,
-            .ComptimeFloat,
-            .ComptimeInt,
-            .Type,
+            .null,
+            .undefined,
+            .enum_literal,
+            .comptime_float,
+            .comptime_int,
+            .type,
             => unreachable, // Must be comptime.
-            .Frame, .AnyFrame => unreachable, // TODO
+            .frame, .@"anyframe" => unreachable, // TODO
         }
     }
@@ -2429,7 +2429,7 @@ const NavGen = struct {
         const op_result_ty_id = try self.resolveType(op_result_ty, .direct);
         const result_ty = try v.resultType(self, lhs.ty);
-        assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .Bool);
+        assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .bool);
         const cond = try v.prepare(self, condition);
         const object_1 = try v.prepare(self, lhs);
@@ -3119,7 +3119,7 @@ const NavGen = struct {
     fn convertToDirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
         const zcu = self.pt.zcu;
         switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
-            .Bool => {
+            .bool => {
                 const false_id = try self.constBool(false, .indirect);
                 // The operation below requires inputs in direct representation, but the operand
                 // is actually in indirect representation.
@@ -3145,7 +3145,7 @@ const NavGen = struct {
     fn convertToIndirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
         const zcu = self.pt.zcu;
         switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
-            .Bool => {
+            .bool => {
                 const result = try self.intFromBool(Temporary.init(ty, operand_id));
                 return try result.materialize(self);
             },
@@ -4281,17 +4281,17 @@ const NavGen = struct {
         const is_vector = lhs.ty.isVector(zcu);
         switch (scalar_ty.zigTypeTag(zcu)) {
-            .Int, .Bool, .Float => {},
-            .Enum => {
+            .int, .bool, .float => {},
+            .@"enum" => {
                 assert(!is_vector);
                 const ty = lhs.ty.intTagType(zcu);
                 return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
             },
-            .ErrorSet => {
+            .error_set => {
                 assert(!is_vector);
                 return try self.cmp(op, lhs.pun(Type.u16), rhs.pun(Type.u16));
             },
-            .Pointer => {
+            .pointer => {
                 assert(!is_vector);
                 // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
                 // currently not implemented in the SPIR-V LLVM translator. Thus, we emit these using
@@ -4317,7 +4317,7 @@ const NavGen = struct {
                 const rhs_int = Temporary.init(Type.usize, rhs_int_id);
                 return try self.cmp(op, lhs_int, rhs_int);
             },
-            .Optional => {
+            .optional => {
                 assert(!is_vector);
                 const ty = lhs.ty;
@@ -4478,7 +4478,7 @@ const NavGen = struct {
         // TODO: Some more cases are missing here
         // See fn bitCast in llvm.zig
-        if (src_ty.zigTypeTag(zcu) == .Int and dst_ty.isPtrAtRuntime(zcu)) {
+        if (src_ty.zigTypeTag(zcu) == .int and dst_ty.isPtrAtRuntime(zcu)) {
             const result_id = self.spv.allocId();
             try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
                 .id_result_type = dst_ty_id,
@@ -4520,7 +4520,7 @@ const NavGen = struct {
         // the result here.
         // TODO: This detail could cause stuff like @as(*const i1, @ptrCast(&@as(u1, 1))) to break
         // should we change the representation of strange integers?
-        if (dst_ty.zigTypeTag(zcu) == .Int) {
+        if (dst_ty.zigTypeTag(zcu) == .int) {
             const info = self.arithmeticTypeInfo(dst_ty);
             const result = try self.normalize(Temporary.init(dst_ty, result_id), info);
             return try result.materialize(self);
@@ -4729,7 +4729,7 @@ const NavGen = struct {
         const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
         switch (result_ty.zigTypeTag(zcu)) {
-            .Struct => {
+            .@"struct" => {
                 if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
                     _ = struct_type;
                     unreachable; // TODO
@@ -4777,7 +4777,7 @@ const NavGen = struct {
                     constituents[0..index],
                 );
             },
-            .Vector => {
+            .vector => {
                 const n_elems = result_ty.vectorLen(zcu);
                 const elem_ids = try self.gpa.alloc(IdRef, n_elems);
                 defer self.gpa.free(elem_ids);
@@ -4788,7 +4788,7 @@ const NavGen = struct {
                 return try self.constructVector(result_ty, elem_ids);
             },
-            .Array => {
+            .array => {
                 const array_info = result_ty.arrayInfo(zcu);
                 const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(zcu));
                 const elem_ids = try self.gpa.alloc(IdRef, n_elems);
@@ -5153,11 +5153,11 @@ const NavGen = struct {
         if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
         switch (object_ty.zigTypeTag(zcu)) {
-            .Struct => switch (object_ty.containerLayout(zcu)) {
+            .@"struct" => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => return try self.extractField(field_ty, object_id, field_index),
             },
-            .Union => switch (object_ty.containerLayout(zcu)) {
+            .@"union" => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => {
                     // Store, ptr-elem-ptr, pointer-cast, load
@@ -5229,17 +5229,17 @@ const NavGen = struct {
         const zcu = self.pt.zcu;
         const object_ty = object_ptr_ty.childType(zcu);
         switch (object_ty.zigTypeTag(zcu)) {
-            .Pointer => {
+            .pointer => {
                 assert(object_ty.isSlice(zcu));
                 return self.accessChain(result_ty_id, object_ptr, &.{field_index});
             },
-            .Struct => switch (object_ty.containerLayout(zcu)) {
+            .@"struct" => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => {
                     return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
                 },
             },
-            .Union => switch (object_ty.containerLayout(zcu)) {
+            .@"union" => switch (object_ty.containerLayout(zcu)) {
                 .@"packed" => unreachable, // TODO
                 else => {
                     const layout = self.unionLayout(object_ty);
@@ -6179,15 +6179,15 @@ const NavGen = struct {
         var cond_indirect = try self.convertToIndirect(cond_ty, cond);
         const cond_words: u32 = switch (cond_ty.zigTypeTag(zcu)) {
-            .Bool, .ErrorSet => 1,
-            .Int => blk: {
+            .bool, .error_set => 1,
+            .int => blk: {
                 const bits = cond_ty.intInfo(zcu).bits;
                 const backing_bits = self.backingIntBits(bits) orelse {
                     return self.todo("implement composite int switch", .{});
                 };
                 break :blk if (backing_bits <= 32) 1 else 2;
             },
-            .Enum => blk: {
+            .@"enum" => blk: {
                 const int_ty = cond_ty.intTagType(zcu);
                 const int_info = int_ty.intInfo(zcu);
                 const backing_bits = self.backingIntBits(int_info.bits) orelse {
@@ -6195,7 +6195,7 @@ const NavGen = struct {
                 };
                 break :blk if (backing_bits <= 32) 1 else 2;
             },
-            .Pointer => blk: {
+            .pointer => blk: {
                 cond_indirect = try self.intFromPtr(cond_indirect);
                 break :blk target.ptrBitWidth() / 32;
             },
@@ -6248,13 +6248,13 @@ const NavGen = struct {
             for (case.items) |item| {
                 const value = (try self.air.value(item, pt)) orelse unreachable;
                 const int_val: u64 = switch (cond_ty.zigTypeTag(zcu)) {
-                    .Bool, .Int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu),
-                    .Enum => blk: {
+                    .bool, .int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu),
+                    .@"enum" => blk: {
                         // TODO: figure out of cond_ty is correct (something with enum literals)
                         break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(zcu); // TODO: composite integer constants
                     },
-                    .ErrorSet => value.getErrorInt(zcu),
-                    .Pointer => value.toUnsignedInt(zcu),
+                    .error_set => value.getErrorInt(zcu),
+                    .pointer => value.toUnsignedInt(zcu),
                     else => unreachable,
                 };
                 const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -6496,8 +6496,8 @@ const NavGen = struct {
         const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
         const callee_ty = self.typeOf(pl_op.operand);
         const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
-            .Fn => callee_ty,
-            .Pointer => return self.fail("cannot call function pointers", .{}),
+            .@"fn" => callee_ty,
+            .pointer => return self.fail("cannot call function pointers", .{}),
             else => unreachable,
         };
         const fn_info = zcu.typeToFunc(zig_fn_ty).?;
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index d857ce7f46..20abf8ab70 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -98,7 +98,7 @@ pub fn emitSpecConstantOp(
     section.writeOperand(spec.IdRef, operands.id_result);
     section.writeOperand(Opcode, opcode);
-    const fields = @typeInfo(opcode.Operands()).Struct.fields;
+    const fields = @typeInfo(opcode.Operands()).@"struct".fields;
     // First 2 fields are always id_result_type and id_result.
     inline for (fields[2..]) |field| {
         section.writeOperand(field.type, @field(operands, field.name));
@@ -122,8 +122,8 @@ pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
 fn writeOperands(section: *Section, comptime Operands: type, operands: Operands) void {
     const fields = switch (@typeInfo(Operands)) {
-        .Struct => |info| info.fields,
-        .Void => return,
+        .@"struct" => |info| info.fields,
+        .void => return,
         else => unreachable,
     };
@@ -154,24 +154,24 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand)
         spec.PairIdRefIdRef => section.writeWords(&.{ @intFromEnum(operand[0]), @intFromEnum(operand[1]) }),
         else => switch (@typeInfo(Operand)) {
-            .Enum => section.writeWord(@intFromEnum(operand)),
-            .Optional => |info| if (operand) |child| {
+            .@"enum" => section.writeWord(@intFromEnum(operand)),
+            .optional => |info| if (operand) |child| {
                 section.writeOperand(info.child, child);
             },
-            .Pointer => |info| {
+            .pointer => |info| {
                 std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
                 for (operand) |item| {
                     section.writeOperand(info.child, item);
                 }
             },
-            .Struct => |info| {
+            .@"struct" => |info| {
                 if (info.layout == .@"packed") {
                     section.writeWord(@as(Word, @bitCast(operand)));
                 } else {
                     section.writeExtendedMask(Operand, operand);
                 }
             },
-            .Union => section.writeExtendedUnion(Operand, operand),
+            .@"union" => section.writeExtendedUnion(Operand, operand),
             else => unreachable,
         },
     }
@@ -207,12 +207,12 @@ fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDe
 fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
     var mask: Word = 0;
-    inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
+    inline for (@typeInfo(Operand).@"struct".fields, 0..) |field, bit| {
         switch (@typeInfo(field.type)) {
-            .Optional => if (@field(operand, field.name) != null) {
+            .optional => if (@field(operand, field.name) != null) {
                 mask |= 1 << @as(u5, @intCast(bit));
             },
-            .Bool => if (@field(operand, field.name)) {
+            .bool => if (@field(operand, field.name)) {
                 mask |= 1 << @as(u5, @intCast(bit));
             },
             else => unreachable,
@@ -221,12 +221,12 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand
     section.writeWord(mask);
-    inline for (@typeInfo(Operand).Struct.fields) |field| {
+    inline for (@typeInfo(Operand).@"struct".fields) |field| {
         switch (@typeInfo(field.type)) {
-            .Optional => |info| if (@field(operand, field.name)) |child| {
+            .optional => |info| if (@field(operand, field.name)) |child| {
                 section.writeOperands(info.child, child);
             },
-            .Bool => {},
+            .bool => {},
             else => unreachable,
         }
     }
@@ -236,7 +236,7 @@ fn writeExtendedUnion(section: *Section, comptime Operand: type, operand: Operan
     const tag = std.meta.activeTag(operand);
     section.writeWord(@intFromEnum(tag));
-    inline for (@typeInfo(Operand).Union.fields) |field| {
+    inline for (@typeInfo(Operand).@"union".fields) |field| {
         if (@field(Operand, field.name) == tag) {
            section.writeOperands(field.type, @field(operand, field.name));
            return;
@@ -251,8 +251,8 @@ fn instructionSize(comptime opcode: spec.Opcode, operands: opcode.Operands()) us
 fn operandsSize(comptime Operands: type, operands: Operands) usize {
     const fields = switch (@typeInfo(Operands)) {
-        .Struct => |info| info.fields,
-        .Void => return 0,
+        .@"struct" => |info| info.fields,
+        .void => return 0,
         else => unreachable,
     };
@@ -289,9 +289,9 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
         => 2,
         else => switch (@typeInfo(Operand)) {
-            .Enum => 1,
-            .Optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
-            .Pointer => |info| blk: {
+            .@"enum" => 1,
+            .optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
+            .pointer => |info| blk: {
                 std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
                var total: usize = 0;
                 for (operand) |item| {
@@ -299,8 +299,8 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
                 }
                 break :blk total;
             },
-            .Struct => |info| if (info.layout == .@"packed") 1 else extendedMaskSize(Operand, operand),
-            .Union => extendedUnionSize(Operand, operand),
+            .@"struct" => |info| if (info.layout == .@"packed") 1 else extendedMaskSize(Operand, operand),
+            .@"union" => extendedUnionSize(Operand, operand),
             else => unreachable,
         },
     };
@@ -309,13 +309,13 @@ fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
     var total: usize = 0;
     var any_set = false;
-    inline for (@typeInfo(Operand).Struct.fields) |field| {
+    inline for (@typeInfo(Operand).@"struct".fields) |field| {
         switch (@typeInfo(field.type)) {
-            .Optional => |info| if (@field(operand, field.name)) |child| {
+            .optional => |info| if (@field(operand, field.name)) |child| {
                 total += operandsSize(info.child, child);
                 any_set = true;
             },
-            .Bool => if (@field(operand, field.name)) {
+            .bool => if (@field(operand, field.name)) {
                 any_set = true;
             },
             else => unreachable,
@@ -326,7 +326,7 @@ fn extendedUnionSize(comptime Operand: type, operand: Operand) usize {
     const tag = std.meta.activeTag(operand);
-    inline for (@typeInfo(Operand).Union.fields) |field| {
+    inline for (@typeInfo(Operand).@"union".fields) |field| {
         if (@field(Operand, field.name) == tag) {
             // Add one for the tag itself.
             return 1 + operandsSize(field.type, @field(operand, field.name));
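The hunks above are mechanical renames of `std.builtin.Type` field accesses. As a minimal sketch of how reflection code reads after the change (not part of this commit; `describe` is a made-up helper, and the snippet assumes a compiler that already ships the renamed fields):

    const std = @import("std");

    // Hypothetical helper: classifies a type by switching on the renamed
    // lower-case tags of `std.builtin.Type` (previously `.Int`, `.Struct`, ...).
    fn describe(comptime T: type) []const u8 {
        return switch (@typeInfo(T)) {
            .int => "integer",
            .float => "float",
            .bool => "bool",
            .@"struct" => |info| if (info.layout == .@"packed") "packed struct" else "struct",
            .@"enum" => "enum",
            .pointer => "pointer",
            else => "other",
        };
    }

    test "lower-case type tags" {
        try std.testing.expectEqualStrings("integer", describe(u32));
        try std.testing.expectEqualStrings("struct", describe(struct { x: u8 }));
    }

Field names that collide with keywords (`struct`, `enum`, `union`, `opaque`, `fn`, `anyframe`) use the `@"..."` quoted-identifier syntax, which is why those cases read `.@"struct"` rather than a plain lower-case rename like `.Int` to `.int`.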
