Diffstat (limited to 'src')
-rw-r--r--  src/Air/types_resolved.zig | 50
-rw-r--r--  src/Compilation.zig | 10
-rw-r--r--  src/InternPool.zig | 186
-rw-r--r--  src/Package/Fetch.zig | 4
-rw-r--r--  src/Sema.zig | 1942
-rw-r--r--  src/Sema/bitcast.zig | 14
-rw-r--r--  src/Sema/comptime_ptr_access.zig | 128
-rw-r--r--  src/Type.zig | 186
-rw-r--r--  src/Value.zig | 260
-rw-r--r--  src/Zcu.zig | 8
-rw-r--r--  src/Zcu/PerThread.zig | 14
-rw-r--r--  src/arch/aarch64/CodeGen.zig | 112
-rw-r--r--  src/arch/aarch64/abi.zig | 54
-rw-r--r--  src/arch/arm/CodeGen.zig | 116
-rw-r--r--  src/arch/arm/abi.zig | 52
-rw-r--r--  src/arch/arm/bits.zig | 4
-rw-r--r--  src/arch/riscv64/CodeGen.zig | 110
-rw-r--r--  src/arch/riscv64/abi.zig | 62
-rw-r--r--  src/arch/riscv64/bits.zig | 6
-rw-r--r--  src/arch/sparc64/CodeGen.zig | 78
-rw-r--r--  src/arch/wasm/CodeGen.zig | 184
-rw-r--r--  src/arch/wasm/abi.zig | 48
-rw-r--r--  src/arch/x86_64/CodeGen.zig | 236
-rw-r--r--  src/arch/x86_64/Encoding.zig | 6
-rw-r--r--  src/arch/x86_64/Lower.zig | 2
-rw-r--r--  src/arch/x86_64/Mir.zig | 4
-rw-r--r--  src/arch/x86_64/abi.zig | 66
-rw-r--r--  src/arch/x86_64/bits.zig | 4
-rw-r--r--  src/arch/x86_64/encoder.zig | 8
-rw-r--r--  src/codegen.zig | 50
-rw-r--r--  src/codegen/c.zig | 58
-rw-r--r--  src/codegen/c/Type.zig | 50
-rw-r--r--  src/codegen/llvm.zig | 226
-rw-r--r--  src/codegen/llvm/BitcodeReader.zig | 6
-rw-r--r--  src/codegen/llvm/Builder.zig | 94
-rw-r--r--  src/codegen/llvm/bitcode_writer.zig | 18
-rw-r--r--  src/codegen/spirv.zig | 130
-rw-r--r--  src/codegen/spirv/Section.zig | 52
-rw-r--r--  src/link.zig | 12
-rw-r--r--  src/link/Coff.zig | 2
-rw-r--r--  src/link/Dwarf.zig | 28
-rw-r--r--  src/link/Elf/Atom.zig | 2
-rw-r--r--  src/link/Elf/LdScript.zig | 2
-rw-r--r--  src/link/Elf/LinkerDefined.zig | 8
-rw-r--r--  src/link/Elf/Object.zig | 16
-rw-r--r--  src/link/Elf/SharedObject.zig | 8
-rw-r--r--  src/link/Elf/Symbol.zig | 2
-rw-r--r--  src/link/Elf/ZigObject.zig | 16
-rw-r--r--  src/link/MachO/Archive.zig | 2
-rw-r--r--  src/link/MachO/Atom.zig | 2
-rw-r--r--  src/link/MachO/Dylib.zig | 8
-rw-r--r--  src/link/MachO/InternalObject.zig | 16
-rw-r--r--  src/link/MachO/Object.zig | 16
-rw-r--r--  src/link/MachO/Symbol.zig | 2
-rw-r--r--  src/link/MachO/ZigObject.zig | 16
-rw-r--r--  src/link/Wasm.zig | 12
-rw-r--r--  src/link/Wasm/Object.zig | 4
-rw-r--r--  src/link/Wasm/ZigObject.zig | 2
-rw-r--r--  src/link/riscv.zig | 2
-rw-r--r--  src/link/tapi/yaml.zig | 56
-rw-r--r--  src/main.zig | 4
-rw-r--r--  src/mutable_value.zig | 24
-rw-r--r--  src/print_env.zig | 2
-rw-r--r--  src/print_value.zig | 12
-rw-r--r--  src/print_zir.zig | 6
-rw-r--r--  src/translate_c.zig | 8
66 files changed, 2467 insertions, 2461 deletions
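For quick reference, the changes below are a mechanical rename of the `std.builtin.Type` union fields and type-tag names from PascalCase to lowercase: names that collide with Zig keywords become escaped identifiers (`.Struct` -> `.@"struct"`, `.Fn` -> `.@"fn"`), while the rest are simply lowercased (`.Int` -> `.int`). The following standalone sketch is not part of the diff; it assumes a Zig compiler in which these renames have landed, and `Point` is a hypothetical example type used only for illustration:

const std = @import("std");

const Point = struct { x: i32, y: i32 };

comptime {
    // Field access on the type-info union, previously spelled
    // `@typeInfo(Point).Struct.fields.len`.
    std.debug.assert(@typeInfo(Point).@"struct".fields.len == 2);
    // Tag checks compare the tagged union against an enum literal, same as
    // before, just lowercase: previously `@typeInfo(Point) == .Struct`.
    std.debug.assert(@typeInfo(Point) == .@"struct");
    // Tags that are not keywords need no escaping: `.Int` becomes `.int`.
    std.debug.assert(@typeInfo(u8) == .int);
}
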
diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig
index 06258f9130..e60f5ef311 100644
--- a/src/Air/types_resolved.zig
+++ b/src/Air/types_resolved.zig
@@ -272,7 +272,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
const elems_len: usize = @intCast(ty.arrayLen(zcu));
const elems: []const Air.Inst.Ref = @ptrCast(air.extra[data.ty_pl.payload..][0..elems_len]);
if (!checkType(ty, zcu)) return false;
- if (ty.zigTypeTag(zcu) == .Struct) {
+ if (ty.zigTypeTag(zcu) == .@"struct") {
for (elems, 0..) |elem, elem_idx| {
if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
if (!checkRef(elem, zcu)) return false;
@@ -453,35 +453,35 @@ pub fn checkType(ty: Type, zcu: *Zcu) bool {
return switch (ty.zigTypeTagOrPoison(zcu) catch |err| switch (err) {
error.GenericPoison => return true,
}) {
- .Type,
- .Void,
- .Bool,
- .NoReturn,
- .Int,
- .Float,
- .ErrorSet,
- .Enum,
- .Opaque,
- .Vector,
+ .type,
+ .void,
+ .bool,
+ .noreturn,
+ .int,
+ .float,
+ .error_set,
+ .@"enum",
+ .@"opaque",
+ .vector,
// These types can appear due to some dummy instructions Sema introduces and expects to be omitted by Liveness.
// It's a little silly -- but fine, we'll return `true`.
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .EnumLiteral,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .enum_literal,
=> true,
- .Frame,
- .AnyFrame,
+ .frame,
+ .@"anyframe",
=> @panic("TODO Air.types_resolved.checkType async frames"),
- .Optional => checkType(ty.childType(zcu), zcu),
- .ErrorUnion => checkType(ty.errorUnionPayload(zcu), zcu),
- .Pointer => checkType(ty.childType(zcu), zcu),
- .Array => checkType(ty.childType(zcu), zcu),
+ .optional => checkType(ty.childType(zcu), zcu),
+ .error_union => checkType(ty.errorUnionPayload(zcu), zcu),
+ .pointer => checkType(ty.childType(zcu), zcu),
+ .array => checkType(ty.childType(zcu), zcu),
- .Fn => {
+ .@"fn" => {
const info = zcu.typeToFunc(ty).?;
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
@@ -489,7 +489,7 @@ pub fn checkType(ty: Type, zcu: *Zcu) bool {
}
return checkType(Type.fromInterned(info.return_type), zcu);
},
- .Struct => switch (ip.indexToKey(ty.toIntern())) {
+ .@"struct" => switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_obj = zcu.typeToStruct(ty).?;
return switch (struct_obj.layout) {
@@ -508,6 +508,6 @@ pub fn checkType(ty: Type, zcu: *Zcu) bool {
},
else => unreachable,
},
- .Union => return zcu.typeToUnion(ty).?.flagsUnordered(ip).status == .fully_resolved,
+ .@"union" => return zcu.typeToUnion(ty).?.flagsUnordered(ip).status == .fully_resolved,
};
}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 3ba53083ea..3d40957a72 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -384,7 +384,7 @@ const Job = union(enum) {
/// The value is the index into `system_libs`.
windows_import_lib: usize,
- const Tag = @typeInfo(Job).Union.tag_type.?;
+ const Tag = @typeInfo(Job).@"union".tag_type.?;
fn stage(tag: Tag) usize {
return switch (tag) {
// Prioritize functions so that codegen can get to work on them on a
@@ -911,7 +911,7 @@ pub const cache_helpers = struct {
}
pub fn addDebugFormat(hh: *Cache.HashHelper, x: Config.DebugFormat) void {
- const tag: @typeInfo(Config.DebugFormat).Union.tag_type.? = x;
+ const tag: @typeInfo(Config.DebugFormat).@"union".tag_type.? = x;
hh.add(tag);
switch (x) {
.strip, .code_view => {},
@@ -1486,7 +1486,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.emit_asm = options.emit_asm,
.emit_llvm_ir = options.emit_llvm_ir,
.emit_llvm_bc = options.emit_llvm_bc,
- .work_queues = .{std.fifo.LinearFifo(Job, .Dynamic).init(gpa)} ** @typeInfo(std.meta.FieldType(Compilation, .work_queues)).Array.len,
+ .work_queues = .{std.fifo.LinearFifo(Job, .Dynamic).init(gpa)} ** @typeInfo(std.meta.FieldType(Compilation, .work_queues)).array.len,
.codegen_work = if (InternPool.single_threaded) {} else .{
.mutex = .{},
.cond = .{},
@@ -3113,8 +3113,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
err: *?Error,
const Error = @typeInfo(
- @typeInfo(@TypeOf(Zcu.SrcLoc.span)).Fn.return_type.?,
- ).ErrorUnion.error_set;
+ @typeInfo(@TypeOf(Zcu.SrcLoc.span)).@"fn".return_type.?,
+ ).error_union.error_set;
pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
if (ctx.err.*) |_| return lhs_index < rhs_index;
diff --git a/src/InternPool.zig b/src/InternPool.zig
index d0ade7026c..e09576c5aa 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -893,7 +893,7 @@ const Local = struct {
};
fn List(comptime Elem: type) type {
- assert(@typeInfo(Elem) == .Struct);
+ assert(@typeInfo(Elem) == .@"struct");
return struct {
bytes: [*]align(@alignOf(Elem)) u8,
@@ -907,7 +907,7 @@ const Local = struct {
const fields = std.enums.values(std.meta.FieldEnum(Elem));
fn PtrArrayElem(comptime len: usize) type {
- const elem_info = @typeInfo(Elem).Struct;
+ const elem_info = @typeInfo(Elem).@"struct";
const elem_fields = elem_info.fields;
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined;
for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{
@@ -917,7 +917,7 @@ const Local = struct {
.is_comptime = false,
.alignment = 0,
};
- return @Type(.{ .Struct = .{
+ return @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &new_fields,
.decls = &.{},
@@ -928,12 +928,12 @@ const Local = struct {
size: std.builtin.Type.Pointer.Size,
is_const: bool = false,
}) type {
- const elem_info = @typeInfo(Elem).Struct;
+ const elem_info = @typeInfo(Elem).@"struct";
const elem_fields = elem_info.fields;
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined;
for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{
.name = elem_field.name,
- .type = @Type(.{ .Pointer = .{
+ .type = @Type(.{ .pointer = .{
.size = opts.size,
.is_const = opts.is_const,
.is_volatile = false,
@@ -947,7 +947,7 @@ const Local = struct {
.is_comptime = false,
.alignment = 0,
};
- return @Type(.{ .Struct = .{
+ return @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &new_fields,
.decls = &.{},
@@ -1306,7 +1306,7 @@ const Shard = struct {
};
fn Map(comptime Value: type) type {
- comptime assert(@typeInfo(Value).Enum.tag_type == u32);
+ comptime assert(@typeInfo(Value).@"enum".tag_type == u32);
_ = @as(Value, .none); // expected .none key
return struct {
/// header: Header,
@@ -2254,7 +2254,7 @@ pub const Key = union(enum) {
byte_offset: u64,
pub const BaseAddr = union(enum) {
- const Tag = @typeInfo(BaseAddr).Union.tag_type.?;
+ const Tag = @typeInfo(BaseAddr).@"union".tag_type.?;
/// Points to the value of a single `Nav`, which may be constant or a `variable`.
nav: Nav.Index,
@@ -2411,7 +2411,7 @@ pub const Key = union(enum) {
pub fn hash64(key: Key, ip: *const InternPool) u64 {
const asBytes = std.mem.asBytes;
- const KeyTag = @typeInfo(Key).Union.tag_type.?;
+ const KeyTag = @typeInfo(Key).@"union".tag_type.?;
const seed = @intFromEnum(@as(KeyTag, key));
return switch (key) {
// TODO: assert no padding in these types
@@ -2487,7 +2487,7 @@ pub const Key = union(enum) {
.lazy_align, .lazy_size => |lazy_ty| {
std.hash.autoHash(
&hasher,
- @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage),
+ @as(@typeInfo(Key.Int.Storage).@"union".tag_type.?, int.storage),
);
std.hash.autoHash(&hasher, lazy_ty);
},
@@ -2651,7 +2651,7 @@ pub const Key = union(enum) {
}
pub fn eql(a: Key, b: Key, ip: *const InternPool) bool {
- const KeyTag = @typeInfo(Key).Union.tag_type.?;
+ const KeyTag = @typeInfo(Key).@"union".tag_type.?;
const a_tag: KeyTag = a;
const b_tag: KeyTag = b;
if (a_tag != b_tag) return false;
@@ -2861,7 +2861,7 @@ pub const Key = union(enum) {
return a_val == b_val;
}
- const StorageTag = @typeInfo(Key.Float.Storage).Union.tag_type.?;
+ const StorageTag = @typeInfo(Key.Float.Storage).@"union".tag_type.?;
assert(@as(StorageTag, a_info.storage) == @as(StorageTag, b_info.storage));
switch (a_info.storage) {
@@ -2905,7 +2905,7 @@ pub const Key = union(enum) {
if (a_info.ty != b_info.ty) return false;
const len = ip.aggregateTypeLen(a_info.ty);
- const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?;
+ const StorageTag = @typeInfo(Key.Aggregate.Storage).@"union".tag_type.?;
if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) {
for (0..@intCast(len)) |elem_index| {
const a_elem = switch (a_info.storage) {
@@ -4000,7 +4000,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);
const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?];
const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered));
- var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len);
+ var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStruct).@"struct".fields.len);
const captures_len = if (flags.any_captures) c: {
const len = extra_list.view().items(.@"0")[extra_index];
extra_index += 1;
@@ -4105,7 +4105,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]);
const names_map: MapIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "names_map").?]);
const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered));
- var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len);
+ var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStructPacked).@"struct".fields.len);
const has_inits = item.tag == .type_struct_packed_inits;
const captures_len = if (flags.any_captures) c: {
const len = extra_list.view().items(.@"0")[extra_index];
@@ -4711,9 +4711,9 @@ pub const Index = enum(u32) {
},
}) void {
_ = self;
- const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields;
+ const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).pointer.child).@"struct".fields;
@setEvalBranchQuota(2_000);
- inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| {
+ inline for (@typeInfo(Tag).@"enum".fields, 0..) |tag, start| {
inline for (0..map_fields.len) |offset| {
if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break;
} else {
@@ -6384,7 +6384,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const extra_items = extra_list.view().items(.@"0");
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);
const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered));
- const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len);
+ const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStruct).@"struct".fields.len);
if (flags.is_reified) {
assert(!flags.any_captures);
break :ns .{ .reified = .{
@@ -6407,7 +6407,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const extra_items = extra_list.view().items(.@"0");
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]);
const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered));
- const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len);
+ const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStructPacked).@"struct".fields.len);
if (flags.is_reified) {
assert(!flags.any_captures);
break :ns .{ .reified = .{
@@ -6922,7 +6922,7 @@ fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local.
const ty: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?]);
const generic_owner: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?]);
const func_decl = ip.funcDeclInfo(generic_owner);
- const end_extra_index = extra_index + @as(u32, @typeInfo(Tag.FuncInstance).Struct.fields.len);
+ const end_extra_index = extra_index + @as(u32, @typeInfo(Tag.FuncInstance).@"struct".fields.len);
return .{
.tid = tid,
.ty = ty,
@@ -7288,7 +7288,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
const names_map = try ip.addMap(gpa, tid, names.len);
ip.addStringsToMap(names_map, names);
const names_len = error_set_type.names.len;
- try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names_len);
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).@"struct".fields.len + names_len);
items.appendAssumeCapacity(.{
.tag = .type_error_set,
.data = addExtraAssumeCapacity(extra, Tag.ErrorSet{
@@ -7883,7 +7883,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
.repeated_elem => |elem| elem,
};
- try extra.ensureUnusedCapacity(@typeInfo(Repeated).Struct.fields.len);
+ try extra.ensureUnusedCapacity(@typeInfo(Repeated).@"struct".fields.len);
items.appendAssumeCapacity(.{
.tag = .repeated,
.data = addExtraAssumeCapacity(extra, Repeated{
@@ -7898,7 +7898,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
const strings = ip.getLocal(tid).getMutableStrings(gpa);
const start = strings.mutate.len;
try strings.ensureUnusedCapacity(@intCast(len_including_sentinel + 1));
- try extra.ensureUnusedCapacity(@typeInfo(Bytes).Struct.fields.len);
+ try extra.ensureUnusedCapacity(@typeInfo(Bytes).@"struct".fields.len);
switch (aggregate.storage) {
.bytes => |bytes| strings.appendSliceAssumeCapacity(.{bytes.toSlice(len, ip)}),
.elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) {
@@ -7938,7 +7938,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
}
try extra.ensureUnusedCapacity(
- @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel + 1)),
+ @typeInfo(Tag.Aggregate).@"struct".fields.len + @as(usize, @intCast(len_including_sentinel + 1)),
);
items.appendAssumeCapacity(.{
.tag = .aggregate,
@@ -7961,7 +7961,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
.memoized_call => |memoized_call| {
for (memoized_call.arg_values) |arg| assert(arg != .none);
- try extra.ensureUnusedCapacity(@typeInfo(MemoizedCall).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(MemoizedCall).@"struct".fields.len +
memoized_call.arg_values.len);
items.appendAssumeCapacity(.{
.tag = .memoized_call,
@@ -8050,7 +8050,7 @@ pub fn getUnionType(
const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0;
const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnion).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnion).@"struct".fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
@@ -8259,7 +8259,7 @@ pub fn getStructType(
.auto => false,
.@"extern" => true,
.@"packed" => {
- try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStructPacked).@"struct".fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
@@ -8327,7 +8327,7 @@ pub fn getStructType(
const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0;
- try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStruct).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStruct).@"struct".fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
@@ -8443,7 +8443,7 @@ pub fn getAnonStructType(
try items.ensureUnusedCapacity(1);
try extra.ensureUnusedCapacity(
- @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3),
+ @typeInfo(TypeStructAnon).@"struct".fields.len + (fields_len * 3),
);
const extra_index = addExtraAssumeCapacity(extra, TypeStructAnon{
@@ -8509,7 +8509,7 @@ pub fn getFuncType(
const prev_extra_len = extra.mutate.len;
const params_len: u32 = @intCast(key.param_types.len);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeFunction).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeFunction).@"struct".fields.len +
@intFromBool(key.comptime_bits != 0) +
@intFromBool(key.noalias_bits != 0) +
params_len);
@@ -8575,7 +8575,7 @@ pub fn getExtern(
const items = local.getMutableItems(gpa);
const extra = local.getMutableExtra(gpa);
try items.ensureUnusedCapacity(1);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.Extern).Struct.fields.len);
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.Extern).@"struct".fields.len);
try local.getMutableNavs(gpa).ensureUnusedCapacity(1);
// Predict the index the `@"extern" will live at, so we can construct the owner `Nav` before releasing the shard's mutex.
@@ -8642,7 +8642,7 @@ pub fn getFuncDecl(
// arrays. This is similar to what `getOrPutTrailingString` does.
const prev_extra_len = extra.mutate.len;
- try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).Struct.fields.len);
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).@"struct".fields.len);
const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{
.analysis = .{
@@ -8723,10 +8723,10 @@ pub fn getFuncDeclIes(
const prev_extra_len = extra.mutate.len;
const params_len: u32 = @intCast(key.param_types.len);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).@"struct".fields.len +
1 + // inferred_error_set
- @typeInfo(Tag.ErrorUnionType).Struct.fields.len +
- @typeInfo(Tag.TypeFunction).Struct.fields.len +
+ @typeInfo(Tag.ErrorUnionType).@"struct".fields.len +
+ @typeInfo(Tag.TypeFunction).@"struct".fields.len +
@intFromBool(key.comptime_bits != 0) +
@intFromBool(key.noalias_bits != 0) +
params_len);
@@ -8855,7 +8855,7 @@ pub fn getErrorSetType(
const local = ip.getLocal(tid);
const items = local.getMutableItems(gpa);
const extra = local.getMutableExtra(gpa);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).Struct.fields.len + names.len);
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).@"struct".fields.len + names.len);
const names_map = try ip.addMap(gpa, tid, names.len);
errdefer local.mutate.maps.len -= 1;
@@ -8930,7 +8930,7 @@ pub fn getFuncInstance(
const local = ip.getLocal(tid);
const items = local.getMutableItems(gpa);
const extra = local.getMutableExtra(gpa);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).@"struct".fields.len +
arg.comptime_args.len);
const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
@@ -9015,11 +9015,11 @@ pub fn getFuncInstanceIes(
const prev_extra_len = extra.mutate.len;
const params_len: u32 = @intCast(arg.param_types.len);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).@"struct".fields.len +
1 + // inferred_error_set
arg.comptime_args.len +
- @typeInfo(Tag.ErrorUnionType).Struct.fields.len +
- @typeInfo(Tag.TypeFunction).Struct.fields.len +
+ @typeInfo(Tag.ErrorUnionType).@"struct".fields.len +
+ @typeInfo(Tag.TypeFunction).@"struct".fields.len +
@intFromBool(arg.noalias_bits != 0) +
params_len);
@@ -9324,7 +9324,7 @@ pub fn getEnumType(
switch (ini.tag_mode) {
.auto => {
assert(!ini.has_values);
- try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).@"struct".fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
@@ -9384,7 +9384,7 @@ pub fn getEnumType(
local.mutate.maps.len -= 1;
};
- try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).@"struct".fields.len +
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
@@ -9499,7 +9499,7 @@ pub fn getGeneratedTagEnumType(
const prev_extra_len = extra.mutate.len;
switch (ini.tag_mode) {
.auto => {
- try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).@"struct".fields.len +
1 + // owner_union
fields_len); // field names
items.appendAssumeCapacity(.{
@@ -9518,7 +9518,7 @@ pub fn getGeneratedTagEnumType(
extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)});
},
.explicit, .nonexhaustive => {
- try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).Struct.fields.len +
+ try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).@"struct".fields.len +
1 + // owner_union
fields_len + // field names
ini.values.len); // field values
@@ -9606,7 +9606,7 @@ pub fn getOpaqueType(
const extra = local.getMutableExtra(gpa);
try items.ensureUnusedCapacity(1);
- try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).Struct.fields.len + switch (ini.key) {
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).@"struct".fields.len + switch (ini.key) {
.declared => |d| d.captures.len,
.reified => 0,
});
@@ -9740,14 +9740,14 @@ fn addInt(
}
fn addExtra(extra: Local.Extra.Mutable, item: anytype) Allocator.Error!u32 {
- const fields = @typeInfo(@TypeOf(item)).Struct.fields;
+ const fields = @typeInfo(@TypeOf(item)).@"struct".fields;
try extra.ensureUnusedCapacity(fields.len);
return addExtraAssumeCapacity(extra, item);
}
fn addExtraAssumeCapacity(extra: Local.Extra.Mutable, item: anytype) u32 {
const result: u32 = extra.mutate.len;
- inline for (@typeInfo(@TypeOf(item)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(item)).@"struct".fields) |field| {
extra.appendAssumeCapacity(.{switch (field.type) {
Index,
Cau.Index,
@@ -9791,7 +9791,7 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
else => @compileError("unsupported host"),
}
const result: u32 = @intCast(ip.limbs.items.len);
- inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) |field, i| {
+ inline for (@typeInfo(@TypeOf(extra)).@"struct".fields, 0..) |field, i| {
const new: u32 = switch (field.type) {
u32 => @field(extra, field.name),
Index => @intFromEnum(@field(extra, field.name)),
@@ -9809,7 +9809,7 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
fn extraDataTrail(extra: Local.Extra, comptime T: type, index: u32) struct { data: T, end: u32 } {
const extra_items = extra.view().items(.@"0");
var result: T = undefined;
- const fields = @typeInfo(T).Struct.fields;
+ const fields = @typeInfo(T).@"struct".fields;
inline for (fields, index..) |field, extra_index| {
const extra_item = extra_items[extra_index];
@field(result, field.name) = switch (field.type) {
@@ -10268,7 +10268,7 @@ fn getCoercedFunc(
const extra = local.getMutableExtra(gpa);
const prev_extra_len = extra.mutate.len;
- try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncCoerced).Struct.fields.len);
+ try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncCoerced).@"struct".fields.len);
const extra_index = addExtraAssumeCapacity(extra, Tag.FuncCoerced{
.ty = ty,
@@ -10480,31 +10480,31 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.type_inferred_error_set => 0,
.type_enum_explicit, .type_enum_nonexhaustive => b: {
const info = extraData(extra_list, EnumExplicit, data);
- var ints = @typeInfo(EnumExplicit).Struct.fields.len;
+ var ints = @typeInfo(EnumExplicit).@"struct".fields.len;
if (info.zir_index == .none) ints += 1;
ints += if (info.captures_len != std.math.maxInt(u32))
info.captures_len
else
- @typeInfo(PackedU64).Struct.fields.len;
+ @typeInfo(PackedU64).@"struct".fields.len;
ints += info.fields_len;
if (info.values_map != .none) ints += info.fields_len;
break :b @sizeOf(u32) * ints;
},
.type_enum_auto => b: {
const info = extraData(extra_list, EnumAuto, data);
- const ints = @typeInfo(EnumAuto).Struct.fields.len + info.captures_len + info.fields_len;
+ const ints = @typeInfo(EnumAuto).@"struct".fields.len + info.captures_len + info.fields_len;
break :b @sizeOf(u32) * ints;
},
.type_opaque => b: {
const info = extraData(extra_list, Tag.TypeOpaque, data);
- const ints = @typeInfo(Tag.TypeOpaque).Struct.fields.len + info.captures_len;
+ const ints = @typeInfo(Tag.TypeOpaque).@"struct".fields.len + info.captures_len;
break :b @sizeOf(u32) * ints;
},
.type_struct => b: {
if (data == 0) break :b 0;
const extra = extraDataTrail(extra_list, Tag.TypeStruct, data);
const info = extra.data;
- var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len;
+ var ints: usize = @typeInfo(Tag.TypeStruct).@"struct".fields.len;
if (info.flags.any_captures) {
const captures_len = extra_items[extra.end];
ints += 1 + captures_len;
@@ -10535,7 +10535,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
extra_items[extra.end]
else
0;
- break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
+ break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).@"struct".fields.len +
@intFromBool(extra.data.flags.any_captures) + captures_len +
extra.data.fields_len * 2);
},
@@ -10545,7 +10545,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
extra_items[extra.end]
else
0;
- break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
+ break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).@"struct".fields.len +
@intFromBool(extra.data.flags.any_captures) + captures_len +
extra.data.fields_len * 3);
},
@@ -11627,7 +11627,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.c_ulong_type,
.c_longlong_type,
.c_ulonglong_type,
- => .Int,
+ => .int,
.c_longdouble_type,
.f16_type,
@@ -11635,20 +11635,20 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.f64_type,
.f80_type,
.f128_type,
- => .Float,
-
- .anyopaque_type => .Opaque,
- .bool_type => .Bool,
- .void_type => .Void,
- .type_type => .Type,
- .anyerror_type, .adhoc_inferred_error_set_type => .ErrorSet,
- .comptime_int_type => .ComptimeInt,
- .comptime_float_type => .ComptimeFloat,
- .noreturn_type => .NoReturn,
- .anyframe_type => .AnyFrame,
- .null_type => .Null,
- .undefined_type => .Undefined,
- .enum_literal_type => .EnumLiteral,
+ => .float,
+
+ .anyopaque_type => .@"opaque",
+ .bool_type => .bool,
+ .void_type => .void,
+ .type_type => .type,
+ .anyerror_type, .adhoc_inferred_error_set_type => .error_set,
+ .comptime_int_type => .comptime_int,
+ .comptime_float_type => .comptime_float,
+ .noreturn_type => .noreturn,
+ .anyframe_type => .@"anyframe",
+ .null_type => .null,
+ .undefined_type => .undefined,
+ .enum_literal_type => .enum_literal,
.manyptr_u8_type,
.manyptr_const_u8_type,
@@ -11656,11 +11656,11 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
- => .Pointer,
+ => .pointer,
- .optional_noreturn_type => .Optional,
- .anyerror_void_error_union_type => .ErrorUnion,
- .empty_struct_type => .Struct,
+ .optional_noreturn_type => .optional,
+ .anyerror_void_error_union_type => .error_union,
+ .empty_struct_type => .@"struct",
.generic_poison_type => return error.GenericPoison,
@@ -11687,48 +11687,48 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.type_int_signed,
.type_int_unsigned,
- => .Int,
+ => .int,
.type_array_big,
.type_array_small,
- => .Array,
+ => .array,
- .type_vector => .Vector,
+ .type_vector => .vector,
.type_pointer,
.type_slice,
- => .Pointer,
+ => .pointer,
- .type_optional => .Optional,
- .type_anyframe => .AnyFrame,
+ .type_optional => .optional,
+ .type_anyframe => .@"anyframe",
.type_error_union,
.type_anyerror_union,
- => .ErrorUnion,
+ => .error_union,
.type_error_set,
.type_inferred_error_set,
- => .ErrorSet,
+ => .error_set,
.type_enum_auto,
.type_enum_explicit,
.type_enum_nonexhaustive,
- => .Enum,
+ => .@"enum",
.simple_type => unreachable, // handled via Index tag above
- .type_opaque => .Opaque,
+ .type_opaque => .@"opaque",
.type_struct,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
- => .Struct,
+ => .@"struct",
- .type_union => .Union,
+ .type_union => .@"union",
- .type_function => .Fn,
+ .type_function => .@"fn",
// values, not types
.undef,
@@ -11919,8 +11919,8 @@ fn funcIesResolvedPtr(ip: *InternPool, func_index: Index) *Index {
const func_extra = unwrapped_func.getExtra(ip);
const func_item = unwrapped_func.getItem(ip);
const extra_index = switch (func_item.tag) {
- .func_decl => func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len,
- .func_instance => func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len,
+ .func_decl => func_item.data + @typeInfo(Tag.FuncDecl).@"struct".fields.len,
+ .func_instance => func_item.data + @typeInfo(Tag.FuncInstance).@"struct".fields.len,
.func_coerced => {
const uncoerced_func_index: Index = @enumFromInt(func_extra.view().items(.@"0")[
func_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?
@@ -11929,8 +11929,8 @@ fn funcIesResolvedPtr(ip: *InternPool, func_index: Index) *Index {
const uncoerced_func_item = unwrapped_uncoerced_func.getItem(ip);
return @ptrCast(&unwrapped_uncoerced_func.getExtra(ip).view().items(.@"0")[
switch (uncoerced_func_item.tag) {
- .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).Struct.fields.len,
- .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).Struct.fields.len,
+ .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).@"struct".fields.len,
+ .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).@"struct".fields.len,
else => unreachable,
}
]);
diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig
index 28030e3879..2540cf9922 100644
--- a/src/Package/Fetch.zig
+++ b/src/Package/Fetch.zig
@@ -325,7 +325,7 @@ pub fn run(f: *Fetch) RunError!void {
// "p/$hash/foo", with possibly more directories after "foo".
// We want to fail unless the resolved relative path has a
// prefix of "p/$hash/".
- const digest_len = @typeInfo(Manifest.MultiHashHexDigest).Array.len;
+ const digest_len = @typeInfo(Manifest.MultiHashHexDigest).array.len;
const prefix_len: usize = if (f.job_queue.read_only) 0 else "p/".len;
const expected_prefix = f.parent_package_root.sub_path[0 .. prefix_len + digest_len];
if (!std.mem.startsWith(u8, pkg_root.sub_path, expected_prefix)) {
@@ -670,7 +670,7 @@ fn queueJobsForDeps(f: *Fetch) RunError!void {
.url = url,
.hash = h: {
const h = dep.hash orelse break :h null;
- const digest_len = @typeInfo(Manifest.MultiHashHexDigest).Array.len;
+ const digest_len = @typeInfo(Manifest.MultiHashHexDigest).array.len;
const multihash_digest = h[0..digest_len].*;
const gop = f.job_queue.table.getOrPutAssumeCapacity(multihash_digest);
if (gop.found_existing) continue;
diff --git a/src/Sema.zig b/src/Sema.zig
index 7f39762cb2..85a1e90a48 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1783,7 +1783,7 @@ fn analyzeBodyInner(
const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
const err_union = try sema.resolveInst(extra.data.operand);
const err_union_ty = sema.typeOf(err_union);
- if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
+ if (err_union_ty.zigTypeTag(zcu) != .error_union) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(pt),
});
@@ -1985,14 +1985,14 @@ fn resolveDestType(
else => |e| return e,
};
- if (remove_eu and raw_ty.zigTypeTag(zcu) == .ErrorUnion) {
+ if (remove_eu and raw_ty.zigTypeTag(zcu) == .error_union) {
const eu_child = raw_ty.errorUnionPayload(zcu);
- if (remove_opt and eu_child.zigTypeTag(zcu) == .Optional) {
+ if (remove_opt and eu_child.zigTypeTag(zcu) == .optional) {
return eu_child.childType(zcu);
}
return eu_child;
}
- if (remove_opt and raw_ty.zigTypeTag(zcu) == .Optional) {
+ if (remove_opt and raw_ty.zigTypeTag(zcu) == .optional) {
return raw_ty.childType(zcu);
}
return raw_ty;
@@ -2280,7 +2280,7 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, non
non_optional_ty.fmt(pt),
});
errdefer msg.destroy(sema.gpa);
- if (non_optional_ty.zigTypeTag(pt.zcu) == .ErrorUnion) {
+ if (non_optional_ty.zigTypeTag(pt.zcu) == .error_union) {
try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
try addDeclaredHereNote(sema, msg, non_optional_ty);
@@ -2327,7 +2327,7 @@ fn failWithErrorSetCodeMissing(
fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
const pt = sema.pt;
const zcu = pt.zcu;
- if (int_ty.zigTypeTag(zcu) == .Vector) {
+ if (int_ty.zigTypeTag(zcu) == .vector) {
const msg = msg: {
const msg = try sema.errMsg(src, "overflow of vector type '{}' with value '{}'", .{
int_ty.fmt(pt), val.fmtValueSema(pt, sema),
@@ -2380,7 +2380,7 @@ fn failWithInvalidFieldAccess(
const zcu = pt.zcu;
const inner_ty = if (object_ty.isSinglePointer(zcu)) object_ty.childType(zcu) else object_ty;
- if (inner_ty.zigTypeTag(zcu) == .Optional) opt: {
+ if (inner_ty.zigTypeTag(zcu) == .optional) opt: {
const child_ty = inner_ty.optionalChild(zcu);
if (!typeSupportsFieldAccess(zcu, child_ty, field_name)) break :opt;
const msg = msg: {
@@ -2390,7 +2390,7 @@ fn failWithInvalidFieldAccess(
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
- } else if (inner_ty.zigTypeTag(zcu) == .ErrorUnion) err: {
+ } else if (inner_ty.zigTypeTag(zcu) == .error_union) err: {
const child_ty = inner_ty.errorUnionPayload(zcu);
if (!typeSupportsFieldAccess(zcu, child_ty, field_name)) break :err;
const msg = msg: {
@@ -2407,16 +2407,16 @@ fn failWithInvalidFieldAccess(
fn typeSupportsFieldAccess(zcu: *const Zcu, ty: Type, field_name: InternPool.NullTerminatedString) bool {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Array => return field_name.eqlSlice("len", ip),
- .Pointer => {
+ .array => return field_name.eqlSlice("len", ip),
+ .pointer => {
const ptr_info = ty.ptrInfo(zcu);
if (ptr_info.flags.size == .Slice) {
return field_name.eqlSlice("ptr", ip) or field_name.eqlSlice("len", ip);
- } else if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Array) {
+ } else if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .array) {
return field_name.eqlSlice("len", ip);
} else return false;
},
- .Type, .Struct, .Union => return true,
+ .type, .@"struct", .@"union" => return true,
else => return false,
}
}
@@ -3384,9 +3384,9 @@ fn ensureResultUsed(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Void, .NoReturn => return,
- .ErrorSet => return sema.fail(block, src, "error set is ignored", .{}),
- .ErrorUnion => {
+ .void, .noreturn => return,
+ .error_set => return sema.fail(block, src, "error set is ignored", .{}),
+ .error_union => {
const msg = msg: {
const msg = try sema.errMsg(src, "error union is ignored", .{});
errdefer msg.destroy(sema.gpa);
@@ -3419,8 +3419,8 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const src = block.nodeOffset(inst_data.src_node);
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(zcu)) {
- .ErrorSet => return sema.fail(block, src, "error set is discarded", .{}),
- .ErrorUnion => {
+ .error_set => return sema.fail(block, src, "error set is discarded", .{}),
+ .error_union => {
const msg = msg: {
const msg = try sema.errMsg(src, "error union is discarded", .{});
errdefer msg.destroy(sema.gpa);
@@ -3443,13 +3443,13 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const err_union_ty = if (operand_ty.zigTypeTag(zcu) == .Pointer)
+ const err_union_ty = if (operand_ty.zigTypeTag(zcu) == .pointer)
operand_ty.childType(zcu)
else
operand_ty;
- if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) return;
+ if (err_union_ty.zigTypeTag(zcu) != .error_union) return;
const payload_ty = err_union_ty.errorUnionPayload(zcu).zigTypeTag(zcu);
- if (payload_ty != .Void and payload_ty != .NoReturn) {
+ if (payload_ty != .void and payload_ty != .noreturn) {
const msg = msg: {
const msg = try sema.errMsg(src, "error union payload is ignored", .{});
errdefer msg.destroy(sema.gpa);
@@ -4198,7 +4198,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
_ = try replacement_block.addBr(placeholder_inst, .void_value);
try sema.air_extra.ensureUnusedCapacity(
gpa,
- @typeInfo(Air.Block).Struct.fields.len + replacement_block.instructions.items.len,
+ @typeInfo(Air.Block).@"struct".fields.len + replacement_block.instructions.items.len,
);
sema.air_instructions.set(@intFromEnum(placeholder_inst), .{
.tag = .block,
@@ -4244,7 +4244,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// Each arg could be an indexable, or a range, in which case the length
// is passed directly as an integer.
const is_int = switch (object_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt => true,
+ .int, .comptime_int => true,
else => false,
};
const arg_src = block.src(.{ .for_input = .{
@@ -4259,7 +4259,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
errdefer msg.destroy(sema.gpa);
try sema.errNote(arg_src, msg, "for loop operand must be a range, array, slice, tuple, or vector", .{});
- if (object_ty.zigTypeTag(zcu) == .ErrorUnion) {
+ if (object_ty.zigTypeTag(zcu) == .error_union) {
try sema.errNote(arg_src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
@@ -4319,7 +4319,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// Each arg could be an indexable, or a range, in which case the length
// is passed directly as an integer.
switch (object_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt => continue,
+ .int, .comptime_int => continue,
else => {},
}
const arg_src = block.src(.{ .for_input = .{
@@ -4357,8 +4357,8 @@ fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcL
const zcu = pt.zcu;
var base_ptr = ptr;
while (true) switch (sema.typeOf(base_ptr).childType(zcu).zigTypeTag(zcu)) {
- .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
- .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
+ .error_union => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
+ .optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
else => break,
};
try sema.checkKnownAllocPtr(block, ptr, base_ptr);
@@ -4383,12 +4383,12 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
else => |e| return e,
};
const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(zcu);
- assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+ assert(ptr_ty.zigTypeTag(zcu) == .pointer); // validated by a previous instruction
const elem_ty = ptr_ty.childType(zcu);
switch (ptr_ty.ptrSize(zcu)) {
.One => {
const uncoerced_ty = sema.typeOf(uncoerced_val);
- if (elem_ty.zigTypeTag(zcu) == .Array and elem_ty.childType(zcu).toIntern() == uncoerced_ty.toIntern()) {
+ if (elem_ty.zigTypeTag(zcu) == .array and elem_ty.childType(zcu).toIntern() == uncoerced_ty.toIntern()) {
// We're trying to initialize a *[1]T with a reference to a T - don't perform any coercion.
return uncoerced_val;
}
@@ -4403,7 +4403,7 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
// Our goal is to coerce `uncoerced_val` to an array of `elem_ty`.
const val_ty = sema.typeOf(uncoerced_val);
switch (val_ty.zigTypeTag(zcu)) {
- .Array, .Vector => {},
+ .array, .vector => {},
else => if (!val_ty.isTuple(zcu)) {
return sema.fail(block, src, "expected array of '{}', found '{}'", .{ elem_ty.fmt(pt), val_ty.fmt(pt) });
},
@@ -4439,7 +4439,7 @@ fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
else => |e| return e,
};
if (ty_operand.isGenericPoison()) return;
- if (ty_operand.optEuBaseType(zcu).zigTypeTag(zcu) != .Pointer) {
+ if (ty_operand.optEuBaseType(zcu).zigTypeTag(zcu) != .pointer) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "expected type '{}', found pointer", .{ty_operand.fmt(pt)});
errdefer msg.destroy(sema.gpa);
@@ -4464,7 +4464,7 @@ fn zirValidateArrayInitRefTy(
else => |e| return e,
};
const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(zcu);
- assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+ assert(ptr_ty.zigTypeTag(zcu) == .pointer); // validated by a previous instruction
switch (zcu.intern_pool.indexToKey(ptr_ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.Slice, .Many => {
@@ -4523,7 +4523,7 @@ fn validateArrayInitTy(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Array => {
+ .array => {
const array_len = ty.arrayLen(zcu);
if (init_count != array_len) {
return sema.fail(block, src, "expected {d} array elements; found {d}", .{
@@ -4532,7 +4532,7 @@ fn validateArrayInitTy(
}
return;
},
- .Vector => {
+ .vector => {
const array_len = ty.arrayLen(zcu);
if (init_count != array_len) {
return sema.fail(block, src, "expected {d} vector elements; found {d}", .{
@@ -4541,7 +4541,7 @@ fn validateArrayInitTy(
}
return;
},
- .Struct => if (ty.isTuple(zcu)) {
+ .@"struct" => if (ty.isTuple(zcu)) {
try ty.resolveFields(pt);
const array_len = ty.arrayLen(zcu);
if (init_count > array_len) {
@@ -4574,7 +4574,7 @@ fn zirValidateStructInitTy(
const struct_ty = if (is_result_ty) ty.optEuBaseType(zcu) else ty;
switch (struct_ty.zigTypeTag(zcu)) {
- .Struct, .Union => return,
+ .@"struct", .@"union" => return,
else => {},
}
return sema.failWithStructInitNotSupported(block, src, struct_ty);
@@ -4599,13 +4599,13 @@ fn zirValidatePtrStructInit(
const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
const agg_ty = sema.typeOf(object_ptr).childType(zcu).optEuBaseType(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
- .Struct => return sema.validateStructInit(
+ .@"struct" => return sema.validateStructInit(
block,
agg_ty,
init_src,
instrs,
),
- .Union => return sema.validateUnionInit(
+ .@"union" => return sema.validateUnionInit(
block,
agg_ty,
init_src,
@@ -5084,7 +5084,7 @@ fn zirValidatePtrArrayInit(
);
if (instrs.len != array_len) switch (array_ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
var root_msg: ?*Zcu.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
@@ -5110,12 +5110,12 @@ fn zirValidatePtrArrayInit(
return sema.failWithOwnedErrorMsg(block, msg);
}
},
- .Array => {
+ .array => {
return sema.fail(block, init_src, "expected {d} array elements; found {d}", .{
array_len, instrs.len,
});
},
- .Vector => {
+ .vector => {
return sema.fail(block, init_src, "expected {d} vector elements; found {d}", .{
array_len, instrs.len,
});
@@ -5277,7 +5277,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- if (operand_ty.zigTypeTag(zcu) != .Pointer) {
+ if (operand_ty.zigTypeTag(zcu) != .pointer) {
return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(pt)});
} else switch (operand_ty.ptrSize(zcu)) {
.One, .C => {},
@@ -5322,8 +5322,8 @@ fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
const operand_ty = sema.typeOf(operand);
const can_destructure = switch (operand_ty.zigTypeTag(zcu)) {
- .Array, .Vector => true,
- .Struct => operand_ty.isTuple(zcu),
+ .array, .vector => true,
+ .@"struct" => operand_ty.isTuple(zcu),
else => false,
};
@@ -5360,10 +5360,10 @@ fn failWithBadMemberAccess(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const kw_name = switch (agg_ty.zigTypeTag(zcu)) {
- .Union => "union",
- .Struct => "struct",
- .Opaque => "opaque",
- .Enum => "enum",
+ .@"union" => "union",
+ .@"struct" => "struct",
+ .@"opaque" => "opaque",
+ .@"enum" => "enum",
else => unreachable,
};
if (agg_ty.typeDeclInst(zcu)) |inst| if ((inst.resolve(ip) orelse return error.AnalysisFail) == .main_struct_inst) {
@@ -5432,10 +5432,10 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Zcu.ErrorMsg, decl_ty: Type) !void
const zcu = sema.pt.zcu;
const src_loc = decl_ty.srcLocOrNull(zcu) orelse return;
const category = switch (decl_ty.zigTypeTag(zcu)) {
- .Union => "union",
- .Struct => "struct",
- .Enum => "enum",
- .Opaque => "opaque",
+ .@"union" => "union",
+ .@"struct" => "struct",
+ .@"enum" => "enum",
+ .@"opaque" => "opaque",
else => unreachable,
};
try sema.errNote(src_loc, parent, "{s} declared here", .{category});
@@ -5562,7 +5562,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
// Where %c is an error union or error set. In such case we need to add
// to the current function's inferred error set, if any.
if (is_ret and sema.fn_ret_ty_ies != null) switch (sema.typeOf(operand).zigTypeTag(zcu)) {
- .ErrorUnion, .ErrorSet => try sema.addToInferredErrorSet(operand),
+ .error_union, .error_set => try sema.addToInferredErrorSet(operand),
else => {},
};
@@ -5819,7 +5819,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError
} else {
try child_block.instructions.append(gpa, loop_inst);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len);
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len + loop_block_len);
sema.air_instructions.items(.data)[@intFromEnum(loop_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(
Air.Block{ .body_len = @intCast(loop_block_len) },
);
@@ -6035,7 +6035,7 @@ fn resolveBlockBody(
// We need a runtime block for scoping reasons.
_ = try child_block.addBr(merges.block_inst, .void_value);
try parent_block.instructions.append(sema.gpa, merges.block_inst);
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).@"struct".fields.len +
child_block.instructions.items.len);
sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
.ty = .void_type,
@@ -6111,7 +6111,7 @@ fn resolveAnalyzedBlock(
.dbg_inline_block => {
// Create a block containing all instruction from the body.
try parent_block.instructions.append(gpa, merges.block_inst);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).@"struct".fields.len +
child_block.instructions.items.len);
sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
.ty = .noreturn_type,
@@ -6149,7 +6149,7 @@ fn resolveAnalyzedBlock(
try parent_block.instructions.append(gpa, merges.block_inst);
switch (block_tag) {
.block => {
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
child_block.instructions.items.len);
sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
.ty = .void_type,
@@ -6159,7 +6159,7 @@ fn resolveAnalyzedBlock(
} };
},
.dbg_inline_block => {
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).@"struct".fields.len +
child_block.instructions.items.len);
sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
.ty = .void_type,
@@ -6210,7 +6210,7 @@ fn resolveAnalyzedBlock(
const ty_inst = Air.internedToRef(resolved_ty.toIntern());
switch (block_tag) {
.block => {
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
child_block.instructions.items.len);
sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
.ty = ty_inst,
@@ -6220,7 +6220,7 @@ fn resolveAnalyzedBlock(
} };
},
.dbg_inline_block => {
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.DbgInlineBlock).@"struct".fields.len +
child_block.instructions.items.len);
sema.air_instructions.items(.data)[@intFromEnum(merges.block_inst)] = .{ .ty_pl = .{
.ty = ty_inst,
@@ -6257,7 +6257,7 @@ fn resolveAnalyzedBlock(
// Convert the br instruction to a block instruction that has the coercion
// and then a new br inside that returns the coerced instruction.
const sub_block_len: u32 = @intCast(coerce_block.instructions.items.len + 1);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
sub_block_len);
try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
const sub_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
@@ -6306,7 +6306,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const options = try sema.resolveExportOptions(block, options_src, extra.options);
{
- if (ptr_ty.zigTypeTag(zcu) != .Pointer) {
+ if (ptr_ty.zigTypeTag(zcu) != .pointer) {
return sema.fail(block, ptr_src, "expected pointer type, found '{}'", .{ptr_ty.fmt(pt)});
}
const ptr_ty_info = ptr_ty.ptrInfo(zcu);
@@ -6856,7 +6856,7 @@ fn popErrorReturnTrace(
// The result might be an error. If it is, we leave the error trace alone. If it isn't, we need
// to pop any error trace that may have been propagated from our arguments.
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len);
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len);
const cond_block_inst = try block.addInstAsIndex(.{
.tag = .block,
.data = .{
@@ -6885,9 +6885,9 @@ fn popErrorReturnTrace(
defer else_block.instructions.deinit(gpa);
_ = try else_block.addBr(cond_block_inst, .void_value);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
- @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block
+ @typeInfo(Air.Block).@"struct".fields.len + 1); // +1 for the sole .cond_br instruction in the .block
const cond_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
try sema.air_instructions.append(gpa, .{
@@ -7046,17 +7046,17 @@ fn checkCallArgumentCount(
const zcu = pt.zcu;
const func_ty = func_ty: {
switch (callee_ty.zigTypeTag(zcu)) {
- .Fn => break :func_ty callee_ty,
- .Pointer => {
+ .@"fn" => break :func_ty callee_ty,
+ .pointer => {
const ptr_info = callee_ty.ptrInfo(zcu);
- if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Fn) {
+ if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") {
break :func_ty Type.fromInterned(ptr_info.child);
}
},
- .Optional => {
+ .optional => {
const opt_child = callee_ty.optionalChild(zcu);
- if (opt_child.zigTypeTag(zcu) == .Fn or (opt_child.isSinglePointer(zcu) and
- opt_child.childType(zcu).zigTypeTag(zcu) == .Fn))
+ if (opt_child.zigTypeTag(zcu) == .@"fn" or (opt_child.isSinglePointer(zcu) and
+ opt_child.childType(zcu).zigTypeTag(zcu) == .@"fn"))
{
const msg = msg: {
const msg = try sema.errMsg(func_src, "cannot call optional type '{}'", .{
@@ -7125,10 +7125,10 @@ fn callBuiltin(
const callee_ty = sema.typeOf(builtin_fn);
const func_ty = func_ty: {
switch (callee_ty.zigTypeTag(zcu)) {
- .Fn => break :func_ty callee_ty,
- .Pointer => {
+ .@"fn" => break :func_ty callee_ty,
+ .pointer => {
const ptr_info = callee_ty.ptrInfo(zcu);
- if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Fn) {
+ if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") {
break :func_ty Type.fromInterned(ptr_info.child);
}
},
@@ -7276,7 +7276,7 @@ const CallArgsInfo = union(enum) {
// Resolve the arg!
const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst);
- if (sema.typeOf(uncoerced_arg).zigTypeTag(zcu) == .NoReturn) {
+ if (sema.typeOf(uncoerced_arg).zigTypeTag(zcu) == .noreturn) {
// This terminates resolution of arguments. The caller should
// propagate this.
return uncoerced_arg;
@@ -7863,7 +7863,7 @@ fn analyzeCall(
if (param_ty) |t| assert(!t.isGenericPoison());
arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func);
try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg_out.*);
- if (sema.typeOf(arg_out.*).zigTypeTag(zcu) == .NoReturn) {
+ if (sema.typeOf(arg_out.*).zigTypeTag(zcu) == .noreturn) {
return arg_out.*;
}
}
@@ -7884,7 +7884,7 @@ fn analyzeCall(
}
}
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).@"struct".fields.len +
args.len);
const func_inst = try block.addInst(.{
.tag = call_tag,
@@ -7991,7 +7991,7 @@ fn analyzeInlineCallArg(
};
new_param_types[arg_i.*] = param_ty;
const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.fromInterned(param_ty), func_ty_info, func_inst);
- if (ics.caller().typeOf(casted_arg).zigTypeTag(zcu) == .NoReturn) {
+ if (ics.caller().typeOf(casted_arg).zigTypeTag(zcu) == .noreturn) {
return casted_arg;
}
const arg_src = args_info.argSrc(arg_block, arg_i.*);
@@ -8039,7 +8039,7 @@ fn analyzeInlineCallArg(
.param_anytype, .param_anytype_comptime => {
// No coercion needed.
const uncasted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.generic_poison, func_ty_info, func_inst);
- if (ics.caller().typeOf(uncasted_arg).zigTypeTag(zcu) == .NoReturn) {
+ if (ics.caller().typeOf(uncasted_arg).zigTypeTag(zcu) == .noreturn) {
return uncasted_arg;
}
const arg_src = args_info.argSrc(arg_block, arg_i.*);
@@ -8228,7 +8228,7 @@ fn instantiateGenericCall(
const arg_ref = try args_info.analyzeArg(sema, block, arg_index, param_ty, generic_owner_ty_info, func);
try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_index), arg_ref);
const arg_ty = sema.typeOf(arg_ref);
- if (arg_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (arg_ty.zigTypeTag(zcu) == .noreturn) {
// This terminates argument analysis.
return arg_ref;
}
@@ -8345,7 +8345,7 @@ fn instantiateGenericCall(
try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index }));
try zcu.ensureFuncBodyAnalysisQueued(callee_index);
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args.items.len);
+ try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).@"struct".fields.len + runtime_args.items.len);
const result = try block.addInst(.{
.tag = call_tag,
.data = .{ .pl_op = .{
@@ -8404,9 +8404,9 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
- if (child_type.zigTypeTag(zcu) == .Opaque) {
+ if (child_type.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(pt)});
- } else if (child_type.zigTypeTag(zcu) == .Null) {
+ } else if (child_type.zigTypeTag(zcu) == .null) {
return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(pt)});
}
const opt_type = try pt.optionalType(child_type.toIntern());
@@ -8429,7 +8429,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(zcu);
try indexable_ty.resolveFields(pt);
assert(indexable_ty.isIndexable(zcu)); // validated by a previous instruction
- if (indexable_ty.zigTypeTag(zcu) == .Struct) {
+ if (indexable_ty.zigTypeTag(zcu) == .@"struct") {
const elem_type = indexable_ty.fieldType(@intFromEnum(bin.rhs), zcu);
return Air.internedToRef(elem_type.toIntern());
} else {
@@ -8447,7 +8447,7 @@ fn zirElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else => |e| return e,
};
const ptr_ty = maybe_wrapped_ptr_ty.optEuBaseType(zcu);
- assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+ assert(ptr_ty.zigTypeTag(zcu) == .pointer); // validated by a previous instruction
const elem_ty = ptr_ty.childType(zcu);
if (elem_ty.toIntern() == .anyopaque_type) {
// The pointer's actual child type is effectively unknown, so it makes
@@ -8561,9 +8561,9 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
const pt = sema.pt;
const zcu = pt.zcu;
- if (elem_type.zigTypeTag(zcu) == .Opaque) {
+ if (elem_type.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(pt)});
- } else if (elem_type.zigTypeTag(zcu) == .NoReturn) {
+ } else if (elem_type.zigTypeTag(zcu) == .noreturn) {
return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{});
}
}
@@ -8597,7 +8597,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const error_set = try sema.resolveType(block, lhs_src, extra.lhs);
const payload = try sema.resolveType(block, rhs_src, extra.rhs);
- if (error_set.zigTypeTag(zcu) != .ErrorSet) {
+ if (error_set.zigTypeTag(zcu) != .error_set) {
return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{
error_set.fmt(pt),
});
@@ -8610,11 +8610,11 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
const pt = sema.pt;
const zcu = pt.zcu;
- if (payload_ty.zigTypeTag(zcu) == .Opaque) {
+ if (payload_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{
payload_ty.fmt(pt),
});
- } else if (payload_ty.zigTypeTag(zcu) == .ErrorSet) {
+ } else if (payload_ty.zigTypeTag(zcu) == .error_set) {
return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{
payload_ty.fmt(pt),
});
@@ -8741,7 +8741,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
- if (sema.typeOf(lhs).zigTypeTag(zcu) == .Bool and sema.typeOf(rhs).zigTypeTag(zcu) == .Bool) {
+ if (sema.typeOf(lhs).zigTypeTag(zcu) == .bool and sema.typeOf(rhs).zigTypeTag(zcu) == .bool) {
const msg = msg: {
const msg = try sema.errMsg(lhs_src, "expected error set type, found 'bool'", .{});
errdefer msg.destroy(sema.gpa);
@@ -8752,9 +8752,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
}
const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
- if (lhs_ty.zigTypeTag(zcu) != .ErrorSet)
+ if (lhs_ty.zigTypeTag(zcu) != .error_set)
return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(pt)});
- if (rhs_ty.zigTypeTag(zcu) != .ErrorSet)
+ if (rhs_ty.zigTypeTag(zcu) != .error_set)
return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(pt)});
// Anything merged with anyerror is anyerror.
@@ -8807,8 +8807,8 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const operand_ty = sema.typeOf(operand);
const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(zcu)) {
- .Enum => operand,
- .Union => blk: {
+ .@"enum" => operand,
+ .@"union" => blk: {
try operand_ty.resolveFields(pt);
const tag_ty = operand_ty.unionTagType(zcu) orelse {
return sema.fail(
@@ -8865,7 +8865,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu_opt, "@enumFromInt");
const operand = try sema.resolveInst(extra.rhs);
- if (dest_ty.zigTypeTag(zcu) != .Enum) {
+ if (dest_ty.zigTypeTag(zcu) != .@"enum") {
return sema.fail(block, src, "expected enum, found '{}'", .{dest_ty.fmt(pt)});
}
_ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
@@ -8891,7 +8891,7 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern());
}
- if (dest_ty.intTagType(zcu).zigTypeTag(zcu) == .ComptimeInt) {
+ if (dest_ty.intTagType(zcu).zigTypeTag(zcu) == .comptime_int) {
return sema.failWithNeededComptime(block, operand_src, .{
.needed_comptime_reason = "value being casted to enum with 'comptime_int' tag type must be comptime-known",
});
@@ -8945,10 +8945,10 @@ fn analyzeOptionalPayloadPtr(
const pt = sema.pt;
const zcu = pt.zcu;
const optional_ptr_ty = sema.typeOf(optional_ptr);
- assert(optional_ptr_ty.zigTypeTag(zcu) == .Pointer);
+ assert(optional_ptr_ty.zigTypeTag(zcu) == .pointer);
const opt_type = optional_ptr_ty.childType(zcu);
- if (opt_type.zigTypeTag(zcu) != .Optional) {
+ if (opt_type.zigTypeTag(zcu) != .optional) {
return sema.failWithExpectedOptionalType(block, src, opt_type);
}
@@ -9019,8 +9019,8 @@ fn zirOptionalPayload(
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const result_ty = switch (operand_ty.zigTypeTag(zcu)) {
- .Optional => operand_ty.optionalChild(zcu),
- .Pointer => t: {
+ .optional => operand_ty.optionalChild(zcu),
+ .pointer => t: {
if (operand_ty.ptrSize(zcu) != .C) {
return sema.failWithExpectedOptionalType(block, src, operand_ty);
}
@@ -9072,7 +9072,7 @@ fn zirErrUnionPayload(
const operand = try sema.resolveInst(inst_data.operand);
const operand_src = src;
const err_union_ty = sema.typeOf(operand);
- if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
+ if (err_union_ty.zigTypeTag(zcu) != .error_union) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(pt),
});
@@ -9138,9 +9138,9 @@ fn analyzeErrUnionPayloadPtr(
const pt = sema.pt;
const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
- assert(operand_ty.zigTypeTag(zcu) == .Pointer);
+ assert(operand_ty.zigTypeTag(zcu) == .pointer);
- if (operand_ty.childType(zcu).zigTypeTag(zcu) != .ErrorUnion) {
+ if (operand_ty.childType(zcu).zigTypeTag(zcu) != .error_union) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.childType(zcu).fmt(pt),
});
@@ -9216,7 +9216,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air
const pt = sema.pt;
const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
- if (operand_ty.zigTypeTag(zcu) != .ErrorUnion) {
+ if (operand_ty.zigTypeTag(zcu) != .error_union) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.fmt(pt),
});
@@ -9250,9 +9250,9 @@ fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand:
const pt = sema.pt;
const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
- assert(operand_ty.zigTypeTag(zcu) == .Pointer);
+ assert(operand_ty.zigTypeTag(zcu) == .pointer);
- if (operand_ty.childType(zcu).zigTypeTag(zcu) != .ErrorUnion) {
+ if (operand_ty.childType(zcu).zigTypeTag(zcu) != .error_union) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.childType(zcu).fmt(pt),
});
@@ -9587,7 +9587,7 @@ fn funcCommon(
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
}
if (!param_ty.isValidParamType(zcu)) {
- const opaque_str = if (param_ty.zigTypeTag(zcu) == .Opaque) "opaque " else "";
+ const opaque_str = if (param_ty.zigTypeTag(zcu) == .@"opaque") "opaque " else "";
return sema.fail(block, param_src, "parameter of {s}type '{}' not allowed", .{
opaque_str, param_ty.fmt(pt),
});
@@ -9621,7 +9621,7 @@ fn funcCommon(
return sema.failWithOwnedErrorMsg(block, msg);
}
if (is_source_decl and !this_generic and is_noalias and
- !(param_ty.zigTypeTag(zcu) == .Pointer or param_ty.isPtrLikeOptional(zcu)))
+ !(param_ty.zigTypeTag(zcu) == .pointer or param_ty.isPtrLikeOptional(zcu)))
{
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
}
@@ -9629,7 +9629,7 @@ fn funcCommon(
.Interrupt => if (target.cpu.arch.isX86()) {
const err_code_size = target.ptrBitWidth();
switch (i) {
- 0 => if (param_ty.zigTypeTag(zcu) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}),
+ 0 => if (param_ty.zigTypeTag(zcu) != .pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}),
1 => if (param_ty.bitSize(zcu) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}),
else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}),
}
@@ -9891,7 +9891,7 @@ fn finishFunc(
Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));
if (!return_type.isValidReturnType(zcu)) {
- const opaque_str = if (return_type.zigTypeTag(zcu) == .Opaque) "opaque " else "";
+ const opaque_str = if (return_type.zigTypeTag(zcu) == .@"opaque") "opaque " else "";
return sema.fail(block, ret_ty_src, "{s}return type '{}' not allowed", .{
opaque_str, return_type.fmt(pt),
});
@@ -9953,7 +9953,7 @@ fn finishFunc(
}
switch (cc_resolved) {
- .Interrupt, .Signal => if (return_type.zigTypeTag(zcu) != .Void and return_type.zigTypeTag(zcu) != .NoReturn) {
+ .Interrupt, .Signal => if (return_type.zigTypeTag(zcu) != .void and return_type.zigTypeTag(zcu) != .noreturn) {
return sema.fail(block, ret_ty_src, "function with calling convention '{s}' must return 'void' or 'noreturn'", .{@tagName(cc_resolved)});
},
.Inline => if (is_noinline) {
@@ -10154,11 +10154,11 @@ fn analyzeAs(
error.GenericPoison => return operand,
};
- if (dest_ty_tag == .Opaque) {
+ if (dest_ty_tag == .@"opaque") {
return sema.fail(block, src, "cannot cast to opaque type '{}'", .{dest_ty.fmt(pt)});
}
- if (dest_ty_tag == .NoReturn) {
+ if (dest_ty_tag == .noreturn) {
return sema.fail(block, src, "cannot cast to noreturn", .{});
}
@@ -10183,7 +10183,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const ptr_ty = operand_ty.scalarType(zcu);
- const is_vector = operand_ty.zigTypeTag(zcu) == .Vector;
+ const is_vector = operand_ty.zigTypeTag(zcu) == .vector;
if (!ptr_ty.isPtrAtRuntime(zcu)) {
return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)});
}
@@ -10305,7 +10305,7 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
const object_ptr = try sema.resolveInst(extra.lhs);
const struct_ty = sema.typeOf(object_ptr).childType(zcu);
switch (struct_ty.zigTypeTag(zcu)) {
- .Struct, .Union => {
+ .@"struct", .@"union" => {
return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, true);
},
else => {
@@ -10377,12 +10377,12 @@ fn intCast(
if (try sema.isComptimeKnown(operand)) {
return sema.coerce(block, dest_ty, operand, operand_src);
- } else if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
+ } else if (dest_scalar_ty.zigTypeTag(zcu) == .comptime_int) {
return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
}
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
- const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
+ const is_vector = dest_ty.zigTypeTag(zcu) == .vector;
if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| {
// requirement: intCast(u0, input) iff input == 0
@@ -10529,29 +10529,29 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
switch (dest_ty.zigTypeTag(zcu)) {
- .AnyFrame,
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .ErrorSet,
- .ErrorUnion,
- .Fn,
- .Frame,
- .NoReturn,
- .Null,
- .Opaque,
- .Optional,
- .Type,
- .Undefined,
- .Void,
+ .@"anyframe",
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .error_set,
+ .error_union,
+ .@"fn",
+ .frame,
+ .noreturn,
+ .null,
+ .@"opaque",
+ .optional,
+ .type,
+ .undefined,
+ .void,
=> return sema.fail(block, src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)}),
- .Enum => {
+ .@"enum" => {
const msg = msg: {
const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (operand_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
+ .int, .comptime_int => try sema.errNote(src, msg, "use @enumFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
else => {},
}
@@ -10560,13 +10560,13 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Pointer => {
+ .pointer => {
const msg = msg: {
const msg = try sema.errMsg(src, "cannot @bitCast to '{}'", .{dest_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (operand_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
- .Pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(pt)}),
+ .int, .comptime_int => try sema.errNote(src, msg, "use @ptrFromInt to cast from '{}'", .{operand_ty.fmt(pt)}),
+ .pointer => try sema.errNote(src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(pt)}),
else => {},
}
@@ -10574,10 +10574,10 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Struct, .Union => if (dest_ty.containerLayout(zcu) == .auto) {
+ .@"struct", .@"union" => if (dest_ty.containerLayout(zcu) == .auto) {
const container = switch (dest_ty.zigTypeTag(zcu)) {
- .Struct => "struct",
- .Union => "union",
+ .@"struct" => "struct",
+ .@"union" => "union",
else => unreachable,
};
return sema.fail(block, src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{
@@ -10585,37 +10585,37 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
});
},
- .Array,
- .Bool,
- .Float,
- .Int,
- .Vector,
+ .array,
+ .bool,
+ .float,
+ .int,
+ .vector,
=> {},
}
switch (operand_ty.zigTypeTag(zcu)) {
- .AnyFrame,
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .ErrorSet,
- .ErrorUnion,
- .Fn,
- .Frame,
- .NoReturn,
- .Null,
- .Opaque,
- .Optional,
- .Type,
- .Undefined,
- .Void,
+ .@"anyframe",
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .error_set,
+ .error_union,
+ .@"fn",
+ .frame,
+ .noreturn,
+ .null,
+ .@"opaque",
+ .optional,
+ .type,
+ .undefined,
+ .void,
=> return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)}),
- .Enum => {
+ .@"enum" => {
const msg = msg: {
const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (dest_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(pt)}),
+ .int, .comptime_int => try sema.errNote(operand_src, msg, "use @intFromEnum to cast to '{}'", .{dest_ty.fmt(pt)}),
else => {},
}
@@ -10623,13 +10623,13 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Pointer => {
+ .pointer => {
const msg = msg: {
const msg = try sema.errMsg(operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (dest_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(pt)}),
- .Pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(pt)}),
+ .int, .comptime_int => try sema.errNote(operand_src, msg, "use @intFromPtr to cast to '{}'", .{dest_ty.fmt(pt)}),
+ .pointer => try sema.errNote(operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(pt)}),
else => {},
}
@@ -10637,10 +10637,10 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Struct, .Union => if (operand_ty.containerLayout(zcu) == .auto) {
+ .@"struct", .@"union" => if (operand_ty.containerLayout(zcu) == .auto) {
const container = switch (operand_ty.zigTypeTag(zcu)) {
- .Struct => "struct",
- .Union => "union",
+ .@"struct" => "struct",
+ .@"union" => "union",
else => unreachable,
};
return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{
@@ -10648,11 +10648,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
});
},
- .Array,
- .Bool,
- .Float,
- .Int,
- .Vector,
+ .array,
+ .bool,
+ .float,
+ .int,
+ .vector,
=> {},
}
return sema.bitCast(block, dest_ty, operand, block.nodeOffset(inst_data.src_node), operand_src);
@@ -10677,12 +10677,12 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand_scalar_ty = operand_ty.scalarType(zcu);
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
- const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
+ const is_vector = dest_ty.zigTypeTag(zcu) == .vector;
const target = zcu.getTarget();
const dest_is_comptime_float = switch (dest_scalar_ty.zigTypeTag(zcu)) {
- .ComptimeFloat => true,
- .Float => false,
+ .comptime_float => true,
+ .float => false,
else => return sema.fail(
block,
src,
@@ -10692,7 +10692,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
switch (operand_scalar_ty.zigTypeTag(zcu)) {
- .ComptimeFloat, .Float, .ComptimeInt => {},
+ .comptime_float, .float, .comptime_int => {},
else => return sema.fail(
block,
operand_src,
@@ -10787,7 +10787,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
const indexable_ty = sema.typeOf(array_ptr);
- if (indexable_ty.zigTypeTag(zcu) != .Pointer) {
+ if (indexable_ty.zigTypeTag(zcu) != .pointer) {
const capture_src = block.src(.{ .for_capture_from_input = inst_data.src_node });
const msg = msg: {
const msg = try sema.errMsg(capture_src, "pointer capture of non pointer type '{}'", .{
@@ -10831,7 +10831,7 @@ fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
const elem_index = try pt.intRef(Type.usize, extra.index);
const array_ty = sema.typeOf(array_ptr).childType(zcu);
switch (array_ty.zigTypeTag(zcu)) {
- .Array, .Vector => {},
+ .array, .vector => {},
else => if (!array_ty.isTuple(zcu)) {
return sema.failWithArrayInitNotSupported(block, src, array_ty);
},
@@ -11066,7 +11066,7 @@ const SwitchProngAnalysis = struct {
const pt = sema.pt;
const zcu = pt.zcu;
const operand_ty = sema.typeOf(spa.operand);
- if (operand_ty.zigTypeTag(zcu) != .Union) {
+ if (operand_ty.zigTypeTag(zcu) != .@"union") {
const tag_capture_src: LazySrcLoc = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_tag_capture = capture_src.offset.switch_capture },
@@ -11102,7 +11102,7 @@ const SwitchProngAnalysis = struct {
if (inline_case_capture != .none) {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inline_case_capture, undefined) catch unreachable;
- if (operand_ty.zigTypeTag(zcu) == .Union) {
+ if (operand_ty.zigTypeTag(zcu) == .@"union") {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?);
const union_obj = zcu.typeToUnion(operand_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
@@ -11139,7 +11139,7 @@ const SwitchProngAnalysis = struct {
}
switch (operand_ty.zigTypeTag(zcu)) {
- .ErrorSet => if (spa.else_error_ty) |ty| {
+ .error_set => if (spa.else_error_ty) |ty| {
return sema.bitCast(block, ty, spa.operand, operand_src, null);
} else {
try sema.analyzeUnreachable(block, operand_src, false);
@@ -11150,7 +11150,7 @@ const SwitchProngAnalysis = struct {
}
switch (operand_ty.zigTypeTag(zcu)) {
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(operand_ty).?;
const first_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable;
@@ -11370,9 +11370,9 @@ const SwitchProngAnalysis = struct {
break :len coerce_block.instructions.items.len;
};
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.SwitchBr).@"struct".fields.len +
cases_extra.items.len +
- @typeInfo(Air.Block).Struct.fields.len +
+ @typeInfo(Air.Block).@"struct".fields.len +
1);
const switch_br_inst: u32 = @intCast(sema.air_instructions.len);
@@ -11396,7 +11396,7 @@ const SwitchProngAnalysis = struct {
return capture_block_inst.toRef();
},
- .ErrorSet => {
+ .error_set => {
if (capture_byref) {
return sema.fail(
block,
@@ -11444,18 +11444,18 @@ fn switchCond(
const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(zcu)) {
- .Type,
- .Void,
- .Bool,
- .Int,
- .Float,
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .Pointer,
- .Fn,
- .ErrorSet,
- .Enum,
+ .type,
+ .void,
+ .bool,
+ .int,
+ .float,
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .pointer,
+ .@"fn",
+ .error_set,
+ .@"enum",
=> {
if (operand_ty.isSlice(zcu)) {
return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)});
@@ -11466,7 +11466,7 @@ fn switchCond(
return operand;
},
- .Union => {
+ .@"union" => {
try operand_ty.resolveFields(pt);
const enum_ty = operand_ty.unionTagType(zcu) orelse {
const msg = msg: {
@@ -11482,17 +11482,17 @@ fn switchCond(
return sema.unionToTag(block, enum_ty, operand, src);
},
- .ErrorUnion,
- .NoReturn,
- .Array,
- .Struct,
- .Undefined,
- .Null,
- .Optional,
- .Opaque,
- .Vector,
- .Frame,
- .AnyFrame,
+ .error_union,
+ .noreturn,
+ .array,
+ .@"struct",
+ .undefined,
+ .null,
+ .optional,
+ .@"opaque",
+ .vector,
+ .frame,
+ .@"anyframe",
=> return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(pt)}),
}
}
@@ -11593,7 +11593,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
else
operand_ty;
- if (operand_err_set.zigTypeTag(zcu) != .ErrorUnion) {
+ if (operand_err_set.zigTypeTag(zcu) != .error_union) {
return sema.fail(block, switch_src, "expected error union type, found '{}'", .{
operand_ty.fmt(pt),
});
@@ -11791,7 +11791,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
true,
);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
true_instructions.len + sub_block.instructions.items.len);
_ = try child_block.addInst(.{
@@ -11886,7 +11886,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
};
const maybe_union_ty = sema.typeOf(raw_operand_val);
- const union_originally = maybe_union_ty.zigTypeTag(zcu) == .Union;
+ const union_originally = maybe_union_ty.zigTypeTag(zcu) == .@"union";
// Duplicate checking variables later also used for `inline else`.
var seen_enum_fields: []?LazySrcLoc = &.{};
@@ -11904,7 +11904,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
var empty_enum = false;
const operand_ty = sema.typeOf(operand);
- const err_set = operand_ty.zigTypeTag(zcu) == .ErrorSet;
+ const err_set = operand_ty.zigTypeTag(zcu) == .error_set;
var else_error_ty: ?Type = null;
@@ -11936,8 +11936,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
// Validate for duplicate items, missing else prong, and invalid range.
switch (operand_ty.zigTypeTag(zcu)) {
- .Union => unreachable, // handled in `switchCond`
- .Enum => {
+ .@"union" => unreachable, // handled in `switchCond`
+ .@"enum" => {
seen_enum_fields = try gpa.alloc(?LazySrcLoc, operand_ty.enumFieldCount(zcu));
empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(zcu);
@memset(seen_enum_fields, null);
@@ -12046,7 +12046,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
);
}
},
- .ErrorSet => else_error_ty = try validateErrSetSwitch(
+ .error_set => else_error_ty = try validateErrSetSwitch(
sema,
block,
&seen_errors,
@@ -12058,7 +12058,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
.{ .body = special.body, .end = special.end, .src = special_prong_src },
special_prong == .@"else",
),
- .Int, .ComptimeInt => {
+ .int, .comptime_int => {
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
@@ -12137,7 +12137,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
check_range: {
- if (operand_ty.zigTypeTag(zcu) == .Int) {
+ if (operand_ty.zigTypeTag(zcu) == .int) {
const min_int = try operand_ty.minInt(pt, operand_ty);
const max_int = try operand_ty.maxInt(pt, operand_ty);
if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) {
@@ -12162,7 +12162,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
}
},
- .Bool => {
+ .bool => {
var extra_index: usize = special.end;
{
var scalar_i: u32 = 0;
@@ -12238,7 +12238,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
},
}
},
- .EnumLiteral, .Void, .Fn, .Pointer, .Type => {
+ .enum_literal, .void, .@"fn", .pointer, .type => {
if (special_prong != .@"else") {
return sema.fail(
block,
@@ -12306,19 +12306,19 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
},
- .ErrorUnion,
- .NoReturn,
- .Array,
- .Struct,
- .Undefined,
- .Null,
- .Optional,
- .Opaque,
- .Vector,
- .Frame,
- .AnyFrame,
- .ComptimeFloat,
- .Float,
+ .error_union,
+ .noreturn,
+ .array,
+ .@"struct",
+ .undefined,
+ .null,
+ .optional,
+ .@"opaque",
+ .vector,
+ .frame,
+ .@"anyframe",
+ .comptime_float,
+ .float,
=> return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{
operand_ty.fmt(pt),
}),
@@ -12401,7 +12401,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src, false)) {
return .unreachable_value;
}
- if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(zcu) == .Enum and
+ if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(zcu) == .@"enum" and
(!operand_ty.isNonexhaustiveEnum(zcu) or union_originally))
{
try sema.zirDbgStmt(block, cond_dbg_node_index);
@@ -12502,7 +12502,7 @@ fn analyzeSwitchRuntimeBlock(
const block = child_block.parent.?;
const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
- @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2;
+ @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + 2;
var cases_extra = try std.ArrayListUnmanaged(u32).initCapacity(gpa, estimated_cases_extra);
defer cases_extra.deinit(gpa);
@@ -12536,7 +12536,7 @@ fn analyzeSwitchRuntimeBlock(
const unresolved_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
const item_val = sema.resolveLazyValue(unresolved_item_val) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
- break :blk field_ty.zigTypeTag(zcu) != .NoReturn;
+ break :blk field_ty.zigTypeTag(zcu) != .noreturn;
} else true;
const prong_hint: std.builtin.BranchHint = if (err_set and
@@ -12669,7 +12669,7 @@ fn analyzeSwitchRuntimeBlock(
const analyze_body = if (union_originally) blk: {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
- break :blk field_ty.zigTypeTag(zcu) != .NoReturn;
+ break :blk field_ty.zigTypeTag(zcu) != .noreturn;
} else true;
if (emit_bb) try sema.emitBackwardBranch(block, block.src(.{ .switch_case_item = .{
@@ -12722,7 +12722,7 @@ fn analyzeSwitchRuntimeBlock(
for (items) |item| {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
- if (field_ty.zigTypeTag(zcu) != .NoReturn) break true;
+ if (field_ty.zigTypeTag(zcu) != .noreturn) break true;
} else false
else
true;
@@ -12847,7 +12847,7 @@ fn analyzeSwitchRuntimeBlock(
} else {
try sema.air_extra.ensureUnusedCapacity(
gpa,
- @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len,
+ @typeInfo(Air.CondBr).@"struct".fields.len + prev_then_body.len + cond_body.len,
);
sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{
@@ -12869,7 +12869,7 @@ fn analyzeSwitchRuntimeBlock(
if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
var emit_bb = false;
if (special.is_inline) switch (operand_ty.zigTypeTag(zcu)) {
- .Enum => {
+ .@"enum" => {
if (operand_ty.isNonexhaustiveEnum(zcu) and !union_originally) {
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
operand_ty.fmt(pt),
@@ -12887,7 +12887,7 @@ fn analyzeSwitchRuntimeBlock(
const analyze_body = if (union_originally) blk: {
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
- break :blk field_ty.zigTypeTag(zcu) != .NoReturn;
+ break :blk field_ty.zigTypeTag(zcu) != .noreturn;
} else true;
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
@@ -12920,7 +12920,7 @@ fn analyzeSwitchRuntimeBlock(
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
},
- .ErrorSet => {
+ .error_set => {
if (operand_ty.isAnyError(zcu)) {
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
operand_ty.fmt(pt),
@@ -12966,7 +12966,7 @@ fn analyzeSwitchRuntimeBlock(
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
},
- .Int => {
+ .int => {
var it = try RangeSetUnhandledIterator.init(sema, operand_ty, range_set);
while (try it.next()) |cur| {
cases_len += 1;
@@ -13001,7 +13001,7 @@ fn analyzeSwitchRuntimeBlock(
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
},
- .Bool => {
+ .bool => {
if (true_count == 0) {
cases_len += 1;
@@ -13073,7 +13073,7 @@ fn analyzeSwitchRuntimeBlock(
if (zcu.backendSupportsFeature(.is_named_enum_value) and
special.body.len != 0 and block.wantSafety() and
- operand_ty.zigTypeTag(zcu) == .Enum and
+ operand_ty.zigTypeTag(zcu) == .@"enum" and
(!operand_ty.isNonexhaustiveEnum(zcu) or union_originally))
{
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
@@ -13086,7 +13086,7 @@ fn analyzeSwitchRuntimeBlock(
if (seen_field != null) continue;
const union_obj = zcu.typeToUnion(maybe_union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
- if (field_ty.zigTypeTag(zcu) != .NoReturn) break true;
+ if (field_ty.zigTypeTag(zcu) != .noreturn) break true;
} else false
else
true;
@@ -13128,7 +13128,7 @@ fn analyzeSwitchRuntimeBlock(
} else {
try branch_hints.append(gpa, .none); // we have the range conditionals first
try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len +
- @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len);
+ @typeInfo(Air.CondBr).@"struct".fields.len + case_block.instructions.items.len);
sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(prev_then_body.len),
@@ -13145,7 +13145,7 @@ fn analyzeSwitchRuntimeBlock(
assert(branch_hints.items.len == cases_len + 1);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).@"struct".fields.len +
cases_extra.items.len + final_else_body.len +
(std.math.divCeil(usize, branch_hints.items.len, 10) catch unreachable)); // branch hints
@@ -13847,7 +13847,7 @@ fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Ind
const err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node;
const err_operand = try sema.resolveInst(err_inst_data.operand);
const operand_ty = sema.typeOf(err_operand);
- if (operand_ty.zigTypeTag(zcu) == .ErrorSet) {
+ if (operand_ty.zigTypeTag(zcu) == .error_set) {
try sema.maybeErrorUnwrapComptime(block, body, err_operand);
return;
}
@@ -14076,9 +14076,9 @@ fn zirShl(
if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return lhs;
}
- if (scalar_ty.zigTypeTag(zcu) != .ComptimeInt and air_tag != .shl_sat) {
+ if (scalar_ty.zigTypeTag(zcu) != .comptime_int and air_tag != .shl_sat) {
const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
- if (rhs_ty.zigTypeTag(zcu) == .Vector) {
+ if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(pt, i);
@@ -14097,7 +14097,7 @@ fn zirShl(
});
}
}
- if (rhs_ty.zigTypeTag(zcu) == .Vector) {
+ if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(pt, i);
@@ -14118,12 +14118,12 @@ fn zirShl(
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
if (lhs_val.isUndef(zcu)) return pt.undefRef(lhs_ty);
const rhs_val = maybe_rhs_val orelse {
- if (scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (scalar_ty.zigTypeTag(zcu) == .comptime_int) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
}
break :rs rhs_src;
};
- const val = if (scalar_ty.zigTypeTag(zcu) == .ComptimeInt)
+ const val = if (scalar_ty.zigTypeTag(zcu) == .comptime_int)
try lhs_val.shl(rhs_val, lhs_ty, sema.arena, pt)
else switch (air_tag) {
.shl_exact => val: {
@@ -14158,7 +14158,7 @@ fn zirShl(
const bit_count = scalar_ty.intInfo(zcu).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try pt.intValue(scalar_rhs_ty, bit_count);
- const ok = if (rhs_ty.zigTypeTag(zcu) == .Vector) ok: {
+ const ok = if (rhs_ty.zigTypeTag(zcu) == .vector) ok: {
const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
@@ -14188,7 +14188,7 @@ fn zirShl(
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
- const any_ov_bit = if (lhs_ty.zigTypeTag(zcu) == .Vector)
+ const any_ov_bit = if (lhs_ty.zigTypeTag(zcu) == .vector)
try block.addInst(.{
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -14242,9 +14242,9 @@ fn zirShr(
if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
return lhs;
}
- if (scalar_ty.zigTypeTag(zcu) != .ComptimeInt) {
+ if (scalar_ty.zigTypeTag(zcu) != .comptime_int) {
const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
- if (rhs_ty.zigTypeTag(zcu) == .Vector) {
+ if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(pt, i);
@@ -14263,7 +14263,7 @@ fn zirShr(
});
}
}
- if (rhs_ty.zigTypeTag(zcu) == .Vector) {
+ if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
const rhs_elem = try rhs_val.elemValue(pt, i);
@@ -14297,7 +14297,7 @@ fn zirShr(
}
} else rhs_src;
- if (maybe_rhs_val == null and scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (maybe_rhs_val == null and scalar_ty.zigTypeTag(zcu) == .comptime_int) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
}
@@ -14308,7 +14308,7 @@ fn zirShr(
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try pt.intValue(rhs_ty.scalarType(zcu), bit_count);
- const ok = if (rhs_ty.zigTypeTag(zcu) == .Vector) ok: {
+ const ok = if (rhs_ty.zigTypeTag(zcu) == .vector) ok: {
const bit_count_inst = Air.internedToRef((try sema.splat(rhs_ty, bit_count_val)).toIntern());
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
@@ -14328,7 +14328,7 @@ fn zirShr(
if (air_tag == .shr_exact) {
const back = try block.addBinOp(.shl, result, rhs);
- const ok = if (rhs_ty.zigTypeTag(zcu) == .Vector) ok: {
+ const ok = if (rhs_ty.zigTypeTag(zcu) == .vector) ok: {
const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
@@ -14374,7 +14374,7 @@ fn zirBitwise(
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
if (!is_int) {
return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(zcu)), @tagName(rhs_ty.zigTypeTag(zcu)) });
@@ -14418,7 +14418,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operand_type = sema.typeOf(operand);
const scalar_type = operand_type.scalarType(zcu);
- if (scalar_type.zigTypeTag(zcu) != .Int) {
+ if (scalar_type.zigTypeTag(zcu) != .int) {
return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
operand_type.fmt(pt),
});
@@ -14427,7 +14427,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(zcu)) {
return pt.undefRef(operand_type);
- } else if (operand_type.zigTypeTag(zcu) == .Vector) {
+ } else if (operand_type.zigTypeTag(zcu) == .vector) {
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(zcu));
const elems = try sema.arena.alloc(InternPool.Index, vec_len);
for (elems, 0..) |*elem, i| {
@@ -14647,19 +14647,19 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.child = resolved_elem_ty.toIntern(),
});
const ptr_addrspace = p: {
- if (lhs_ty.zigTypeTag(zcu) == .Pointer) break :p lhs_ty.ptrAddressSpace(zcu);
- if (rhs_ty.zigTypeTag(zcu) == .Pointer) break :p rhs_ty.ptrAddressSpace(zcu);
+ if (lhs_ty.zigTypeTag(zcu) == .pointer) break :p lhs_ty.ptrAddressSpace(zcu);
+ if (rhs_ty.zigTypeTag(zcu) == .pointer) break :p rhs_ty.ptrAddressSpace(zcu);
break :p null;
};
const runtime_src = if (switch (lhs_ty.zigTypeTag(zcu)) {
- .Array, .Struct => try sema.resolveValue(lhs),
- .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
+ .array, .@"struct" => try sema.resolveValue(lhs),
+ .pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
else => unreachable,
}) |lhs_val| rs: {
if (switch (rhs_ty.zigTypeTag(zcu)) {
- .Array, .Struct => try sema.resolveValue(rhs),
- .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
+ .array, .@"struct" => try sema.resolveValue(rhs),
+ .pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
else => unreachable,
}) |rhs_val| {
const lhs_sub_val = if (lhs_ty.isSinglePointer(zcu))
@@ -14789,8 +14789,8 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(zcu)) {
- .Array => return operand_ty.arrayInfo(zcu),
- .Pointer => {
+ .array => return operand_ty.arrayInfo(zcu),
+ .pointer => {
const ptr_info = operand_ty.ptrInfo(zcu);
switch (ptr_info.flags.size) {
.Slice => {
@@ -14807,14 +14807,14 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
};
},
.One => {
- if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Array) {
+ if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .array) {
return Type.fromInterned(ptr_info.child).arrayInfo(zcu);
}
},
.C, .Many => {},
}
},
- .Struct => {
+ .@"struct" => {
if (operand_ty.isTuple(zcu) and peer_ty.isIndexable(zcu)) {
assert(!peer_ty.isTuple(zcu));
return .{
@@ -14934,12 +14934,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_len = uncoerced_lhs_ty.structFieldCount(zcu);
const lhs_dest_ty = switch (res_ty.zigTypeTag(zcu)) {
else => break :no_coerce,
- .Array => try pt.arrayType(.{
+ .array => try pt.arrayType(.{
.child = res_ty.childType(zcu).toIntern(),
.len = lhs_len,
.sentinel = if (res_ty.sentinel(zcu)) |s| s.toIntern() else .none,
}),
- .Vector => try pt.vectorType(.{
+ .vector => try pt.vectorType(.{
.child = res_ty.childType(zcu).toIntern(),
.len = lhs_len,
}),
@@ -14971,7 +14971,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const msg = try sema.errMsg(lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
switch (lhs_ty.zigTypeTag(zcu)) {
- .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
+ .int, .float, .comptime_float, .comptime_int, .vector => {
try sema.errNote(operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
},
else => {},
@@ -14996,7 +14996,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.child = lhs_info.elem_type.toIntern(),
});
- const ptr_addrspace = if (lhs_ty.zigTypeTag(zcu) == .Pointer) lhs_ty.ptrAddressSpace(zcu) else null;
+ const ptr_addrspace = if (lhs_ty.zigTypeTag(zcu) == .pointer) lhs_ty.ptrAddressSpace(zcu) else null;
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| ct: {
@@ -15096,7 +15096,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const rhs_scalar_ty = rhs_ty.scalarType(zcu);
if (rhs_scalar_ty.isUnsignedInt(zcu) or switch (rhs_scalar_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt, .Float, .ComptimeFloat => false,
+ .int, .comptime_int, .float, .comptime_float => false,
else => true,
}) {
return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)});
@@ -15129,7 +15129,7 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const rhs_scalar_ty = rhs_ty.scalarType(zcu);
switch (rhs_scalar_ty.zigTypeTag(zcu)) {
- .Int, .ComptimeInt, .Float, .ComptimeFloat => {},
+ .int, .comptime_int, .float, .comptime_float => {},
else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(pt)}),
}
@@ -15187,15 +15187,15 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const rhs_scalar_ty = rhs_ty.scalarType(zcu);
const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);
const maybe_lhs_val = try sema.resolveValueIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveValueIntable(casted_rhs);
- if ((lhs_ty.zigTypeTag(zcu) == .ComptimeFloat and rhs_ty.zigTypeTag(zcu) == .ComptimeInt) or
- (lhs_ty.zigTypeTag(zcu) == .ComptimeInt and rhs_ty.zigTypeTag(zcu) == .ComptimeFloat))
+ if ((lhs_ty.zigTypeTag(zcu) == .comptime_float and rhs_ty.zigTypeTag(zcu) == .comptime_int) or
+ (lhs_ty.zigTypeTag(zcu) == .comptime_int and rhs_ty.zigTypeTag(zcu) == .comptime_float))
{
// If it makes a difference whether we coerce to ints or floats before doing the division, error.
// If lhs % rhs is 0, it doesn't matter.
@@ -15240,13 +15240,13 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// * other float type: result is undefined
// If the lhs is undefined, result is undefined.
switch (scalar_tag) {
- .Int, .ComptimeInt, .ComptimeFloat => {
+ .int, .comptime_int, .comptime_float => {
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(zcu)) {
if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
- .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
+ .comptime_float, .float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+ .comptime_int, .int => try pt.intValue(resolved_type.scalarType(zcu), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15352,7 +15352,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_scalar_ty = lhs_ty.scalarType(zcu);
const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);
@@ -15382,8 +15382,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} else {
if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
- .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
+ .comptime_float, .float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+ .comptime_int, .int => try pt.intValue(resolved_type.scalarType(zcu), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15439,7 +15439,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const ok = if (!is_int) ok: {
const floored = try block.addUnOp(.floor, result);
- if (resolved_type.zigTypeTag(zcu) == .Vector) {
+ if (resolved_type.zigTypeTag(zcu) == .vector) {
const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
@@ -15462,11 +15462,11 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
- .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
+ .comptime_float, .float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+ .comptime_int, .int => try pt.intValue(resolved_type.scalarType(zcu), 0),
else => unreachable,
};
- if (resolved_type.zigTypeTag(zcu) == .Vector) {
+ if (resolved_type.zigTypeTag(zcu) == .vector) {
const zero_val = try sema.splat(resolved_type, scalar_zero);
const zero = Air.internedToRef(zero_val.toIntern());
const eql = try block.addCmpVector(remainder, zero, .eq);
@@ -15519,7 +15519,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_scalar_ty = rhs_ty.scalarType(zcu);
const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);
@@ -15550,8 +15550,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (!lhs_val.isUndef(zcu)) {
if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
- .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
+ .comptime_float, .float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+ .comptime_int, .int => try pt.intValue(resolved_type.scalarType(zcu), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15630,7 +15630,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_scalar_ty = rhs_ty.scalarType(zcu);
const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);
@@ -15661,8 +15661,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (!lhs_val.isUndef(zcu)) {
if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
- .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
+ .comptime_float, .float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+ .comptime_int, .int => try pt.intValue(resolved_type.scalarType(zcu), 0),
else => unreachable,
};
const zero_val = try sema.splat(resolved_type, scalar_zero);
@@ -15756,7 +15756,7 @@ fn addDivIntOverflowSafety(
}
var ok: Air.Inst.Ref = .none;
- if (resolved_type.zigTypeTag(zcu) == .Vector) {
+ if (resolved_type.zigTypeTag(zcu) == .vector) {
if (maybe_lhs_val == null) {
const min_int_ref = Air.internedToRef(min_int.toIntern());
ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
@@ -15819,7 +15819,7 @@ fn addDivByZeroSafety(
try pt.intValue(resolved_type.scalarType(zcu), 0)
else
try pt.floatValue(resolved_type.scalarType(zcu), 0.0);
- const ok = if (resolved_type.zigTypeTag(zcu) == .Vector) ok: {
+ const ok = if (resolved_type.zigTypeTag(zcu) == .vector) ok: {
const zero_val = try sema.splat(resolved_type, scalar_zero);
const zero = Air.internedToRef(zero_val.toIntern());
const ok = try block.addCmpVector(casted_rhs, zero, .neq);
@@ -15867,7 +15867,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag(zcu) == .Vector;
+ const is_vector = resolved_type.zigTypeTag(zcu) == .vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -15876,7 +15876,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const rhs_scalar_ty = rhs_ty.scalarType(zcu);
const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
@@ -15904,8 +15904,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
- .ComptimeInt, .Int => try pt.intValue(resolved_type.scalarType(zcu), 0),
+ .comptime_float, .float => try pt.floatValue(resolved_type.scalarType(zcu), 0.0),
+ .comptime_int, .int => try pt.intValue(resolved_type.scalarType(zcu), 0),
else => unreachable,
};
const zero_val = if (is_vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -15987,7 +15987,7 @@ fn intRem(
) CompileError!Value {
const pt = sema.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -16058,7 +16058,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
@@ -16154,7 +16154,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const scalar_tag = resolved_type.scalarType(zcu).zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
@@ -16265,7 +16265,7 @@ fn zirOverflowArithmetic(
const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
- if (dest_ty.scalarType(zcu).zigTypeTag(zcu) != .Int) {
+ if (dest_ty.scalarType(zcu).zigTypeTag(zcu) != .int) {
return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(pt)});
}
@@ -16438,7 +16438,7 @@ fn zirOverflowArithmetic(
fn splat(sema: *Sema, ty: Type, val: Value) !Value {
const pt = sema.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) != .Vector) return val;
+ if (ty.zigTypeTag(zcu) != .vector) return val;
const repeated = try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = val.toIntern() },
@@ -16450,7 +16450,7 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const ov_ty = if (ty.zigTypeTag(zcu) == .Vector) try pt.vectorType(.{
+ const ov_ty = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = .u1_type,
}) else Type.u1;
@@ -16485,8 +16485,8 @@ fn analyzeArithmetic(
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- if (lhs_zig_ty_tag == .Pointer) {
- if (rhs_zig_ty_tag == .Pointer) {
+ if (lhs_zig_ty_tag == .pointer) {
+ if (rhs_zig_ty_tag == .pointer) {
if (lhs_ty.ptrSize(zcu) != .Slice and rhs_ty.ptrSize(zcu) != .Slice) {
if (zir_tag != .sub) {
return sema.failWithInvalidPtrArithmetic(block, src, "pointer-pointer", "subtraction");
@@ -16569,7 +16569,7 @@ fn analyzeArithmetic(
const scalar_type = resolved_type.scalarType(zcu);
const scalar_tag = scalar_type.zigTypeTag(zcu);
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);
@@ -16667,7 +16667,7 @@ fn analyzeArithmetic(
return pt.undefRef(resolved_type);
}
- const val = if (scalar_tag == .ComptimeInt)
+ const val = if (scalar_tag == .comptime_int)
try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined)
else
try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, pt);
@@ -16765,7 +16765,7 @@ fn analyzeArithmetic(
return pt.undefRef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
- const val = if (scalar_tag == .ComptimeInt)
+ const val = if (scalar_tag == .comptime_int)
try sema.intSub(lhs_val, rhs_val, resolved_type, undefined)
else
try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, pt);
@@ -16789,13 +16789,13 @@ fn analyzeArithmetic(
// the result is nan.
// If either of the operands are nan, the result is nan.
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0),
- .ComptimeInt, .Int => try pt.intValue(scalar_type, 0),
+ .comptime_float, .float => try pt.floatValue(scalar_type, 0.0),
+ .comptime_int, .int => try pt.intValue(scalar_type, 0),
else => unreachable,
};
const scalar_one = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0),
- .ComptimeInt, .Int => try pt.intValue(scalar_type, 1),
+ .comptime_float, .float => try pt.floatValue(scalar_type, 1.0),
+ .comptime_int, .int => try pt.intValue(scalar_type, 1),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
@@ -16875,13 +16875,13 @@ fn analyzeArithmetic(
// If either of the operands are one, result is the other operand.
// If either of the operands are undefined, result is undefined.
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0),
- .ComptimeInt, .Int => try pt.intValue(scalar_type, 0),
+ .comptime_float, .float => try pt.floatValue(scalar_type, 0.0),
+ .comptime_int, .int => try pt.intValue(scalar_type, 0),
else => unreachable,
};
const scalar_one = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0),
- .ComptimeInt, .Int => try pt.intValue(scalar_type, 1),
+ .comptime_float, .float => try pt.floatValue(scalar_type, 1.0),
+ .comptime_int, .int => try pt.intValue(scalar_type, 1),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
@@ -16920,13 +16920,13 @@ fn analyzeArithmetic(
// If either of the operands are one, result is the other operand.
// If either of the operands are undefined, result is undefined.
const scalar_zero = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 0.0),
- .ComptimeInt, .Int => try pt.intValue(scalar_type, 0),
+ .comptime_float, .float => try pt.floatValue(scalar_type, 0.0),
+ .comptime_int, .int => try pt.intValue(scalar_type, 0),
else => unreachable,
};
const scalar_one = switch (scalar_tag) {
- .ComptimeFloat, .Float => try pt.floatValue(scalar_type, 1.0),
- .ComptimeInt, .Int => try pt.intValue(scalar_type, 1),
+ .comptime_float, .float => try pt.floatValue(scalar_type, 1.0),
+ .comptime_int, .int => try pt.intValue(scalar_type, 1),
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
@@ -16956,7 +16956,7 @@ fn analyzeArithmetic(
return pt.undefRef(resolved_type);
}
- const val = if (scalar_tag == .ComptimeInt)
+ const val = if (scalar_tag == .comptime_int)
try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, pt)
else
try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, pt);
@@ -16971,7 +16971,7 @@ fn analyzeArithmetic(
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (block.wantSafety() and want_safety and scalar_tag == .Int) {
+ if (block.wantSafety() and want_safety and scalar_tag == .int) {
if (zcu.backendSupportsFeature(.safety_checked_instructions)) {
if (air_tag != air_tag_safe) {
_ = try sema.preparePanicId(block, src, .integer_overflow);
@@ -16997,7 +16997,7 @@ fn analyzeArithmetic(
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
- const any_ov_bit = if (resolved_type.zigTypeTag(zcu) == .Vector)
+ const any_ov_bit = if (resolved_type.zigTypeTag(zcu) == .vector)
try block.addInst(.{
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -17170,7 +17170,7 @@ fn zirAsm(
var extra_i = extra.end;
var output_type_bits = extra.data.output_type_bits;
- var needed_capacity: usize = @typeInfo(Air.Asm).Struct.fields.len + outputs_len + inputs_len;
+ var needed_capacity: usize = @typeInfo(Air.Asm).@"struct".fields.len + outputs_len + inputs_len;
const ConstraintName = struct { c: []const u8, n: []const u8 };
const out_args = try sema.arena.alloc(Air.Inst.Ref, outputs_len);
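Note on the hunk above: besides the type-tag switches, reflection call sites change spelling, since the `std.builtin.Type` union field `Struct` is now `@"struct"`. A minimal user-level sketch of the new access pattern, assuming a Zig toolchain that already ships the lowercase field names; the `Point` type is hypothetical and only for illustration:

    const std = @import("std");

    const Point = struct { x: i32, y: i32 };

    pub fn main() void {
        // The struct payload of std.builtin.Type is spelled .@"struct",
        // so field metadata is reached the same way Sema does above.
        const fields = @typeInfo(Point).@"struct".fields;
        std.debug.print("Point has {d} fields\n", .{fields.len});
    }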
@@ -17217,8 +17217,8 @@ fn zirAsm(
const uncasted_arg = try sema.resolveInst(input.data.operand);
const uncasted_arg_ty = sema.typeOf(uncasted_arg);
switch (uncasted_arg_ty.zigTypeTag(zcu)) {
- .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
- .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
+ .comptime_int => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
+ .comptime_float => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
else => {
arg.* = uncasted_arg;
},
@@ -17312,32 +17312,32 @@ fn zirCmpEq(
const rhs_ty = sema.typeOf(rhs);
const lhs_ty_tag = lhs_ty.zigTypeTag(zcu);
const rhs_ty_tag = rhs_ty.zigTypeTag(zcu);
- if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
+ if (lhs_ty_tag == .null and rhs_ty_tag == .null) {
// null == null, null != null
return if (op == .eq) .bool_true else .bool_false;
}
// comparing null with optionals
- if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(zcu))) {
+ if (lhs_ty_tag == .null and (rhs_ty_tag == .optional or rhs_ty.isCPtr(zcu))) {
return sema.analyzeIsNull(block, src, rhs, op == .neq);
}
- if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(zcu))) {
+ if (rhs_ty_tag == .null and (lhs_ty_tag == .optional or lhs_ty.isCPtr(zcu))) {
return sema.analyzeIsNull(block, src, lhs, op == .neq);
}
- if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
- const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
+ if (lhs_ty_tag == .null or rhs_ty_tag == .null) {
+ const non_null_type = if (lhs_ty_tag == .null) rhs_ty else lhs_ty;
return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(pt)});
}
- if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) {
+ if (lhs_ty_tag == .@"union" and (rhs_ty_tag == .enum_literal or rhs_ty_tag == .@"enum")) {
return sema.analyzeCmpUnionTag(block, src, lhs, lhs_src, rhs, rhs_src, op);
}
- if (rhs_ty_tag == .Union and (lhs_ty_tag == .EnumLiteral or lhs_ty_tag == .Enum)) {
+ if (rhs_ty_tag == .@"union" and (lhs_ty_tag == .enum_literal or lhs_ty_tag == .@"enum")) {
return sema.analyzeCmpUnionTag(block, src, rhs, rhs_src, lhs, lhs_src, op);
}
- if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) {
+ if (lhs_ty_tag == .error_set and rhs_ty_tag == .error_set) {
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveValue(lhs)) |lval| {
if (try sema.resolveValue(rhs)) |rval| {
@@ -17360,7 +17360,7 @@ fn zirCmpEq(
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addBinOp(air_tag, lhs, rhs);
}
- if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) {
+ if (lhs_ty_tag == .type and rhs_ty_tag == .type) {
const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs);
const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs);
return if (lhs_as_type.eql(rhs_as_type, zcu) == (op == .eq)) .bool_true else .bool_false;
@@ -17399,7 +17399,7 @@ fn analyzeCmpUnionTag(
if (try sema.resolveValue(coerced_tag)) |enum_val| {
if (enum_val.isUndef(zcu)) return pt.undefRef(Type.bool);
const field_ty = union_ty.unionFieldType(enum_val, zcu).?;
- if (field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (field_ty.zigTypeTag(zcu) == .noreturn) {
return .bool_false;
}
}
@@ -17442,11 +17442,11 @@ fn analyzeCmp(
const zcu = pt.zcu;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- if (lhs_ty.zigTypeTag(zcu) != .Optional and rhs_ty.zigTypeTag(zcu) != .Optional) {
+ if (lhs_ty.zigTypeTag(zcu) != .optional and rhs_ty.zigTypeTag(zcu) != .optional) {
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
}
- if (lhs_ty.zigTypeTag(zcu) == .Vector and rhs_ty.zigTypeTag(zcu) == .Vector) {
+ if (lhs_ty.zigTypeTag(zcu) == .vector and rhs_ty.zigTypeTag(zcu) == .vector) {
return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
}
if (lhs_ty.isNumeric(zcu) and rhs_ty.isNumeric(zcu)) {
@@ -17455,11 +17455,11 @@ fn analyzeCmp(
// numeric types.
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
}
- if (is_equality_cmp and lhs_ty.zigTypeTag(zcu) == .ErrorUnion and rhs_ty.zigTypeTag(zcu) == .ErrorSet) {
+ if (is_equality_cmp and lhs_ty.zigTypeTag(zcu) == .error_union and rhs_ty.zigTypeTag(zcu) == .error_set) {
const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
}
- if (is_equality_cmp and lhs_ty.zigTypeTag(zcu) == .ErrorSet and rhs_ty.zigTypeTag(zcu) == .ErrorUnion) {
+ if (is_equality_cmp and lhs_ty.zigTypeTag(zcu) == .error_set and rhs_ty.zigTypeTag(zcu) == .error_union) {
const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
}
@@ -17505,7 +17505,7 @@ fn cmpSelf(
if (try sema.resolveValue(casted_rhs)) |rhs_val| {
if (rhs_val.isUndef(zcu)) return pt.undefRef(Type.bool);
- if (resolved_type.zigTypeTag(zcu) == .Vector) {
+ if (resolved_type.zigTypeTag(zcu) == .vector) {
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
return Air.internedToRef(cmp_val.toIntern());
}
@@ -17515,7 +17515,7 @@ fn cmpSelf(
else
.bool_false;
} else {
- if (resolved_type.zigTypeTag(zcu) == .Bool) {
+ if (resolved_type.zigTypeTag(zcu) == .bool) {
// We can lower bool eq/neq more efficiently.
return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
}
@@ -17524,7 +17524,7 @@ fn cmpSelf(
} else {
// For bools, we still check the other operand, because we can lower
// bool eq/neq more efficiently.
- if (resolved_type.zigTypeTag(zcu) == .Bool) {
+ if (resolved_type.zigTypeTag(zcu) == .bool) {
if (try sema.resolveValue(casted_rhs)) |rhs_val| {
if (rhs_val.isUndef(zcu)) return pt.undefRef(Type.bool);
return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
@@ -17534,7 +17534,7 @@ fn cmpSelf(
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (resolved_type.zigTypeTag(zcu) == .Vector) {
+ if (resolved_type.zigTypeTag(zcu) == .vector) {
return block.addCmpVector(casted_lhs, casted_rhs, op);
}
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized);
@@ -17568,34 +17568,34 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
switch (ty.zigTypeTag(pt.zcu)) {
- .Fn,
- .NoReturn,
- .Undefined,
- .Null,
- .Opaque,
+ .@"fn",
+ .noreturn,
+ .undefined,
+ .null,
+ .@"opaque",
=> return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(pt)}),
- .Type,
- .EnumLiteral,
- .ComptimeFloat,
- .ComptimeInt,
- .Void,
+ .type,
+ .enum_literal,
+ .comptime_float,
+ .comptime_int,
+ .void,
=> return pt.intRef(Type.comptime_int, 0),
- .Bool,
- .Int,
- .Float,
- .Pointer,
- .Array,
- .Struct,
- .Optional,
- .ErrorUnion,
- .ErrorSet,
- .Enum,
- .Union,
- .Vector,
- .Frame,
- .AnyFrame,
+ .bool,
+ .int,
+ .float,
+ .pointer,
+ .array,
+ .@"struct",
+ .optional,
+ .error_union,
+ .error_set,
+ .@"enum",
+ .@"union",
+ .vector,
+ .frame,
+ .@"anyframe",
=> {},
}
const val = try ty.abiSizeLazy(pt);
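For call sites that switch over every tag, as in the `zirSizeOf` hunk above, the rename is purely mechanical. A short standalone sketch of the same exhaustive-switch pattern with the lowercase tags; the `hasSize` helper is hypothetical, not compiler code:

    const std = @import("std");

    // Mirrors the grouping in zirSizeOf: these categories have no size.
    fn hasSize(comptime T: type) bool {
        return switch (@typeInfo(T)) {
            .@"fn", .noreturn, .undefined, .null, .@"opaque" => false,
            else => true,
        };
    }

    pub fn main() void {
        // Prints "true false": integers have a size, function types do not.
        std.debug.print("{} {}\n", .{ hasSize(u32), hasSize(fn () void) });
    }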
@@ -17609,34 +17609,34 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
switch (operand_ty.zigTypeTag(zcu)) {
- .Fn,
- .NoReturn,
- .Undefined,
- .Null,
- .Opaque,
+ .@"fn",
+ .noreturn,
+ .undefined,
+ .null,
+ .@"opaque",
=> return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(pt)}),
- .Type,
- .EnumLiteral,
- .ComptimeFloat,
- .ComptimeInt,
- .Void,
+ .type,
+ .enum_literal,
+ .comptime_float,
+ .comptime_int,
+ .void,
=> return pt.intRef(Type.comptime_int, 0),
- .Bool,
- .Int,
- .Float,
- .Pointer,
- .Array,
- .Struct,
- .Optional,
- .ErrorUnion,
- .ErrorSet,
- .Enum,
- .Union,
- .Vector,
- .Frame,
- .AnyFrame,
+ .bool,
+ .int,
+ .float,
+ .pointer,
+ .array,
+ .@"struct",
+ .optional,
+ .error_union,
+ .error_set,
+ .@"enum",
+ .@"union",
+ .vector,
+ .frame,
+ .@"anyframe",
=> {},
}
const bit_size = try operand_ty.bitSizeSema(pt);
@@ -17893,21 +17893,21 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
switch (ty.zigTypeTag(zcu)) {
- .Type,
- .Void,
- .Bool,
- .NoReturn,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .EnumLiteral,
+ .type,
+ .void,
+ .bool,
+ .noreturn,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .enum_literal,
=> |type_info_tag| return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
.tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(type_info_tag))).toIntern(),
.val = .void_value,
} }))),
- .Fn => {
+ .@"fn" => {
const fn_info_nav = try sema.namespaceLookup(
block,
src,
@@ -18010,14 +18010,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Fn))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.@"fn"))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = fn_info_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Int => {
+ .int => {
const int_info_nav = try sema.namespaceLookup(
block,
src,
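The same rename carries over to `std.builtin.TypeId` (the tag enum of `std.builtin.Type`), which is why the `enumValueFieldIndex` calls above now name `.int`, `.@"fn"`, and so on. A quick sketch of how the lowercase tags look from user code; the comparisons are illustrative only:

    const std = @import("std");

    pub fn main() void {
        // A tagged union compares directly against an enum literal, so the
        // usual reflection checks pick up the lowercase names as well.
        std.debug.print("{}\n", .{@typeInfo(u8) == .int});
        std.debug.print("{}\n", .{@typeInfo(?u8) == .optional});
        std.debug.print("{}\n", .{@intFromEnum(std.builtin.TypeId.int)});
    }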
@@ -18037,14 +18037,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Int))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.int))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = int_info_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Float => {
+ .float => {
const float_info_nav = try sema.namespaceLookup(
block,
src,
@@ -18060,14 +18060,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Float))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.float))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = float_info_ty.toIntern(),
.storage = .{ .elems = &field_vals },
} }),
} })));
},
- .Pointer => {
+ .pointer => {
const info = ty.ptrInfo(zcu);
const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
try pt.intValue(Type.comptime_int, alignment)
@@ -18119,14 +18119,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Pointer))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.pointer))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = pointer_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Array => {
+ .array => {
const array_field_ty = t: {
const nav = try sema.namespaceLookup(
block,
@@ -18149,14 +18149,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Array))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.array))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = array_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Vector => {
+ .vector => {
const vector_field_ty = t: {
const nav = try sema.namespaceLookup(
block,
@@ -18177,14 +18177,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Vector))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.vector))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = vector_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Optional => {
+ .optional => {
const optional_field_ty = t: {
const nav = try sema.namespaceLookup(
block,
@@ -18202,14 +18202,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Optional))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.optional))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = optional_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .ErrorSet => {
+ .error_set => {
// Get the Error type
const error_field_ty = t: {
const nav = try sema.namespaceLookup(
@@ -18308,14 +18308,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.val = errors_payload_val,
} });
- // Construct Type{ .ErrorSet = errors_val }
+ // Construct Type{ .error_set = errors_val }
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorSet))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.error_set))).toIntern(),
.val = errors_val,
} })));
},
- .ErrorUnion => {
+ .error_union => {
const error_union_field_ty = t: {
const nav = try sema.namespaceLookup(
block,
@@ -18335,14 +18335,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.ErrorUnion))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.error_union))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = error_union_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Enum => {
+ .@"enum" => {
const is_exhaustive = Value.makeBool(ip.loadEnumType(ty.toIntern()).tag_mode != .nonexhaustive);
const enum_field_ty = t: {
@@ -18464,14 +18464,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Enum))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.@"enum"))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = type_enum_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Union => {
+ .@"union" => {
const type_union_ty = t: {
const nav = try sema.namespaceLookup(
block,
@@ -18611,14 +18611,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Union))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.@"union"))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = type_union_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Struct => {
+ .@"struct" => {
const type_struct_ty = t: {
const nav = try sema.namespaceLookup(
block,
@@ -18843,14 +18843,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Struct))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.@"struct"))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = type_struct_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Opaque => {
+ .@"opaque" => {
const type_opaque_ty = t: {
const nav = try sema.namespaceLookup(
block,
@@ -18871,15 +18871,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return Air.internedToRef((try pt.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.Opaque))).toIntern(),
+ .tag = (try pt.enumValueFieldIndex(type_info_tag_ty, @intFromEnum(std.builtin.TypeId.@"opaque"))).toIntern(),
.val = try pt.intern(.{ .aggregate = .{
.ty = type_opaque_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })));
},
- .Frame => return sema.failWithUseOfAsync(block, src),
- .AnyFrame => return sema.failWithUseOfAsync(block, src),
+ .frame => return sema.failWithUseOfAsync(block, src),
+ .@"anyframe" => return sema.failWithUseOfAsync(block, src),
}
}
@@ -19064,8 +19064,8 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
const pt = sema.pt;
const zcu = pt.zcu;
switch (operand.zigTypeTag(zcu)) {
- .ComptimeInt => return Type.comptime_int,
- .Int => {
+ .comptime_int => return Type.comptime_int,
+ .int => {
const bits = operand.bitSize(zcu);
const count = if (bits == 0)
0
@@ -19079,7 +19079,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
};
return pt.intType(.unsigned, count);
},
- .Vector => {
+ .vector => {
const elem_ty = operand.elemType2(zcu);
const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
return pt.vectorType(.{
@@ -19277,9 +19277,9 @@ fn finishCondBr(
) !Air.Inst.Ref {
const gpa = sema.gpa;
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
- @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);
+ @typeInfo(Air.Block).@"struct".fields.len + child_block.instructions.items.len + 1);
const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(then_block.instructions.items.len),
@@ -19307,8 +19307,8 @@ fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !voi
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Optional, .Null, .Undefined => return,
- .Pointer => if (ty.isPtrLikeOptional(zcu)) return,
+ .optional, .null, .undefined => return,
+ .pointer => if (ty.isPtrLikeOptional(zcu)) return,
else => {},
}
return sema.failWithExpectedOptionalType(block, src, ty);
@@ -19354,7 +19354,7 @@ fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .ErrorSet, .ErrorUnion, .Undefined => return,
+ .error_set, .error_union, .undefined => return,
else => return sema.fail(block, src, "expected error union type, found '{}'", .{
ty.fmt(pt),
}),
@@ -19451,7 +19451,7 @@ fn zirCondbr(
const err_inst_data = sema.code.instructions.items(.data)[@intFromEnum(index)].un_node;
const err_operand = try sema.resolveInst(err_inst_data.operand);
const operand_ty = sema.typeOf(err_operand);
- assert(operand_ty.zigTypeTag(zcu) == .ErrorUnion);
+ assert(operand_ty.zigTypeTag(zcu) == .error_union);
const result_ty = operand_ty.errorUnionSet(zcu);
break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
};
@@ -19463,7 +19463,7 @@ fn zirCondbr(
break :h .unlikely;
} else try sema.analyzeBodyRuntimeBreak(&sub_block, else_body);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
true_instructions.len + sub_block.instructions.items.len);
_ = try parent_block.addInst(.{
.tag = .cond_br,
@@ -19490,7 +19490,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
const err_union_ty = sema.typeOf(err_union);
const pt = sema.pt;
const zcu = pt.zcu;
- if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
+ if (err_union_ty.zigTypeTag(zcu) != .error_union) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(pt),
});
@@ -19524,7 +19524,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
// The only interesting hint here is `.cold`, which can come from e.g. `errdefer @panic`.
const is_cold = sema.branch_hint == .cold;
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Try).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Try).@"struct".fields.len +
sub_block.instructions.items.len);
const try_inst = try parent_block.addInst(.{
.tag = if (is_cold) .try_cold else .@"try",
@@ -19550,7 +19550,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
const err_union_ty = sema.typeOf(err_union);
const pt = sema.pt;
const zcu = pt.zcu;
- if (err_union_ty.zigTypeTag(zcu) != .ErrorUnion) {
+ if (err_union_ty.zigTypeTag(zcu) != .error_union) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(pt),
});
@@ -19596,7 +19596,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
},
});
const res_ty_ref = Air.internedToRef(res_ty.toIntern());
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.TryPtr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.TryPtr).@"struct".fields.len +
sub_block.instructions.items.len);
const try_inst = try parent_block.addInst(.{
.tag = if (is_cold) .try_ptr_cold else .try_ptr,
@@ -19747,7 +19747,7 @@ fn zirRetImplicit(
const operand = try sema.resolveInst(inst_data.operand);
const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = 0 });
const base_tag = sema.fn_ret_ty.baseZigTypeTag(zcu);
- if (base_tag == .NoReturn) {
+ if (base_tag == .noreturn) {
const msg = msg: {
const msg = try sema.errMsg(ret_ty_src, "function declared '{}' implicitly returns", .{
sema.fn_ret_ty.fmt(pt),
@@ -19757,7 +19757,7 @@ fn zirRetImplicit(
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
- } else if (base_tag != .Void) {
+ } else if (base_tag != .void) {
const msg = msg: {
const msg = try sema.errMsg(ret_ty_src, "function with non-void return type '{}' implicitly returns", .{
sema.fn_ret_ty.fmt(pt),
@@ -19844,9 +19844,9 @@ fn retWithErrTracing(
try sema.callBuiltin(&else_block, src, return_err_fn, .never_inline, &args, .@"error return");
_ = try else_block.addUnOp(ret_tag, operand);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
- @typeInfo(Air.Block).Struct.fields.len + 1);
+ @typeInfo(Air.Block).@"struct".fields.len + 1);
const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(then_block.instructions.items.len),
@@ -19958,7 +19958,7 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- assert(sema.fn_ret_ty.zigTypeTag(zcu) == .ErrorUnion);
+ assert(sema.fn_ret_ty.zigTypeTag(zcu) == .error_union);
const err_set_ty = sema.fn_ret_ty.errorUnionSet(zcu).toIntern();
switch (err_set_ty) {
.adhoc_inferred_error_set_type => {
@@ -19980,8 +19980,8 @@ fn addToInferredErrorSetPtr(sema: *Sema, ies: *InferredErrorSet, op_ty: Type) !v
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (op_ty.zigTypeTag(zcu)) {
- .ErrorSet => try ies.addErrorSet(op_ty, ip, arena),
- .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(zcu), ip, arena),
+ .error_set => try ies.addErrorSet(op_ty, ip, arena),
+ .error_union => try ies.addErrorSet(op_ty.errorUnionSet(zcu), ip, arena),
else => {},
}
}
@@ -19998,7 +19998,7 @@ fn analyzeRet(
// that the coercion below works correctly.
const pt = sema.pt;
const zcu = pt.zcu;
- if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(zcu) == .ErrorUnion) {
+ if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(zcu) == .error_union) {
try sema.addToInferredErrorSet(uncasted_operand);
}
const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, operand_src, .{ .is_ret = true }) catch |err| switch (err) {
@@ -20087,7 +20087,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
break :blk ty;
};
- if (elem_ty.zigTypeTag(zcu) == .NoReturn)
+ if (elem_ty.zigTypeTag(zcu) == .noreturn)
return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});
const target = zcu.getTarget();
@@ -20128,7 +20128,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAddressSpace(block, addrspace_src, ref, .pointer);
- } else if (elem_ty.zigTypeTag(zcu) == .Fn and target.cpu.arch == .avr) .flash else .generic;
+ } else if (elem_ty.zigTypeTag(zcu) == .@"fn" and target.cpu.arch == .avr) .flash else .generic;
const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
@@ -20162,11 +20162,11 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
}
- if (elem_ty.zigTypeTag(zcu) == .Fn) {
+ if (elem_ty.zigTypeTag(zcu) == .@"fn") {
if (inst_data.size != .One) {
return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
}
- } else if (inst_data.size == .Many and elem_ty.zigTypeTag(zcu) == .Opaque) {
+ } else if (inst_data.size == .Many and elem_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{});
} else if (inst_data.size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
@@ -20181,7 +20181,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
}
- if (elem_ty.zigTypeTag(zcu) == .Opaque) {
+ if (elem_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{});
}
}
@@ -20226,10 +20226,10 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const zcu = pt.zcu;
switch (obj_ty.zigTypeTag(zcu)) {
- .Struct => return sema.structInitEmpty(block, obj_ty, src, src),
- .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty),
- .Void => return Air.internedToRef(Value.void.toIntern()),
- .Union => return sema.fail(block, src, "union initializer must initialize one field", .{}),
+ .@"struct" => return sema.structInitEmpty(block, obj_ty, src, src),
+ .array, .vector => return sema.arrayInitEmpty(block, src, obj_ty),
+ .void => return Air.internedToRef(Value.void.toIntern()),
+ .@"union" => return sema.fail(block, src, "union initializer must initialize one field", .{}),
else => return sema.failWithArrayInitNotSupported(block, src, obj_ty),
}
}
@@ -20249,7 +20249,7 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is
};
const init_ty = if (is_byref) ty: {
const ptr_ty = ty_operand.optEuBaseType(zcu);
- assert(ptr_ty.zigTypeTag(zcu) == .Pointer); // validated by a previous instruction
+ assert(ptr_ty.zigTypeTag(zcu) == .pointer); // validated by a previous instruction
if (!ptr_ty.isSlice(zcu)) {
break :ty ptr_ty.childType(zcu);
}
@@ -20263,9 +20263,9 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is
const obj_ty = init_ty.optEuBaseType(zcu);
const empty_ref = switch (obj_ty.zigTypeTag(zcu)) {
- .Struct => try sema.structInitEmpty(block, obj_ty, src, src),
- .Array, .Vector => try sema.arrayInitEmpty(block, src, obj_ty),
- .Union => return sema.fail(block, src, "union initializer must initialize one field", .{}),
+ .@"struct" => try sema.structInitEmpty(block, obj_ty, src, src),
+ .array, .vector => try sema.arrayInitEmpty(block, src, obj_ty),
+ .@"union" => return sema.fail(block, src, "union initializer must initialize one field", .{}),
else => return sema.failWithArrayInitNotSupported(block, src, obj_ty),
};
const init_ref = try sema.coerce(block, init_ty, empty_ref, src);
@@ -20304,7 +20304,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com
const zcu = pt.zcu;
const arr_len = obj_ty.arrayLen(zcu);
if (arr_len != 0) {
- if (obj_ty.zigTypeTag(zcu) == .Array) {
+ if (obj_ty.zigTypeTag(zcu) == .array) {
return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
} else {
return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
@@ -20324,7 +20324,7 @@ fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const init_src = block.builtinCallArgSrc(inst_data.src_node, 2);
const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data;
const union_ty = try sema.resolveType(block, ty_src, extra.union_type);
- if (union_ty.zigTypeTag(pt.zcu) != .Union) {
+ if (union_ty.zigTypeTag(pt.zcu) != .@"union") {
return sema.fail(block, ty_src, "expected union type, found '{}'", .{union_ty.fmt(pt)});
}
const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, .{
@@ -20394,7 +20394,7 @@ fn zirStructInit(
const resolved_ty = result_ty.optEuBaseType(zcu);
try resolved_ty.resolveLayout(pt);
- if (resolved_ty.zigTypeTag(zcu) == .Struct) {
+ if (resolved_ty.zigTypeTag(zcu) == .@"struct") {
// This logic must be synchronized with that in `zirStructInitEmpty`.
// Maps field index to field_type index of where it was already initialized.
@@ -20450,7 +20450,7 @@ fn zirStructInit(
}
return sema.finishStructInit(block, src, src, field_inits, resolved_ty, result_ty, is_ref);
- } else if (resolved_ty.zigTypeTag(zcu) == .Union) {
+ } else if (resolved_ty.zigTypeTag(zcu) == .@"union") {
if (extra.data.fields_len != 1) {
return sema.fail(block, src, "union initialization expects exactly one field", .{});
}
@@ -20471,7 +20471,7 @@ fn zirStructInit(
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const field_ty = Type.fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
- if (field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (field_ty.zigTypeTag(zcu) == .noreturn) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
@@ -20756,7 +20756,7 @@ fn structInitAnon(
const init = try sema.resolveInst(item.data.init);
field_ty.* = sema.typeOf(init).toIntern();
- if (Type.fromInterned(field_ty.*).zigTypeTag(zcu) == .Opaque) {
+ if (Type.fromInterned(field_ty.*).zigTypeTag(zcu) == .@"opaque") {
const msg = msg: {
const field_src = block.src(.{ .init_elem = .{
.init_node_offset = src.offset.node_offset.x,
@@ -20867,7 +20867,7 @@ fn zirArrayInit(
else => |e| return e,
};
const array_ty = result_ty.optEuBaseType(zcu);
- const is_tuple = array_ty.zigTypeTag(zcu) == .Struct;
+ const is_tuple = array_ty.zigTypeTag(zcu) == .@"struct";
const sentinel_val = array_ty.sentinel(zcu);
var root_msg: ?*Zcu.ErrorMsg = null;
@@ -21029,7 +21029,7 @@ fn arrayInitAnon(
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
types[i] = sema.typeOf(elem).toIntern();
- if (Type.fromInterned(types[i]).zigTypeTag(zcu) == .Opaque) {
+ if (Type.fromInterned(types[i]).zigTypeTag(zcu) == .@"opaque") {
const msg = msg: {
const msg = try sema.errMsg(operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
@@ -21148,7 +21148,7 @@ fn fieldType(
while (true) {
try cur_ty.resolveFields(pt);
switch (cur_ty.zigTypeTag(zcu)) {
- .Struct => switch (ip.indexToKey(cur_ty.toIntern())) {
+ .@"struct" => switch (ip.indexToKey(cur_ty.toIntern())) {
.anon_struct_type => |anon_struct| {
const field_index = if (anon_struct.names.len == 0)
try sema.tupleFieldIndex(block, cur_ty, field_name, field_src)
@@ -21165,20 +21165,20 @@ fn fieldType(
},
else => unreachable,
},
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(cur_ty).?;
const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, cur_ty, union_obj, field_src, field_name);
const field_ty = union_obj.field_types.get(ip)[field_index];
return Air.internedToRef(field_ty);
},
- .Optional => {
+ .optional => {
// Struct/array init through optional requires the child type to not be a pointer.
// If the child of .optional is a pointer it'll error on the next loop.
cur_ty = Type.fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type);
continue;
},
- .ErrorUnion => {
+ .error_union => {
cur_ty = cur_ty.errorUnionPayload(zcu);
continue;
},
@@ -21243,7 +21243,7 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const src = block.nodeOffset(inst_data.src_node);
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const is_vector = operand_ty.zigTypeTag(zcu) == .Vector;
+ const is_vector = operand_ty.zigTypeTag(zcu) == .vector;
const operand_scalar_ty = operand_ty.scalarType(zcu);
if (operand_scalar_ty.toIntern() != .bool_type) {
return sema.fail(block, src, "expected 'bool', found '{}'", .{operand_scalar_ty.zigTypeTag(zcu)});
@@ -21317,8 +21317,8 @@ fn zirAbs(
const scalar_ty = operand_ty.scalarType(zcu);
const result_ty = switch (scalar_ty.zigTypeTag(zcu)) {
- .ComptimeFloat, .Float, .ComptimeInt => operand_ty,
- .Int => if (scalar_ty.isSignedInt(zcu)) try operand_ty.toUnsigned(pt) else return operand,
+ .comptime_float, .float, .comptime_int => operand_ty,
+ .int => if (scalar_ty.isSignedInt(zcu)) try operand_ty.toUnsigned(pt) else return operand,
else => return sema.fail(
block,
operand_src,
@@ -21342,7 +21342,7 @@ fn maybeConstantUnaryMath(
const pt = sema.pt;
const zcu = pt.zcu;
switch (result_ty.zigTypeTag(zcu)) {
- .Vector => if (try sema.resolveValue(operand)) |val| {
+ .vector => if (try sema.resolveValue(operand)) |val| {
const scalar_ty = result_ty.scalarType(zcu);
const vec_len = result_ty.vectorLen(zcu);
if (val.isUndef(zcu))
@@ -21387,7 +21387,7 @@ fn zirUnaryMath(
const scalar_ty = operand_ty.scalarType(zcu);
switch (scalar_ty.zigTypeTag(zcu)) {
- .ComptimeFloat, .Float => {},
+ .comptime_float, .float => {},
else => return sema.fail(
block,
operand_src,
@@ -21414,13 +21414,13 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
try operand_ty.resolveLayout(pt);
const enum_ty = switch (operand_ty.zigTypeTag(zcu)) {
- .EnumLiteral => {
+ .enum_literal => {
const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined);
const tag_name = ip.indexToKey(val.toIntern()).enum_literal;
return sema.addNullTerminatedStrLit(tag_name);
},
- .Enum => operand_ty,
- .Union => operand_ty.unionTagType(zcu) orelse
+ .@"enum" => operand_ty,
+ .@"union" => operand_ty.unionTagType(zcu) orelse
return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(pt)}),
else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{
operand_ty.fmt(pt),
@@ -21500,17 +21500,17 @@ fn zirReify(
}
const tag_index = type_info_ty.unionTagFieldIndex(Value.fromInterned(union_val.tag), zcu).?;
switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) {
- .Type => return .type_type,
- .Void => return .void_type,
- .Bool => return .bool_type,
- .NoReturn => return .noreturn_type,
- .ComptimeFloat => return .comptime_float_type,
- .ComptimeInt => return .comptime_int_type,
- .Undefined => return .undefined_type,
- .Null => return .null_type,
- .AnyFrame => return sema.failWithUseOfAsync(block, src),
- .EnumLiteral => return .enum_literal_type,
- .Int => {
+ .type => return .type_type,
+ .void => return .void_type,
+ .bool => return .bool_type,
+ .noreturn => return .noreturn_type,
+ .comptime_float => return .comptime_float_type,
+ .comptime_int => return .comptime_int_type,
+ .undefined => return .undefined_type,
+ .null => return .null_type,
+ .@"anyframe" => return sema.failWithUseOfAsync(block, src),
+ .enum_literal => return .enum_literal_type,
+ .int => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const signedness_val = try Value.fromInterned(union_val.val).fieldValue(
pt,
@@ -21526,7 +21526,7 @@ fn zirReify(
const ty = try pt.intType(signedness, bits);
return Air.internedToRef(ty.toIntern());
},
- .Vector => {
+ .vector => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21548,7 +21548,7 @@ fn zirReify(
});
return Air.internedToRef(ty.toIntern());
},
- .Float => {
+ .float => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const bits_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21566,7 +21566,7 @@ fn zirReify(
};
return Air.internedToRef(ty.toIntern());
},
- .Pointer => {
+ .pointer => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const size_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21631,13 +21631,13 @@ fn zirReify(
break :s .none;
};
- if (elem_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (elem_ty.zigTypeTag(zcu) == .noreturn) {
return sema.fail(block, src, "pointer to noreturn not allowed", .{});
- } else if (elem_ty.zigTypeTag(zcu) == .Fn) {
+ } else if (elem_ty.zigTypeTag(zcu) == .@"fn") {
if (ptr_size != .One) {
return sema.fail(block, src, "function pointers must be single pointers", .{});
}
- } else if (ptr_size == .Many and elem_ty.zigTypeTag(zcu) == .Opaque) {
+ } else if (ptr_size == .Many and elem_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{});
} else if (ptr_size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
@@ -21652,7 +21652,7 @@ fn zirReify(
};
return sema.failWithOwnedErrorMsg(block, msg);
}
- if (elem_ty.zigTypeTag(zcu) == .Opaque) {
+ if (elem_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, src, "C pointers cannot point to opaque types", .{});
}
}
@@ -21671,7 +21671,7 @@ fn zirReify(
});
return Air.internedToRef(ty.toIntern());
},
- .Array => {
+ .array => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const len_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21700,7 +21700,7 @@ fn zirReify(
});
return Air.internedToRef(ty.toIntern());
},
- .Optional => {
+ .optional => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const child_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21712,7 +21712,7 @@ fn zirReify(
const ty = try pt.optionalType(child_ty.toIntern());
return Air.internedToRef(ty.toIntern());
},
- .ErrorUnion => {
+ .error_union => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const error_set_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21726,14 +21726,14 @@ fn zirReify(
const error_set_ty = error_set_val.toType();
const payload_ty = payload_val.toType();
- if (error_set_ty.zigTypeTag(zcu) != .ErrorSet) {
+ if (error_set_ty.zigTypeTag(zcu) != .error_set) {
return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
}
const ty = try pt.errorUnionType(error_set_ty, payload_ty);
return Air.internedToRef(ty.toIntern());
},
- .ErrorSet => {
+ .error_set => {
const payload_val = Value.fromInterned(union_val.val).optionalValue(zcu) orelse
return Air.internedToRef(Type.anyerror.toIntern());
@@ -21767,7 +21767,7 @@ fn zirReify(
const ty = try pt.errorSetFromUnsortedNames(names.keys());
return Air.internedToRef(ty.toIntern());
},
- .Struct => {
+ .@"struct" => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21807,7 +21807,7 @@ fn zirReify(
return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_arr, name_strategy, is_tuple_val.toBool());
},
- .Enum => {
+ .@"enum" => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21836,7 +21836,7 @@ fn zirReify(
return sema.reifyEnum(block, inst, src, tag_type_val.toType(), is_exhaustive_val.toBool(), fields_arr, name_strategy);
},
- .Opaque => {
+ .@"opaque" => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const decls_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21879,7 +21879,7 @@ fn zirReify(
try sema.addTypeReferenceEntry(src, wip_ty.index);
return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index));
},
- .Union => {
+ .@"union" => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const layout_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21909,7 +21909,7 @@ fn zirReify(
return sema.reifyUnion(block, inst, src, layout, tag_type_val, fields_arr, name_strategy);
},
- .Fn => {
+ .@"fn" => {
const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(pt, struct_type.nameIndex(
ip,
@@ -21996,7 +21996,7 @@ fn zirReify(
});
return Air.internedToRef(ty.toIntern());
},
- .Frame => return sema.failWithUseOfAsync(block, src),
+ .frame => return sema.failWithUseOfAsync(block, src),
}
}
@@ -22067,7 +22067,7 @@ fn reifyEnum(
var done = false;
errdefer if (!done) wip_ty.cancel(ip, pt.tid);
- if (tag_ty.zigTypeTag(zcu) != .Int) {
+ if (tag_ty.zigTypeTag(zcu) != .int) {
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
}
@@ -22344,7 +22344,7 @@ fn reifyUnion(
for (field_types) |field_ty_ip| {
const field_ty = Type.fromInterned(field_ty_ip);
- if (field_ty.zigTypeTag(zcu) == .Opaque) {
+ if (field_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(gpa);
@@ -22602,7 +22602,7 @@ fn reifyStruct(
struct_type.field_inits.get(ip)[field_idx] = field_default;
}
- if (field_ty.zigTypeTag(zcu) == .Opaque) {
+ if (field_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
@@ -22611,7 +22611,7 @@ fn reifyStruct(
break :msg msg;
});
}
- if (field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (field_ty.zigTypeTag(zcu) == .noreturn) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "struct fields cannot be 'noreturn'", .{});
errdefer msg.destroy(gpa);
@@ -22794,7 +22794,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const operand_ty = sema.typeOf(operand);
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
- const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
+ const is_vector = dest_ty.zigTypeTag(zcu) == .vector;
const dest_scalar_ty = dest_ty.scalarType(zcu);
const operand_scalar_ty = operand_ty.scalarType(zcu);
@@ -22805,7 +22805,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (try sema.resolveValue(operand)) |operand_val| {
const result_val = try sema.intFromFloat(block, operand_src, operand_val, operand_ty, dest_ty, .truncate);
return Air.internedToRef(result_val.toIntern());
- } else if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
+ } else if (dest_scalar_ty.zigTypeTag(zcu) == .comptime_int) {
return sema.failWithNeededComptime(block, operand_src, .{
.needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known",
});
@@ -22877,7 +22877,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const operand_ty = sema.typeOf(operand);
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, src, operand_src);
- const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
+ const is_vector = dest_ty.zigTypeTag(zcu) == .vector;
const dest_scalar_ty = dest_ty.scalarType(zcu);
const operand_scalar_ty = operand_ty.scalarType(zcu);
@@ -22888,7 +22888,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (try sema.resolveValue(operand)) |operand_val| {
const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, pt, .sema);
return Air.internedToRef(result_val.toIntern());
- } else if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeFloat) {
+ } else if (dest_scalar_ty.zigTypeTag(zcu) == .comptime_float) {
return sema.failWithNeededComptime(block, operand_src, .{
.needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
});
@@ -22923,7 +22923,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const dest_ty = try sema.resolveDestType(block, src, extra.lhs, .remove_eu, "@ptrFromInt");
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, uncoerced_operand_ty, src, operand_src);
- const is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
+ const is_vector = dest_ty.zigTypeTag(zcu) == .vector;
const operand_ty = if (is_vector) operand_ty: {
const len = dest_ty.vectorLen(zcu);
break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len });
@@ -22975,7 +22975,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (!is_vector) {
- if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .Fn)) {
+ if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) {
if (!ptr_ty.isAllowzeroPtr(zcu)) {
const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
@@ -22992,7 +22992,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
const len = dest_ty.vectorLen(zcu);
- if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .Fn)) {
+ if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) {
for (0..len) |i| {
const idx_ref = try pt.intRef(Type.usize, i);
const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
@@ -23042,11 +23042,11 @@ fn ptrFromIntVal(
return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(pt)});
return switch (ptr_ty.zigTypeTag(zcu)) {
- .Optional => Value.fromInterned(try pt.intern(.{ .opt = .{
+ .optional => Value.fromInterned(try pt.intern(.{ .opt = .{
.ty = ptr_ty.toIntern(),
.val = if (addr == 0) .none else (try pt.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(),
} })),
- .Pointer => try pt.ptrIntValue(ptr_ty, addr),
+ .pointer => try pt.ptrIntValue(ptr_ty, addr),
else => unreachable,
};
}
@@ -23064,16 +23064,16 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
const dest_tag = base_dest_ty.zigTypeTag(zcu);
const operand_tag = base_operand_ty.zigTypeTag(zcu);
- if (dest_tag != .ErrorSet and dest_tag != .ErrorUnion) {
+ if (dest_tag != .error_set and dest_tag != .error_union) {
return sema.fail(block, src, "expected error set or error union type, found '{s}'", .{@tagName(dest_tag)});
}
- if (operand_tag != .ErrorSet and operand_tag != .ErrorUnion) {
+ if (operand_tag != .error_set and operand_tag != .error_union) {
return sema.fail(block, src, "expected error set or error union type, found '{s}'", .{@tagName(operand_tag)});
}
- if (dest_tag == .ErrorSet and operand_tag == .ErrorUnion) {
+ if (dest_tag == .error_set and operand_tag == .error_union) {
return sema.fail(block, src, "cannot cast an error union type to error set", .{});
}
- if (dest_tag == .ErrorUnion and operand_tag == .ErrorUnion and
+ if (dest_tag == .error_union and operand_tag == .error_union and
base_dest_ty.errorUnionPayload(zcu).toIntern() != base_operand_ty.errorUnionPayload(zcu).toIntern())
{
return sema.failWithOwnedErrorMsg(block, msg: {
@@ -23088,8 +23088,8 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
break :msg msg;
});
}
- const dest_ty = if (dest_tag == .ErrorUnion) base_dest_ty.errorUnionSet(zcu) else base_dest_ty;
- const operand_ty = if (operand_tag == .ErrorUnion) base_operand_ty.errorUnionSet(zcu) else base_operand_ty;
+ const dest_ty = if (dest_tag == .error_union) base_dest_ty.errorUnionSet(zcu) else base_dest_ty;
+ const operand_ty = if (operand_tag == .error_union) base_operand_ty.errorUnionSet(zcu) else base_operand_ty;
// operand must be defined since it can be an invalid error value
const maybe_operand_val = try sema.resolveDefinedValue(block, operand_src, operand);
@@ -23121,7 +23121,7 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
break :disjoint true;
};
- if (disjoint and dest_tag != .ErrorUnion) {
+ if (disjoint and dest_tag != .error_union) {
return sema.fail(block, src, "error sets '{}' and '{}' have no common errors", .{
operand_ty.fmt(pt), dest_ty.fmt(pt),
});
@@ -23131,7 +23131,7 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
if (!dest_ty.isAnyError(zcu)) check: {
const operand_val = zcu.intern_pool.indexToKey(val.toIntern());
var error_name: InternPool.NullTerminatedString = undefined;
- if (operand_tag == .ErrorUnion) {
+ if (operand_tag == .error_union) {
if (operand_val.error_union.val != .err_name) break :check;
error_name = operand_val.error_union.val.err_name;
} else {
@@ -23153,7 +23153,7 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
dest_ty.toIntern() != .adhoc_inferred_error_set_type and
zcu.backendSupportsFeature(.error_set_has_value))
{
- if (dest_tag == .ErrorUnion) {
+ if (dest_tag == .error_union) {
const err_code = try sema.analyzeErrUnionCode(block, operand_src, operand);
const err_int = try block.addBitCast(err_int_ty, err_code);
const zero_err = try pt.intRef(try pt.errorIntType(), 0);
@@ -23178,7 +23178,7 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
}
fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
- const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).@"struct".backing_integer.?;
const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
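// Illustrative sketch, not part of this commit: the same rename applies to
// @typeInfo payload field access, as in the FlagsInt line above; `Flags` here is
// a hypothetical packed struct (assumes Zig 0.14-era std.builtin.Type names).
const Flags = packed struct(u8) { ptr_cast: bool = false, _padding: u7 = 0 };
const FlagsInt = @typeInfo(Flags).@"struct".backing_integer.?; // was `.Struct`
comptime {
    if (FlagsInt != u8) @compileError("unexpected backing integer");
}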
@@ -23239,10 +23239,10 @@ fn ptrCastFull(
try Type.fromInterned(dest_info.child).resolveLayout(pt);
const src_slice_like = src_info.flags.size == .Slice or
- (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(zcu) == .Array);
+ (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(zcu) == .array);
const dest_slice_like = dest_info.flags.size == .Slice or
- (dest_info.flags.size == .One and Type.fromInterned(dest_info.child).zigTypeTag(zcu) == .Array);
+ (dest_info.flags.size == .One and Type.fromInterned(dest_info.child).zigTypeTag(zcu) == .array);
if (dest_info.flags.size == .Slice and !src_slice_like) {
return sema.fail(block, src, "illegal pointer cast to slice", .{});
@@ -23277,7 +23277,7 @@ fn ptrCastFull(
errdefer msg.destroy(sema.gpa);
if (dest_info.flags.size == .Many and
(src_info.flags.size == .Slice or
- (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(zcu) == .Array)))
+ (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(zcu) == .array)))
{
try sema.errNote(src, msg, "use 'ptr' field to convert slice to many pointer", .{});
} else {
@@ -23473,7 +23473,7 @@ fn ptrCastFull(
}
const ptr = if (src_info.flags.size == .Slice and dest_info.flags.size != .Slice) ptr: {
- if (operand_ty.zigTypeTag(zcu) == .Optional) {
+ if (operand_ty.zigTypeTag(zcu) == .optional) {
break :ptr try sema.analyzeOptionalSlicePtr(block, operand_src, operand, operand_ty);
} else {
break :ptr try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty);
@@ -23485,7 +23485,7 @@ fn ptrCastFull(
var info = dest_info;
info.flags.size = .Many;
const ty = try pt.ptrTypeSema(info);
- if (dest_ty.zigTypeTag(zcu) == .Optional) {
+ if (dest_ty.zigTypeTag(zcu) == .optional) {
break :blk try pt.optionalType(ty.toIntern());
} else {
break :blk ty;
@@ -23535,7 +23535,7 @@ fn ptrCastFull(
try sema.validateRuntimeValue(block, operand_src, ptr);
if (block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu) and
- (try Type.fromInterned(dest_info.child).hasRuntimeBitsSema(pt) or Type.fromInterned(dest_info.child).zigTypeTag(zcu) == .Fn))
+ (try Type.fromInterned(dest_info.child).hasRuntimeBitsSema(pt) or Type.fromInterned(dest_info.child).zigTypeTag(zcu) == .@"fn"))
{
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
@@ -23570,7 +23570,7 @@ fn ptrCastFull(
var intermediate_info = src_info;
intermediate_info.flags.address_space = dest_info.flags.address_space;
const intermediate_ptr_ty = try pt.ptrTypeSema(intermediate_info);
- const intermediate_ty = if (dest_ptr_ty.zigTypeTag(zcu) == .Optional) blk: {
+ const intermediate_ty = if (dest_ptr_ty.zigTypeTag(zcu) == .optional) blk: {
break :blk try pt.optionalType(intermediate_ptr_ty.toIntern());
} else intermediate_ptr_ty;
const intermediate = try block.addInst(.{
@@ -23613,7 +23613,7 @@ fn ptrCastFull(
fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).@"struct".backing_integer.?;
const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
@@ -23628,7 +23628,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
const dest_ty = blk: {
const dest_ty = try pt.ptrTypeSema(ptr_info);
- if (operand_ty.zigTypeTag(zcu) == .Optional) {
+ if (operand_ty.zigTypeTag(zcu) == .optional) {
break :blk try pt.optionalType(dest_ty.toIntern());
}
break :blk dest_ty;
@@ -23657,13 +23657,13 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const operand_ty = sema.typeOf(operand);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
- const operand_is_vector = operand_ty.zigTypeTag(zcu) == .Vector;
- const dest_is_vector = dest_ty.zigTypeTag(zcu) == .Vector;
+ const operand_is_vector = operand_ty.zigTypeTag(zcu) == .vector;
+ const dest_is_vector = dest_ty.zigTypeTag(zcu) == .vector;
if (operand_is_vector != dest_is_vector) {
return sema.fail(block, operand_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(pt), operand_ty.fmt(pt) });
}
- if (dest_scalar_ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (dest_scalar_ty.zigTypeTag(zcu) == .comptime_int) {
return sema.coerce(block, dest_ty, operand, operand_src);
}
@@ -23673,7 +23673,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return Air.internedToRef(val.toIntern());
}
- if (operand_scalar_ty.zigTypeTag(zcu) != .ComptimeInt) {
+ if (operand_scalar_ty.zigTypeTag(zcu) != .comptime_int) {
const operand_info = operand_ty.intInfo(zcu);
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return Air.internedToRef(val.toIntern());
@@ -23751,7 +23751,7 @@ fn zirBitCount(
const result_scalar_ty = try pt.smallestUnsignedInt(bits);
switch (operand_ty.zigTypeTag(zcu)) {
- .Vector => {
+ .vector => {
const vec_len = operand_ty.vectorLen(zcu);
const result_ty = try pt.vectorType(.{
.len = vec_len,
@@ -23776,7 +23776,7 @@ fn zirBitCount(
return block.addTyOp(air_tag, result_ty, operand);
}
},
- .Int => {
+ .int => {
if (try sema.resolveValueResolveLazy(operand)) |val| {
if (val.isUndef(zcu)) return pt.undefRef(result_scalar_ty);
return pt.intRef(result_scalar_ty, comptimeOp(val, operand_ty, zcu));
@@ -23813,7 +23813,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
switch (operand_ty.zigTypeTag(zcu)) {
- .Int => {
+ .int => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(zcu)) return pt.undefRef(operand_ty);
const result_val = try val.byteSwap(operand_ty, pt, sema.arena);
@@ -23823,7 +23823,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addTyOp(.byte_swap, operand_ty, operand);
},
- .Vector => {
+ .vector => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(zcu))
return pt.undefRef(operand_ty);
@@ -23862,7 +23862,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const pt = sema.pt;
const zcu = pt.zcu;
switch (operand_ty.zigTypeTag(zcu)) {
- .Int => {
+ .int => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(zcu)) return pt.undefRef(operand_ty);
const result_val = try val.bitReverse(operand_ty, pt, sema.arena);
@@ -23872,7 +23872,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.requireRuntimeBlock(block, src, runtime_src);
return block.addTyOp(.bit_reverse, operand_ty, operand);
},
- .Vector => {
+ .vector => {
const runtime_src = if (try sema.resolveValue(operand)) |val| {
if (val.isUndef(zcu))
return pt.undefRef(operand_ty);
@@ -23924,7 +23924,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
const ip = &zcu.intern_pool;
try ty.resolveLayout(pt);
switch (ty.zigTypeTag(zcu)) {
- .Struct => {},
+ .@"struct" => {},
else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(pt)}),
}
@@ -23959,7 +23959,7 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Struct, .Enum, .Union, .Opaque => return,
+ .@"struct", .@"enum", .@"union", .@"opaque" => return,
else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(pt)}),
}
}
@@ -23969,8 +23969,8 @@ fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileEr
const pt = sema.pt;
const zcu = pt.zcu;
switch (try ty.zigTypeTagOrPoison(zcu)) {
- .ComptimeInt => return true,
- .Int => return false,
+ .comptime_int => return true,
+ .int => return false,
else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(pt)}),
}
}
@@ -23984,7 +23984,7 @@ fn checkInvalidPtrIntArithmetic(
const pt = sema.pt;
const zcu = pt.zcu;
switch (try ty.zigTypeTagOrPoison(zcu)) {
- .Pointer => switch (ty.ptrSize(zcu)) {
+ .pointer => switch (ty.ptrSize(zcu)) {
.One, .Slice => return,
.Many, .C => return sema.failWithInvalidPtrArithmetic(block, src, "pointer-integer", "addition and subtraction"),
},
@@ -24001,8 +24001,8 @@ fn checkArithmeticOp(
rhs_zig_ty_tag: std.builtin.TypeId,
zir_tag: Zir.Inst.Tag,
) CompileError!void {
- const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
- const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat;
+ const is_int = scalar_tag == .int or scalar_tag == .comptime_int;
+ const is_float = scalar_tag == .float or scalar_tag == .comptime_float;
if (!is_int and !(is_float and floatOpAllowed(zir_tag))) {
return sema.fail(block, src, "invalid operands to binary expression: '{s}' and '{s}'", .{
@@ -24020,8 +24020,8 @@ fn checkPtrOperand(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Pointer => return,
- .Fn => {
+ .pointer => return,
+ .@"fn" => {
const msg = msg: {
const msg = try sema.errMsg(
ty_src,
@@ -24036,7 +24036,7 @@ fn checkPtrOperand(
};
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Optional => if (ty.childType(zcu).zigTypeTag(zcu) == .Pointer) return,
+ .optional => if (ty.childType(zcu).zigTypeTag(zcu) == .pointer) return,
else => {},
}
return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
@@ -24052,8 +24052,8 @@ fn checkPtrType(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Pointer => if (allow_slice or !ty.isSlice(zcu)) return,
- .Fn => {
+ .pointer => if (allow_slice or !ty.isSlice(zcu)) return,
+ .@"fn" => {
const msg = msg: {
const msg = try sema.errMsg(
ty_src,
@@ -24068,7 +24068,7 @@ fn checkPtrType(
};
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Optional => if (ty.childType(zcu).zigTypeTag(zcu) == .Pointer) return,
+ .optional => if (ty.childType(zcu).zigTypeTag(zcu) == .pointer) return,
else => {},
}
return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
@@ -24083,8 +24083,8 @@ fn checkVectorElemType(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Int, .Float, .Bool => return,
- .Optional, .Pointer => if (ty.isPtrAtRuntime(zcu)) return,
+ .int, .float, .bool => return,
+ .optional, .pointer => if (ty.isPtrAtRuntime(zcu)) return,
else => {},
}
return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(pt)});
@@ -24099,7 +24099,7 @@ fn checkFloatType(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .ComptimeInt, .ComptimeFloat, .Float => {},
+ .comptime_int, .comptime_float, .float => {},
else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(pt)}),
}
}
@@ -24113,9 +24113,9 @@ fn checkNumericType(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
+ .comptime_float, .float, .comptime_int, .int => {},
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .comptime_float, .float, .comptime_int, .int => {},
else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
},
else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(pt)}),
@@ -24167,7 +24167,7 @@ fn checkAtomicPtrOperand(
const ptr_ty = sema.typeOf(ptr);
const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(zcu)) {
- .Pointer => ptr_ty.ptrInfo(zcu),
+ .pointer => ptr_ty.ptrInfo(zcu),
else => {
const wanted_ptr_ty = try pt.ptrTypeSema(wanted_ptr_data);
_ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
@@ -24208,11 +24208,11 @@ fn checkIntOrVector(
const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
switch (try operand_ty.zigTypeTagOrPoison(zcu)) {
- .Int => return operand_ty,
- .Vector => {
+ .int => return operand_ty,
+ .vector => {
const elem_ty = operand_ty.childType(zcu);
switch (try elem_ty.zigTypeTagOrPoison(zcu)) {
- .Int => return elem_ty,
+ .int => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
elem_ty.fmt(pt),
}),
@@ -24233,11 +24233,11 @@ fn checkIntOrVectorAllowComptime(
const pt = sema.pt;
const zcu = pt.zcu;
switch (try operand_ty.zigTypeTagOrPoison(zcu)) {
- .Int, .ComptimeInt => return operand_ty,
- .Vector => {
+ .int, .comptime_int => return operand_ty,
+ .vector => {
const elem_ty = operand_ty.childType(zcu);
switch (try elem_ty.zigTypeTagOrPoison(zcu)) {
- .Int, .ComptimeInt => return elem_ty,
+ .int, .comptime_int => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
elem_ty.fmt(pt),
}),
@@ -24277,7 +24277,7 @@ fn checkSimdBinOp(
const rhs_ty = sema.typeOf(uncasted_rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- const vec_len: ?usize = if (lhs_ty.zigTypeTag(zcu) == .Vector) lhs_ty.vectorLen(zcu) else null;
+ const vec_len: ?usize = if (lhs_ty.zigTypeTag(zcu) == .vector) lhs_ty.vectorLen(zcu) else null;
const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
@@ -24308,14 +24308,14 @@ fn checkVectorizableBinaryOperands(
const zcu = pt.zcu;
const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(zcu);
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(zcu);
- if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;
+ if (lhs_zig_ty_tag != .vector and rhs_zig_ty_tag != .vector) return;
const lhs_is_vector = switch (lhs_zig_ty_tag) {
- .Vector, .Array => true,
+ .vector, .array => true,
else => false,
};
const rhs_is_vector = switch (rhs_zig_ty_tag) {
- .Vector, .Array => true,
+ .vector, .array => true,
else => false,
};
@@ -24477,7 +24477,7 @@ fn zirCmpxchg(
// zig fmt: on
const expected_value = try sema.resolveInst(extra.expected_value);
const elem_ty = sema.typeOf(expected_value);
- if (elem_ty.zigTypeTag(zcu) == .Float) {
+ if (elem_ty.zigTypeTag(zcu) == .float) {
return sema.fail(
block,
elem_ty_src,
@@ -24602,7 +24602,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const pt = sema.pt;
const zcu = pt.zcu;
- if (operand_ty.zigTypeTag(zcu) != .Vector) {
+ if (operand_ty.zigTypeTag(zcu) != .vector) {
return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(pt)});
}
@@ -24611,13 +24611,13 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// Type-check depending on operation.
switch (operation) {
.And, .Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int, .Bool => {},
+ .int, .bool => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
@tagName(operation), operand_ty.fmt(pt),
}),
},
.Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int, .Float => {},
+ .int, .float => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
@tagName(operation), operand_ty.fmt(pt),
}),
@@ -24677,7 +24677,7 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
var mask_ty = sema.typeOf(mask);
const mask_len = switch (sema.typeOf(mask).zigTypeTag(zcu)) {
- .Array, .Vector => sema.typeOf(mask).arrayLen(zcu),
+ .array, .vector => sema.typeOf(mask).arrayLen(zcu),
else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(pt)}),
};
mask_ty = try pt.vectorType(.{
@@ -24715,16 +24715,16 @@ fn analyzeShuffle(
});
const maybe_a_len = switch (sema.typeOf(a).zigTypeTag(zcu)) {
- .Array, .Vector => sema.typeOf(a).arrayLen(zcu),
- .Undefined => null,
+ .array, .vector => sema.typeOf(a).arrayLen(zcu),
+ .undefined => null,
else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
elem_ty.fmt(pt),
sema.typeOf(a).fmt(pt),
}),
};
const maybe_b_len = switch (sema.typeOf(b).zigTypeTag(zcu)) {
- .Array, .Vector => sema.typeOf(b).arrayLen(zcu),
- .Undefined => null,
+ .array, .vector => sema.typeOf(b).arrayLen(zcu),
+ .undefined => null,
else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
elem_ty.fmt(pt),
sema.typeOf(b).fmt(pt),
@@ -24869,7 +24869,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
const pred_ty = sema.typeOf(pred_uncoerced);
const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(zcu)) {
- .Vector, .Array => pred_ty.arrayLen(zcu),
+ .vector, .array => pred_ty.arrayLen(zcu),
else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(pt)}),
};
const vec_len: u32 = @intCast(try sema.usizeCast(block, pred_src, vec_len_u64));
@@ -25011,13 +25011,13 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
switch (elem_ty.zigTypeTag(zcu)) {
- .Enum => if (op != .Xchg) {
+ .@"enum" => if (op != .Xchg) {
return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
},
- .Bool => if (op != .Xchg) {
+ .bool => if (op != .Xchg) {
return sema.fail(block, op_src, "@atomicRmw with bool only allowed with .Xchg", .{});
},
- .Float => switch (op) {
+ .float => switch (op) {
.Xchg, .Add, .Sub, .Max, .Min => {},
else => return sema.fail(block, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, .Sub, .Max, and .Min", .{}),
},
@@ -25135,7 +25135,7 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const zcu = pt.zcu;
switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
- .ComptimeFloat, .Float => {},
+ .comptime_float, .float => {},
else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(pt)}),
}
@@ -25281,7 +25281,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
const ip = &zcu.intern_pool;
const extra = sema.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data;
- const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).@"struct".backing_integer.?;
const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
assert(!flags.ptr_cast);
const inst_src = block.nodeOffset(extra.src_node);
@@ -25296,7 +25296,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
}
const parent_ty = Type.fromInterned(parent_ptr_info.child);
switch (parent_ty.zigTypeTag(zcu)) {
- .Struct, .Union => {},
+ .@"struct", .@"union" => {},
else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(pt)}),
}
try parent_ty.resolveLayout(pt);
@@ -25305,7 +25305,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
.needed_comptime_reason = "field name must be comptime-known",
});
const field_index = switch (parent_ty.zigTypeTag(zcu)) {
- .Struct => blk: {
+ .@"struct" => blk: {
if (parent_ty.isTuple(zcu)) {
if (field_name.eqlSlice("len", ip)) {
return sema.fail(block, inst_src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
@@ -25315,10 +25315,10 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
break :blk try sema.structFieldIndex(block, parent_ty, field_name, field_name_src);
}
},
- .Union => try sema.unionFieldIndex(block, parent_ty, field_name, field_name_src),
+ .@"union" => try sema.unionFieldIndex(block, parent_ty, field_name, field_name_src),
else => unreachable,
};
- if (parent_ty.zigTypeTag(zcu) == .Struct and parent_ty.structFieldIsComptime(field_index, zcu)) {
+ if (parent_ty.zigTypeTag(zcu) == .@"struct" and parent_ty.structFieldIsComptime(field_index, zcu)) {
return sema.fail(block, field_name_src, "cannot get @fieldParentPtr of a comptime field", .{});
}
@@ -25400,7 +25400,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: {
switch (parent_ty.zigTypeTag(zcu)) {
- .Struct => switch (parent_ty.containerLayout(zcu)) {
+ .@"struct" => switch (parent_ty.containerLayout(zcu)) {
.auto => {},
.@"extern" => {
const byte_offset = parent_ty.structFieldOffset(field_index, zcu);
@@ -25417,7 +25417,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
break :result Air.internedToRef(parent_ptr_val.toIntern());
},
},
- .Union => switch (parent_ty.containerLayout(zcu)) {
+ .@"union" => switch (parent_ty.containerLayout(zcu)) {
.auto => {},
.@"extern", .@"packed" => {
// For an extern or packed union, just coerce the pointer.
@@ -25914,7 +25914,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
// lowering. The AIR instruction requires pointers with element types of
// equal ABI size.
- if (dest_ty.zigTypeTag(zcu) != .Pointer or src_ty.zigTypeTag(zcu) != .Pointer) {
+ if (dest_ty.zigTypeTag(zcu) != .pointer or src_ty.zigTypeTag(zcu) != .pointer) {
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
}
@@ -26034,7 +26034,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
switch (ptr_info.flags.size) {
.Slice => break :dest_elem_ty Type.fromInterned(ptr_info.child),
.One => {
- if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .Array) {
+ if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .array) {
break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(zcu);
}
},
@@ -26533,7 +26533,7 @@ fn zirCDefine(
.needed_comptime_reason = "name of macro being undefined must be comptime-known",
});
const rhs = try sema.resolveInst(extra.rhs);
- if (sema.typeOf(rhs).zigTypeTag(zcu) != .Void) {
+ if (sema.typeOf(rhs).zigTypeTag(zcu) != .void) {
const value = try sema.resolveConstString(block, val_src, extra.rhs, .{
.needed_comptime_reason = "value of macro being undefined must be comptime-known",
});
@@ -26951,7 +26951,7 @@ fn validateVarType(
return sema.failWithOwnedErrorMsg(block, msg);
}
} else {
- if (var_ty.zigTypeTag(zcu) == .Opaque) {
+ if (var_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(
block,
src,
@@ -26968,7 +26968,7 @@ fn validateVarType(
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsComptime(msg, src, var_ty);
- if (var_ty.zigTypeTag(zcu) == .ComptimeInt or var_ty.zigTypeTag(zcu) == .ComptimeFloat) {
+ if (var_ty.zigTypeTag(zcu) == .comptime_int or var_ty.zigTypeTag(zcu) == .comptime_float) {
try sema.errNote(src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{});
}
@@ -27003,42 +27003,42 @@ fn explainWhyTypeIsComptimeInner(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Bool,
- .Int,
- .Float,
- .ErrorSet,
- .Enum,
- .Frame,
- .AnyFrame,
- .Void,
+ .bool,
+ .int,
+ .float,
+ .error_set,
+ .@"enum",
+ .frame,
+ .@"anyframe",
+ .void,
=> return,
- .Fn => {
+ .@"fn" => {
try sema.errNote(src_loc, msg, "use '*const {}' for a function pointer type", .{ty.fmt(pt)});
},
- .Type => {
+ .type => {
try sema.errNote(src_loc, msg, "types are not available at runtime", .{});
},
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .NoReturn,
- .Undefined,
- .Null,
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .noreturn,
+ .undefined,
+ .null,
=> return,
- .Opaque => {
+ .@"opaque" => {
try sema.errNote(src_loc, msg, "opaque type '{}' has undefined size", .{ty.fmt(pt)});
},
- .Array, .Vector => {
+ .array, .vector => {
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(zcu), type_set);
},
- .Pointer => {
+ .pointer => {
const elem_ty = ty.elemType2(zcu);
- if (elem_ty.zigTypeTag(zcu) == .Fn) {
+ if (elem_ty.zigTypeTag(zcu) == .@"fn") {
const fn_info = zcu.typeToFunc(elem_ty).?;
if (fn_info.is_generic) {
try sema.errNote(src_loc, msg, "function is generic", .{});
@@ -27055,14 +27055,14 @@ fn explainWhyTypeIsComptimeInner(
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(zcu), type_set);
},
- .Optional => {
+ .optional => {
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(zcu), type_set);
},
- .ErrorUnion => {
+ .error_union => {
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(zcu), type_set);
},
- .Struct => {
+ .@"struct" => {
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
if (zcu.typeToStruct(ty)) |struct_type| {
@@ -27082,7 +27082,7 @@ fn explainWhyTypeIsComptimeInner(
// TODO tuples
},
- .Union => {
+ .@"union" => {
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
if (zcu.typeToUnion(ty)) |union_obj| {
@@ -27123,34 +27123,34 @@ fn validateExternType(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .Undefined,
- .Null,
- .ErrorUnion,
- .ErrorSet,
- .Frame,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .undefined,
+ .null,
+ .error_union,
+ .error_set,
+ .frame,
=> return false,
- .Void => return position == .union_field or position == .ret_ty or position == .struct_field or position == .element,
- .NoReturn => return position == .ret_ty,
- .Opaque,
- .Bool,
- .Float,
- .AnyFrame,
+ .void => return position == .union_field or position == .ret_ty or position == .struct_field or position == .element,
+ .noreturn => return position == .ret_ty,
+ .@"opaque",
+ .bool,
+ .float,
+ .@"anyframe",
=> return true,
- .Pointer => {
- if (ty.childType(zcu).zigTypeTag(zcu) == .Fn) {
+ .pointer => {
+ if (ty.childType(zcu).zigTypeTag(zcu) == .@"fn") {
return ty.isConstPtr(zcu) and try sema.validateExternType(ty.childType(zcu), .other);
}
return !(ty.isSlice(zcu) or try ty.comptimeOnlySema(pt));
},
- .Int => switch (ty.intInfo(zcu).bits) {
+ .int => switch (ty.intInfo(zcu).bits) {
0, 8, 16, 32, 64, 128 => return true,
else => return false,
},
- .Fn => {
+ .@"fn" => {
if (position != .other) return false;
const target = zcu.getTarget();
// For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
@@ -27160,10 +27160,10 @@ fn validateExternType(
}
return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(zcu));
},
- .Enum => {
+ .@"enum" => {
return sema.validateExternType(ty.intTagType(zcu), position);
},
- .Struct, .Union => switch (ty.containerLayout(zcu)) {
+ .@"struct", .@"union" => switch (ty.containerLayout(zcu)) {
.@"extern" => return true,
.@"packed" => {
const bit_size = try ty.bitSizeSema(pt);
@@ -27174,12 +27174,12 @@ fn validateExternType(
},
.auto => return !(try ty.hasRuntimeBitsSema(pt)),
},
- .Array => {
+ .array => {
if (position == .ret_ty or position == .param_ty) return false;
return sema.validateExternType(ty.elemType2(zcu), .element);
},
- .Vector => return sema.validateExternType(ty.elemType2(zcu), .element),
- .Optional => return ty.isPtrLikeOptional(zcu),
+ .vector => return sema.validateExternType(ty.elemType2(zcu), .element),
+ .optional => return ty.isPtrLikeOptional(zcu),
}
}
@@ -27193,29 +27193,29 @@ fn explainWhyTypeIsNotExtern(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Opaque,
- .Bool,
- .Float,
- .AnyFrame,
+ .@"opaque",
+ .bool,
+ .float,
+ .@"anyframe",
=> return,
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .Undefined,
- .Null,
- .ErrorUnion,
- .ErrorSet,
- .Frame,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .undefined,
+ .null,
+ .error_union,
+ .error_set,
+ .frame,
=> return,
- .Pointer => {
+ .pointer => {
if (ty.isSlice(zcu)) {
try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
} else {
const pointee_ty = ty.childType(zcu);
- if (!ty.isConstPtr(zcu) and pointee_ty.zigTypeTag(zcu) == .Fn) {
+ if (!ty.isConstPtr(zcu) and pointee_ty.zigTypeTag(zcu) == .@"fn") {
try sema.errNote(src_loc, msg, "pointer to extern function must be 'const'", .{});
} else if (try ty.comptimeOnlySema(pt)) {
try sema.errNote(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(pt)});
@@ -27224,14 +27224,14 @@ fn explainWhyTypeIsNotExtern(
try sema.explainWhyTypeIsNotExtern(msg, src_loc, pointee_ty, .other);
}
},
- .Void => try sema.errNote(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}),
- .NoReturn => try sema.errNote(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
- .Int => if (!std.math.isPowerOfTwo(ty.intInfo(zcu).bits)) {
+ .void => try sema.errNote(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}),
+ .noreturn => try sema.errNote(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
+ .int => if (!std.math.isPowerOfTwo(ty.intInfo(zcu).bits)) {
try sema.errNote(src_loc, msg, "only integers with 0 or power of two bits are extern compatible", .{});
} else {
try sema.errNote(src_loc, msg, "only integers with 0, 8, 16, 32, 64 and 128 bits are extern compatible", .{});
},
- .Fn => {
+ .@"fn" => {
if (position != .other) {
try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{});
try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{});
@@ -27244,14 +27244,14 @@ fn explainWhyTypeIsNotExtern(
else => return,
}
},
- .Enum => {
+ .@"enum" => {
const tag_ty = ty.intTagType(zcu);
try sema.errNote(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(pt)});
try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
},
- .Struct => try sema.errNote(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}),
- .Union => try sema.errNote(src_loc, msg, "only extern unions and ABI sized packed unions are extern compatible", .{}),
- .Array => {
+ .@"struct" => try sema.errNote(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}),
+ .@"union" => try sema.errNote(src_loc, msg, "only extern unions and ABI sized packed unions are extern compatible", .{}),
+ .array => {
if (position == .ret_ty) {
return sema.errNote(src_loc, msg, "arrays are not allowed as a return type", .{});
} else if (position == .param_ty) {
@@ -27259,8 +27259,8 @@ fn explainWhyTypeIsNotExtern(
}
try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(zcu), .element);
},
- .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(zcu), .element),
- .Optional => try sema.errNote(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
+ .vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(zcu), .element),
+ .optional => try sema.errNote(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
}
}
@@ -27270,34 +27270,34 @@ fn validatePackedType(sema: *Sema, ty: Type) !bool {
const pt = sema.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .Undefined,
- .Null,
- .ErrorUnion,
- .ErrorSet,
- .Frame,
- .NoReturn,
- .Opaque,
- .AnyFrame,
- .Fn,
- .Array,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .undefined,
+ .null,
+ .error_union,
+ .error_set,
+ .frame,
+ .noreturn,
+ .@"opaque",
+ .@"anyframe",
+ .@"fn",
+ .array,
=> false,
- .Optional => return ty.isPtrLikeOptional(zcu),
- .Void,
- .Bool,
- .Float,
- .Int,
- .Vector,
+ .optional => return ty.isPtrLikeOptional(zcu),
+ .void,
+ .bool,
+ .float,
+ .int,
+ .vector,
=> true,
- .Enum => switch (zcu.intern_pool.loadEnumType(ty.toIntern()).tag_mode) {
+ .@"enum" => switch (zcu.intern_pool.loadEnumType(ty.toIntern()).tag_mode) {
.auto => false,
.explicit, .nonexhaustive => true,
},
- .Pointer => !ty.isSlice(zcu) and !try ty.comptimeOnlySema(pt),
- .Struct, .Union => ty.containerLayout(zcu) == .@"packed",
+ .pointer => !ty.isSlice(zcu) and !try ty.comptimeOnlySema(pt),
+ .@"struct", .@"union" => ty.containerLayout(zcu) == .@"packed",
};
}
@@ -27310,40 +27310,40 @@ fn explainWhyTypeIsNotPacked(
const pt = sema.pt;
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Void,
- .Bool,
- .Float,
- .Int,
- .Vector,
- .Enum,
+ .void,
+ .bool,
+ .float,
+ .int,
+ .vector,
+ .@"enum",
=> return,
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .EnumLiteral,
- .Undefined,
- .Null,
- .Frame,
- .NoReturn,
- .Opaque,
- .ErrorUnion,
- .ErrorSet,
- .AnyFrame,
- .Optional,
- .Array,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .enum_literal,
+ .undefined,
+ .null,
+ .frame,
+ .noreturn,
+ .@"opaque",
+ .error_union,
+ .error_set,
+ .@"anyframe",
+ .optional,
+ .array,
=> try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}),
- .Pointer => if (ty.isSlice(zcu)) {
+ .pointer => if (ty.isSlice(zcu)) {
try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
} else {
try sema.errNote(src_loc, msg, "comptime-only pointer has no guaranteed in-memory representation", .{});
try sema.explainWhyTypeIsComptime(msg, src_loc, ty);
},
- .Fn => {
+ .@"fn" => {
try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{});
try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{});
},
- .Struct => try sema.errNote(src_loc, msg, "only packed structs layout are allowed in packed types", .{}),
- .Union => try sema.errNote(src_loc, msg, "only packed unions layout are allowed in packed types", .{}),
+ .@"struct" => try sema.errNote(src_loc, msg, "only packed structs layout are allowed in packed types", .{}),
+ .@"union" => try sema.errNote(src_loc, msg, "only packed unions layout are allowed in packed types", .{}),
}
}
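// Illustrative sketch, not part of this commit: after the rename, tag names that
// collide with Zig keywords (struct, enum, union, opaque, fn, anyframe) are
// written with @"..." syntax, while the rest are plain lowercase identifiers.
const std = @import("std");
const TypeId = @typeInfo(std.builtin.Type).@"union".tag_type.?;
comptime {
    _ = TypeId.@"struct"; // keyword-colliding name needs @"..."
    _ = TypeId.int; // ordinary lowercase field
}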
@@ -27356,7 +27356,7 @@ fn prepareSimplePanic(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
const fn_val = try sema.resolveConstValue(block, src, fn_ref, .{
.needed_comptime_reason = "panic handler must be comptime-known",
});
- assert(fn_val.typeOf(zcu).zigTypeTag(zcu) == .Fn);
+ assert(fn_val.typeOf(zcu).zigTypeTag(zcu) == .@"fn");
assert(try fn_val.typeOf(zcu).fnHasRuntimeBitsSema(pt));
try zcu.ensureFuncBodyAnalysisQueued(fn_val.toIntern());
zcu.panic_func_index = fn_val.toIntern();
@@ -27444,9 +27444,9 @@ fn addSafetyCheckExtra(
try parent_block.instructions.ensureUnusedCapacity(gpa, 1);
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
1 + // The main block only needs space for the cond_br.
- @typeInfo(Air.CondBr).Struct.fields.len +
+ @typeInfo(Air.CondBr).@"struct".fields.len +
1 + // The ok branch of the cond_br only needs space for the br.
fail_block.instructions.items.len);
@@ -27614,7 +27614,7 @@ fn panicSentinelMismatch(
break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
};
- const ok = if (sentinel_ty.zigTypeTag(zcu) == .Vector) ok: {
+ const ok = if (sentinel_ty.zigTypeTag(zcu) == .vector) ok: {
const eql =
try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
break :ok try parent_block.addInst(.{
@@ -27727,7 +27727,7 @@ fn fieldVal(
object_ty;
switch (inner_ty.zigTypeTag(zcu)) {
- .Array => {
+ .array => {
if (field_name.eqlSlice("len", ip)) {
return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(zcu))).toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
@@ -27756,7 +27756,7 @@ fn fieldVal(
);
}
},
- .Pointer => {
+ .pointer => {
const ptr_info = inner_ty.ptrInfo(zcu);
if (ptr_info.flags.size == .Slice) {
if (field_name.eqlSlice("ptr", ip)) {
@@ -27781,7 +27781,7 @@ fn fieldVal(
}
}
},
- .Type => {
+ .type => {
const dereffed_type = if (is_pointer_to)
try sema.analyzeLoad(block, src, object, object_src)
else
@@ -27791,7 +27791,7 @@ fn fieldVal(
const child_type = val.toType();
switch (try child_type.zigTypeTagOrPoison(zcu)) {
- .ErrorSet => {
+ .error_set => {
switch (ip.indexToKey(child_type.toIntern())) {
.error_set_type => |error_set_type| blk: {
if (error_set_type.nameIndex(ip, field_name) != null) break :blk;
@@ -27818,7 +27818,7 @@ fn fieldVal(
.name = field_name,
} })));
},
- .Union => {
+ .@"union" => {
if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
@@ -27831,7 +27831,7 @@ fn fieldVal(
}
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
},
- .Enum => {
+ .@"enum" => {
if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
@@ -27841,7 +27841,7 @@ fn fieldVal(
const enum_val = try pt.enumValueFieldIndex(child_type, field_index);
return Air.internedToRef(enum_val.toIntern());
},
- .Struct, .Opaque => {
+ .@"struct", .@"opaque" => {
switch (child_type.toIntern()) {
.empty_struct_type, .anyopaque_type => {}, // no namespace
else => if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
@@ -27854,19 +27854,19 @@ fn fieldVal(
const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(pt)});
errdefer msg.destroy(sema.gpa);
if (child_type.isSlice(zcu)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{});
- if (child_type.zigTypeTag(zcu) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{});
+ if (child_type.zigTypeTag(zcu) == .array) try sema.errNote(src, msg, "array values have 'len' member", .{});
break :msg msg;
}),
}
},
- .Struct => if (is_pointer_to) {
+ .@"struct" => if (is_pointer_to) {
// Avoid loading the entire struct by fetching a pointer and loading that
const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
return sema.analyzeLoad(block, src, field_ptr, object_src);
} else {
return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty);
},
- .Union => if (is_pointer_to) {
+ .@"union" => if (is_pointer_to) {
// Avoid loading the entire union by fetching a pointer and loading that
const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty, false);
return sema.analyzeLoad(block, src, field_ptr, object_src);
@@ -27896,7 +27896,7 @@ fn fieldPtr(
const object_ptr_src = src; // TODO better source location
const object_ptr_ty = sema.typeOf(object_ptr);
const object_ty = switch (object_ptr_ty.zigTypeTag(zcu)) {
- .Pointer => object_ptr_ty.childType(zcu),
+ .pointer => object_ptr_ty.childType(zcu),
else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(pt)}),
};
@@ -27911,7 +27911,7 @@ fn fieldPtr(
object_ty;
switch (inner_ty.zigTypeTag(zcu)) {
- .Array => {
+ .array => {
if (field_name.eqlSlice("len", ip)) {
const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(zcu));
return uavRef(sema, int_val.toIntern());
@@ -27955,7 +27955,7 @@ fn fieldPtr(
);
}
},
- .Pointer => if (inner_ty.isSlice(zcu)) {
+ .pointer => if (inner_ty.isSlice(zcu)) {
const inner_ptr = if (is_pointer_to)
try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
else
@@ -28010,7 +28010,7 @@ fn fieldPtr(
);
}
},
- .Type => {
+ .type => {
_ = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, object_ptr, undefined);
const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
const inner = if (is_pointer_to)
@@ -28022,7 +28022,7 @@ fn fieldPtr(
const child_type = val.toType();
switch (child_type.zigTypeTag(zcu)) {
- .ErrorSet => {
+ .error_set => {
switch (ip.indexToKey(child_type.toIntern())) {
.error_set_type => |error_set_type| blk: {
if (error_set_type.nameIndex(ip, field_name) != null) {
@@ -28051,7 +28051,7 @@ fn fieldPtr(
.name = field_name,
} }));
},
- .Union => {
+ .@"union" => {
if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
@@ -28065,7 +28065,7 @@ fn fieldPtr(
}
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
},
- .Enum => {
+ .@"enum" => {
if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
@@ -28076,7 +28076,7 @@ fn fieldPtr(
const idx_val = try pt.enumValueFieldIndex(child_type, field_index_u32);
return uavRef(sema, idx_val.toIntern());
},
- .Struct, .Opaque => {
+ .@"struct", .@"opaque" => {
if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
@@ -28085,7 +28085,7 @@ fn fieldPtr(
else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(pt)}),
}
},
- .Struct => {
+ .@"struct" => {
const inner_ptr = if (is_pointer_to)
try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
else
@@ -28094,7 +28094,7 @@ fn fieldPtr(
try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr);
return field_ptr;
},
- .Union => {
+ .@"union" => {
const inner_ptr = if (is_pointer_to)
try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
else
@@ -28134,13 +28134,13 @@ fn fieldCallBind(
const ip = &zcu.intern_pool;
const raw_ptr_src = src; // TODO better source location
const raw_ptr_ty = sema.typeOf(raw_ptr);
- const inner_ty = if (raw_ptr_ty.zigTypeTag(zcu) == .Pointer and (raw_ptr_ty.ptrSize(zcu) == .One or raw_ptr_ty.ptrSize(zcu) == .C))
+ const inner_ty = if (raw_ptr_ty.zigTypeTag(zcu) == .pointer and (raw_ptr_ty.ptrSize(zcu) == .One or raw_ptr_ty.ptrSize(zcu) == .C))
raw_ptr_ty.childType(zcu)
else
return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(pt)});
// Optionally dereference a second pointer to get the concrete type.
- const is_double_ptr = inner_ty.zigTypeTag(zcu) == .Pointer and inner_ty.ptrSize(zcu) == .One;
+ const is_double_ptr = inner_ty.zigTypeTag(zcu) == .pointer and inner_ty.ptrSize(zcu) == .One;
const concrete_ty = if (is_double_ptr) inner_ty.childType(zcu) else inner_ty;
const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
const object_ptr = if (is_double_ptr)
@@ -28150,7 +28150,7 @@ fn fieldCallBind(
find_field: {
switch (concrete_ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
try concrete_ty.resolveFields(pt);
if (zcu.typeToStruct(concrete_ty)) |struct_type| {
const field_index = struct_type.nameIndex(ip, field_name) orelse
@@ -28176,14 +28176,14 @@ fn fieldCallBind(
}
}
},
- .Union => {
+ .@"union" => {
try concrete_ty.resolveFields(pt);
const union_obj = zcu.typeToUnion(concrete_ty).?;
_ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field;
const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false);
return .{ .direct = try sema.analyzeLoad(block, src, field_ptr, src) };
},
- .Type => {
+ .type => {
const namespace = try sema.analyzeLoad(block, src, object_ptr, src);
return .{ .direct = try sema.fieldVal(block, src, namespace, field_name, field_name_src) };
},
@@ -28205,7 +28205,7 @@ fn fieldCallBind(
const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]);
if (first_param_type.isGenericPoison() or
- (first_param_type.zigTypeTag(zcu) == .Pointer and
+ (first_param_type.zigTypeTag(zcu) == .pointer and
(first_param_type.ptrSize(zcu) == .One or
first_param_type.ptrSize(zcu) == .C) and
first_param_type.childType(zcu).eql(concrete_ty, zcu)))
@@ -28225,7 +28225,7 @@ fn fieldCallBind(
.func_inst = decl_val,
.arg0_inst = deref,
} };
- } else if (first_param_type.zigTypeTag(zcu) == .Optional) {
+ } else if (first_param_type.zigTypeTag(zcu) == .optional) {
const child = first_param_type.optionalChild(zcu);
if (child.eql(concrete_ty, zcu)) {
const deref = try sema.analyzeLoad(block, src, object_ptr, src);
@@ -28233,7 +28233,7 @@ fn fieldCallBind(
.func_inst = decl_val,
.arg0_inst = deref,
} };
- } else if (child.zigTypeTag(zcu) == .Pointer and
+ } else if (child.zigTypeTag(zcu) == .pointer and
child.ptrSize(zcu) == .One and
child.childType(zcu).eql(concrete_ty, zcu))
{
@@ -28242,7 +28242,7 @@ fn fieldCallBind(
.arg0_inst = object_ptr,
} };
}
- } else if (first_param_type.zigTypeTag(zcu) == .ErrorUnion and
+ } else if (first_param_type.zigTypeTag(zcu) == .error_union and
first_param_type.errorUnionPayload(zcu).eql(concrete_ty, zcu))
{
const deref = try sema.analyzeLoad(block, src, object_ptr, src);
@@ -28270,7 +28270,7 @@ fn fieldCallBind(
.{field_name.fmt(ip)},
);
}
- if (concrete_ty.zigTypeTag(zcu) == .ErrorUnion) {
+ if (concrete_ty.zigTypeTag(zcu) == .error_union) {
try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
if (is_double_ptr) {
@@ -28302,7 +28302,7 @@ fn finishFieldCallBind(
});
const container_ty = ptr_ty.childType(zcu);
- if (container_ty.zigTypeTag(zcu) == .Struct) {
+ if (container_ty.zigTypeTag(zcu) == .@"struct") {
if (container_ty.structFieldIsComptime(field_index, zcu)) {
try container_ty.resolveStructFieldInits(pt);
const default_val = (try container_ty.structFieldValueComptime(pt, field_index)).?;
@@ -28382,7 +28382,7 @@ fn structFieldPtr(
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- assert(struct_ty.zigTypeTag(zcu) == .Struct);
+ assert(struct_ty.zigTypeTag(zcu) == .@"struct");
try struct_ty.resolveFields(pt);
try struct_ty.resolveLayout(pt);
@@ -28508,7 +28508,7 @@ fn structFieldVal(
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- assert(struct_ty.zigTypeTag(zcu) == .Struct);
+ assert(struct_ty.zigTypeTag(zcu) == .@"struct");
try struct_ty.resolveFields(pt);
@@ -28646,7 +28646,7 @@ fn unionFieldPtr(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- assert(union_ty.zigTypeTag(zcu) == .Union);
+ assert(union_ty.zigTypeTag(zcu) == .@"union");
const union_ptr_ty = sema.typeOf(union_ptr);
const union_ptr_info = union_ptr_ty.ptrInfo(zcu);
@@ -28673,7 +28673,7 @@ fn unionFieldPtr(
});
const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?);
- if (initializing and field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (initializing and field_ty.zigTypeTag(zcu) == .noreturn) {
const msg = msg: {
const msg = try sema.errMsg(src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
@@ -28736,7 +28736,7 @@ fn unionFieldPtr(
const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val);
try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
}
- if (field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (field_ty.zigTypeTag(zcu) == .noreturn) {
_ = try block.addNoOp(.unreach);
return .unreachable_value;
}
@@ -28755,7 +28755,7 @@ fn unionFieldVal(
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- assert(union_ty.zigTypeTag(zcu) == .Union);
+ assert(union_ty.zigTypeTag(zcu) == .@"union");
try union_ty.resolveFields(pt);
const union_obj = zcu.typeToUnion(union_ty).?;
@@ -28811,7 +28811,7 @@ fn unionFieldVal(
const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval);
try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
}
- if (field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (field_ty.zigTypeTag(zcu) == .noreturn) {
_ = try block.addNoOp(.unreach);
return .unreachable_value;
}
@@ -28835,14 +28835,14 @@ fn elemPtr(
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(zcu)) {
- .Pointer => indexable_ptr_ty.childType(zcu),
+ .pointer => indexable_ptr_ty.childType(zcu),
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}),
};
try checkIndexable(sema, block, src, indexable_ty);
const elem_ptr = switch (indexable_ty.zigTypeTag(zcu)) {
- .Array, .Vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
- .Struct => blk: {
+ .array, .vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
+ .@"struct" => blk: {
// Tuple field access.
const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
.needed_comptime_reason = "tuple field access index must be comptime-known",
@@ -28898,8 +28898,8 @@ fn elemPtrOneLayerOnly(
.One => {
const child_ty = indexable_ty.childType(zcu);
const elem_ptr = switch (child_ty.zigTypeTag(zcu)) {
- .Array, .Vector => try sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety),
- .Struct => blk: {
+ .array, .vector => try sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety),
+ .@"struct" => blk: {
assert(child_ty.isTuple(zcu));
const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
.needed_comptime_reason = "tuple field access index must be comptime-known",
@@ -28936,7 +28936,7 @@ fn elemVal(
const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
switch (indexable_ty.zigTypeTag(zcu)) {
- .Pointer => switch (indexable_ty.ptrSize(zcu)) {
+ .pointer => switch (indexable_ty.ptrSize(zcu)) {
.Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -28963,7 +28963,7 @@ fn elemVal(
.One => {
arr_sent: {
const inner_ty = indexable_ty.childType(zcu);
- if (inner_ty.zigTypeTag(zcu) != .Array) break :arr_sent;
+ if (inner_ty.zigTypeTag(zcu) != .array) break :arr_sent;
const sentinel = inner_ty.sentinel(zcu) orelse break :arr_sent;
const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent;
const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntSema(pt));
@@ -28974,12 +28974,12 @@ fn elemVal(
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
},
- .Array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
- .Vector => {
+ .array => return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
+ .vector => {
// TODO: If the index is a vector, the result should be a vector.
return sema.elemValArray(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety);
},
- .Struct => {
+ .@"struct" => {
// Tuple field access.
const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{
.needed_comptime_reason = "tuple field access index must be comptime-known",
@@ -29448,7 +29448,7 @@ fn coerceExtra(
}
switch (dest_ty.zigTypeTag(zcu)) {
- .Optional => optional: {
+ .optional => optional: {
if (maybe_inst_val) |val| {
// undefined sets the optional bit also to undefined.
if (val.toIntern() == .undef) {
@@ -29472,7 +29472,7 @@ fn coerceExtra(
anyopaque_check: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
const elem_ty = inst_ty.elemType2(zcu);
- if (elem_ty.zigTypeTag(zcu) == .Pointer or elem_ty.isPtrLikeOptional(zcu)) {
+ if (elem_ty.zigTypeTag(zcu) == .pointer or elem_ty.isPtrLikeOptional(zcu)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
@@ -29499,11 +29499,11 @@ fn coerceExtra(
};
return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
},
- .Pointer => pointer: {
+ .pointer => pointer: {
const dest_info = dest_ty.ptrInfo(zcu);
// Function body to function pointer.
- if (inst_ty.zigTypeTag(zcu) == .Fn) {
+ if (inst_ty.zigTypeTag(zcu) == .@"fn") {
const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined);
const fn_nav = switch (zcu.intern_pool.indexToKey(fn_val.toIntern())) {
.func => |f| f.owner_nav,
@@ -29521,7 +29521,7 @@ fn coerceExtra(
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const ptr_elem_ty = inst_ty.childType(zcu);
const array_ty = Type.fromInterned(dest_info.child);
- if (array_ty.zigTypeTag(zcu) != .Array) break :single_item;
+ if (array_ty.zigTypeTag(zcu) != .array) break :single_item;
const array_elem_ty = array_ty.childType(zcu);
if (array_ty.arrayLen(zcu) != 1) break :single_item;
const dest_is_mut = !dest_info.flags.is_const;
@@ -29537,7 +29537,7 @@ fn coerceExtra(
if (!inst_ty.isSinglePointer(zcu)) break :src_array_ptr;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const array_ty = inst_ty.childType(zcu);
- if (array_ty.zigTypeTag(zcu) != .Array) break :src_array_ptr;
+ if (array_ty.zigTypeTag(zcu) != .array) break :src_array_ptr;
const array_elem_type = array_ty.childType(zcu);
const dest_is_mut = !dest_info.flags.is_const;
@@ -29612,10 +29612,10 @@ fn coerceExtra(
// cast from *T and [*]T to *anyopaque
// but don't do it if the source type is a double pointer
- if (dest_info.child == .anyopaque_type and inst_ty.zigTypeTag(zcu) == .Pointer) to_anyopaque: {
+ if (dest_info.child == .anyopaque_type and inst_ty.zigTypeTag(zcu) == .pointer) to_anyopaque: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const elem_ty = inst_ty.elemType2(zcu);
- if (elem_ty.zigTypeTag(zcu) == .Pointer or elem_ty.isPtrLikeOptional(zcu)) {
+ if (elem_ty.zigTypeTag(zcu) == .pointer or elem_ty.isPtrLikeOptional(zcu)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
@@ -29636,19 +29636,19 @@ fn coerceExtra(
switch (dest_info.flags.size) {
// coercion to C pointer
.C => switch (inst_ty.zigTypeTag(zcu)) {
- .Null => return Air.internedToRef(try pt.intern(.{ .ptr = .{
+ .null => return Air.internedToRef(try pt.intern(.{ .ptr = .{
.ty = dest_ty.toIntern(),
.base_addr = .int,
.byte_offset = 0,
} })),
- .ComptimeInt => {
+ .comptime_int => {
const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => break :pointer,
else => |e| return e,
};
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
- .Int => {
+ .int => {
const ptr_size_ty = switch (inst_ty.intInfo(zcu).signedness) {
.signed => Type.isize,
.unsigned => Type.usize,
@@ -29663,7 +29663,7 @@ fn coerceExtra(
};
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
- .Pointer => p: {
+ .pointer => p: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
const inst_info = inst_ty.ptrInfo(zcu);
switch (try sema.coerceInMemoryAllowed(
@@ -29693,7 +29693,7 @@ fn coerceExtra(
else => {},
},
.One => switch (Type.fromInterned(dest_info.child).zigTypeTag(zcu)) {
- .Union => {
+ .@"union" => {
// pointer to anonymous struct to pointer to union
if (inst_ty.isSinglePointer(zcu) and
inst_ty.childType(zcu).isAnonStruct(zcu) and
@@ -29702,7 +29702,7 @@ fn coerceExtra(
return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
- .Struct => {
+ .@"struct" => {
// pointer to anonymous struct to pointer to struct
if (inst_ty.isSinglePointer(zcu) and
inst_ty.childType(zcu).isAnonStruct(zcu) and
@@ -29714,7 +29714,7 @@ fn coerceExtra(
};
}
},
- .Array => {
+ .array => {
// pointer to tuple to pointer to array
if (inst_ty.isSinglePointer(zcu) and
inst_ty.childType(zcu).isTuple(zcu) and
@@ -29726,7 +29726,7 @@ fn coerceExtra(
else => {},
},
.Slice => to_slice: {
- if (inst_ty.zigTypeTag(zcu) == .Array) {
+ if (inst_ty.zigTypeTag(zcu) == .array) {
return sema.fail(
block,
inst_src,
@@ -29795,10 +29795,10 @@ fn coerceExtra(
},
}
},
- .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(zcu)) {
- .Float, .ComptimeFloat => float: {
+ .int, .comptime_int => switch (inst_ty.zigTypeTag(zcu)) {
+ .float, .comptime_float => float: {
const val = maybe_inst_val orelse {
- if (dest_ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (dest_ty.zigTypeTag(zcu) == .comptime_int) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, .{
.needed_comptime_reason = "value being casted to 'comptime_int' must be comptime-known",
@@ -29809,7 +29809,7 @@ fn coerceExtra(
const result_val = try sema.intFromFloat(block, inst_src, val, inst_ty, dest_ty, .exact);
return Air.internedToRef(result_val.toIntern());
},
- .Int, .ComptimeInt => {
+ .int, .comptime_int => {
if (maybe_inst_val) |val| {
// comptime-known integer to other number
if (!(try sema.intFitsInType(val, dest_ty, null))) {
@@ -29824,7 +29824,7 @@ fn coerceExtra(
else => unreachable,
};
}
- if (dest_ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (dest_ty.zigTypeTag(zcu) == .comptime_int) {
if (!opts.report_err) return error.NotCoercible;
if (opts.no_cast_to_comptime_int) return inst;
return sema.failWithNeededComptime(block, inst_src, .{
@@ -29845,13 +29845,13 @@ fn coerceExtra(
},
else => {},
},
- .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(zcu)) {
- .ComptimeFloat => {
+ .float, .comptime_float => switch (inst_ty.zigTypeTag(zcu)) {
+ .comptime_float => {
const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined);
const result_val = try val.floatCast(dest_ty, pt);
return Air.internedToRef(result_val.toIntern());
},
- .Float => {
+ .float => {
if (maybe_inst_val) |val| {
const result_val = try val.floatCast(dest_ty, pt);
if (!val.eql(try result_val.floatCast(inst_ty, pt), inst_ty, zcu)) {
@@ -29863,7 +29863,7 @@ fn coerceExtra(
);
}
return Air.internedToRef(result_val.toIntern());
- } else if (dest_ty.zigTypeTag(zcu) == .ComptimeFloat) {
+ } else if (dest_ty.zigTypeTag(zcu) == .comptime_float) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, .{
.needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
@@ -29878,9 +29878,9 @@ fn coerceExtra(
return block.addTyOp(.fpext, dest_ty, inst);
}
},
- .Int, .ComptimeInt => int: {
+ .int, .comptime_int => int: {
const val = maybe_inst_val orelse {
- if (dest_ty.zigTypeTag(zcu) == .ComptimeFloat) {
+ if (dest_ty.zigTypeTag(zcu) == .comptime_float) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, .{
.needed_comptime_reason = "value being casted to 'comptime_float' must be comptime-known",
@@ -29903,8 +29903,8 @@ fn coerceExtra(
},
else => {},
},
- .Enum => switch (inst_ty.zigTypeTag(zcu)) {
- .EnumLiteral => {
+ .@"enum" => switch (inst_ty.zigTypeTag(zcu)) {
+ .enum_literal => {
// enum literal to enum
const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined);
const string = zcu.intern_pool.indexToKey(val.toIntern()).enum_literal;
@@ -29915,7 +29915,7 @@ fn coerceExtra(
};
return Air.internedToRef((try pt.enumValueFieldIndex(dest_ty, @intCast(field_index))).toIntern());
},
- .Union => blk: {
+ .@"union" => blk: {
// union to its own tag type
const union_tag_ty = inst_ty.unionTagType(zcu) orelse break :blk;
if (union_tag_ty.eql(dest_ty, zcu)) {
@@ -29924,8 +29924,8 @@ fn coerceExtra(
},
else => {},
},
- .ErrorUnion => switch (inst_ty.zigTypeTag(zcu)) {
- .ErrorUnion => eu: {
+ .error_union => switch (inst_ty.zigTypeTag(zcu)) {
+ .error_union => eu: {
if (maybe_inst_val) |inst_val| {
switch (inst_val.toIntern()) {
.undef => return pt.undefRef(dest_ty),
@@ -29952,7 +29952,7 @@ fn coerceExtra(
}
}
},
- .ErrorSet => {
+ .error_set => {
// E to E!T
return sema.wrapErrorUnionSet(block, dest_ty, inst, inst_src);
},
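
The "E to E!T" case above wraps a bare error value into an error union. A minimal sketch of what that coercion permits in user code (the helper and test names are illustrative only):

    const std = @import("std");

    const E = error{Oops};

    fn get(fail: bool) E!u32 {
        // A bare error value coerces to the E!u32 error union.
        if (fail) return error.Oops;
        return 7;
    }

    test "error value coerces to error union" {
        try std.testing.expectError(error.Oops, get(true));
        try std.testing.expectEqual(@as(u32, 7), try get(false));
    }
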
@@ -29971,17 +29971,17 @@ fn coerceExtra(
};
},
},
- .Union => switch (inst_ty.zigTypeTag(zcu)) {
- .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
- .Struct => {
+ .@"union" => switch (inst_ty.zigTypeTag(zcu)) {
+ .@"enum", .enum_literal => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
+ .@"struct" => {
if (inst_ty.isAnonStruct(zcu)) {
return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
else => {},
},
- .Array => switch (inst_ty.zigTypeTag(zcu)) {
- .Array => array_to_array: {
+ .array => switch (inst_ty.zigTypeTag(zcu)) {
+ .array => array_to_array: {
// Array coercions are allowed only if the child is IMC and the sentinel is unchanged or removed.
if (.ok != try sema.coerceInMemoryAllowed(
block,
@@ -30005,8 +30005,8 @@ fn coerceExtra(
return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src);
},
- .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
- .Struct => {
+ .vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
+ .@"struct" => {
if (inst == .empty_struct) {
return sema.arrayInitEmpty(block, inst_src, dest_ty);
}
@@ -30016,16 +30016,16 @@ fn coerceExtra(
},
else => {},
},
- .Vector => switch (inst_ty.zigTypeTag(zcu)) {
- .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
- .Struct => {
+ .vector => switch (inst_ty.zigTypeTag(zcu)) {
+ .array, .vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
+ .@"struct" => {
if (inst_ty.isTuple(zcu)) {
return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
else => {},
},
- .Struct => blk: {
+ .@"struct" => blk: {
if (inst == .empty_struct) {
return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src);
}
@@ -30046,7 +30046,7 @@ fn coerceExtra(
if (!opts.report_err) return error.NotCoercible;
- if (opts.is_ret and dest_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (opts.is_ret and dest_ty.zigTypeTag(zcu) == .noreturn) {
const msg = msg: {
const msg = try sema.errMsg(inst_src, "function declared 'noreturn' returns", .{});
errdefer msg.destroy(sema.gpa);
@@ -30066,7 +30066,7 @@ fn coerceExtra(
errdefer msg.destroy(sema.gpa);
// E!T to T
- if (inst_ty.zigTypeTag(zcu) == .ErrorUnion and
+ if (inst_ty.zigTypeTag(zcu) == .error_union and
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(zcu), dest_ty, false, target, dest_ty_src, inst_src, maybe_inst_val)) == .ok)
{
try sema.errNote(inst_src, msg, "cannot convert error union to payload type", .{});
@@ -30074,7 +30074,7 @@ fn coerceExtra(
}
// ?T to T
- if (inst_ty.zigTypeTag(zcu) == .Optional and
+ if (inst_ty.zigTypeTag(zcu) == .optional and
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(zcu), dest_ty, false, target, dest_ty_src, inst_src, maybe_inst_val)) == .ok)
{
try sema.errNote(inst_src, msg, "cannot convert optional to payload type", .{});
@@ -30515,7 +30515,7 @@ pub fn coerceInMemoryAllowed(
const src_tag = src_ty.zigTypeTag(zcu);
// Differently-named integers with the same number of bits.
- if (dest_tag == .Int and src_tag == .Int) {
+ if (dest_tag == .int and src_tag == .int) {
const dest_info = dest_ty.intInfo(zcu);
const src_info = src_ty.intInfo(zcu);
@@ -30540,7 +30540,7 @@ pub fn coerceInMemoryAllowed(
}
// Comptime int to regular int.
- if (dest_tag == .Int and src_tag == .ComptimeInt) {
+ if (dest_tag == .int and src_tag == .comptime_int) {
if (src_val) |val| {
if (!(try sema.intFitsInType(val, dest_ty, null))) {
return .{ .comptime_int_not_coercible = .{ .wanted = dest_ty, .actual = val } };
@@ -30549,7 +30549,7 @@ pub fn coerceInMemoryAllowed(
}
// Differently-named floats with the same number of bits.
- if (dest_tag == .Float and src_tag == .Float) {
+ if (dest_tag == .float and src_tag == .float) {
const dest_bits = dest_ty.floatBits(target);
const src_bits = src_ty.floatBits(target);
if (dest_bits == src_bits) {
@@ -30572,12 +30572,12 @@ pub fn coerceInMemoryAllowed(
}
// Functions
- if (dest_tag == .Fn and src_tag == .Fn) {
+ if (dest_tag == .@"fn" and src_tag == .@"fn") {
return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src);
}
// Error Unions
- if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
+ if (dest_tag == .error_union and src_tag == .error_union) {
const dest_payload = dest_ty.errorUnionPayload(zcu);
const src_payload = src_ty.errorUnionPayload(zcu);
const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src, null);
@@ -30592,12 +30592,12 @@ pub fn coerceInMemoryAllowed(
}
// Error Sets
- if (dest_tag == .ErrorSet and src_tag == .ErrorSet) {
+ if (dest_tag == .error_set and src_tag == .error_set) {
return try sema.coerceInMemoryAllowedErrorSets(block, dest_ty, src_ty, dest_src, src_src);
}
// Arrays
- if (dest_tag == .Array and src_tag == .Array) {
+ if (dest_tag == .array and src_tag == .array) {
const dest_info = dest_ty.arrayInfo(zcu);
const src_info = src_ty.arrayInfo(zcu);
if (dest_info.len != src_info.len) {
@@ -30638,7 +30638,7 @@ pub fn coerceInMemoryAllowed(
}
// Vectors
- if (dest_tag == .Vector and src_tag == .Vector) {
+ if (dest_tag == .vector and src_tag == .vector) {
const dest_len = dest_ty.vectorLen(zcu);
const src_len = src_ty.vectorLen(zcu);
if (dest_len != src_len) {
@@ -30663,8 +30663,8 @@ pub fn coerceInMemoryAllowed(
}
// Arrays <-> Vectors
- if ((dest_tag == .Vector and src_tag == .Array) or
- (dest_tag == .Array and src_tag == .Vector))
+ if ((dest_tag == .vector and src_tag == .array) or
+ (dest_tag == .array and src_tag == .vector))
{
const dest_len = dest_ty.arrayLen(zcu);
const src_len = src_ty.arrayLen(zcu);
@@ -30686,7 +30686,7 @@ pub fn coerceInMemoryAllowed(
} };
}
- if (dest_tag == .Array) {
+ if (dest_tag == .array) {
const dest_info = dest_ty.arrayInfo(zcu);
if (dest_info.sentinel != null) {
return InMemoryCoercionResult{ .array_sentinel = .{
@@ -30707,7 +30707,7 @@ pub fn coerceInMemoryAllowed(
}
// Optionals
- if (dest_tag == .Optional and src_tag == .Optional) {
+ if (dest_tag == .optional and src_tag == .optional) {
if ((maybe_dest_ptr_ty != null) != (maybe_src_ptr_ty != null)) {
return InMemoryCoercionResult{ .optional_shape = .{
.actual = src_ty,
@@ -31000,7 +31000,7 @@ fn coerceInMemoryAllowedPtrs(
if (child != .ok) allow: {
// As a special case, we also allow coercing `*[n:s]T` to `*[n]T`, akin to dropping the sentinel from a slice.
// `*[n:s]T` cannot coerce in memory to `*[n]T` since they have different sizes.
- if (src_child.zigTypeTag(zcu) == .Array and dest_child.zigTypeTag(zcu) == .Array and
+ if (src_child.zigTypeTag(zcu) == .array and dest_child.zigTypeTag(zcu) == .array and
src_child.sentinel(zcu) != null and dest_child.sentinel(zcu) == null and
.ok == try sema.coerceInMemoryAllowed(block, dest_child.childType(zcu), src_child.childType(zcu), !dest_info.flags.is_const, target, dest_src, src_src, null))
{
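
A minimal sketch of the special case described in the comment above, assuming the coercion rule works as stated there: a pointer to a sentinel-terminated array coerces to a pointer to the same array type without the sentinel.

    const std = @import("std");

    test "sentinel dropped via pointer coercion" {
        var buf = [3:0]u8{ 'a', 'b', 'c' };
        const with_sentinel: *[3:0]u8 = &buf;
        // *[3:0]u8 coerces to *[3]u8; only the sentinel is dropped.
        const plain: *[3]u8 = with_sentinel;
        try std.testing.expectEqual(@as(u8, 'b'), plain[1]);
    }
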
@@ -31095,19 +31095,19 @@ fn coerceVarArgParam(
const uncasted_ty = sema.typeOf(inst);
const coerced = switch (uncasted_ty.zigTypeTag(zcu)) {
// TODO consider casting to c_int/f64 if they fit
- .ComptimeInt, .ComptimeFloat => return sema.fail(
+ .comptime_int, .comptime_float => return sema.fail(
block,
inst_src,
"integer and float literals passed to variadic function must be casted to a fixed-size number type",
.{},
),
- .Fn => fn_ptr: {
+ .@"fn" => fn_ptr: {
const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined);
const fn_nav = zcu.funcInfo(fn_val.toIntern()).owner_nav;
break :fn_ptr try sema.analyzeNavRef(inst_src, fn_nav);
},
- .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
- .Float => float: {
+ .array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
+ .float => float: {
const target = zcu.getTarget();
const double_bits = target.cTypeBitSize(.double);
const inst_bits = uncasted_ty.floatBits(target);
@@ -31202,7 +31202,7 @@ fn storePtr2(
// this code does not handle tuple-to-struct coercion which requires dealing with missing
// fields.
const operand_ty = sema.typeOf(uncasted_operand);
- if (operand_ty.isTuple(zcu) and elem_ty.zigTypeTag(zcu) == .Array) {
+ if (operand_ty.isTuple(zcu) and elem_ty.zigTypeTag(zcu) == .array) {
const field_count = operand_ty.structFieldCount(zcu);
var i: u32 = 0;
while (i < field_count) : (i += 1) {
@@ -31397,16 +31397,16 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const array_ty = sema.typeOf(ptr).childType(zcu);
- if (array_ty.zigTypeTag(zcu) != .Array) return null;
+ if (array_ty.zigTypeTag(zcu) != .array) return null;
var ptr_ref = ptr;
var ptr_inst = ptr_ref.toIndex() orelse return null;
const air_datas = sema.air_instructions.items(.data);
const air_tags = sema.air_instructions.items(.tag);
const vector_ty = while (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
- if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null;
+ if (!sema.isKnownZigType(ptr_ref, .pointer)) return null;
const child_ty = sema.typeOf(ptr_ref).childType(zcu);
- if (child_ty.zigTypeTag(zcu) == .Vector) break child_ty;
+ if (child_ty.zigTypeTag(zcu) == .vector) break child_ty;
ptr_inst = ptr_ref.toIndex() orelse return null;
} else return null;
@@ -31504,7 +31504,7 @@ fn bitCast(
if (try sema.resolveValue(inst)) |val| {
if (val.isUndef(zcu))
return pt.undefRef(dest_ty);
- if (old_ty.zigTypeTag(zcu) == .ErrorSet and dest_ty.zigTypeTag(zcu) == .ErrorSet) {
+ if (old_ty.zigTypeTag(zcu) == .error_set and dest_ty.zigTypeTag(zcu) == .error_set) {
// Special case: we sometimes call `bitCast` on error set values, but they
// don't have a well-defined layout, so we can't use `bitCastVal` on them.
return Air.internedToRef((try pt.getCoerced(val, dest_ty)).toIntern());
@@ -31548,7 +31548,7 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
const zcu = pt.zcu;
const dest_info = dest_ty.ptrInfo(zcu);
const inst_info = inst_ty.ptrInfo(zcu);
- const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(zcu) == .Array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(zcu) == 0 or
+ const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(zcu) == .array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(zcu) == 0 or
(Type.fromInterned(inst_info.child).arrayLen(zcu) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or
(Type.fromInterned(inst_info.child).isTuple(zcu) and Type.fromInterned(inst_info.child).structFieldCount(zcu) == 0);
@@ -31615,9 +31615,9 @@ fn coerceCompatiblePtrs(
);
}
try sema.requireRuntimeBlock(block, inst_src, null);
- const inst_allows_zero = inst_ty.zigTypeTag(zcu) != .Pointer or inst_ty.ptrAllowsZero(zcu);
+ const inst_allows_zero = inst_ty.zigTypeTag(zcu) != .pointer or inst_ty.ptrAllowsZero(zcu);
if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(zcu) and
- (try dest_ty.elemType2(zcu).hasRuntimeBitsSema(pt) or dest_ty.elemType2(zcu).zigTypeTag(zcu) == .Fn))
+ (try dest_ty.elemType2(zcu).hasRuntimeBitsSema(pt) or dest_ty.elemType2(zcu).zigTypeTag(zcu) == .@"fn"))
{
const actual_ptr = if (inst_ty.isSlice(zcu))
try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
@@ -31674,7 +31674,7 @@ fn coerceEnumToUnion(
const union_obj = zcu.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
try field_ty.resolveFields(pt);
- if (field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (field_ty.zigTypeTag(zcu) == .noreturn) {
const msg = msg: {
const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
@@ -31729,7 +31729,7 @@ fn coerceEnumToUnion(
errdefer if (msg) |some| some.destroy(sema.gpa);
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
- if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .NoReturn) {
+ if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .noreturn) {
const err_msg = msg orelse try sema.errMsg(
inst_src,
"runtime coercion from enum '{}' to union '{}' which has a 'noreturn' field",
@@ -31911,7 +31911,7 @@ fn coerceArrayLike(
if (dest_ty.isVector(zcu) and inst_ty.isVector(zcu) and (try sema.resolveValue(inst)) == null) {
const inst_elem_ty = inst_ty.childType(zcu);
switch (dest_elem_ty.zigTypeTag(zcu)) {
- .Int => if (inst_elem_ty.isInt(zcu)) {
+ .int => if (inst_elem_ty.isInt(zcu)) {
// integer widening
const dst_info = dest_elem_ty.intInfo(zcu);
const src_info = inst_elem_ty.intInfo(zcu);
@@ -31923,7 +31923,7 @@ fn coerceArrayLike(
return block.addTyOp(.intcast, dest_ty, inst);
}
},
- .Float => if (inst_elem_ty.isRuntimeFloat()) {
+ .float => if (inst_elem_ty.isRuntimeFloat()) {
// float widening
const src_bits = inst_elem_ty.floatBits(target);
const dst_bits = dest_elem_ty.floatBits(target);
@@ -32504,10 +32504,10 @@ fn analyzeLoad(
const zcu = pt.zcu;
const ptr_ty = sema.typeOf(ptr);
const elem_ty = switch (ptr_ty.zigTypeTag(zcu)) {
- .Pointer => ptr_ty.childType(zcu),
+ .pointer => ptr_ty.childType(zcu),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(pt)}),
};
- if (elem_ty.zigTypeTag(zcu) == .Opaque) {
+ if (elem_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, ptr_src, "cannot load opaque type '{}'", .{elem_ty.fmt(pt)});
}
@@ -32621,10 +32621,10 @@ fn analyzeIsNull(
const inverted_non_null_res: Air.Inst.Ref = if (invert_logic) .bool_true else .bool_false;
const operand_ty = sema.typeOf(operand);
- if (operand_ty.zigTypeTag(zcu) == .Optional and operand_ty.optionalChild(zcu).zigTypeTag(zcu) == .NoReturn) {
+ if (operand_ty.zigTypeTag(zcu) == .optional and operand_ty.optionalChild(zcu).zigTypeTag(zcu) == .noreturn) {
return inverted_non_null_res;
}
- if (operand_ty.zigTypeTag(zcu) != .Optional and !operand_ty.isPtrLikeOptional(zcu)) {
+ if (operand_ty.zigTypeTag(zcu) != .optional and !operand_ty.isPtrLikeOptional(zcu)) {
return inverted_non_null_res;
}
try sema.requireRuntimeBlock(block, src, null);
@@ -32641,13 +32641,13 @@ fn analyzePtrIsNonErrComptimeOnly(
const pt = sema.pt;
const zcu = pt.zcu;
const ptr_ty = sema.typeOf(operand);
- assert(ptr_ty.zigTypeTag(zcu) == .Pointer);
+ assert(ptr_ty.zigTypeTag(zcu) == .pointer);
const child_ty = ptr_ty.childType(zcu);
const child_tag = child_ty.zigTypeTag(zcu);
- if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return .bool_true;
- if (child_tag == .ErrorSet) return .bool_false;
- assert(child_tag == .ErrorUnion);
+ if (child_tag != .error_set and child_tag != .error_union) return .bool_true;
+ if (child_tag == .error_set) return .bool_false;
+ assert(child_tag == .error_union);
_ = block;
_ = src;
@@ -32666,12 +32666,12 @@ fn analyzeIsNonErrComptimeOnly(
const ip = &zcu.intern_pool;
const operand_ty = sema.typeOf(operand);
const ot = operand_ty.zigTypeTag(zcu);
- if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true;
- if (ot == .ErrorSet) return .bool_false;
- assert(ot == .ErrorUnion);
+ if (ot != .error_set and ot != .error_union) return .bool_true;
+ if (ot == .error_set) return .bool_false;
+ assert(ot == .error_union);
const payload_ty = operand_ty.errorUnionPayload(zcu);
- if (payload_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (payload_ty.zigTypeTag(zcu) == .noreturn) {
return .bool_false;
}
@@ -32828,7 +32828,7 @@ fn analyzeSlice(
// the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
const ptr_ptr_ty = sema.typeOf(ptr_ptr);
const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(zcu)) {
- .Pointer => ptr_ptr_ty.childType(zcu),
+ .pointer => ptr_ptr_ty.childType(zcu),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(pt)}),
};
@@ -32838,15 +32838,15 @@ fn analyzeSlice(
var elem_ty: Type = undefined;
var ptr_sentinel: ?Value = null;
switch (ptr_ptr_child_ty.zigTypeTag(zcu)) {
- .Array => {
+ .array => {
ptr_sentinel = ptr_ptr_child_ty.sentinel(zcu);
elem_ty = ptr_ptr_child_ty.childType(zcu);
},
- .Pointer => switch (ptr_ptr_child_ty.ptrSize(zcu)) {
+ .pointer => switch (ptr_ptr_child_ty.ptrSize(zcu)) {
.One => {
const double_child_ty = ptr_ptr_child_ty.childType(zcu);
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
- if (double_child_ty.zigTypeTag(zcu) == .Array) {
+ if (double_child_ty.zigTypeTag(zcu) == .array) {
ptr_sentinel = double_child_ty.sentinel(zcu);
slice_ty = ptr_ptr_child_ty;
array_ty = double_child_ty;
@@ -32961,7 +32961,7 @@ fn analyzeSlice(
const ptr = if (slice_ty.isSlice(zcu))
try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty)
- else if (array_ty.zigTypeTag(zcu) == .Array) ptr: {
+ else if (array_ty.zigTypeTag(zcu) == .array) ptr: {
var manyptr_ty_key = zcu.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type;
assert(manyptr_ty_key.child == array_ty.toIntern());
assert(manyptr_ty_key.flags.size == .One);
@@ -32980,7 +32980,7 @@ fn analyzeSlice(
// we might learn of the length because it is a comptime-known slice value.
var end_is_len = uncasted_end_opt == .none;
const end = e: {
- if (array_ty.zigTypeTag(zcu) == .Array) {
+ if (array_ty.zigTypeTag(zcu) == .array) {
const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(zcu));
if (!end_is_len) {
@@ -33215,7 +33215,7 @@ fn analyzeSlice(
}
bounds_check: {
- const actual_len = if (array_ty.zigTypeTag(zcu) == .Array)
+ const actual_len = if (array_ty.zigTypeTag(zcu) == .array)
try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
else if (slice_ty.isSlice(zcu)) l: {
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
@@ -33273,7 +33273,7 @@ fn analyzeSlice(
}
// requirement: end <= len
- const opt_len_inst = if (array_ty.zigTypeTag(zcu) == .Array)
+ const opt_len_inst = if (array_ty.zigTypeTag(zcu) == .array)
try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
else if (slice_ty.isSlice(zcu)) blk: {
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
@@ -33342,12 +33342,12 @@ fn cmpNumeric(
// One exception to heterogeneous comparison: comptime_float needs to
// coerce to fixed-width float.
- const lhs = if (lhs_ty_tag == .ComptimeFloat and rhs_ty_tag == .Float)
+ const lhs = if (lhs_ty_tag == .comptime_float and rhs_ty_tag == .float)
try sema.coerce(block, rhs_ty, uncasted_lhs, lhs_src)
else
uncasted_lhs;
- const rhs = if (lhs_ty_tag == .Float and rhs_ty_tag == .ComptimeFloat)
+ const rhs = if (lhs_ty_tag == .float and rhs_ty_tag == .comptime_float)
try sema.coerce(block, lhs_ty, uncasted_rhs, rhs_src)
else
uncasted_rhs;
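
As the comment above notes, a comptime_float operand is coerced to the fixed-width float peer before the comparison. A quick illustrative test (names are illustrative only):

    const std = @import("std");

    test "comptime_float literal compares against f32" {
        var x: f32 = 1.0;
        x += 0.25; // keep x runtime-known
        // The 1.5 literal (comptime_float) coerces to f32 for the comparison.
        try std.testing.expect(x < 1.5);
    }
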
@@ -33356,11 +33356,11 @@ fn cmpNumeric(
if (try sema.resolveValue(lhs)) |lhs_val| {
if (try sema.resolveValue(rhs)) |rhs_val| {
// Compare ints: const vs. undefined (or vice versa)
- if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(zcu) and rhs_val.isUndef(zcu)) {
+ if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .comptime_int) and rhs_ty.isInt(zcu) and rhs_val.isUndef(zcu)) {
if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
return if (res) .bool_true else .bool_false;
}
- } else if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(zcu) and lhs_val.isUndef(zcu)) {
+ } else if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .comptime_int) and lhs_ty.isInt(zcu) and lhs_val.isUndef(zcu)) {
if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
return if (res) .bool_true else .bool_false;
}
@@ -33377,7 +33377,7 @@ fn cmpNumeric(
else
.bool_false;
} else {
- if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(zcu)) {
+ if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .comptime_int) and rhs_ty.isInt(zcu)) {
// Compare ints: const vs. var
if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| {
return if (res) .bool_true else .bool_false;
@@ -33387,7 +33387,7 @@ fn cmpNumeric(
}
} else {
if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| {
- if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(zcu)) {
+ if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .comptime_int) and lhs_ty.isInt(zcu)) {
// Compare ints: var vs. const
if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| {
return if (res) .bool_true else .bool_false;
@@ -33407,11 +33407,11 @@ fn cmpNumeric(
// For floats, emit a float comparison instruction.
const lhs_is_float = switch (lhs_ty_tag) {
- .Float, .ComptimeFloat => true,
+ .float, .comptime_float => true,
else => false,
};
const rhs_is_float = switch (rhs_ty_tag) {
- .Float, .ComptimeFloat => true,
+ .float, .comptime_float => true,
else => false,
};
@@ -33419,9 +33419,9 @@ fn cmpNumeric(
// Smaller fixed-width floats coerce to larger fixed-width floats.
// comptime_float coerces to fixed-width float.
const dest_ty = x: {
- if (lhs_ty_tag == .ComptimeFloat) {
+ if (lhs_ty_tag == .comptime_float) {
break :x rhs_ty;
- } else if (rhs_ty_tag == .ComptimeFloat) {
+ } else if (rhs_ty_tag == .comptime_float) {
break :x lhs_ty;
}
if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) {
@@ -33691,8 +33691,8 @@ fn cmpVector(
const zcu = pt.zcu;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- assert(lhs_ty.zigTypeTag(zcu) == .Vector);
- assert(rhs_ty.zigTypeTag(zcu) == .Vector);
+ assert(lhs_ty.zigTypeTag(zcu) == .vector);
+ assert(rhs_ty.zigTypeTag(zcu) == .vector);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } });
@@ -34005,22 +34005,22 @@ const PeerResolveStrategy = enum {
fn select(ty: Type, zcu: *Zcu) PeerResolveStrategy {
return switch (ty.zigTypeTag(zcu)) {
- .Type, .Void, .Bool, .Opaque, .Frame, .AnyFrame => .exact,
- .NoReturn, .Undefined => .unknown,
- .Null => .nullable,
- .ComptimeInt => .comptime_int,
- .Int => .fixed_int,
- .ComptimeFloat => .comptime_float,
- .Float => .fixed_float,
- .Pointer => if (ty.ptrInfo(zcu).flags.size == .C) .c_ptr else .ptr,
- .Array => .array,
- .Vector => .vector,
- .Optional => .optional,
- .ErrorSet => .error_set,
- .ErrorUnion => .error_union,
- .EnumLiteral, .Enum, .Union => .enum_or_union,
- .Struct => if (ty.isTupleOrAnonStruct(zcu)) .coercible_struct else .exact,
- .Fn => .func,
+ .type, .void, .bool, .@"opaque", .frame, .@"anyframe" => .exact,
+ .noreturn, .undefined => .unknown,
+ .null => .nullable,
+ .comptime_int => .comptime_int,
+ .int => .fixed_int,
+ .comptime_float => .comptime_float,
+ .float => .fixed_float,
+ .pointer => if (ty.ptrInfo(zcu).flags.size == .C) .c_ptr else .ptr,
+ .array => .array,
+ .vector => .vector,
+ .optional => .optional,
+ .error_set => .error_set,
+ .error_union => .error_union,
+ .enum_literal, .@"enum", .@"union" => .enum_or_union,
+ .@"struct" => if (ty.isTupleOrAnonStruct(zcu)) .coercible_struct else .exact,
+ .@"fn" => .func,
};
}
};
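
The prong list above enumerates the renamed std.builtin.Type tags this change switches Sema over to. A minimal sketch of how the same tags read in user code, assuming a compiler that includes this rename (the isNumeric helper is illustrative only):

    const std = @import("std");

    fn isNumeric(comptime T: type) bool {
        return switch (@typeInfo(T)) {
            // Lowercase tag names, with keywords escaped as .@"struct", .@"enum", etc.
            .int, .comptime_int, .float, .comptime_float => true,
            else => false,
        };
    }

    test "isNumeric uses the renamed type tags" {
        try std.testing.expect(isNumeric(u8));
        try std.testing.expect(isNumeric(f64));
        try std.testing.expect(!isNumeric(bool));
    }
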
@@ -34213,7 +34213,7 @@ fn resolvePeerTypesInner(
for (peer_tys) |*ty_ptr| {
const ty = ty_ptr.* orelse continue;
switch (ty.zigTypeTag(zcu)) {
- .NoReturn, .Undefined => ty_ptr.* = null,
+ .noreturn, .undefined => ty_ptr.* = null,
else => {},
}
}
@@ -34228,7 +34228,7 @@ fn resolvePeerTypesInner(
var final_set: ?Type = null;
for (peer_tys, 0..) |opt_ty, i| {
const ty = opt_ty orelse continue;
- if (ty.zigTypeTag(zcu) != .ErrorSet) return .{ .conflict = .{
+ if (ty.zigTypeTag(zcu) != .error_set) return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
} };
@@ -34246,12 +34246,12 @@ fn resolvePeerTypesInner(
for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
const ty = ty_ptr.* orelse continue;
const set_ty = switch (ty.zigTypeTag(zcu)) {
- .ErrorSet => blk: {
+ .error_set => blk: {
ty_ptr.* = null; // no payload to decide on
val_ptr.* = null;
break :blk ty;
},
- .ErrorUnion => blk: {
+ .error_union => blk: {
const set_ty = ty.errorUnionSet(zcu);
ty_ptr.* = ty.errorUnionPayload(zcu);
if (val_ptr.*) |eu_val| switch (ip.indexToKey(eu_val.toIntern())) {
@@ -34300,11 +34300,11 @@ fn resolvePeerTypesInner(
for (peer_tys, peer_vals) |*ty_ptr, *val_ptr| {
const ty = ty_ptr.* orelse continue;
switch (ty.zigTypeTag(zcu)) {
- .Null => {
+ .null => {
ty_ptr.* = null;
val_ptr.* = null;
},
- .Optional => {
+ .optional => {
ty_ptr.* = ty.optionalChild(zcu);
if (val_ptr.*) |opt_val| val_ptr.* = if (!opt_val.isUndef(zcu)) opt_val.optionalValue(zcu) else null;
},
@@ -34482,8 +34482,8 @@ fn resolvePeerTypesInner(
for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
const ty = opt_ty orelse continue;
switch (ty.zigTypeTag(zcu)) {
- .ComptimeInt => continue, // comptime-known integers can always coerce to C pointers
- .Int => {
+ .comptime_int => continue, // comptime-known integers can always coerce to C pointers
+ .int => {
if (opt_val != null) {
// Always allow the coercion for comptime-known ints
continue;
@@ -34494,7 +34494,7 @@ fn resolvePeerTypesInner(
if (bits <= ptr_bits) continue;
}
},
- .Null => continue,
+ .null => continue,
else => {},
}
@@ -34581,8 +34581,8 @@ fn resolvePeerTypesInner(
for (peer_tys, 0..) |opt_ty, i| {
const ty = opt_ty orelse continue;
const peer_info: InternPool.Key.PtrType = switch (ty.zigTypeTag(zcu)) {
- .Pointer => ty.ptrInfo(zcu),
- .Fn => .{
+ .pointer => ty.ptrInfo(zcu),
+ .@"fn" => .{
.child = ty.toIntern(),
.flags = .{
.address_space = target_util.defaultAddressSpace(target, .global_constant),
@@ -34889,7 +34889,7 @@ fn resolvePeerTypesInner(
first_idx = i;
continue;
};
- if (ty.zigTypeTag(zcu) != .Fn) return .{ .conflict = .{
+ if (ty.zigTypeTag(zcu) != .@"fn") return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
} };
@@ -34918,7 +34918,7 @@ fn resolvePeerTypesInner(
for (peer_tys, 0..) |opt_ty, i| {
const ty = opt_ty orelse continue;
switch (ty.zigTypeTag(zcu)) {
- .EnumLiteral, .Enum, .Union => {},
+ .enum_literal, .@"enum", .@"union" => {},
else => return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
@@ -34937,16 +34937,16 @@ fn resolvePeerTypesInner(
} };
switch (cur_ty.zigTypeTag(zcu)) {
- .EnumLiteral => {
+ .enum_literal => {
opt_cur_ty = ty;
cur_ty_idx = i;
},
- .Enum => switch (ty.zigTypeTag(zcu)) {
- .EnumLiteral => {},
- .Enum => {
+ .@"enum" => switch (ty.zigTypeTag(zcu)) {
+ .enum_literal => {},
+ .@"enum" => {
if (!ty.eql(cur_ty, zcu)) return generic_err;
},
- .Union => {
+ .@"union" => {
const tag_ty = ty.unionTagTypeHypothetical(zcu);
if (!tag_ty.eql(cur_ty, zcu)) return generic_err;
opt_cur_ty = ty;
@@ -34954,13 +34954,13 @@ fn resolvePeerTypesInner(
},
else => unreachable,
},
- .Union => switch (ty.zigTypeTag(zcu)) {
- .EnumLiteral => {},
- .Enum => {
+ .@"union" => switch (ty.zigTypeTag(zcu)) {
+ .enum_literal => {},
+ .@"enum" => {
const cur_tag_ty = cur_ty.unionTagTypeHypothetical(zcu);
if (!ty.eql(cur_tag_ty, zcu)) return generic_err;
},
- .Union => {
+ .@"union" => {
if (!ty.eql(cur_ty, zcu)) return generic_err;
},
else => unreachable,
@@ -34975,7 +34975,7 @@ fn resolvePeerTypesInner(
for (peer_tys, 0..) |opt_ty, i| {
const ty = opt_ty orelse continue;
switch (ty.zigTypeTag(zcu)) {
- .ComptimeInt => {},
+ .comptime_int => {},
else => return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
@@ -34989,7 +34989,7 @@ fn resolvePeerTypesInner(
for (peer_tys, 0..) |opt_ty, i| {
const ty = opt_ty orelse continue;
switch (ty.zigTypeTag(zcu)) {
- .ComptimeInt, .ComptimeFloat => {},
+ .comptime_int, .comptime_float => {},
else => return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
@@ -35012,7 +35012,7 @@ fn resolvePeerTypesInner(
const peer_tag = ty.zigTypeTag(zcu);
switch (peer_tag) {
- .ComptimeInt => {
+ .comptime_int => {
// If the value is undefined, we can't refine to a fixed-width int
if (opt_val == null or opt_val.?.isUndef(zcu)) return .{ .conflict = .{
.peer_idx_a = strat_reason,
@@ -35022,7 +35022,7 @@ fn resolvePeerTypesInner(
ptr_opt_val.* = try sema.resolveLazyValue(opt_val.?);
continue;
},
- .Int => {},
+ .int => {},
else => return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
@@ -35091,14 +35091,14 @@ fn resolvePeerTypesInner(
for (peer_tys, peer_vals, 0..) |opt_ty, opt_val, i| {
const ty = opt_ty orelse continue;
switch (ty.zigTypeTag(zcu)) {
- .ComptimeFloat, .ComptimeInt => {},
- .Int => {
+ .comptime_float, .comptime_int => {},
+ .int => {
if (opt_val == null) return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
} };
},
- .Float => {
+ .float => {
if (opt_cur_ty) |cur_ty| {
if (cur_ty.eql(ty, zcu)) continue;
// Recreate the type so we eliminate any c_longdouble
@@ -35330,11 +35330,11 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
const pt = sema.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Array => .{
+ .array => .{
.len = ty.arrayLen(zcu),
.elem_ty = ty.childType(zcu),
},
- .Struct => {
+ .@"struct" => {
const field_count = ty.structFieldCount(zcu);
if (field_count == 0) return .{
.len = 0,
@@ -35625,7 +35625,7 @@ fn backingIntType(
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
if (small.has_backing_int) {
- var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
+ var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
@@ -35700,12 +35700,12 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
const pt = sema.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Pointer) {
+ if (ty.zigTypeTag(zcu) == .pointer) {
switch (ty.ptrSize(zcu)) {
.Slice, .Many, .C => return,
.One => {
const elem_ty = ty.childType(zcu);
- if (elem_ty.zigTypeTag(zcu) == .Array) return;
+ if (elem_ty.zigTypeTag(zcu) == .array) return;
// TODO https://github.com/ziglang/zig/issues/15479
// if (elem_ty.isTuple()) return;
},
@@ -35797,7 +35797,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
- if (try field_ty.comptimeOnlySema(pt) or field_ty.zigTypeTag(pt.zcu) == .NoReturn) continue; // TODO: should this affect alignment?
+ if (try field_ty.comptimeOnlySema(pt) or field_ty.zigTypeTag(pt.zcu) == .noreturn) continue; // TODO: should this affect alignment?
max_size = @max(max_size, field_ty.abiSizeSema(pt) catch |err| switch (err) {
error.AnalysisFail => {
@@ -36185,7 +36185,7 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
- var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
+ var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
@@ -36357,7 +36357,7 @@ fn structFields(
struct_type.field_types.get(ip)[field_i] = field_ty.toIntern();
- if (field_ty.zigTypeTag(zcu) == .Opaque) {
+ if (field_ty.zigTypeTag(zcu) == .@"opaque") {
const msg = msg: {
const msg = try sema.errMsg(ty_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
@@ -36367,7 +36367,7 @@ fn structFields(
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
}
- if (field_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (field_ty.zigTypeTag(zcu) == .noreturn) {
const msg = msg: {
const msg = try sema.errMsg(ty_src, "struct fields cannot be 'noreturn'", .{});
errdefer msg.destroy(sema.gpa);
@@ -36635,7 +36635,7 @@ fn unionFields(
if (small.auto_enum_tag) {
// The provided type is an integer type and we must construct the enum tag type here.
int_tag_ty = provided_ty;
- if (int_tag_ty.zigTypeTag(zcu) != .Int and int_tag_ty.zigTypeTag(zcu) != .ComptimeInt) {
+ if (int_tag_ty.zigTypeTag(zcu) != .int and int_tag_ty.zigTypeTag(zcu) != .comptime_int) {
return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(pt)});
}
@@ -36834,7 +36834,7 @@ fn unionFields(
}
}
- if (field_ty.zigTypeTag(zcu) == .Opaque) {
+ if (field_ty.zigTypeTag(zcu) == .@"opaque") {
const msg = msg: {
const msg = try sema.errMsg(type_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(sema.gpa);
@@ -36932,7 +36932,7 @@ fn generateUnionTagTypeNumbered(
const name = try ip.getOrPutStringFmt(
gpa,
pt.tid,
- "@typeInfo({}).Union.tag_type.?",
+ "@typeInfo({}).@\"union\".tag_type.?",
.{union_name.fmt(ip)},
.no_embedded_nulls,
);
@@ -36968,7 +36968,7 @@ fn generateUnionTagTypeSimple(
const name = try ip.getOrPutStringFmt(
gpa,
pt.tid,
- "@typeInfo({}).Union.tag_type.?",
+ "@typeInfo({}).@\"union\".tag_type.?",
.{union_name.fmt(ip)},
.no_embedded_nulls,
);
@@ -37637,7 +37637,7 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize)
fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
const pt = sema.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -37693,7 +37693,7 @@ fn numberAddWrapScalar(
const zcu = pt.zcu;
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return pt.undefValue(ty);
- if (ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (ty.zigTypeTag(zcu) == .comptime_int) {
return sema.intAdd(lhs, rhs, ty, undefined);
}
@@ -37729,7 +37729,7 @@ fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize)
fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
const pt = sema.pt;
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -37786,7 +37786,7 @@ fn numberSubWrapScalar(
const zcu = pt.zcu;
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return pt.undefValue(ty);
- if (ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (ty.zigTypeTag(zcu) == .comptime_int) {
return sema.intSub(lhs, rhs, ty, undefined);
}
@@ -37806,7 +37806,7 @@ fn intSubWithOverflow(
) !Value.OverflowArithmeticResult {
const pt = sema.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const vec_len = ty.vectorLen(zcu);
const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
@@ -37879,7 +37879,7 @@ fn intFromFloat(
) CompileError!Value {
const pt = sema.pt;
const zcu = pt.zcu;
- if (float_ty.zigTypeTag(zcu) == .Vector) {
+ if (float_ty.zigTypeTag(zcu) == .vector) {
const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(zcu));
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(pt, i);
@@ -38015,7 +38015,7 @@ fn intFitsInType(
},
},
.aggregate => |aggregate| {
- assert(ty.zigTypeTag(zcu) == .Vector);
+ assert(ty.zigTypeTag(zcu) == .vector);
return switch (aggregate.storage) {
.bytes => |bytes| for (bytes.toSlice(ty.vectorLen(zcu), &zcu.intern_pool), 0..) |byte, i| {
if (byte == 0) continue;
@@ -38070,7 +38070,7 @@ fn intAddWithOverflow(
) !Value.OverflowArithmeticResult {
const pt = sema.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const vec_len = ty.vectorLen(zcu);
const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
@@ -38143,7 +38143,7 @@ fn compareAll(
) CompileError!bool {
const pt = sema.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < ty.vectorLen(zcu)) : (i += 1) {
const lhs_elem = try lhs.elemValue(pt, i);
@@ -38194,7 +38194,7 @@ fn compareVector(
) !Value {
const pt = sema.pt;
const zcu = pt.zcu;
- assert(ty.zigTypeTag(zcu) == .Vector);
+ assert(ty.zigTypeTag(zcu) == .vector);
const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(zcu));
for (result_data, 0..) |*scalar, i| {
const lhs_elem = try lhs.elemValue(pt, i);
@@ -38555,7 +38555,7 @@ pub fn resolveDeclaredEnum(
if (tag_type_ref != .none) {
const ty = try sema.resolveType(&block, tag_ty_src, tag_type_ref);
- if (ty.zigTypeTag(zcu) != .Int and ty.zigTypeTag(zcu) != .ComptimeInt) {
+ if (ty.zigTypeTag(zcu) != .int and ty.zigTypeTag(zcu) != .comptime_int) {
return sema.fail(&block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)});
}
break :ty ty;
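
The generated tag-type name earlier in this file now renders as @typeInfo(...).@"union".tag_type.?. A small sketch of the corresponding user-side expression, assuming a compiler with this rename:

    const std = @import("std");

    const U = union(enum) { a: u8, b: void };

    test "tag type via the renamed @typeInfo field" {
        const Tag = @typeInfo(U).@"union".tag_type.?;
        try std.testing.expectEqual(Tag.a, std.meta.activeTag(U{ .a = 1 }));
    }
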
diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig
index 04229532fc..d3adbe7eb8 100644
--- a/src/Sema/bitcast.zig
+++ b/src/Sema/bitcast.zig
@@ -274,7 +274,7 @@ const UnpackValueBits = struct {
=> try unpack.primitive(val),
.aggregate => switch (ty.zigTypeTag(zcu)) {
- .Vector => {
+ .vector => {
const len: usize = @intCast(ty.arrayLen(zcu));
for (0..len) |i| {
// We reverse vector elements in packed memory on BE targets.
@@ -286,7 +286,7 @@ const UnpackValueBits = struct {
try unpack.add(elem_val);
}
},
- .Array => {
+ .array => {
// Each element is padded up to its ABI size. Padding bits are undefined.
// The final element does not have trailing padding.
// Elements are reversed in packed memory on BE targets.
@@ -316,7 +316,7 @@ const UnpackValueBits = struct {
try unpack.add(s);
};
},
- .Struct => switch (ty.containerLayout(zcu)) {
+ .@"struct" => switch (ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"extern" => switch (endian) {
.little => {
@@ -473,7 +473,7 @@ const PackValueBits = struct {
const ip = &zcu.intern_pool;
const arena = pack.arena;
switch (ty.zigTypeTag(zcu)) {
- .Vector => {
+ .vector => {
// Elements are bit-packed.
const len = ty.arrayLen(zcu);
const elem_ty = ty.childType(zcu);
@@ -496,7 +496,7 @@ const PackValueBits = struct {
.storage = .{ .elems = elems },
} }));
},
- .Array => {
+ .array => {
// Each element is padded up to its ABI size. The final element does not have trailing padding.
const len = ty.arrayLen(zcu);
const elem_ty = ty.childType(zcu);
@@ -530,7 +530,7 @@ const PackValueBits = struct {
.storage = .{ .elems = elems },
} }));
},
- .Struct => switch (ty.containerLayout(zcu)) {
+ .@"struct" => switch (ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"extern" => {
const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
@@ -587,7 +587,7 @@ const PackValueBits = struct {
} }));
},
},
- .Union => {
+ .@"union" => {
// We will attempt to read as the backing representation. If this emits
// `error.ReinterpretDeclRef`, we will try each union field, preferring larger ones.
// We will also attempt smaller fields when we get `undefined`, as if some bits are
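
These pack/unpack paths model how values map onto packed and extern layouts. A minimal user-level sketch of the kind of round trip they service (the Flags type is illustrative only):

    const std = @import("std");

    const Flags = packed struct(u8) {
        a: bool,
        b: bool,
        rest: u6 = 0,
    };

    test "packed struct round-trips through its backing integer" {
        const f: Flags = .{ .a = true, .b = false };
        const bits: u8 = @bitCast(f);
        const back: Flags = @bitCast(bits);
        try std.testing.expectEqual(f, back);
    }
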
diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig
index 893ea6db36..10e81d7a9e 100644
--- a/src/Sema/comptime_ptr_access.zig
+++ b/src/Sema/comptime_ptr_access.zig
@@ -226,7 +226,7 @@ fn loadComptimePtrInner(
.variable => return .runtime_load,
// We let `.@"extern"` through here if it's a function.
// This allows you to alias `extern fn`s.
- .@"extern" => |e| if (Type.fromInterned(e.ty).zigTypeTag(zcu) == .Fn)
+ .@"extern" => |e| if (Type.fromInterned(e.ty).zigTypeTag(zcu) == .@"fn")
break :val .{ .interned = val }
else
return .runtime_load,
@@ -296,8 +296,8 @@ fn loadComptimePtrInner(
const agg_ty = agg_val.typeOf(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
- .Struct, .Pointer => break :val try agg_val.getElem(sema.pt, @intCast(base_index.index)),
- .Union => {
+ .@"struct", .pointer => break :val try agg_val.getElem(sema.pt, @intCast(base_index.index)),
+ .@"union" => {
const tag_val: Value, const payload_mv: MutableValue = switch (agg_val) {
.un => |un| .{ Value.fromInterned(un.tag), un.payload.* },
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
@@ -321,7 +321,7 @@ fn loadComptimePtrInner(
};
if (ptr.byte_offset == 0 and host_bits == 0) {
- if (load_ty.zigTypeTag(zcu) != .Array or array_offset == 0) {
+ if (load_ty.zigTypeTag(zcu) != .array or array_offset == 0) {
if (.ok == try sema.coerceInMemoryAllowed(
block,
load_ty,
@@ -366,7 +366,7 @@ fn loadComptimePtrInner(
null,
)) {
// Changing the length of an array.
- const skip_base: u64 = extra_base_index + if (load_ty.zigTypeTag(zcu) == .Array) skip: {
+ const skip_base: u64 = extra_base_index + if (load_ty.zigTypeTag(zcu) == .array) skip: {
break :skip load_ty.childType(zcu).arrayBase(zcu)[1] * array_offset;
} else 0;
if (skip_base + load_count > val_count) return .{ .out_of_bounds = base_val.typeOf(zcu) };
@@ -394,7 +394,7 @@ fn loadComptimePtrInner(
var cur_val = base_val;
var cur_offset = ptr.byte_offset;
- if (load_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
+ if (load_ty.zigTypeTag(zcu) == .array and array_offset > 0) {
cur_offset += try load_ty.childType(zcu).abiSizeSema(pt) * array_offset;
}
@@ -410,30 +410,30 @@ fn loadComptimePtrInner(
while (true) {
const cur_ty = cur_val.typeOf(zcu);
switch (cur_ty.zigTypeTag(zcu)) {
- .NoReturn,
- .Type,
- .ComptimeInt,
- .ComptimeFloat,
- .Null,
- .Undefined,
- .EnumLiteral,
- .Opaque,
- .Fn,
- .ErrorUnion,
+ .noreturn,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .null,
+ .undefined,
+ .enum_literal,
+ .@"opaque",
+ .@"fn",
+ .error_union,
=> unreachable, // ill-defined layout
- .Int,
- .Float,
- .Bool,
- .Void,
- .Pointer,
- .ErrorSet,
- .AnyFrame,
- .Frame,
- .Enum,
- .Vector,
+ .int,
+ .float,
+ .bool,
+ .void,
+ .pointer,
+ .error_set,
+ .@"anyframe",
+ .frame,
+ .@"enum",
+ .vector,
=> break, // terminal types (no sub-values)
- .Optional => break, // this can only be a pointer-like optional so is terminal
- .Array => {
+ .optional => break, // this can only be a pointer-like optional so is terminal
+ .array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = try elem_ty.abiSizeSema(pt);
const elem_idx = cur_offset / elem_size;
@@ -446,7 +446,7 @@ fn loadComptimePtrInner(
break;
}
},
- .Struct => switch (cur_ty.containerLayout(zcu)) {
+ .@"struct" => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
@@ -459,7 +459,7 @@ fn loadComptimePtrInner(
}
} else break, // pointer spans multiple fields
},
- .Union => switch (cur_ty.containerLayout(zcu)) {
+ .@"union" => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => {
@@ -692,11 +692,11 @@ fn prepareComptimePtrStore(
const agg_ty = agg_val.typeOf(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
- .Struct, .Pointer => break :strat .{ .direct = .{
+ .@"struct", .pointer => break :strat .{ .direct = .{
.val = try agg_val.elem(pt, sema.arena, @intCast(base_index.index)),
.alloc = alloc,
} },
- .Union => {
+ .@"union" => {
if (agg_val.* == .interned and Value.fromInterned(agg_val.interned).isUndef(zcu)) {
return .undef;
}
@@ -717,7 +717,7 @@ fn prepareComptimePtrStore(
};
if (ptr.byte_offset == 0) {
- if (store_ty.zigTypeTag(zcu) != .Array or array_offset == 0) direct: {
+ if (store_ty.zigTypeTag(zcu) != .array or array_offset == 0) direct: {
const base_val_ty = switch (base_strat) {
.direct => |direct| direct.val.typeOf(zcu),
.index => |index| index.val.typeOf(zcu).childType(zcu),
@@ -770,7 +770,7 @@ fn prepareComptimePtrStore(
}
if (base_elem_offset + extra_base_index + store_count > val_count) return .{ .out_of_bounds = oob_ty };
- if (store_ty.zigTypeTag(zcu) == .Array) {
+ if (store_ty.zigTypeTag(zcu) == .array) {
const skip = store_ty.childType(zcu).arrayBase(zcu)[1] * array_offset;
return .{ .flat_index = .{
.alloc = base_strat.alloc(),
@@ -780,7 +780,7 @@ fn prepareComptimePtrStore(
}
// `base_val` must be an array, since otherwise the "direct reinterpret" logic above noticed it.
- assert(base_val.typeOf(zcu).zigTypeTag(zcu) == .Array);
+ assert(base_val.typeOf(zcu).zigTypeTag(zcu) == .array);
var index: u64 = base_elem_offset + extra_base_index;
const arr_val, const arr_index = (try recursiveIndex(sema, base_val, &index)).?;
@@ -816,7 +816,7 @@ fn prepareComptimePtrStore(
return .{ .needed_well_defined = cur_val.typeOf(zcu) };
}
- if (store_ty.zigTypeTag(zcu) == .Array and array_offset > 0) {
+ if (store_ty.zigTypeTag(zcu) == .array and array_offset > 0) {
cur_offset += try store_ty.childType(zcu).abiSizeSema(pt) * array_offset;
}
@@ -832,30 +832,30 @@ fn prepareComptimePtrStore(
while (true) {
const cur_ty = cur_val.typeOf(zcu);
switch (cur_ty.zigTypeTag(zcu)) {
- .NoReturn,
- .Type,
- .ComptimeInt,
- .ComptimeFloat,
- .Null,
- .Undefined,
- .EnumLiteral,
- .Opaque,
- .Fn,
- .ErrorUnion,
+ .noreturn,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .null,
+ .undefined,
+ .enum_literal,
+ .@"opaque",
+ .@"fn",
+ .error_union,
=> unreachable, // ill-defined layout
- .Int,
- .Float,
- .Bool,
- .Void,
- .Pointer,
- .ErrorSet,
- .AnyFrame,
- .Frame,
- .Enum,
- .Vector,
+ .int,
+ .float,
+ .bool,
+ .void,
+ .pointer,
+ .error_set,
+ .@"anyframe",
+ .frame,
+ .@"enum",
+ .vector,
=> break, // terminal types (no sub-values)
- .Optional => break, // this can only be a pointer-like optional so is terminal
- .Array => {
+ .optional => break, // this can only be a pointer-like optional so is terminal
+ .array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = try elem_ty.abiSizeSema(pt);
const elem_idx = cur_offset / elem_size;
@@ -868,7 +868,7 @@ fn prepareComptimePtrStore(
break;
}
},
- .Struct => switch (cur_ty.containerLayout(zcu)) {
+ .@"struct" => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
@@ -881,7 +881,7 @@ fn prepareComptimePtrStore(
}
} else break, // pointer spans multiple fields
},
- .Union => switch (cur_ty.containerLayout(zcu)) {
+ .@"union" => switch (cur_ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => {
@@ -942,7 +942,7 @@ fn flattenArray(
return;
}
- if (ty.zigTypeTag(zcu) != .Array) {
+ if (ty.zigTypeTag(zcu) != .array) {
out[@intCast(next_idx.*)] = (try val.intern(sema.pt, sema.arena)).toIntern();
next_idx.* += 1;
return;
@@ -975,7 +975,7 @@ fn unflattenArray(
const zcu = sema.pt.zcu;
const arena = sema.arena;
- if (ty.zigTypeTag(zcu) != .Array) {
+ if (ty.zigTypeTag(zcu) != .array) {
const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
next_idx.* += 1;
return sema.pt.getCoerced(val, ty);
@@ -1008,7 +1008,7 @@ fn recursiveIndex(
const pt = sema.pt;
const ty = mv.typeOf(pt.zcu);
- assert(ty.zigTypeTag(pt.zcu) == .Array);
+ assert(ty.zigTypeTag(pt.zcu) == .array);
const ty_base_elems = ty.arrayBase(pt.zcu)[1];
if (index.* >= ty_base_elems) {
@@ -1017,7 +1017,7 @@ fn recursiveIndex(
}
const elem_ty = ty.childType(pt.zcu);
- if (elem_ty.zigTypeTag(pt.zcu) != .Array) {
+ if (elem_ty.zigTypeTag(pt.zcu) != .array) {
assert(index.* < ty.arrayLenIncludingSentinel(pt.zcu)); // should be handled by initial check
return .{ mv, index.* };
}
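
This file implements comptime loads and stores through pointers. A minimal sketch of the user-level operation being serviced:

    const std = @import("std");

    test "comptime store and load through a pointer" {
        comptime {
            var arr = [_]u32{ 1, 2, 3 };
            const p = &arr[1];
            p.* += 10; // comptime store through the pointer
            std.debug.assert(arr[1] == 12); // comptime load observes it
        }
    }
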
diff --git a/src/Type.zig b/src/Type.zig
index 2925592a37..2048fc852b 100644
--- a/src/Type.zig
+++ b/src/Type.zig
@@ -31,8 +31,8 @@ pub fn zigTypeTagOrPoison(ty: Type, zcu: *const Zcu) error{GenericPoison}!std.bu
pub fn baseZigTypeTag(self: Type, mod: *Zcu) std.builtin.TypeId {
return switch (self.zigTypeTag(mod)) {
- .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod),
- .Optional => {
+ .error_union => self.errorUnionPayload(mod).baseZigTypeTag(mod),
+ .optional => {
return self.optionalChild(mod).baseZigTypeTag(mod);
},
else => |t| t,
@@ -41,37 +41,37 @@ pub fn baseZigTypeTag(self: Type, mod: *Zcu) std.builtin.TypeId {
pub fn isSelfComparable(ty: Type, zcu: *const Zcu, is_equality_cmp: bool) bool {
return switch (ty.zigTypeTag(zcu)) {
- .Int,
- .Float,
- .ComptimeFloat,
- .ComptimeInt,
+ .int,
+ .float,
+ .comptime_float,
+ .comptime_int,
=> true,
- .Vector => ty.elemType2(zcu).isSelfComparable(zcu, is_equality_cmp),
-
- .Bool,
- .Type,
- .Void,
- .ErrorSet,
- .Fn,
- .Opaque,
- .AnyFrame,
- .Enum,
- .EnumLiteral,
+ .vector => ty.elemType2(zcu).isSelfComparable(zcu, is_equality_cmp),
+
+ .bool,
+ .type,
+ .void,
+ .error_set,
+ .@"fn",
+ .@"opaque",
+ .@"anyframe",
+ .@"enum",
+ .enum_literal,
=> is_equality_cmp,
- .NoReturn,
- .Array,
- .Struct,
- .Undefined,
- .Null,
- .ErrorUnion,
- .Union,
- .Frame,
+ .noreturn,
+ .array,
+ .@"struct",
+ .undefined,
+ .null,
+ .error_union,
+ .@"union",
+ .frame,
=> false,
- .Pointer => !ty.isSlice(zcu) and (is_equality_cmp or ty.isCPtr(zcu)),
- .Optional => {
+ .pointer => !ty.isSlice(zcu) and (is_equality_cmp or ty.isCPtr(zcu)),
+ .optional => {
if (!is_equality_cmp) return false;
return ty.optionalChild(zcu).isSelfComparable(zcu, is_equality_cmp);
},
@@ -80,9 +80,9 @@ pub fn isSelfComparable(ty: Type, zcu: *const Zcu, is_equality_cmp: bool) bool {
/// If it is a function pointer, returns the function type. Otherwise returns null.
pub fn castPtrToFn(ty: Type, zcu: *const Zcu) ?Type {
- if (ty.zigTypeTag(zcu) != .Pointer) return null;
+ if (ty.zigTypeTag(zcu) != .pointer) return null;
const elem_ty = ty.childType(zcu);
- if (elem_ty.zigTypeTag(zcu) != .Fn) return null;
+ if (elem_ty.zigTypeTag(zcu) != .@"fn") return null;
return elem_ty;
}
@@ -267,7 +267,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
},
.inferred_error_set_type => |func_index| {
const func_nav = ip.getNav(zcu.funcInfo(func_index).owner_nav);
- try writer.print("@typeInfo(@typeInfo(@TypeOf({})).Fn.return_type.?).ErrorUnion.error_set", .{
+ try writer.print("@typeInfo(@typeInfo(@TypeOf({})).@\"fn\".return_type.?).error_union.error_set", .{
func_nav.fqn.fmt(ip),
});
},
@@ -796,7 +796,7 @@ pub fn fnHasRuntimeBitsInner(
pub fn isFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
switch (ty.zigTypeTag(zcu)) {
- .Fn => return ty.fnHasRuntimeBits(zcu),
+ .@"fn" => return ty.fnHasRuntimeBits(zcu),
else => return ty.hasRuntimeBits(zcu),
}
}
@@ -804,7 +804,7 @@ pub fn isFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
- .Fn => true,
+ .@"fn" => true,
else => return ty.hasRuntimeBitsIgnoreComptime(zcu),
};
}
@@ -1216,9 +1216,9 @@ fn abiAlignmentInnerOptional(
const child_type = ty.optionalChild(zcu);
switch (child_type.zigTypeTag(zcu)) {
- .Pointer => return .{ .scalar = ptrAbiAlignment(target) },
- .ErrorSet => return Type.anyerror.abiAlignmentInner(strat, zcu, tid),
- .NoReturn => return .{ .scalar = .@"1" },
+ .pointer => return .{ .scalar = ptrAbiAlignment(target) },
+ .error_set => return Type.anyerror.abiAlignmentInner(strat, zcu, tid),
+ .noreturn => return .{ .scalar = .@"1" },
else => {},
}
@@ -2053,7 +2053,7 @@ pub fn elemType2(ty: Type, zcu: *const Zcu) Type {
fn shallowElemType(child_ty: Type, zcu: *const Zcu) Type {
return switch (child_ty.zigTypeTag(zcu)) {
- .Array, .Vector => child_ty.childType(zcu),
+ .array, .vector => child_ty.childType(zcu),
else => child_ty,
};
}
@@ -2061,7 +2061,7 @@ fn shallowElemType(child_ty: Type, zcu: *const Zcu) Type {
/// For vectors, returns the element type. Otherwise returns self.
pub fn scalarType(ty: Type, zcu: *const Zcu) Type {
return switch (ty.zigTypeTag(zcu)) {
- .Vector => ty.childType(zcu),
+ .vector => ty.childType(zcu),
else => ty,
};
}
@@ -2217,7 +2217,7 @@ pub fn isAnyError(ty: Type, zcu: *const Zcu) bool {
pub fn isError(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
- .ErrorUnion, .ErrorSet => true,
+ .error_union, .error_set => true,
else => false,
};
}
@@ -2341,8 +2341,8 @@ pub fn isUnsignedInt(ty: Type, zcu: *const Zcu) bool {
/// If this function returns true, then intInfo() can be called on the type.
pub fn isAbiInt(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
- .Int, .Enum, .ErrorSet => true,
- .Struct => ty.containerLayout(zcu) == .@"packed",
+ .int, .@"enum", .error_set => true,
+ .@"struct" => ty.containerLayout(zcu) == .@"packed",
else => false,
};
}
@@ -2494,14 +2494,14 @@ pub fn fnCallingConvention(ty: Type, zcu: *const Zcu) std.builtin.CallingConvent
pub fn isValidParamType(self: Type, zcu: *const Zcu) bool {
return switch (self.zigTypeTagOrPoison(zcu) catch return true) {
- .Opaque, .NoReturn => false,
+ .@"opaque", .noreturn => false,
else => true,
};
}
pub fn isValidReturnType(self: Type, zcu: *const Zcu) bool {
return switch (self.zigTypeTagOrPoison(zcu) catch return true) {
- .Opaque => false,
+ .@"opaque" => false,
else => true,
};
}
@@ -2782,8 +2782,8 @@ pub fn comptimeOnlyInner(
.ptr_type => |ptr_type| {
const child_ty = Type.fromInterned(ptr_type.child);
switch (child_ty.zigTypeTag(zcu)) {
- .Fn => return !try child_ty.fnHasRuntimeBitsInner(strat, zcu, tid),
- .Opaque => return false,
+ .@"fn" => return !try child_ty.fnHasRuntimeBitsInner(strat, zcu, tid),
+ .@"opaque" => return false,
else => return child_ty.comptimeOnlyInner(strat, zcu, tid),
}
},
@@ -2954,7 +2954,7 @@ pub fn comptimeOnlyInner(
}
pub fn isVector(ty: Type, zcu: *const Zcu) bool {
- return ty.zigTypeTag(zcu) == .Vector;
+ return ty.zigTypeTag(zcu) == .vector;
}
/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len.
@@ -2966,40 +2966,40 @@ pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 {
pub fn isArrayOrVector(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
- .Array, .Vector => true,
+ .array, .vector => true,
else => false,
};
}
pub fn isIndexable(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
- .Array, .Vector => true,
- .Pointer => switch (ty.ptrSize(zcu)) {
+ .array, .vector => true,
+ .pointer => switch (ty.ptrSize(zcu)) {
.Slice, .Many, .C => true,
.One => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Array, .Vector => true,
- .Struct => ty.childType(zcu).isTuple(zcu),
+ .array, .vector => true,
+ .@"struct" => ty.childType(zcu).isTuple(zcu),
else => false,
},
},
- .Struct => ty.isTuple(zcu),
+ .@"struct" => ty.isTuple(zcu),
else => false,
};
}
pub fn indexableHasLen(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
- .Array, .Vector => true,
- .Pointer => switch (ty.ptrSize(zcu)) {
+ .array, .vector => true,
+ .pointer => switch (ty.ptrSize(zcu)) {
.Many, .C => false,
.Slice => true,
.One => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Array, .Vector => true,
- .Struct => ty.childType(zcu).isTuple(zcu),
+ .array, .vector => true,
+ .@"struct" => ty.childType(zcu).isTuple(zcu),
else => false,
},
},
- .Struct => ty.isTuple(zcu),
+ .@"struct" => ty.isTuple(zcu),
else => false,
};
}
@@ -3030,7 +3030,7 @@ pub fn getParentNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex
pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
const zcu = pt.zcu;
const scalar = try minIntScalar(ty.scalarType(zcu), pt, dest_ty.scalarType(zcu));
- return if (ty.zigTypeTag(zcu) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
+ return if (ty.zigTypeTag(zcu) == .vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
} })) else scalar;
@@ -3061,7 +3061,7 @@ pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
pub fn maxInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
const zcu = pt.zcu;
const scalar = try maxIntScalar(ty.scalarType(zcu), pt, dest_ty.scalarType(zcu));
- return if (ty.zigTypeTag(zcu) == .Vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
+ return if (ty.zigTypeTag(zcu) == .vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
} })) else scalar;
@@ -3539,8 +3539,8 @@ pub fn isSimpleTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool {
pub fn optEuBaseType(ty: Type, zcu: *const Zcu) Type {
var cur = ty;
while (true) switch (cur.zigTypeTag(zcu)) {
- .Optional => cur = cur.optionalChild(zcu),
- .ErrorUnion => cur = cur.errorUnionPayload(zcu),
+ .optional => cur = cur.optionalChild(zcu),
+ .error_union => cur = cur.errorUnionPayload(zcu),
else => return cur,
};
}
@@ -3548,8 +3548,8 @@ pub fn optEuBaseType(ty: Type, zcu: *const Zcu) Type {
pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Int => pt.intType(.unsigned, ty.intInfo(zcu).bits),
- .Vector => try pt.vectorType(.{
+ .int => pt.intType(.unsigned, ty.intInfo(zcu).bits),
+ .vector => try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = (try ty.childType(zcu).toUnsigned(pt)).toIntern(),
}),
@@ -3625,7 +3625,7 @@ pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
var cur_ty: Type = ty;
var cur_len: u64 = 1;
- while (cur_ty.zigTypeTag(zcu) == .Array) {
+ while (cur_ty.zigTypeTag(zcu) == .array) {
cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
cur_ty = cur_ty.childType(zcu);
}
@@ -3692,7 +3692,7 @@ pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Struct => switch (ip.indexToKey(ty.toIntern())) {
+ .@"struct" => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
try field_ty.resolveLayout(pt);
@@ -3700,21 +3700,21 @@ pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
.struct_type => return ty.resolveStructInner(pt, .layout),
else => unreachable,
},
- .Union => return ty.resolveUnionInner(pt, .layout),
- .Array => {
+ .@"union" => return ty.resolveUnionInner(pt, .layout),
+ .array => {
if (ty.arrayLenIncludingSentinel(zcu) == 0) return;
const elem_ty = ty.childType(zcu);
return elem_ty.resolveLayout(pt);
},
- .Optional => {
+ .optional => {
const payload_ty = ty.optionalChild(zcu);
return payload_ty.resolveLayout(pt);
},
- .ErrorUnion => {
+ .error_union => {
const payload_ty = ty.errorUnionPayload(zcu);
return payload_ty.resolveLayout(pt);
},
- .Fn => {
+ .@"fn" => {
const info = zcu.typeToFunc(ty).?;
if (info.is_generic) {
// Resolving of generic function types is deferred to when
@@ -3830,30 +3830,30 @@ pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Type,
- .Void,
- .Bool,
- .NoReturn,
- .Int,
- .Float,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .ErrorSet,
- .Enum,
- .Opaque,
- .Frame,
- .AnyFrame,
- .Vector,
- .EnumLiteral,
+ .type,
+ .void,
+ .bool,
+ .noreturn,
+ .int,
+ .float,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .error_set,
+ .@"enum",
+ .@"opaque",
+ .frame,
+ .@"anyframe",
+ .vector,
+ .enum_literal,
=> {},
- .Pointer => return ty.childType(zcu).resolveFully(pt),
- .Array => return ty.childType(zcu).resolveFully(pt),
- .Optional => return ty.optionalChild(zcu).resolveFully(pt),
- .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(pt),
- .Fn => {
+ .pointer => return ty.childType(zcu).resolveFully(pt),
+ .array => return ty.childType(zcu).resolveFully(pt),
+ .optional => return ty.optionalChild(zcu).resolveFully(pt),
+ .error_union => return ty.errorUnionPayload(zcu).resolveFully(pt),
+ .@"fn" => {
const info = zcu.typeToFunc(ty).?;
if (info.is_generic) return;
for (0..info.param_types.len) |i| {
@@ -3863,7 +3863,7 @@ pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
try Type.fromInterned(info.return_type).resolveFully(pt);
},
- .Struct => switch (ip.indexToKey(ty.toIntern())) {
+ .@"struct" => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
try field_ty.resolveFully(pt);
@@ -3871,7 +3871,7 @@ pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
.struct_type => return ty.resolveStructInner(pt, .full),
else => unreachable,
},
- .Union => return ty.resolveUnionInner(pt, .full),
+ .@"union" => return ty.resolveUnionInner(pt, .full),
}
}
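
The Type.zig hunks above are a mechanical rename: every type-tag in switches on zigTypeTag moves from TitleCase to the lowercase std.builtin.Type field names, quoted with @"..." where the name collides with a keyword (.@"struct", .@"enum", .@"union", .@"fn", .@"opaque", .@"anyframe"). A minimal standalone sketch of the new spelling, assuming a toolchain that already includes this rename (illustrative, not code from this commit):

const std = @import("std");

// Matches container types using the renamed, lowercase type-info tags;
// keyword-colliding names need @"..." quoting.
fn isContainer(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .@"struct", .@"union", .@"enum", .@"opaque" => true,
        else => false,
    };
}

pub fn main() void {
    std.debug.print("{}\n", .{isContainer(struct { x: u8 })}); // prints: true
}
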
diff --git a/src/Value.zig b/src/Value.zig
index 0843045f75..e29e033884 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -64,7 +64,7 @@ pub fn fmtValueSemaFull(ctx: print_value.FormatContext) std.fmt.Formatter(print_
/// Asserts `val` is an array of `u8`
pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
const zcu = pt.zcu;
- assert(ty.zigTypeTag(zcu) == .Array);
+ assert(ty.zigTypeTag(zcu) == .array);
assert(ty.childType(zcu).toIntern() == .u8_type);
const ip = &zcu.intern_pool;
switch (zcu.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
@@ -349,12 +349,12 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
return;
}
switch (ty.zigTypeTag(zcu)) {
- .Void => {},
- .Bool => {
+ .void => {},
+ .bool => {
buffer[0] = @intFromBool(val.toBool());
},
- .Int, .Enum, .ErrorSet, .Pointer => |tag| {
- const int_ty = if (tag == .Pointer) int_ty: {
+ .int, .@"enum", .error_set, .pointer => |tag| {
+ const int_ty = if (tag == .pointer) int_ty: {
if (ty.isSlice(zcu)) return error.IllDefinedMemoryLayout;
if (ip.getBackingAddrTag(val.toIntern()).? != .int) return error.ReinterpretDeclRef;
break :int_ty Type.usize;
@@ -367,7 +367,7 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
const bigint = val.toBigInt(&bigint_buffer, zcu);
bigint.writeTwosComplement(buffer[0..byte_count], endian);
},
- .Float => switch (ty.floatBits(target)) {
+ .float => switch (ty.floatBits(target)) {
16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, zcu)), endian),
32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, zcu)), endian),
64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, zcu)), endian),
@@ -375,7 +375,7 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, zcu)), endian),
else => unreachable,
},
- .Array => {
+ .array => {
const len = ty.arrayLen(zcu);
const elem_ty = ty.childType(zcu);
const elem_size: usize = @intCast(elem_ty.abiSize(zcu));
@@ -387,13 +387,13 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
buf_off += elem_size;
}
},
- .Vector => {
+ .vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
},
- .Struct => {
+ .@"struct" => {
const struct_type = zcu.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
switch (struct_type.layout) {
.auto => return error.IllDefinedMemoryLayout,
@@ -415,7 +415,7 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
},
}
},
- .Union => switch (ty.containerLayout(zcu)) {
+ .@"union" => switch (ty.containerLayout(zcu)) {
.auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already
.@"extern" => {
if (val.unionTag(zcu)) |union_tag| {
@@ -437,7 +437,7 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
},
},
- .Optional => {
+ .optional => {
if (!ty.isPtrLikeOptional(zcu)) return error.IllDefinedMemoryLayout;
const opt_val = val.optionalValue(zcu);
if (opt_val) |some| {
@@ -473,8 +473,8 @@ pub fn writeToPackedMemory(
return;
}
switch (ty.zigTypeTag(zcu)) {
- .Void => {},
- .Bool => {
+ .void => {},
+ .bool => {
const byte_index = switch (endian) {
.little => bit_offset / 8,
.big => buffer.len - bit_offset / 8 - 1,
@@ -485,7 +485,7 @@ pub fn writeToPackedMemory(
buffer[byte_index] &= ~(@as(u8, 1) << @as(u3, @intCast(bit_offset % 8)));
}
},
- .Int, .Enum => {
+ .int, .@"enum" => {
if (buffer.len == 0) return;
const bits = ty.intInfo(zcu).bits;
if (bits == 0) return;
@@ -503,7 +503,7 @@ pub fn writeToPackedMemory(
},
}
},
- .Float => switch (ty.floatBits(target)) {
+ .float => switch (ty.floatBits(target)) {
16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, zcu)), endian),
32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, zcu)), endian),
64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, zcu)), endian),
@@ -511,7 +511,7 @@ pub fn writeToPackedMemory(
128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, zcu)), endian),
else => unreachable,
},
- .Vector => {
+ .vector => {
const elem_ty = ty.childType(zcu);
const elem_bit_size: u16 = @intCast(elem_ty.bitSize(zcu));
const len: usize = @intCast(ty.arrayLen(zcu));
@@ -526,7 +526,7 @@ pub fn writeToPackedMemory(
bits += elem_bit_size;
}
},
- .Struct => {
+ .@"struct" => {
const struct_type = ip.loadStructType(ty.toIntern());
// Sema is supposed to have emitted a compile error already in the case of Auto,
// and Extern is handled in non-packed writeToMemory.
@@ -544,7 +544,7 @@ pub fn writeToPackedMemory(
bits += field_bits;
}
},
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(ty).?;
switch (union_obj.flagsUnordered(ip).layout) {
.auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory
@@ -561,12 +561,12 @@ pub fn writeToPackedMemory(
},
}
},
- .Pointer => {
+ .pointer => {
assert(!ty.isSlice(zcu)); // No well defined layout.
if (ip.getBackingAddrTag(val.toIntern()).? != .int) return error.ReinterpretDeclRef;
return val.writeToPackedMemory(Type.usize, pt, buffer, bit_offset);
},
- .Optional => {
+ .optional => {
assert(ty.isPtrLikeOptional(zcu));
const child = ty.optionalChild(zcu);
const opt_val = val.optionalValue(zcu);
@@ -599,18 +599,18 @@ pub fn readFromMemory(
const target = zcu.getTarget();
const endian = target.cpu.arch.endian();
switch (ty.zigTypeTag(zcu)) {
- .Void => return Value.void,
- .Bool => {
+ .void => return Value.void,
+ .bool => {
if (buffer[0] == 0) {
return Value.false;
} else {
return Value.true;
}
},
- .Int, .Enum => |ty_tag| {
+ .int, .@"enum" => |ty_tag| {
const int_ty = switch (ty_tag) {
- .Int => ty,
- .Enum => ty.intTagType(zcu),
+ .int => ty,
+ .@"enum" => ty.intTagType(zcu),
else => unreachable,
};
const int_info = int_ty.intInfo(zcu);
@@ -639,7 +639,7 @@ pub fn readFromMemory(
return zcu.getCoerced(try zcu.intValue_big(int_ty, bigint.toConst()), ty);
}
},
- .Float => return Value.fromInterned(try pt.intern(.{ .float = .{
+ .float => return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @bitCast(std.mem.readInt(u16, buffer[0..2], endian)) },
@@ -650,7 +650,7 @@ pub fn readFromMemory(
else => unreachable,
},
} })),
- .Array => {
+ .array => {
const elem_ty = ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
@@ -664,13 +664,13 @@ pub fn readFromMemory(
.storage = .{ .elems = elems },
} }));
},
- .Vector => {
+ .vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
},
- .Struct => {
+ .@"struct" => {
const struct_type = zcu.typeToStruct(ty).?;
switch (struct_type.layout) {
.auto => unreachable, // Sema is supposed to have emitted a compile error already
@@ -694,7 +694,7 @@ pub fn readFromMemory(
},
}
},
- .ErrorSet => {
+ .error_set => {
const bits = zcu.errorSetBits();
const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
const int = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
@@ -706,7 +706,7 @@ pub fn readFromMemory(
.name = name,
} }));
},
- .Union => switch (ty.containerLayout(zcu)) {
+ .@"union" => switch (ty.containerLayout(zcu)) {
.auto => return error.IllDefinedMemoryLayout,
.@"extern" => {
const union_size = ty.abiSize(zcu);
@@ -723,7 +723,7 @@ pub fn readFromMemory(
return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
},
},
- .Pointer => {
+ .pointer => {
assert(!ty.isSlice(zcu)); // No well defined layout.
const int_val = try readFromMemory(Type.usize, zcu, buffer, arena);
return Value.fromInterned(try pt.intern(.{ .ptr = .{
@@ -732,7 +732,7 @@ pub fn readFromMemory(
.byte_offset = int_val.toUnsignedInt(zcu),
} }));
},
- .Optional => {
+ .optional => {
assert(ty.isPtrLikeOptional(zcu));
const child_ty = ty.optionalChild(zcu);
const child_val = try readFromMemory(child_ty, zcu, buffer, arena);
@@ -768,8 +768,8 @@ pub fn readFromPackedMemory(
const target = zcu.getTarget();
const endian = target.cpu.arch.endian();
switch (ty.zigTypeTag(zcu)) {
- .Void => return Value.void,
- .Bool => {
+ .void => return Value.void,
+ .bool => {
const byte = switch (endian) {
.big => buffer[buffer.len - bit_offset / 8 - 1],
.little => buffer[bit_offset / 8],
@@ -780,7 +780,7 @@ pub fn readFromPackedMemory(
return Value.true;
}
},
- .Int => {
+ .int => {
if (buffer.len == 0) return pt.intValue(ty, 0);
const int_info = ty.intInfo(zcu);
const bits = int_info.bits;
@@ -804,12 +804,12 @@ pub fn readFromPackedMemory(
bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
return pt.intValue_big(ty, bigint.toConst());
},
- .Enum => {
+ .@"enum" => {
const int_ty = ty.intTagType(zcu);
const int_val = try Value.readFromPackedMemory(int_ty, pt, buffer, bit_offset, arena);
return pt.getCoerced(int_val, ty);
},
- .Float => return Value.fromInterned(try pt.intern(.{ .float = .{
+ .float => return Value.fromInterned(try pt.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian)) },
@@ -820,7 +820,7 @@ pub fn readFromPackedMemory(
else => unreachable,
},
} })),
- .Vector => {
+ .vector => {
const elem_ty = ty.childType(zcu);
const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
@@ -837,7 +837,7 @@ pub fn readFromPackedMemory(
.storage = .{ .elems = elems },
} }));
},
- .Struct => {
+ .@"struct" => {
// Sema is supposed to have emitted a compile error already for Auto layout structs,
// and Extern is handled by non-packed readFromMemory.
const struct_type = zcu.typeToPackedStruct(ty).?;
@@ -854,7 +854,7 @@ pub fn readFromPackedMemory(
.storage = .{ .elems = field_vals },
} }));
},
- .Union => switch (ty.containerLayout(zcu)) {
+ .@"union" => switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
.@"packed" => {
const backing_ty = try ty.unionBackingType(pt);
@@ -866,7 +866,7 @@ pub fn readFromPackedMemory(
} }));
},
},
- .Pointer => {
+ .pointer => {
assert(!ty.isSlice(zcu)); // No well defined layout.
const int_val = try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, arena);
return Value.fromInterned(try pt.intern(.{ .ptr = .{
@@ -875,7 +875,7 @@ pub fn readFromPackedMemory(
.byte_offset = int_val.toUnsignedInt(zcu),
} }));
},
- .Optional => {
+ .optional => {
assert(ty.isPtrLikeOptional(zcu));
const child_ty = ty.optionalChild(zcu);
const child_val = try readFromPackedMemory(child_ty, pt, buffer, bit_offset, arena);
@@ -1155,7 +1155,7 @@ pub fn compareHeteroAdvanced(
/// For vectors, returns true if comparison is true for ALL elements.
pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, pt: Zcu.PerThread) !bool {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const scalar_ty = ty.scalarType(zcu);
for (0..ty.vectorLen(zcu)) |i| {
const lhs_elem = try lhs.elemValue(pt, i);
@@ -1519,7 +1519,7 @@ pub fn floatFromIntAdvanced(
comptime strat: ResolveStrat,
) !Value {
const zcu = pt.zcu;
- if (int_ty.zigTypeTag(zcu) == .Vector) {
+ if (int_ty.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(zcu));
const scalar_ty = float_ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -1573,7 +1573,7 @@ fn calcLimbLenFloat(scalar: anytype) usize {
}
const w_value = @abs(scalar);
- return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).Int.bits) + 1;
+ return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).int.bits) + 1;
}
pub const OverflowArithmeticResult = struct {
@@ -1589,7 +1589,7 @@ pub fn intAddSat(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -1640,7 +1640,7 @@ pub fn intSubSat(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -1692,7 +1692,7 @@ pub fn intMulWithOverflow(
pt: Zcu.PerThread,
) !OverflowArithmeticResult {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const vec_len = ty.vectorLen(zcu);
const overflowed_data = try arena.alloc(InternPool.Index, vec_len);
const result_data = try arena.alloc(InternPool.Index, vec_len);
@@ -1770,7 +1770,7 @@ pub fn numberMulWrap(
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -1797,7 +1797,7 @@ pub fn numberMulWrapScalar(
const zcu = pt.zcu;
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return Value.undef;
- if (ty.zigTypeTag(zcu) == .ComptimeInt) {
+ if (ty.zigTypeTag(zcu) == .comptime_int) {
return intMul(lhs, rhs, ty, undefined, arena, pt);
}
@@ -1817,7 +1817,7 @@ pub fn intMulSat(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -1897,7 +1897,7 @@ pub fn numberMin(lhs: Value, rhs: Value, zcu: *Zcu) Value {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -1941,7 +1941,7 @@ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThrea
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2014,7 +2014,7 @@ fn intValueAa(ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2044,7 +2044,7 @@ pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, pt:
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2097,7 +2097,7 @@ pub fn bitwiseOrScalar(orig_lhs: Value, orig_rhs: Value, ty: Type, arena: Alloca
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2157,7 +2157,7 @@ pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator
}
fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2213,7 +2213,7 @@ pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt:
}
pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2256,7 +2256,7 @@ pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator,
}
pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2328,7 +2328,7 @@ pub fn isNegativeInf(val: Value, zcu: *const Zcu) bool {
}
pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
- if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2362,7 +2362,7 @@ pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, pt: Zcu.PerThrea
}
pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
- if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2419,7 +2419,7 @@ pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator
fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2471,7 +2471,7 @@ pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt:
pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2496,7 +2496,7 @@ pub fn intTruncBitsAsValue(
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2540,7 +2540,7 @@ pub fn intTruncScalar(
pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2588,7 +2588,7 @@ pub fn shlWithOverflow(
allocator: Allocator,
pt: Zcu.PerThread,
) !OverflowArithmeticResult {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const vec_len = ty.vectorLen(pt.zcu);
const overflowed_data = try allocator.alloc(InternPool.Index, vec_len);
const result_data = try allocator.alloc(InternPool.Index, vec_len);
@@ -2653,7 +2653,7 @@ pub fn shlSat(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2704,7 +2704,7 @@ pub fn shlTrunc(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2734,7 +2734,7 @@ pub fn shlTruncScalar(
}
pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) !Value {
- if (ty.zigTypeTag(pt.zcu) == .Vector) {
+ if (ty.zigTypeTag(pt.zcu) == .vector) {
const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(pt.zcu));
const scalar_ty = ty.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2789,7 +2789,7 @@ pub fn floatNeg(
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2829,7 +2829,7 @@ pub fn floatAdd(
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2875,7 +2875,7 @@ pub fn floatSub(
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2920,7 +2920,7 @@ pub fn floatDiv(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -2965,7 +2965,7 @@ pub fn floatDivFloor(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3010,7 +3010,7 @@ pub fn floatDivTrunc(
arena: Allocator,
pt: Zcu.PerThread,
) !Value {
- if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3056,7 +3056,7 @@ pub fn floatMul(
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3095,7 +3095,7 @@ pub fn floatMulScalar(
}
pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
- if (float_type.zigTypeTag(pt.zcu) == .Vector) {
+ if (float_type.zigTypeTag(pt.zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
const scalar_ty = float_type.scalarType(pt.zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3129,7 +3129,7 @@ pub fn sqrtScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3163,7 +3163,7 @@ pub fn sinScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3197,7 +3197,7 @@ pub fn cosScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3231,7 +3231,7 @@ pub fn tanScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3265,7 +3265,7 @@ pub fn expScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3299,7 +3299,7 @@ pub fn exp2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3333,7 +3333,7 @@ pub fn logScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Erro
pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3367,7 +3367,7 @@ pub fn log2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3401,7 +3401,7 @@ pub fn log10Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Er
pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
const scalar_ty = ty.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3419,21 +3419,21 @@ pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
const zcu = pt.zcu;
switch (ty.zigTypeTag(zcu)) {
- .Int => {
+ .int => {
var buffer: Value.BigIntSpace = undefined;
var operand_bigint = try val.toBigInt(&buffer, zcu).toManaged(arena);
operand_bigint.abs();
return pt.intValue_big(try ty.toUnsigned(pt), operand_bigint.toConst());
},
- .ComptimeInt => {
+ .comptime_int => {
var buffer: Value.BigIntSpace = undefined;
var operand_bigint = try val.toBigInt(&buffer, zcu).toManaged(arena);
operand_bigint.abs();
return pt.intValue_big(ty, operand_bigint.toConst());
},
- .ComptimeFloat, .Float => {
+ .comptime_float, .float => {
const target = zcu.getTarget();
const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @abs(val.toFloat(f16, zcu)) },
@@ -3454,7 +3454,7 @@ pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allo
pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3488,7 +3488,7 @@ pub fn floorScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Er
pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3522,7 +3522,7 @@ pub fn ceilScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Err
pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3556,7 +3556,7 @@ pub fn roundScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Er
pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3597,7 +3597,7 @@ pub fn mulAdd(
pt: Zcu.PerThread,
) !Value {
const zcu = pt.zcu;
- if (float_type.zigTypeTag(zcu) == .Vector) {
+ if (float_type.zigTypeTag(zcu) == .vector) {
const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
const scalar_ty = float_type.scalarType(zcu);
for (result_data, 0..) |*scalar, i| {
@@ -3720,7 +3720,7 @@ pub fn ptrOptPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
const opt_ty = parent_ptr_ty.childType(zcu);
assert(parent_ptr_ty.ptrSize(zcu) == .One);
- assert(opt_ty.zigTypeTag(zcu) == .Optional);
+ assert(opt_ty.zigTypeTag(zcu) == .optional);
const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_ty.ptrInfo(zcu);
@@ -3754,7 +3754,7 @@ pub fn ptrEuPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
const eu_ty = parent_ptr_ty.childType(zcu);
assert(parent_ptr_ty.ptrSize(zcu) == .One);
- assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion);
+ assert(eu_ty.zigTypeTag(zcu) == .error_union);
const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_ty.ptrInfo(zcu);
@@ -3789,7 +3789,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
// Exiting this `switch` indicates that the `field` pointer representation should be used.
// `field_align` may be `.none` to represent the natural alignment of `field_ty`, but is not necessarily.
const field_ty: Type, const field_align: InternPool.Alignment = switch (aggregate_ty.zigTypeTag(zcu)) {
- .Struct => field: {
+ .@"struct" => field: {
const field_ty = aggregate_ty.fieldType(field_idx, zcu);
switch (aggregate_ty.containerLayout(zcu)) {
.auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) },
@@ -3839,7 +3839,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
},
}
},
- .Union => field: {
+ .@"union" => field: {
const union_obj = zcu.typeToUnion(aggregate_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
switch (aggregate_ty.containerLayout(zcu)) {
@@ -3887,7 +3887,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
},
}
},
- .Pointer => field_ty: {
+ .pointer => field_ty: {
assert(aggregate_ty.isSlice(zcu));
break :field_ty switch (field_idx) {
Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(zcu) },
@@ -3944,7 +3944,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, pt: Zcu.PerThread) !Value
if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) {
// Since we have a bit-pointer, the pointer address should be unchanged.
- assert(elem_ty.zigTypeTag(zcu) == .Vector);
+ assert(elem_ty.zigTypeTag(zcu) == .vector);
return pt.getCoerced(parent_ptr, result_ty);
}
@@ -3955,8 +3955,8 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, pt: Zcu.PerThread) !Value
const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) {
.One => switch (elem_ty.zigTypeTag(zcu)) {
- .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeSema(pt), 8) },
- .Array => strat: {
+ .vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeSema(pt), 8) },
+ .array => strat: {
const arr_elem_ty = elem_ty.childType(zcu);
if (try arr_elem_ty.comptimeOnlySema(pt)) {
break :strat .{ .elem_ptr = arr_elem_ty };
@@ -4178,19 +4178,19 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
const base_ptr_ty = base_ptr.typeOf(zcu);
const agg_ty = base_ptr_ty.childType(zcu);
const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
- .Struct => .{ agg_ty.fieldType(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
+ .@"struct" => .{ agg_ty.fieldType(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
@intCast(field.index),
if (have_sema) .sema else .normal,
pt.zcu,
if (have_sema) pt.tid else {},
) },
- .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
+ .@"union" => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
@intCast(field.index),
if (have_sema) .sema else .normal,
pt.zcu,
if (have_sema) pt.tid else {},
) },
- .Pointer => .{ switch (field.index) {
+ .pointer => .{ switch (field.index) {
Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
Value.slice_len_index => Type.usize,
else => unreachable,
@@ -4268,31 +4268,31 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
break;
}
switch (cur_ty.zigTypeTag(zcu)) {
- .NoReturn,
- .Type,
- .ComptimeInt,
- .ComptimeFloat,
- .Null,
- .Undefined,
- .EnumLiteral,
- .Opaque,
- .Fn,
- .ErrorUnion,
- .Int,
- .Float,
- .Bool,
- .Void,
- .Pointer,
- .ErrorSet,
- .AnyFrame,
- .Frame,
- .Enum,
- .Vector,
- .Optional,
- .Union,
+ .noreturn,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .null,
+ .undefined,
+ .enum_literal,
+ .@"opaque",
+ .@"fn",
+ .error_union,
+ .int,
+ .float,
+ .bool,
+ .void,
+ .pointer,
+ .error_set,
+ .@"anyframe",
+ .frame,
+ .@"enum",
+ .vector,
+ .optional,
+ .@"union",
=> break,
- .Array => {
+ .array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = elem_ty.abiSize(zcu);
const start_idx = cur_offset / elem_size;
@@ -4321,7 +4321,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
break;
}
},
- .Struct => switch (cur_ty.containerLayout(zcu)) {
+ .@"struct" => switch (cur_ty.containerLayout(zcu)) {
.auto, .@"packed" => break,
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
const field_ty = cur_ty.fieldType(field_idx, zcu);
diff --git a/src/Zcu.zig b/src/Zcu.zig
index 7a03535c2f..148570d85a 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -43,9 +43,9 @@ const dev = @import("dev.zig");
comptime {
@setEvalBranchQuota(4000);
for (
- @typeInfo(Zir.Inst.Ref).Enum.fields,
- @typeInfo(Air.Inst.Ref).Enum.fields,
- @typeInfo(InternPool.Index).Enum.fields,
+ @typeInfo(Zir.Inst.Ref).@"enum".fields,
+ @typeInfo(Air.Inst.Ref).@"enum".fields,
+ @typeInfo(InternPool.Index).@"enum".fields,
) |zir_field, air_field, ip_field| {
assert(mem.eql(u8, zir_field.name, ip_field.name));
assert(mem.eql(u8, air_field.name, ip_field.name));
@@ -246,7 +246,7 @@ pub const PanicId = enum {
memcpy_alias,
noreturn_returned,
- pub const len = @typeInfo(PanicId).Enum.fields.len;
+ pub const len = @typeInfo(PanicId).@"enum".fields.len;
};
pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
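
The same rename applies to the payload field names of std.builtin.Type, so reflection such as the PanicId.len computation above now spells the field .@"enum" rather than .Enum. A small self-contained sketch of that pattern, assuming a post-rename toolchain (illustrative only):

const std = @import("std");

const PanicId = enum { unreach, integer_overflow, noreturn_returned };

// Counts the enum's fields via @typeInfo; the payload field is the
// quoted lowercase name .@"enum" instead of the old .Enum.
pub fn main() void {
    const len = @typeInfo(PanicId).@"enum".fields.len;
    std.debug.print("{d}\n", .{len}); // prints: 3
}
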
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 67e0e9ee8b..0a581773e9 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -921,7 +921,7 @@ fn createFileRootStruct(
assert(!small.has_captures_len);
assert(!small.has_backing_int);
assert(small.layout == .auto);
- var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
+ var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = file.zir.extra[extra_index];
extra_index += 1;
@@ -1005,7 +1005,7 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.
const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
- var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
+ var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = file.zir.extra[extra_index];
@@ -2080,7 +2080,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!
func.setCallsOrAwaitsErrorableFn(ip, false);
// First few indexes of extra are reserved and set at the end.
- const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
+ const reserved_count = @typeInfo(Air.ExtraIndex).@"enum".fields.len;
try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
sema.air_extra.items.len += reserved_count;
@@ -2204,7 +2204,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!
}
// Copy the block into place and mark that as the main block.
- try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
inner_block.instructions.items.len);
const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
.body_len = @intCast(inner_block.instructions.items.len),
@@ -2405,7 +2405,7 @@ fn processExportsInner(
.resolved => |r| Value.fromInterned(r.val),
};
// If the value is a function, we also need to check if that function succeeded analysis.
- if (val.typeOf(zcu).zigTypeTag(zcu) == .Fn) {
+ if (val.typeOf(zcu).zigTypeTag(zcu) == .@"fn") {
const func_unit = AnalUnit.wrap(.{ .func = val.toIntern() });
if (zcu.failed_analysis.contains(func_unit)) break :failed true;
if (zcu.transitive_failed_analysis.contains(func_unit)) break :failed true;
@@ -2869,7 +2869,7 @@ pub fn errorSetFromUnsortedNames(
/// Supports only pointers, not pointer-like optionals.
pub fn ptrIntValue(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
const zcu = pt.zcu;
- assert(ty.zigTypeTag(zcu) == .Pointer and !ty.isSlice(zcu));
+ assert(ty.zigTypeTag(zcu) == .pointer and !ty.isSlice(zcu));
assert(x != 0 or ty.isAllowzeroPtr(zcu));
return Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = ty.toIntern(),
@@ -2882,7 +2882,7 @@ pub fn ptrIntValue(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
pub fn enumValue(pt: Zcu.PerThread, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value {
if (std.debug.runtime_safety) {
const tag = ty.zigTypeTag(pt.zcu);
- assert(tag == .Enum);
+ assert(tag == .@"enum");
}
return Value.fromInterned(try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
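
In the backend changes that follow, tag checks such as ty.zigTypeTag(zcu) == .Fn become == .@"fn", and .Pointer becomes .pointer. A hedged, standalone illustration of the same comparison style (not the compiler's own helper), assuming a post-rename toolchain:

const std = @import("std");

// Returns true when T is a pointer type whose child is a function type,
// using the renamed lowercase tags; comparing a tagged union against an
// enum literal compares the active tag.
fn isFnPtr(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .pointer => |info| @typeInfo(info.child) == .@"fn",
        else => false,
    };
}

pub fn main() void {
    std.debug.print("{}\n", .{isFnPtr(*const fn () void)}); // prints: true
    std.debug.print("{}\n", .{isFnPtr(*u8)}); // prints: false
}
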
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 0ed37b79d7..c61e540c4a 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1324,7 +1324,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
.compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() },
else => {
switch (operand_ty.zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
// TODO convert this to mvn + and
const op_reg = switch (operand) {
.register => |r| r,
@@ -1355,8 +1355,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .register = dest_reg };
},
- .Vector => return self.fail("TODO bitwise not for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO bitwise not for vectors", .{}),
+ .int => {
const int_info = operand_ty.intInfo(zcu);
if (int_info.bits <= 64) {
const op_reg = switch (operand) {
@@ -1412,9 +1412,9 @@ fn minMax(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM min/max on floats", .{}),
- .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO ARM min/max on floats", .{}),
+ .vector => return self.fail("TODO ARM min/max on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -1903,9 +1903,9 @@ fn addSub(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO binary operations on floats", .{}),
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO binary operations on floats", .{}),
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -1965,8 +1965,8 @@ fn mul(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -1998,8 +1998,8 @@ fn divFloat(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO div_float", .{}),
- .Vector => return self.fail("TODO div_float on vectors", .{}),
+ .float => return self.fail("TODO div_float", .{}),
+ .vector => return self.fail("TODO div_float on vectors", .{}),
else => unreachable,
}
}
@@ -2015,9 +2015,9 @@ fn divTrunc(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO div on floats", .{}),
- .Vector => return self.fail("TODO div on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO div on floats", .{}),
+ .vector => return self.fail("TODO div on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -2050,9 +2050,9 @@ fn divFloor(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO div on floats", .{}),
- .Vector => return self.fail("TODO div on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO div on floats", .{}),
+ .vector => return self.fail("TODO div on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -2084,9 +2084,9 @@ fn divExact(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO div on floats", .{}),
- .Vector => return self.fail("TODO div on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO div on floats", .{}),
+ .vector => return self.fail("TODO div on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -2121,9 +2121,9 @@ fn rem(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO rem/zcu on floats", .{}),
- .Vector => return self.fail("TODO rem/zcu on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO rem/zcu on floats", .{}),
+ .vector => return self.fail("TODO rem/zcu on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -2193,9 +2193,9 @@ fn modulo(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO zcu on floats", .{}),
- .Vector => return self.fail("TODO zcu on vectors", .{}),
- .Int => return self.fail("TODO zcu on ints", .{}),
+ .float => return self.fail("TODO zcu on floats", .{}),
+ .vector => return self.fail("TODO zcu on vectors", .{}),
+ .int => return self.fail("TODO zcu on ints", .{}),
else => unreachable,
}
}
@@ -2212,8 +2212,8 @@ fn wrappingArithmetic(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
// Generate an add/sub/mul
@@ -2248,8 +2248,8 @@ fn bitwise(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -2284,8 +2284,8 @@ fn shiftExact(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -2335,8 +2335,8 @@ fn shiftNormal(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
// Generate a shl_exact/shr_exact
@@ -2376,7 +2376,7 @@ fn booleanOp(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2404,7 +2404,7 @@ fn ptrArithmetic(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Pointer => {
+ .pointer => {
assert(rhs_ty.eql(Type.usize, zcu));
const ptr_ty = lhs_ty;
@@ -2539,8 +2539,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
switch (int_info.bits) {
@@ -2667,8 +2667,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -2892,8 +2892,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, zcu)));
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -4279,8 +4279,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const ip = &zcu.intern_pool;
const fn_ty = switch (ty.zigTypeTag(zcu)) {
- .Fn => ty,
- .Pointer => ty.childType(zcu),
+ .@"fn" => ty,
+ .pointer => ty.childType(zcu),
else => unreachable,
};
@@ -4388,7 +4388,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
},
else => return self.fail("TODO implement calling bitcasted functions", .{}),
} else {
- assert(ty.zigTypeTag(zcu) == .Pointer);
+ assert(ty.zigTypeTag(zcu) == .pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(ty, .x30, mcv);
@@ -4523,7 +4523,7 @@ fn cmp(
const pt = self.pt;
const zcu = pt.zcu;
const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
- .Optional => blk: {
+ .optional => blk: {
const payload_ty = lhs_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :blk Type.u1;
@@ -4533,12 +4533,12 @@ fn cmp(
return self.fail("TODO ARM cmp non-pointer optionals", .{});
}
},
- .Float => return self.fail("TODO ARM cmp floats", .{}),
- .Enum => lhs_ty.intTagType(zcu),
- .Int => lhs_ty,
- .Bool => Type.u1,
- .Pointer => Type.usize,
- .ErrorSet => Type.u16,
+ .float => return self.fail("TODO ARM cmp floats", .{}),
+ .@"enum" => lhs_ty.intTagType(zcu),
+ .int => lhs_ty,
+ .bool => Type.u1,
+ .pointer => Type.usize,
+ .error_set => Type.u16,
else => unreachable,
};
@@ -6243,7 +6243,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
result.return_value = .{ .none = {} };
@@ -6301,7 +6301,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_align = 16;
},
.Unspecified => {
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
result.return_value = .{ .none = {} };
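The hunks above only change how the backend spells the `zigTypeTag` variants; the same lowercase names, with `@"..."` quoting for the keyword-like ones, are what `@typeInfo` switches look like after this change. A minimal sketch of the cmp-style dispatch, assuming a compiler that already ships the renamed tags; `CmpInt` is a made-up helper, not code from this diff:

    const std = @import("std");

    // Illustrative only: pick an integer type wide enough to compare values of T,
    // mirroring the lowercase tag names the cmp() hunk above switches on.
    fn CmpInt(comptime T: type) type {
        return switch (@typeInfo(T)) {
            .int => T,
            .bool => u1,
            .pointer => usize,
            .error_set => u16,
            .@"enum" => |info| info.tag_type,
            else => @compileError("unsupported type: " ++ @typeName(T)),
        };
    }

    test CmpInt {
        try std.testing.expect(CmpInt(bool) == u1);
        try std.testing.expect(CmpInt(*u8) == usize);
        try std.testing.expect(CmpInt(enum(u8) { a, b }) == u8);
    }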
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index b3926b8cc1..99c67d6761 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -20,7 +20,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
if (ty.containerLayout(zcu) == .@"packed") return .byval;
const float_count = countFloats(ty, zcu, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
@@ -30,7 +30,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
if (bit_size > 64) return .double_integer;
return .integer;
},
- .Union => {
+ .@"union" => {
if (ty.containerLayout(zcu) == .@"packed") return .byval;
const float_count = countFloats(ty, zcu, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
@@ -40,35 +40,35 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
if (bit_size > 64) return .double_integer;
return .integer;
},
- .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
- .Vector => {
+ .int, .@"enum", .error_set, .float, .bool => return .byval,
+ .vector => {
const bit_size = ty.bitSize(zcu);
// TODO is this controlled by a cpu feature?
if (bit_size > 128) return .memory;
return .byval;
},
- .Optional => {
+ .optional => {
std.debug.assert(ty.isPtrLikeOptional(zcu));
return .byval;
},
- .Pointer => {
+ .pointer => {
std.debug.assert(!ty.isSlice(zcu));
return .byval;
},
- .ErrorUnion,
- .Frame,
- .AnyFrame,
- .NoReturn,
- .Void,
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .Fn,
- .Opaque,
- .EnumLiteral,
- .Array,
+ .error_union,
+ .frame,
+ .@"anyframe",
+ .noreturn,
+ .void,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .@"fn",
+ .@"opaque",
+ .enum_literal,
+ .array,
=> unreachable,
}
}
@@ -79,7 +79,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
const target = zcu.getTarget();
const invalid = std.math.maxInt(u8);
switch (ty.zigTypeTag(zcu)) {
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(ty).?;
var max_count: u8 = 0;
for (union_obj.field_types.get(ip)) |field_ty| {
@@ -90,7 +90,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
}
return max_count;
},
- .Struct => {
+ .@"struct" => {
const fields_len = ty.structFieldCount(zcu);
var count: u8 = 0;
var i: u32 = 0;
@@ -103,7 +103,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
}
return count;
},
- .Float => {
+ .float => {
const float_bits = maybe_float_bits.* orelse {
maybe_float_bits.* = ty.floatBits(target);
return 1;
@@ -111,7 +111,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
if (ty.floatBits(target) == float_bits) return 1;
return invalid;
},
- .Void => return 0,
+ .void => return 0,
else => return invalid,
}
}
@@ -119,14 +119,14 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
if (getFloatArrayType(Type.fromInterned(field_ty), zcu)) |some| return some;
}
return null;
},
- .Struct => {
+ .@"struct" => {
const fields_len = ty.structFieldCount(zcu);
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
@@ -135,7 +135,7 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
}
return null;
},
- .Float => return ty,
+ .float => return ty,
else => return null,
}
}
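classifyType and countFloats above recurse over container fields; with the rename, the keyword-like tags are written `.@"struct"` and `.@"union"`. A hedged userland analogue of the countFloats walk, with made-up names and without the per-bit-width checks the ABI code performs:

    const std = @import("std");

    // Count float fields recursively, taking the maximum across union members,
    // the same shape of traversal countFloats performs inside the compiler.
    fn countFloatFields(comptime T: type) usize {
        return switch (@typeInfo(T)) {
            .float => 1,
            .void => 0,
            .@"struct" => |info| blk: {
                var n: usize = 0;
                inline for (info.fields) |field| n += countFloatFields(field.type);
                break :blk n;
            },
            .@"union" => |info| blk: {
                var max: usize = 0;
                inline for (info.fields) |field| max = @max(max, countFloatFields(field.type));
                break :blk max;
            },
            else => 0,
        };
    }

    test countFloatFields {
        const S = struct { a: f32, b: struct { c: f64, d: u8 } };
        try std.testing.expectEqual(@as(usize, 2), countFloatFields(S));
    }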
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 90bd360630..d693c06ec9 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1286,7 +1286,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
.cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
else => {
switch (operand_ty.zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
var op_reg: Register = undefined;
var dest_reg: Register = undefined;
@@ -1316,8 +1316,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .register = dest_reg };
},
- .Vector => return self.fail("TODO bitwise not for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO bitwise not for vectors", .{}),
+ .int => {
const int_info = operand_ty.intInfo(zcu);
if (int_info.bits <= 32) {
var op_reg: Register = undefined;
@@ -1375,9 +1375,9 @@ fn minMax(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM min/max on floats", .{}),
- .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO ARM min/max on floats", .{}),
+ .vector => return self.fail("TODO ARM min/max on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -1596,8 +1596,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits < 32) {
@@ -1710,8 +1710,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 16) {
@@ -1873,8 +1873,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const overflow_bit_offset: u32 = @intCast(tuple_ty.structFieldOffset(1, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -3021,7 +3021,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const field_ptr = try self.resolveInst(extra.field_ptr);
const struct_ty = ty_pl.ty.toType().childType(zcu);
- if (struct_ty.zigTypeTag(zcu) == .Union) {
+ if (struct_ty.zigTypeTag(zcu) == .@"union") {
return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
}
@@ -3411,9 +3411,9 @@ fn addSub(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -3468,9 +3468,9 @@ fn mul(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -3502,8 +3502,8 @@ fn divFloat(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
else => unreachable,
}
}
@@ -3519,9 +3519,9 @@ fn divTrunc(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -3563,9 +3563,9 @@ fn divFloor(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -3612,9 +3612,9 @@ fn divExact(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => return self.fail("TODO ARM div_exact", .{}),
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => return self.fail("TODO ARM div_exact", .{}),
else => unreachable,
}
}
@@ -3630,9 +3630,9 @@ fn rem(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -3700,9 +3700,9 @@ fn modulo(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO ARM binary operations on floats", .{}),
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => return self.fail("TODO ARM zcu", .{}),
+ .float => return self.fail("TODO ARM binary operations on floats", .{}),
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => return self.fail("TODO ARM zcu", .{}),
else => unreachable,
}
}
@@ -3719,8 +3719,8 @@ fn wrappingArithmetic(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
// Generate an add/sub/mul
@@ -3758,8 +3758,8 @@ fn bitwise(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
@@ -3804,8 +3804,8 @@ fn shiftExact(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3844,8 +3844,8 @@ fn shiftNormal(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO ARM binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 32) {
// Generate a shl_exact/shr_exact
@@ -3888,7 +3888,7 @@ fn booleanOp(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
const lhs_immediate = try lhs_bind.resolveToImmediate(self);
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3923,7 +3923,7 @@ fn ptrArithmetic(
const pt = self.pt;
const zcu = pt.zcu;
switch (lhs_ty.zigTypeTag(zcu)) {
- .Pointer => {
+ .pointer => {
assert(rhs_ty.eql(Type.usize, zcu));
const ptr_ty = lhs_ty;
@@ -4259,8 +4259,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const ip = &zcu.intern_pool;
const fn_ty = switch (ty.zigTypeTag(zcu)) {
- .Fn => ty,
- .Pointer => ty.childType(zcu),
+ .@"fn" => ty,
+ .pointer => ty.childType(zcu),
else => unreachable,
};
@@ -4337,7 +4337,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
return self.fail("TODO implement calling bitcasted functions", .{});
},
} else {
- assert(ty.zigTypeTag(zcu) == .Pointer);
+ assert(ty.zigTypeTag(zcu) == .pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.usize, .lr, mcv);
@@ -4494,7 +4494,7 @@ fn cmp(
const pt = self.pt;
const zcu = pt.zcu;
const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
- .Optional => blk: {
+ .optional => blk: {
const payload_ty = lhs_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :blk Type.u1;
@@ -4504,12 +4504,12 @@ fn cmp(
return self.fail("TODO ARM cmp non-pointer optionals", .{});
}
},
- .Float => return self.fail("TODO ARM cmp floats", .{}),
- .Enum => lhs_ty.intTagType(zcu),
- .Int => lhs_ty,
- .Bool => Type.u1,
- .Pointer => Type.usize,
- .ErrorSet => Type.u16,
+ .float => return self.fail("TODO ARM cmp floats", .{}),
+ .@"enum" => lhs_ty.intTagType(zcu),
+ .int => lhs_ty,
+ .bool => Type.u1,
+ .pointer => Type.usize,
+ .error_set => Type.u16,
else => unreachable,
};
@@ -6211,7 +6211,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
result.return_value = .{ .none = {} };
@@ -6258,7 +6258,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_align = 8;
},
.Unspecified => {
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
result.return_value = .{ .none = {} };
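airCall above now distinguishes callees as `.@"fn"` versus `.pointer`. The same two-way dispatch reads naturally from `@typeInfo`; a small sketch in which `ReturnType` is a hypothetical helper, not an API touched by this diff:

    const std = @import("std");

    // Resolve the return type of either a function type or a pointer-to-function
    // type, mirroring the .@"fn" / .pointer split used by airCall.
    fn ReturnType(comptime Callee: type) type {
        return switch (@typeInfo(Callee)) {
            .@"fn" => |info| info.return_type.?,
            .pointer => |ptr| @typeInfo(ptr.child).@"fn".return_type.?,
            else => @compileError("not callable: " ++ @typeName(Callee)),
        };
    }

    test ReturnType {
        const add = struct {
            fn add(a: u32, b: u32) u32 {
                return a + b;
            }
        }.add;
        try std.testing.expect(ReturnType(@TypeOf(add)) == u32);
        try std.testing.expect(ReturnType(*const @TypeOf(add)) == u32);
    }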
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index 718350164c..b29bb523ab 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -31,7 +31,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
const max_byval_size = 512;
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
const bit_size = ty.bitSize(zcu);
if (ty.containerLayout(zcu) == .@"packed") {
if (bit_size > 64) return .memory;
@@ -53,7 +53,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
}
return Class.arrSize(bit_size, 32);
},
- .Union => {
+ .@"union" => {
const bit_size = ty.bitSize(zcu);
const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
@@ -73,48 +73,48 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
}
return Class.arrSize(bit_size, 32);
},
- .Bool, .Float => return .byval,
- .Int => {
+ .bool, .float => return .byval,
+ .int => {
// TODO this is incorrect for _BitInt(128) but implementing
// this correctly makes implementing compiler-rt impossible.
// const bit_size = ty.bitSize(zcu);
// if (bit_size > 64) return .memory;
return .byval;
},
- .Enum, .ErrorSet => {
+ .@"enum", .error_set => {
const bit_size = ty.bitSize(zcu);
if (bit_size > 64) return .memory;
return .byval;
},
- .Vector => {
+ .vector => {
const bit_size = ty.bitSize(zcu);
// TODO is this controlled by a cpu feature?
if (ctx == .ret and bit_size > 128) return .memory;
if (bit_size > 512) return .memory;
return .byval;
},
- .Optional => {
+ .optional => {
assert(ty.isPtrLikeOptional(zcu));
return .byval;
},
- .Pointer => {
+ .pointer => {
assert(!ty.isSlice(zcu));
return .byval;
},
- .ErrorUnion,
- .Frame,
- .AnyFrame,
- .NoReturn,
- .Void,
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .Fn,
- .Opaque,
- .EnumLiteral,
- .Array,
+ .error_union,
+ .frame,
+ .@"anyframe",
+ .noreturn,
+ .void,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .@"fn",
+ .@"opaque",
+ .enum_literal,
+ .array,
=> unreachable,
}
}
@@ -125,7 +125,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
const target = zcu.getTarget();
const invalid = std.math.maxInt(u32);
switch (ty.zigTypeTag(zcu)) {
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(ty).?;
var max_count: u32 = 0;
for (union_obj.field_types.get(ip)) |field_ty| {
@@ -136,7 +136,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
}
return max_count;
},
- .Struct => {
+ .@"struct" => {
const fields_len = ty.structFieldCount(zcu);
var count: u32 = 0;
var i: u32 = 0;
@@ -149,7 +149,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
}
return count;
},
- .Float => {
+ .float => {
const float_bits = maybe_float_bits.* orelse {
const float_bits = ty.floatBits(target);
if (float_bits != 32 and float_bits != 64) return invalid;
@@ -159,7 +159,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
if (ty.floatBits(target) == float_bits) return 1;
return invalid;
},
- .Void => return 0,
+ .void => return 0,
else => return invalid,
}
}
diff --git a/src/arch/arm/bits.zig b/src/arch/arm/bits.zig
index 5802b90953..37386b9c62 100644
--- a/src/arch/arm/bits.zig
+++ b/src/arch/arm/bits.zig
@@ -1299,7 +1299,7 @@ pub const Instruction = union(enum) {
}
pub fn pop(cond: Condition, args: anytype) Instruction {
- if (@typeInfo(@TypeOf(args)) != .Struct) {
+ if (@typeInfo(@TypeOf(args)) != .@"struct") {
@compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
@@ -1323,7 +1323,7 @@ pub const Instruction = union(enum) {
}
pub fn push(cond: Condition, args: anytype) Instruction {
- if (@typeInfo(@TypeOf(args)) != .Struct) {
+ if (@typeInfo(@TypeOf(args)) != .@"struct") {
@compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
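push and pop keep their comptime guard; only the tag spelling changes, since tuples and structs both answer to `.@"struct"`. A standalone sketch of the same guard (`sumArgs` is hypothetical):

    const std = @import("std");

    // Reject anything that is not a tuple/struct at compile time, then unroll
    // over the elements, the same pattern Instruction.push/pop use for `args`.
    fn sumArgs(args: anytype) u32 {
        if (@typeInfo(@TypeOf(args)) != .@"struct") {
            @compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
        }
        var total: u32 = 0;
        inline for (args) |arg| total += arg;
        return total;
    }

    test sumArgs {
        try std.testing.expectEqual(@as(u32, 6), sumArgs(.{ 1, 2, 3 }));
    }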
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 2df0dbc328..a70618a394 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -675,14 +675,14 @@ fn restoreState(func: *Func, state: State, deaths: []const Air.Inst.Index, compt
) |inst, *tracking| tracking.resurrect(inst, state.scope_generation);
for (deaths) |death| try func.processDeath(death);
- const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).Array.len]RegisterLock;
+ const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock;
var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
if (opts.update_tracking)
{} else std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa);
var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity(
stack.get(),
- @typeInfo(ExpectedContents).Array.len,
+ @typeInfo(ExpectedContents).array.len,
);
defer if (!opts.update_tracking) {
for (reg_locks.items) |lock| func.register_manager.unlockReg(lock);
@@ -1382,7 +1382,7 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu)) {
- .Enum => {
+ .@"enum" => {
const enum_ty = Type.fromInterned(lazy_sym.ty);
wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
@@ -1945,7 +1945,7 @@ fn memSize(func: *Func, ty: Type) Memory.Size {
const pt = func.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)),
+ .float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)),
else => Memory.Size.fromByteSize(ty.abiSize(zcu)),
};
}
@@ -2094,24 +2094,24 @@ fn typeRegClass(func: *Func, ty: Type) abi.RegisterClass {
const pt = func.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Float => .float,
- .Vector => .vector,
+ .float => .float,
+ .vector => .vector,
else => .int,
};
}
fn regGeneralClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet {
return switch (ty.zigTypeTag(func.pt.zcu)) {
- .Float => abi.Registers.Float.general_purpose,
- .Vector => abi.Registers.Vector.general_purpose,
+ .float => abi.Registers.Float.general_purpose,
+ .vector => abi.Registers.Vector.general_purpose,
else => abi.Registers.Integer.general_purpose,
};
}
fn regTempClassForType(func: *Func, ty: Type) RegisterManager.RegisterBitSet {
return switch (ty.zigTypeTag(func.pt.zcu)) {
- .Float => abi.Registers.Float.temporary,
- .Vector => abi.Registers.Vector.general_purpose, // there are no temporary vector registers
+ .float => abi.Registers.Float.temporary,
+ .vector => abi.Registers.Vector.general_purpose, // there are no temporary vector registers
else => abi.Registers.Integer.temporary,
};
}
@@ -2122,8 +2122,8 @@ fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool
const bit_size = elem_ty.bitSize(zcu);
const min_size: u64 = switch (elem_ty.zigTypeTag(zcu)) {
- .Float => if (func.hasFeature(.d)) 64 else 32,
- .Vector => 256, // TODO: calculate it from avl * vsew
+ .float => if (func.hasFeature(.d)) 64 else 32,
+ .vector => 256, // TODO: calculate it from avl * vsew
else => 64,
};
@@ -2131,7 +2131,7 @@ fn allocRegOrMem(func: *Func, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool
if (func.register_manager.tryAllocReg(inst, func.regGeneralClassForType(elem_ty))) |reg| {
return .{ .register = reg };
}
- } else if (reg_ok and elem_ty.zigTypeTag(zcu) == .Vector) {
+ } else if (reg_ok and elem_ty.zigTypeTag(zcu) == .vector) {
return func.fail("did you forget to extend vector registers before allocating", .{});
}
@@ -2358,7 +2358,7 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void {
(try func.allocRegOrMem(func.typeOfIndex(inst), inst, true)).register;
switch (ty.zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
_ = try func.addInst(.{
.tag = .pseudo_not,
.data = .{
@@ -2369,7 +2369,7 @@ fn airNot(func: *Func, inst: Air.Inst.Index) !void {
},
});
},
- .Int => {
+ .int => {
const size = ty.bitSize(zcu);
if (!math.isPowerOfTwo(size))
return func.fail("TODO: airNot non-pow 2 int size", .{});
@@ -2485,7 +2485,7 @@ fn binOp(
// don't have support for certain sizes of addition
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => {}, // works differently and fails in a different place
+ .vector => {}, // works differently and fails in a different place
else => if (lhs_ty.bitSize(zcu) > 64) return func.fail("TODO: binOp >= 64 bits", .{}),
}
@@ -2578,7 +2578,7 @@ fn genBinOp(
}
switch (lhs_ty.zigTypeTag(zcu)) {
- .Int => {
+ .int => {
const mnem: Mnemonic = switch (tag) {
.add, .add_wrap => switch (bit_size) {
8, 16, 64 => .add,
@@ -2617,7 +2617,7 @@ fn genBinOp(
},
});
},
- .Float => {
+ .float => {
const mir_tag: Mnemonic = switch (tag) {
.add => switch (bit_size) {
32 => .fadds,
@@ -2648,7 +2648,7 @@ fn genBinOp(
},
});
},
- .Vector => {
+ .vector => {
const num_elem = lhs_ty.vectorLen(zcu);
const elem_size = lhs_ty.childType(zcu).bitSize(zcu);
@@ -2656,18 +2656,18 @@ fn genBinOp(
const mir_tag: Mnemonic = switch (tag) {
.add => switch (child_ty.zigTypeTag(zcu)) {
- .Int => .vaddvv,
- .Float => .vfaddvv,
+ .int => .vaddvv,
+ .float => .vfaddvv,
else => unreachable,
},
.sub => switch (child_ty.zigTypeTag(zcu)) {
- .Int => .vsubvv,
- .Float => .vfsubvv,
+ .int => .vsubvv,
+ .float => .vfsubvv,
else => unreachable,
},
.mul => switch (child_ty.zigTypeTag(zcu)) {
- .Int => .vmulvv,
- .Float => .vfmulvv,
+ .int => .vmulvv,
+ .float => .vfmulvv,
else => unreachable,
},
else => return func.fail("TODO: genBinOp {s} Vector", .{@tagName(tag)}),
@@ -2905,7 +2905,7 @@ fn genBinOp(
// s0 was 0, leaving a2 unchanged as a0.
.min, .max => {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Int => {
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
const mask_reg, const mask_lock = try func.allocReg(.int);
@@ -2981,8 +2981,8 @@ fn airAddWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return func.fail("TODO implement add with overflow for Vector type", .{}),
- .Int => {
+ .vector => return func.fail("TODO implement add with overflow for Vector type", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
const tuple_ty = func.typeOfIndex(inst);
@@ -3263,7 +3263,7 @@ fn airMulWithOverflow(func: *Func, inst: Air.Inst.Index) !void {
switch (lhs_ty.zigTypeTag(zcu)) {
else => |x| return func.fail("TODO: airMulWithOverflow {s}", .{@tagName(x)}),
- .Int => {
+ .int => {
if (std.debug.runtime_safety) assert(lhs_ty.eql(rhs_ty, zcu));
const trunc_reg = try func.copyToTmpRegister(lhs_ty, .{ .register = dest_reg });
@@ -4129,7 +4129,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
const operand = try func.resolveInst(ty_op.operand);
switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => if (ty.zigTypeTag(zcu) == .Vector) {
+ .int => if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
} else {
const int_info = scalar_ty.intInfo(zcu);
@@ -4182,7 +4182,7 @@ fn airAbs(func: *Func, inst: Air.Inst.Index) !void {
break :result return_mcv;
},
- .Float => {
+ .float => {
const float_bits = scalar_ty.floatBits(zcu.getTarget());
const mnem: Mnemonic = switch (float_bits) {
16 => return func.fail("TODO: airAbs 16-bit float", .{}),
@@ -4228,7 +4228,7 @@ fn airByteSwap(func: *Func, inst: Air.Inst.Index) !void {
const operand = try func.resolveInst(ty_op.operand);
switch (ty.zigTypeTag(zcu)) {
- .Int => {
+ .int => {
const int_bits = ty.intInfo(zcu).bits;
// bytes are no-op
@@ -4308,7 +4308,7 @@ fn airUnaryMath(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
defer func.register_manager.unlockReg(dst_lock);
switch (ty.zigTypeTag(zcu)) {
- .Float => {
+ .float => {
assert(dst_class == .float);
switch (operand_bit_size) {
@@ -4334,7 +4334,7 @@ fn airUnaryMath(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
else => return func.fail("TODO: airUnaryMath Float {s}", .{@tagName(tag)}),
}
},
- .Int => {
+ .int => {
assert(dst_class == .int);
switch (tag) {
@@ -4824,8 +4824,8 @@ fn genCall(
.air => |callee| fn_info: {
const callee_ty = func.typeOf(callee);
break :fn_info switch (callee_ty.zigTypeTag(zcu)) {
- .Fn => callee_ty,
- .Pointer => callee_ty.childType(zcu),
+ .@"fn" => callee_ty,
+ .pointer => callee_ty.childType(zcu),
else => unreachable,
};
},
@@ -4977,7 +4977,7 @@ fn genCall(
else => return func.fail("TODO implement calling bitcasted functions", .{}),
}
} else {
- assert(func.typeOf(callee).zigTypeTag(zcu) == .Pointer);
+ assert(func.typeOf(callee).zigTypeTag(zcu) == .pointer);
const addr_reg, const addr_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(addr_lock);
try func.genSetReg(Type.u64, addr_reg, .{ .air_ref = callee });
@@ -5105,20 +5105,20 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const lhs_ty = func.typeOf(bin_op.lhs);
switch (lhs_ty.zigTypeTag(zcu)) {
- .Int,
- .Enum,
- .Bool,
- .Pointer,
- .ErrorSet,
- .Optional,
+ .int,
+ .@"enum",
+ .bool,
+ .pointer,
+ .error_set,
+ .optional,
=> {
const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
- .Enum => lhs_ty.intTagType(zcu),
- .Int => lhs_ty,
- .Bool => Type.u1,
- .Pointer => Type.u64,
- .ErrorSet => Type.anyerror,
- .Optional => blk: {
+ .@"enum" => lhs_ty.intTagType(zcu),
+ .int => lhs_ty,
+ .bool => Type.u1,
+ .pointer => Type.u64,
+ .error_set => Type.anyerror,
+ .optional => blk: {
const payload_ty = lhs_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :blk Type.u1;
@@ -5138,7 +5138,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
return func.fail("TODO riscv cmp for ints > 64 bits", .{});
}
},
- .Float => {
+ .float => {
const float_bits = lhs_ty.floatBits(func.target.*);
const float_reg_size: u32 = if (func.hasFeature(.d)) 64 else 32;
if (float_bits > float_reg_size) {
@@ -7456,8 +7456,8 @@ fn airAtomicRmw(func: *Func, inst: Air.Inst.Index) !void {
return func.fail("TODO: airAtomicRmw non-pow 2", .{});
switch (val_ty.zigTypeTag(pt.zcu)) {
- .Enum, .Int => {},
- inline .Bool, .Float, .Pointer => |ty| return func.fail("TODO: airAtomicRmw {s}", .{@tagName(ty)}),
+ .@"enum", .int => {},
+ inline .bool, .float, .pointer => |ty| return func.fail("TODO: airAtomicRmw {s}", .{@tagName(ty)}),
else => unreachable,
}
@@ -7861,7 +7861,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
switch (result_ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
if (result_ty.containerLayout(zcu) == .@"packed") {
const struct_obj = zcu.typeToStruct(result_ty).?;
@@ -7916,7 +7916,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
}
break :result .{ .load_frame = .{ .index = frame_index } };
},
- .Array => {
+ .array => {
const elem_ty = result_ty.childType(zcu);
const frame_index = try func.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
const elem_size: u32 = @intCast(elem_ty.abiSize(zcu));
@@ -8099,7 +8099,7 @@ fn resolveCallingConventionValues(
result.stack_align = .@"16";
// Return values
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = InstTracking.init(.unreach);
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
result.return_value = InstTracking.init(.none);
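restoreState above reads the backing array length through `.array.len` rather than `.Array.len`. The accessor behaves the same from any code; a tiny check, with a plain array type standing in for `RegisterManager.TrackedRegisters`:

    const std = @import("std");

    test "array type info uses the lowercase field" {
        const TrackedRegisters = [32]u8; // stand-in for RegisterManager.TrackedRegisters
        try std.testing.expect(@typeInfo(TrackedRegisters).array.len == 32);
        try std.testing.expect(@typeInfo(TrackedRegisters).array.child == u8);
    }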
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index 5e8f57cc0b..3ce6313083 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -15,7 +15,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
const max_byval_size = target.ptrBitWidth() * 2;
switch (ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
const bit_size = ty.bitSize(zcu);
if (ty.containerLayout(zcu) == .@"packed") {
if (bit_size > max_byval_size) return .memory;
@@ -44,7 +44,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
if (bit_size > max_byval_size / 2) return .double_integer;
return .integer;
},
- .Union => {
+ .@"union" => {
const bit_size = ty.bitSize(zcu);
if (ty.containerLayout(zcu) == .@"packed") {
if (bit_size > max_byval_size) return .memory;
@@ -55,40 +55,40 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
if (bit_size > max_byval_size / 2) return .double_integer;
return .integer;
},
- .Bool => return .integer,
- .Float => return .byval,
- .Int, .Enum, .ErrorSet => {
+ .bool => return .integer,
+ .float => return .byval,
+ .int, .@"enum", .error_set => {
const bit_size = ty.bitSize(zcu);
if (bit_size > max_byval_size) return .memory;
return .byval;
},
- .Vector => {
+ .vector => {
const bit_size = ty.bitSize(zcu);
if (bit_size > max_byval_size) return .memory;
return .integer;
},
- .Optional => {
+ .optional => {
std.debug.assert(ty.isPtrLikeOptional(zcu));
return .byval;
},
- .Pointer => {
+ .pointer => {
std.debug.assert(!ty.isSlice(zcu));
return .byval;
},
- .ErrorUnion,
- .Frame,
- .AnyFrame,
- .NoReturn,
- .Void,
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .Fn,
- .Opaque,
- .EnumLiteral,
- .Array,
+ .error_union,
+ .frame,
+ .@"anyframe",
+ .noreturn,
+ .void,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .@"fn",
+ .@"opaque",
+ .enum_literal,
+ .array,
=> unreachable,
}
}
@@ -104,11 +104,11 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
.none, .none, .none, .none,
};
switch (ty.zigTypeTag(zcu)) {
- .Bool, .Void, .NoReturn => {
+ .bool, .void, .noreturn => {
result[0] = .integer;
return result;
},
- .Pointer => switch (ty.ptrSize(zcu)) {
+ .pointer => switch (ty.ptrSize(zcu)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@@ -119,14 +119,14 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
return result;
},
},
- .Optional => {
+ .optional => {
if (ty.isPtrLikeOptional(zcu)) {
result[0] = .integer;
return result;
}
return memory_class;
},
- .Int, .Enum, .ErrorSet => {
+ .int, .@"enum", .error_set => {
const int_bits = ty.intInfo(zcu).bits;
if (int_bits <= 64) {
result[0] = .integer;
@@ -139,7 +139,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
}
unreachable; // support > 128 bit int arguments
},
- .Float => {
+ .float => {
const target = zcu.getTarget();
const features = target.cpu.features;
@@ -151,7 +151,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
}
unreachable; // support split float args
},
- .ErrorUnion => {
+ .error_union => {
const payload_ty = ty.errorUnionPayload(zcu);
const payload_bits = payload_ty.bitSize(zcu);
@@ -163,7 +163,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
return memory_class;
},
- .Struct, .Union => {
+ .@"struct", .@"union" => {
const layout = ty.containerLayout(zcu);
const ty_size = ty.abiSize(zcu);
@@ -176,7 +176,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
return memory_class;
},
- .Array => {
+ .array => {
const ty_size = ty.abiSize(zcu);
if (ty_size <= 8) {
result[0] = .integer;
@@ -189,7 +189,7 @@ pub fn classifySystem(ty: Type, zcu: *Zcu) [8]SystemClass {
}
return memory_class;
},
- .Vector => {
+ .vector => {
// we pass vectors through integer registers if they are small enough to fit.
const vec_bits = ty.totalVectorBits(zcu);
if (vec_bits <= 64) {
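classifySystem treats a pointer-like optional as a single integer and sends other optionals to memory. A simplified userland version of that distinction, which glosses over the allowzero and C-pointer cases the compiler's isPtrLikeOptional also has to consider:

    const std = @import("std");

    // Rough approximation: ?*T reuses the pointer's null value, so it fits one
    // integer register; a general ?T needs an extra flag and may not.
    fn isPtrLikeOptional(comptime T: type) bool {
        return switch (@typeInfo(T)) {
            .optional => |opt| @typeInfo(opt.child) == .pointer,
            else => false,
        };
    }

    test isPtrLikeOptional {
        try std.testing.expect(isPtrLikeOptional(?*u8));
        try std.testing.expect(!isPtrLikeOptional(?u8));
        try std.testing.expect(@sizeOf(?*u8) == @sizeOf(*u8));
    }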
diff --git a/src/arch/riscv64/bits.zig b/src/arch/riscv64/bits.zig
index 9899345c0a..5427a87e01 100644
--- a/src/arch/riscv64/bits.zig
+++ b/src/arch/riscv64/bits.zig
@@ -115,7 +115,7 @@ pub const Immediate = union(enum) {
}
pub fn asBits(imm: Immediate, comptime T: type) T {
- const int_info = @typeInfo(T).Int;
+ const int_info = @typeInfo(T).int;
if (int_info.signedness != .unsigned) @compileError("Immediate.asBits needs unsigned T");
return switch (imm) {
.signed => |x| @bitCast(@as(std.meta.Int(.signed, int_info.bits), @intCast(x))),
@@ -189,7 +189,7 @@ pub const Register = enum(u8) {
/// The goal of this function is to return the same ID for `zero` and `x0` but two
/// separate IDs for `x0` and `f0`. We will assume that each register set has 32 registers
/// and is repeated twice, once for the named version, once for the number version.
- pub fn id(reg: Register) std.math.IntFittingRange(0, @typeInfo(Register).Enum.fields.len) {
+ pub fn id(reg: Register) std.math.IntFittingRange(0, @typeInfo(Register).@"enum".fields.len) {
const base = switch (@intFromEnum(reg)) {
// zig fmt: off
@intFromEnum(Register.zero) ... @intFromEnum(Register.x31) => @intFromEnum(Register.zero),
@@ -252,7 +252,7 @@ pub const FrameIndex = enum(u32) {
/// Other indices are used for local variable stack slots
_,
- pub const named_count = @typeInfo(FrameIndex).Enum.fields.len;
+ pub const named_count = @typeInfo(FrameIndex).@"enum".fields.len;
pub fn isNamed(fi: FrameIndex) bool {
return @intFromEnum(fi) < named_count;
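Immediate.asBits and Register.id above now read integer and enum details through `.int` and `.@"enum"`. A hedged re-creation of the asBits shape outside the compiler, reduced to the signed-to-unsigned case:

    const std = @import("std");

    // Reinterpret a signed value as its unsigned twin of the same width, pulling
    // the bit width out of @typeInfo(T).int as the Immediate code does.
    fn asBits(value: anytype, comptime T: type) T {
        const int_info = @typeInfo(T).int;
        if (int_info.signedness != .unsigned) @compileError("asBits needs an unsigned T");
        return @bitCast(@as(std.meta.Int(.signed, int_info.bits), value));
    }

    test asBits {
        try std.testing.expectEqual(@as(u8, 0xff), asBits(@as(i8, -1), u8));
    }

    test "enum field count uses the lowercase tag" {
        const Cond = enum { eq, ne, lt, ge };
        try std.testing.expect(@typeInfo(Cond).@"enum".fields.len == 4);
    }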
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index e6c5a6d217..2cb07fb25f 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -770,8 +770,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
switch (int_info.bits) {
@@ -1231,8 +1231,8 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
switch (operand_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO byteswap for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO byteswap for vectors", .{}),
+ .int => {
const int_info = operand_ty.intInfo(zcu);
if (int_info.bits == 8) break :result operand;
@@ -1310,8 +1310,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const fn_ty = switch (ty.zigTypeTag(zcu)) {
- .Fn => ty,
- .Pointer => ty.childType(zcu),
+ .@"fn" => ty,
+ .pointer => ty.childType(zcu),
else => unreachable,
};
@@ -1363,7 +1363,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
return self.fail("TODO implement calling bitcasted functions", .{});
},
} else {
- assert(ty.zigTypeTag(zcu) == .Pointer);
+ assert(ty.zigTypeTag(zcu) == .pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(ty, .o7, mcv);
@@ -1419,13 +1419,13 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const lhs_ty = self.typeOf(bin_op.lhs);
const int_ty = switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => unreachable, // Handled by cmp_vector.
- .Enum => lhs_ty.intTagType(zcu),
- .Int => lhs_ty,
- .Bool => Type.u1,
- .Pointer => Type.usize,
- .ErrorSet => Type.u16,
- .Optional => blk: {
+ .vector => unreachable, // Handled by cmp_vector.
+ .@"enum" => lhs_ty.intTagType(zcu),
+ .int => lhs_ty,
+ .bool => Type.u1,
+ .pointer => Type.usize,
+ .error_set => Type.u16,
+ .optional => blk: {
const payload_ty = lhs_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
break :blk Type.u1;
@@ -1435,7 +1435,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{});
}
},
- .Float => return self.fail("TODO SPARCv9 cmp floats", .{}),
+ .float => return self.fail("TODO SPARCv9 cmp floats", .{}),
else => unreachable,
};
@@ -2031,8 +2031,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
switch (int_info.bits) {
@@ -2105,7 +2105,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
},
else => {
switch (operand_ty.zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
const op_reg = switch (operand) {
.register => |r| r,
else => try self.copyToTmpRegister(operand_ty, operand),
@@ -2136,8 +2136,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .register = dest_reg };
},
- .Vector => return self.fail("TODO bitwise not for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO bitwise not for vectors", .{}),
+ .int => {
const int_info = operand_ty.intInfo(zcu);
if (int_info.bits <= 64) {
const op_reg = switch (operand) {
@@ -2329,8 +2329,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(extra.rhs);
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
try self.spillConditionFlagsIfOccupied();
@@ -2858,9 +2858,9 @@ fn binOp(
.cmp_eq,
=> {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO binary operations on floats", .{}),
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO binary operations on floats", .{}),
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -2932,8 +2932,8 @@ fn binOp(
// Truncate if necessary
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
const result_reg = result.register;
@@ -2949,8 +2949,8 @@ fn binOp(
.div_trunc => {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
assert(lhs_ty.eql(rhs_ty, zcu));
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
@@ -2982,7 +2982,7 @@ fn binOp(
.ptr_add => {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Pointer => {
+ .pointer => {
const ptr_ty = lhs_ty;
const elem_ty = switch (ptr_ty.ptrSize(zcu)) {
.One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
@@ -3014,7 +3014,7 @@ fn binOp(
.bool_or,
=> {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
assert(lhs != .immediate); // should have been handled by Sema
assert(rhs != .immediate); // should have been handled by Sema
@@ -3044,8 +3044,8 @@ fn binOp(
// Truncate if necessary
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
// 32 and 64 bit operands don't need truncating
@@ -3066,8 +3066,8 @@ fn binOp(
.shr_exact,
=> {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO binary operations on vectors", .{}),
- .Int => {
+ .vector => return self.fail("TODO binary operations on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
const rhs_immediate_ok = rhs == .immediate;
@@ -4329,9 +4329,9 @@ fn minMax(
const zcu = pt.zcu;
assert(lhs_ty.eql(rhs_ty, zcu));
switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => return self.fail("TODO min/max on floats", .{}),
- .Vector => return self.fail("TODO min/max on vectors", .{}),
- .Int => {
+ .float => return self.fail("TODO min/max on floats", .{}),
+ .vector => return self.fail("TODO min/max on vectors", .{}),
+ .int => {
const int_info = lhs_ty.intInfo(zcu);
if (int_info.bits <= 64) {
// TODO skip register setting when one of the operands
@@ -4515,7 +4515,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
result.stack_byte_count = next_stack_offset;
result.stack_align = .@"16";
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBits(zcu)) {
result.return_value = .{ .none = {} };
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 7627d3b277..a3a51c7233 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1004,19 +1004,19 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
return switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(target)) {
+ .float => switch (ty.floatBits(target)) {
16 => .i32, // stored/loaded as u16
32 => .f32,
64 => .f64,
80, 128 => .i32,
else => unreachable,
},
- .Int, .Enum => switch (ty.intInfo(zcu).bits) {
+ .int, .@"enum" => switch (ty.intInfo(zcu).bits) {
0...32 => .i32,
33...64 => .i64,
else => .i32,
},
- .Struct => blk: {
+ .@"struct" => blk: {
if (zcu.typeToPackedStruct(ty)) |packed_struct| {
const backing_int_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
break :blk typeToValtype(backing_int_ty, pt, target);
@@ -1024,11 +1024,11 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype {
break :blk .i32;
}
},
- .Vector => switch (determineSimdStoreStrategy(ty, zcu, target)) {
+ .vector => switch (determineSimdStoreStrategy(ty, zcu, target)) {
.direct => .v128,
.unrolled => .i32,
},
- .Union => switch (ty.containerLayout(zcu)) {
+ .@"union" => switch (ty.containerLayout(zcu)) {
.@"packed" => blk: {
const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(zcu)))) catch @panic("out of memory");
break :blk typeToValtype(int_ty, pt, target);
@@ -1430,7 +1430,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
const ty_classes = abi.classifyType(ty, zcu);
assert(ty_classes[0] != .none);
switch (ty.zigTypeTag(zcu)) {
- .Struct, .Union => {
+ .@"struct", .@"union" => {
if (ty_classes[0] == .indirect) {
return func.lowerToStack(value);
}
@@ -1445,7 +1445,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
else => try func.emitWValue(value),
}
},
- .Int, .Float => {
+ .int, .float => {
if (ty_classes[1] == .none) {
return func.lowerToStack(value);
}
@@ -1719,27 +1719,27 @@ fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Type,
- .ComptimeInt,
- .ComptimeFloat,
- .EnumLiteral,
- .Undefined,
- .Null,
- .Opaque,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .enum_literal,
+ .undefined,
+ .null,
+ .@"opaque",
=> unreachable,
- .NoReturn,
- .Void,
- .Bool,
- .ErrorSet,
- .Fn,
- .AnyFrame,
+ .noreturn,
+ .void,
+ .bool,
+ .error_set,
+ .@"fn",
+ .@"anyframe",
=> return false,
- .Array,
- .Frame,
+ .array,
+ .frame,
=> return ty.hasRuntimeBitsIgnoreComptime(zcu),
- .Union => {
+ .@"union" => {
if (zcu.typeToUnion(ty)) |union_obj| {
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
return ty.abiSize(zcu) > 8;
@@ -1747,30 +1747,30 @@ fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool {
}
return ty.hasRuntimeBitsIgnoreComptime(zcu);
},
- .Struct => {
+ .@"struct" => {
if (zcu.typeToPackedStruct(ty)) |packed_struct| {
return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt, target);
}
return ty.hasRuntimeBitsIgnoreComptime(zcu);
},
- .Vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
- .Int => return ty.intInfo(zcu).bits > 64,
- .Enum => return ty.intInfo(zcu).bits > 64,
- .Float => return ty.floatBits(target) > 64,
- .ErrorUnion => {
+ .vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
+ .int => return ty.intInfo(zcu).bits > 64,
+ .@"enum" => return ty.intInfo(zcu).bits > 64,
+ .float => return ty.floatBits(target) > 64,
+ .error_union => {
const pl_ty = ty.errorUnionPayload(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return false;
}
return true;
},
- .Optional => {
+ .optional => {
if (ty.isPtrLikeOptional(zcu)) return false;
const pl_type = ty.optionalChild(zcu);
- if (pl_type.zigTypeTag(zcu) == .ErrorSet) return false;
+ if (pl_type.zigTypeTag(zcu) == .error_set) return false;
return pl_type.hasRuntimeBitsIgnoreComptime(zcu);
},
- .Pointer => {
+ .pointer => {
// Slices act like structs and will be passed by reference
if (ty.isSlice(zcu)) return true;
return false;
@@ -1788,7 +1788,7 @@ const SimdStoreStrategy = enum {
/// features are enabled, the function will return `.direct`. This would allow storing
/// it with a single instruction, rather than an unrolled version.
fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: std.Target) SimdStoreStrategy {
- std.debug.assert(ty.zigTypeTag(zcu) == .Vector);
+ std.debug.assert(ty.zigTypeTag(zcu) == .vector);
if (ty.bitSize(zcu) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
const features = target.cpu.features;
@@ -2106,7 +2106,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
} else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
switch (ret_ty.zigTypeTag(zcu)) {
// Aggregate types can be lowered as a singular value
- .Struct, .Union => {
+ .@"struct", .@"union" => {
const scalar_type = abi.scalarType(ret_ty, zcu);
try func.emitWValue(operand);
const opcode = buildOpcode(.{
@@ -2189,8 +2189,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const fn_ty = switch (ty.zigTypeTag(zcu)) {
- .Fn => ty,
- .Pointer => ty.childType(zcu),
+ .@"fn" => ty,
+ .pointer => ty.childType(zcu),
else => unreachable,
};
const ret_ty = fn_ty.fnReturnType(zcu);
@@ -2261,7 +2261,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else {
// in this case we call a function pointer
// so load its value onto the stack
- std.debug.assert(ty.zigTypeTag(zcu) == .Pointer);
+ std.debug.assert(ty.zigTypeTag(zcu) == .pointer);
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
@@ -2281,7 +2281,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
- } else if (zcu.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(zcu) == .Struct or ret_ty.zigTypeTag(zcu) == .Union) {
+ } else if (zcu.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
const scalar_type = abi.scalarType(ret_ty, zcu);
@@ -2371,7 +2371,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const zcu = pt.zcu;
const abi_size = ty.abiSize(zcu);
switch (ty.zigTypeTag(zcu)) {
- .ErrorUnion => {
+ .error_union => {
const pl_ty = ty.errorUnionPayload(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return func.store(lhs, rhs, Type.anyerror, 0);
@@ -2380,7 +2380,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Optional => {
+ .optional => {
if (ty.isPtrLikeOptional(zcu)) {
return func.store(lhs, rhs, Type.usize, 0);
}
@@ -2388,18 +2388,18 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return func.store(lhs, rhs, Type.u8, 0);
}
- if (pl_ty.zigTypeTag(zcu) == .ErrorSet) {
+ if (pl_ty.zigTypeTag(zcu) == .error_set) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Struct, .Array, .Union => if (isByRef(ty, pt, func.target.*)) {
+ .@"struct", .array, .@"union" => if (isByRef(ty, pt, func.target.*)) {
const len = @as(u32, @intCast(abi_size));
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Vector => switch (determineSimdStoreStrategy(ty, zcu, func.target.*)) {
+ .vector => switch (determineSimdStoreStrategy(ty, zcu, func.target.*)) {
.unrolled => {
const len: u32 = @intCast(abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2418,7 +2418,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
},
- .Pointer => {
+ .pointer => {
if (ty.isSlice(zcu)) {
// store pointer first
// lower it to the stack so we do not have to store rhs into a local first
@@ -2433,7 +2433,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return;
}
},
- .Int, .Enum, .Float => if (abi_size > 8 and abi_size <= 16) {
+ .int, .@"enum", .float => if (abi_size > 8 and abi_size <= 16) {
try func.emitWValue(lhs);
const lsb = try func.load(rhs, Type.u64, 0);
try func.store(.stack, lsb, Type.u64, 0 + lhs.offset());
@@ -2521,7 +2521,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
// load local's value from memory by its stack position
try func.emitWValue(operand);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
// TODO: Add helper functions for simd opcodes
const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
// stores as := opcode, offset, alignment (opcode::memarg)
@@ -2571,7 +2571,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
        // When an argument is passed using more than a single parameter,
        // we combine the parameters into a single stack value
if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
- if (arg_ty.zigTypeTag(zcu) != .Int and arg_ty.zigTypeTag(zcu) != .Float) {
+ if (arg_ty.zigTypeTag(zcu) != .int and arg_ty.zigTypeTag(zcu) != .float) {
return func.fail(
"TODO: Implement C-ABI argument for type '{}'",
.{arg_ty.fmt(pt)},
@@ -2647,7 +2647,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
}
if (isByRef(ty, pt, func.target.*)) {
- if (ty.zigTypeTag(zcu) == .Int) {
+ if (ty.zigTypeTag(zcu) == .int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
return func.fail(
@@ -2822,7 +2822,7 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const scalar_ty = ty.scalarType(zcu);
switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => if (ty.zigTypeTag(zcu) == .Vector) {
+ .int => if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
} else {
const int_bits = ty.intInfo(zcu).bits;
@@ -2887,7 +2887,7 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
},
- .Float => {
+ .float => {
const result = try func.floatOp(.fabs, ty, &.{operand});
return func.finishAir(inst, result, &.{ty_op.operand});
},
@@ -2907,7 +2907,7 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError
fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
const pt = func.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: Implement floatOps for vectors", .{});
}
@@ -3021,7 +3021,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const lhs_ty = func.typeOf(bin_op.lhs);
const rhs_ty = func.typeOf(bin_op.rhs);
- if (lhs_ty.zigTypeTag(zcu) == .Vector or rhs_ty.zigTypeTag(zcu) == .Vector) {
+ if (lhs_ty.zigTypeTag(zcu) == .vector or rhs_ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
@@ -3139,7 +3139,7 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
const base_ptr = Value.fromInterned(field.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
- .Pointer => off: {
+ .pointer => off: {
assert(base_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
@@ -3147,11 +3147,11 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
else => unreachable,
};
},
- .Struct => switch (base_ty.containerLayout(zcu)) {
+ .@"struct" => switch (base_ty.containerLayout(zcu)) {
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
- .Union => switch (base_ty.containerLayout(zcu)) {
+ .@"union" => switch (base_ty.containerLayout(zcu)) {
.auto => off: {
// Keep in sync with the `un` case of `generateSymbol`.
const layout = base_ty.unionGetLayout(zcu);
@@ -3184,7 +3184,7 @@ fn lowerUavRef(
const zcu = pt.zcu;
const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav.val));
- const is_fn_body = ty.zigTypeTag(zcu) == .Fn;
+ const is_fn_body = ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return .{ .imm32 = 0xaaaaaaaa };
}
@@ -3404,34 +3404,34 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Bool, .ErrorSet => return .{ .imm32 = 0xaaaaaaaa },
- .Int, .Enum => switch (ty.intInfo(zcu).bits) {
+ .bool, .error_set => return .{ .imm32 = 0xaaaaaaaa },
+ .int, .@"enum" => switch (ty.intInfo(zcu).bits) {
0...32 => return .{ .imm32 = 0xaaaaaaaa },
33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
},
- .Float => switch (ty.floatBits(func.target.*)) {
+ .float => switch (ty.floatBits(func.target.*)) {
16 => return .{ .imm32 = 0xaaaaaaaa },
32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
else => unreachable,
},
- .Pointer => switch (func.arch()) {
+ .pointer => switch (func.arch()) {
.wasm32 => return .{ .imm32 = 0xaaaaaaaa },
.wasm64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
},
- .Optional => {
+ .optional => {
const pl_ty = ty.optionalChild(zcu);
if (ty.optionalReprIsPayload(zcu)) {
return func.emitUndefined(pl_ty);
}
return .{ .imm32 = 0xaaaaaaaa };
},
- .ErrorUnion => {
+ .error_union => {
return .{ .imm32 = 0xaaaaaaaa };
},
- .Struct => {
+ .@"struct" => {
const packed_struct = zcu.typeToPackedStruct(ty).?;
return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
},
@@ -3604,7 +3604,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
assert(!(lhs != .stack and rhs == .stack));
const pt = func.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Optional and !ty.optionalReprIsPayload(zcu)) {
+ if (ty.zigTypeTag(zcu) == .optional and !ty.optionalReprIsPayload(zcu)) {
const payload_ty = ty.optionalChild(zcu);
if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// When we hit this case, we must check the value of optionals
@@ -3620,7 +3620,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
const signedness: std.builtin.Signedness = blk: {
        // by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (ty.zigTypeTag(zcu) != .Int) break :blk .unsigned;
+ if (ty.zigTypeTag(zcu) != .int) break :blk .unsigned;
        // in case of an actual integer, we emit the correct signedness
break :blk ty.intInfo(zcu).signedness;
@@ -3743,7 +3743,7 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const zcu = pt.zcu;
const result = result: {
- if (operand_ty.zigTypeTag(zcu) == .Bool) {
+ if (operand_ty.zigTypeTag(zcu) == .bool) {
try func.emitWValue(operand);
try func.addTag(.i32_eqz);
const not_tmp = try func.allocLocal(operand_ty);
@@ -3922,14 +3922,14 @@ fn structFieldPtr(
const offset = switch (struct_ty.containerLayout(zcu)) {
.@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
- .Struct => offset: {
+ .@"struct" => offset: {
if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) {
break :offset @as(u32, 0);
}
const struct_type = zcu.typeToStruct(struct_ty).?;
break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
},
- .Union => 0,
+ .@"union" => 0,
else => unreachable,
},
else => struct_ty.structFieldOffset(index, zcu),
@@ -3961,7 +3961,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result: WValue = switch (struct_ty.containerLayout(zcu)) {
.@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
- .Struct => result: {
+ .@"struct" => result: {
const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
@@ -3981,7 +3981,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else
try func.binOp(operand, const_wvalue, backing_ty, .shr);
- if (field_ty.zigTypeTag(zcu) == .Float) {
+ if (field_ty.zigTypeTag(zcu) == .float) {
const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
break :result try func.bitcast(field_ty, int_type, truncated);
@@ -3995,7 +3995,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
break :result try func.trunc(shifted_value, field_ty, backing_ty);
},
- .Union => result: {
+ .@"union" => result: {
if (isByRef(struct_ty, pt, func.target.*)) {
if (!isByRef(field_ty, pt, func.target.*)) {
break :result try func.load(operand, field_ty, 0);
@@ -4007,7 +4007,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(zcu))));
- if (field_ty.zigTypeTag(zcu) == .Float) {
+ if (field_ty.zigTypeTag(zcu) == .float) {
const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
const truncated = try func.trunc(operand, int_type, union_int_type);
break :result try func.bitcast(field_ty, int_type, truncated);
@@ -4136,7 +4136,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// for errors that are not present in any branch. This is fine as this default
// case will never be hit for those cases but we do save runtime cost and size
// by using a jump table for this instead of if-else chains.
- break :blk if (has_else_body or target_ty.zigTypeTag(zcu) == .ErrorSet) switch_br.cases_len else unreachable;
+ break :blk if (has_else_body or target_ty.zigTypeTag(zcu) == .error_set) switch_br.cases_len else unreachable;
};
func.mir_extra.appendAssumeCapacity(idx);
} else if (has_else_body) {
@@ -4147,7 +4147,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const signedness: std.builtin.Signedness = blk: {
        // by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (target_ty.zigTypeTag(zcu) != .Int) break :blk .unsigned;
+ if (target_ty.zigTypeTag(zcu) != .int) break :blk .unsigned;
        // in case of an actual integer, we emit the correct signedness
break :blk target_ty.intInfo(zcu).signedness;
@@ -4364,7 +4364,7 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand_ty = func.typeOf(ty_op.operand);
const pt = func.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector or operand_ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector or operand_ty.zigTypeTag(zcu) == .vector) {
return func.fail("todo Wasm intcast for vectors", .{});
}
if (ty.abiSize(zcu) > 16 or operand_ty.abiSize(zcu) > 16) {
@@ -4682,7 +4682,7 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
const zcu = pt.zcu;
- if (wanted_ty.zigTypeTag(zcu) == .Vector or op_ty.zigTypeTag(zcu) == .Vector) {
+ if (wanted_ty.zigTypeTag(zcu) == .vector or op_ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: trunc for vectors", .{});
}
@@ -4990,7 +4990,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
- std.debug.assert(array_ty.zigTypeTag(zcu) == .Vector);
+ std.debug.assert(array_ty.zigTypeTag(zcu) == .vector);
switch (index) {
inline .imm32, .imm64 => |lane| {
@@ -5281,7 +5281,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result: WValue = result_value: {
switch (result_ty.zigTypeTag(zcu)) {
- .Array => {
+ .array => {
const result = try func.allocStack(result_ty);
const elem_ty = result_ty.childType(zcu);
const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
@@ -5320,7 +5320,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
break :result_value result;
},
- .Struct => switch (result_ty.containerLayout(zcu)) {
+ .@"struct" => switch (result_ty.containerLayout(zcu)) {
.@"packed" => {
if (isByRef(result_ty, pt, func.target.*)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
@@ -5386,7 +5386,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result_value result;
},
},
- .Vector => return func.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
+ .vector => return func.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
else => unreachable,
}
};
@@ -5458,7 +5458,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
} else {
const operand = try func.resolveInst(extra.init);
const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(zcu))));
- if (field_ty.zigTypeTag(zcu) == .Float) {
+ if (field_ty.zigTypeTag(zcu) == .float) {
const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
const bitcasted = try func.bitcast(field_ty, int_type, operand);
break :result try func.trunc(bitcasted, int_type, union_int_type);
@@ -5810,7 +5810,7 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- if (op_ty.zigTypeTag(zcu) == .Vector) {
+ if (op_ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: Implement @popCount for vectors", .{});
}
@@ -5862,7 +5862,7 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOf(ty_op.operand);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: Implement @bitReverse for vectors", .{});
}
@@ -6028,7 +6028,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const pt = func.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
@@ -6075,7 +6075,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOf(extra.lhs);
const rhs_ty = func.typeOf(extra.rhs);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
@@ -6121,7 +6121,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = func.pt;
const zcu = pt.zcu;
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
@@ -6251,7 +6251,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = func.typeOfIndex(inst);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
}
@@ -6262,7 +6262,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- if (ty.zigTypeTag(zcu) == .Float) {
+ if (ty.zigTypeTag(zcu) == .float) {
var fn_name_buf: [64]u8 = undefined;
const float_bits = ty.floatBits(func.target.*);
const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{
@@ -6292,7 +6292,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
const ty = func.typeOfIndex(inst);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: `@mulAdd` for vectors", .{});
}
@@ -6326,7 +6326,7 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ty = func.typeOf(ty_op.operand);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: `@clz` for vectors", .{});
}
@@ -6378,7 +6378,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOf(ty_op.operand);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: `@ctz` for vectors", .{});
}
@@ -6571,7 +6571,7 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
return func.fail("TODO: @byteSwap for vectors", .{});
}
const int_info = ty.intInfo(zcu);
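
Note on the pattern above: the change throughout this file is purely mechanical. The type-tag names returned by zigTypeTag move from CamelCase to lower-case, and tags whose new names collide with Zig keywords are escaped with @"...". A minimal sketch of the new spelling (hypothetical helper, not part of this commit):

    fn isAggregate(ty: Type, zcu: *Zcu) bool {
        return switch (ty.zigTypeTag(zcu)) {
            // keyword-like tags must be quoted; plain identifiers need no escape
            .@"struct", .@"union", .array, .vector => true,
            else => false,
        };
    }
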
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index b6e66ad85e..5a1d2fdb0b 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -27,7 +27,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
const target = zcu.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return none;
switch (ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
const struct_type = zcu.typeToStruct(ty).?;
if (struct_type.layout == .@"packed") {
if (ty.bitSize(zcu) <= 64) return direct;
@@ -45,30 +45,30 @@ pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
}
return classifyType(field_ty, zcu);
},
- .Int, .Enum, .ErrorSet => {
+ .int, .@"enum", .error_set => {
const int_bits = ty.intInfo(zcu).bits;
if (int_bits <= 64) return direct;
if (int_bits <= 128) return .{ .direct, .direct };
return memory;
},
- .Float => {
+ .float => {
const float_bits = ty.floatBits(target);
if (float_bits <= 64) return direct;
if (float_bits <= 128) return .{ .direct, .direct };
return memory;
},
- .Bool => return direct,
- .Vector => return direct,
- .Array => return memory,
- .Optional => {
+ .bool => return direct,
+ .vector => return direct,
+ .array => return memory,
+ .optional => {
assert(ty.isPtrLikeOptional(zcu));
return direct;
},
- .Pointer => {
+ .pointer => {
assert(!ty.isSlice(zcu));
return direct;
},
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout == .@"packed") {
if (ty.bitSize(zcu) <= 64) return direct;
@@ -80,19 +80,19 @@ pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, zcu);
},
- .ErrorUnion,
- .Frame,
- .AnyFrame,
- .NoReturn,
- .Void,
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .Fn,
- .Opaque,
- .EnumLiteral,
+ .error_union,
+ .frame,
+ .@"anyframe",
+ .noreturn,
+ .void,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .@"fn",
+ .@"opaque",
+ .enum_literal,
=> unreachable,
}
}
@@ -103,7 +103,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) [2]Class {
pub fn scalarType(ty: Type, zcu: *Zcu) Type {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
if (zcu.typeToPackedStruct(ty)) |packed_struct| {
return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu);
} else {
@@ -111,7 +111,7 @@ pub fn scalarType(ty: Type, zcu: *Zcu) Type {
return scalarType(ty.fieldType(0, zcu), zcu);
}
},
- .Union => {
+ .@"union" => {
const union_obj = zcu.typeToUnion(ty).?;
if (union_obj.flagsUnordered(ip).layout != .@"packed") {
const layout = Type.getUnionLayout(union_obj, zcu);
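
For orientation, classifyType above returns a pair of classes that the wasm CodeGen consults when lowering arguments (see airArg earlier in this diff). A hedged call-site sketch — the surrounding variable names are assumed, only the checks mirror the code shown above:

    const arg_classes = abi.classifyType(arg_ty, zcu);
    if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
        // the value is split across two wasm parameters
        // (e.g. integers or floats of 65..128 bits, per the cases above)
    } else if (arg_classes[0] == .memory) {
        // passed indirectly; arrays and most aggregates take this path
    } else {
        // a single direct parameter
    }
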
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 4385c33286..adde0b461f 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2419,7 +2419,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu)) {
- .Enum => {
+ .@"enum" => {
const enum_ty = Type.fromInterned(lazy_sym.ty);
wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)});
@@ -2704,13 +2704,13 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
if (reg_ok) need_mem: {
if (abi_size <= @as(u32, switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
16, 32, 64, 128 => 16,
80 => break :need_mem,
else => unreachable,
},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16,
80 => break :need_mem,
else => unreachable,
@@ -2733,11 +2733,11 @@ fn regClassForType(self: *Self, ty: Type) RegisterManager.RegisterBitSet {
const pt = self.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
80 => abi.RegisterClass.x87,
else => abi.RegisterClass.sse,
},
- .Vector => switch (ty.childType(zcu).toIntern()) {
+ .vector => switch (ty.childType(zcu).toIntern()) {
.bool_type, .u1_type => abi.RegisterClass.gp,
else => if (ty.isAbiInt(zcu) and ty.intInfo(zcu).bits == 1)
abi.RegisterClass.gp
@@ -2801,14 +2801,14 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt
) |inst, *tracking| tracking.resurrect(inst, state.scope_generation);
for (deaths) |death| try self.processDeath(death);
- const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).Array.len]RegisterLock;
+ const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock;
var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
if (opts.update_tracking)
{} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity(
stack.get(),
- @typeInfo(ExpectedContents).Array.len,
+ @typeInfo(ExpectedContents).array.len,
);
defer if (!opts.update_tracking) {
for (reg_locks.items) |lock| self.register_manager.unlockReg(lock);
@@ -3475,8 +3475,8 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
break :dst dst_mcv;
};
- if (dst_ty.zigTypeTag(zcu) == .Vector) {
- assert(src_ty.zigTypeTag(zcu) == .Vector and dst_ty.vectorLen(zcu) == src_ty.vectorLen(zcu));
+ if (dst_ty.zigTypeTag(zcu) == .vector) {
+ assert(src_ty.zigTypeTag(zcu) == .vector and dst_ty.vectorLen(zcu) == src_ty.vectorLen(zcu));
const dst_elem_ty = dst_ty.childType(zcu);
const dst_elem_abi_size: u32 = @intCast(dst_elem_ty.abiSize(zcu));
const src_elem_ty = src_ty.childType(zcu);
@@ -3714,7 +3714,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const dst_ty = self.typeOfIndex(inst);
switch (dst_ty.zigTypeTag(zcu)) {
- .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
+ .float, .vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
else => {},
}
const dst_abi_size: u32 = @intCast(dst_ty.abiSize(zcu));
@@ -3941,7 +3941,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
- if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
+ if (ty.zigTypeTag(zcu) == .vector or ty.abiSize(zcu) > 8) return self.fail(
"TODO implement airAddSat for {}",
.{ty.fmt(pt)},
);
@@ -4042,7 +4042,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ty = self.typeOf(bin_op.lhs);
- if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
+ if (ty.zigTypeTag(zcu) == .vector or ty.abiSize(zcu) > 8) return self.fail(
"TODO implement airSubSat for {}",
.{ty.fmt(pt)},
);
@@ -4216,7 +4216,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}
- if (ty.zigTypeTag(zcu) == .Vector or ty.abiSize(zcu) > 8) return self.fail(
+ if (ty.zigTypeTag(zcu) == .vector or ty.abiSize(zcu) > 8) return self.fail(
"TODO implement airMulSat for {}",
.{ty.fmt(pt)},
);
@@ -4287,8 +4287,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const ty = self.typeOf(bin_op.lhs);
switch (ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
+ .int => {
try self.spillEflagsIfOccupied();
try self.spillRegisters(&.{ .rcx, .rdi, .rsi });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rcx, .rdi, .rsi });
@@ -4354,8 +4354,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
switch (lhs_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}),
- .Int => {
+ .vector => return self.fail("TODO implement shl with overflow for Vector type", .{}),
+ .int => {
try self.spillEflagsIfOccupied();
try self.spillRegisters(&.{ .rcx, .rdi, .rsi });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rcx, .rdi, .rsi });
@@ -4510,8 +4510,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tuple_ty = self.typeOfIndex(inst);
const dst_ty = self.typeOf(bin_op.lhs);
const result: MCValue = switch (dst_ty.zigTypeTag(zcu)) {
- .Vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(pt)}),
- .Int => result: {
+ .vector => return self.fail("TODO implement airMulWithOverflow for {}", .{dst_ty.fmt(pt)}),
+ .int => result: {
const dst_info = dst_ty.intInfo(zcu);
if (dst_info.bits > 128 and dst_info.signedness == .unsigned) {
const slow_inc = self.hasFeature(.slow_incdec);
@@ -5005,7 +5005,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.typeOf(bin_op.rhs);
const result: MCValue = result: {
switch (lhs_ty.zigTypeTag(zcu)) {
- .Int => {
+ .int => {
try self.spillRegisters(&.{.rcx});
try self.register_manager.getKnownReg(.rcx, null);
const lhs_mcv = try self.resolveInst(bin_op.lhs);
@@ -5047,8 +5047,8 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
}
break :result dst_mcv;
},
- .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Int => if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
+ .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .int => if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
else => null,
16 => switch (lhs_ty.vectorLen(zcu)) {
else => null,
@@ -6212,7 +6212,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
- if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement airClz for {}", .{
+ if (src_ty.zigTypeTag(zcu) == .vector) return self.fail("TODO implement airClz for {}", .{
src_ty.fmt(pt),
});
@@ -6409,7 +6409,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
- if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement airCtz for {}", .{
+ if (src_ty.zigTypeTag(zcu) == .vector) return self.fail("TODO implement airCtz for {}", .{
src_ty.fmt(pt),
});
@@ -6571,7 +6571,7 @@ fn airPopCount(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.typeOf(ty_op.operand);
const src_abi_size: u32 = @intCast(src_ty.abiSize(zcu));
- if (src_ty.zigTypeTag(zcu) == .Vector or src_abi_size > 16)
+ if (src_ty.zigTypeTag(zcu) == .vector or src_abi_size > 16)
return self.fail("TODO implement airPopCount for {}", .{src_ty.fmt(pt)});
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -6724,7 +6724,7 @@ fn genByteSwap(
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const has_movbe = self.hasFeature(.movbe);
- if (src_ty.zigTypeTag(zcu) == .Vector) return self.fail(
+ if (src_ty.zigTypeTag(zcu) == .vector) return self.fail(
"TODO implement genByteSwap for {}",
.{src_ty.fmt(pt)},
);
@@ -7036,7 +7036,7 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
const result = result: {
const scalar_bits = ty.scalarType(zcu).floatBits(self.target.*);
if (scalar_bits == 80) {
- if (ty.zigTypeTag(zcu) != .Float) return self.fail("TODO implement floatSign for {}", .{
+ if (ty.zigTypeTag(zcu) != .float) return self.fail("TODO implement floatSign for {}", .{
ty.fmt(pt),
});
@@ -7209,14 +7209,14 @@ fn getRoundTag(self: *Self, ty: Type) ?Mir.Inst.FixedTag {
const pt = self.pt;
const zcu = pt.zcu;
return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (ty.vectorLen(zcu)) {
1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
@@ -7243,7 +7243,7 @@ fn genRoundLibcall(self: *Self, ty: Type, src_mcv: MCValue, mode: RoundMode) !MC
const zcu = pt.zcu;
if (self.getRoundTag(ty)) |_| return .none;
- if (ty.zigTypeTag(zcu) != .Float)
+ if (ty.zigTypeTag(zcu) != .float)
return self.fail("TODO implement genRound for {}", .{ty.fmt(pt)});
var callee_buf: ["__trunc?".len]u8 = undefined;
@@ -7314,7 +7314,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
else => null,
- .Int => switch (ty.abiSize(zcu)) {
+ .int => switch (ty.abiSize(zcu)) {
0 => unreachable,
1...8 => {
try self.spillEflagsIfOccupied();
@@ -7442,10 +7442,10 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
},
},
- .Float => return self.floatSign(inst, ty_op.operand, ty),
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => return self.floatSign(inst, ty_op.operand, ty),
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
else => null,
- .Int => switch (ty.childType(zcu).intInfo(zcu).bits) {
+ .int => switch (ty.childType(zcu).intInfo(zcu).bits) {
else => null,
8 => switch (ty.vectorLen(zcu)) {
else => null,
@@ -7478,7 +7478,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
5...8 => if (self.hasFeature(.avx2)) .{ .vp_d, .abs } else null,
},
},
- .Float => return self.floatSign(inst, ty_op.operand, ty),
+ .float => return self.floatSign(inst, ty_op.operand, ty),
},
}) orelse return self.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
@@ -7515,7 +7515,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
switch (ty.zigTypeTag(zcu)) {
- .Float => {
+ .float => {
const float_bits = ty.floatBits(self.target.*);
if (switch (float_bits) {
16 => !self.hasFeature(.f16c),
@@ -7547,7 +7547,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
16 => {
assert(self.hasFeature(.f16c));
const mat_src_reg = if (src_mcv.isRegister())
@@ -7568,8 +7568,8 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
64 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
else => unreachable,
},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(zcu)) {
1 => {
try self.asmRegisterRegister(
@@ -8496,7 +8496,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
const pt = self.pt;
const zcu = pt.zcu;
const src_ty = self.typeOf(src_air);
- if (src_ty.zigTypeTag(zcu) == .Vector)
+ if (src_ty.zigTypeTag(zcu) == .vector)
return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(pt)});
var src_mcv = try self.resolveInst(src_air);
@@ -9291,7 +9291,7 @@ fn genShiftBinOp(
) !MCValue {
const pt = self.pt;
const zcu = pt.zcu;
- if (lhs_ty.zigTypeTag(zcu) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{
+ if (lhs_ty.zigTypeTag(zcu) == .vector) return self.fail("TODO implement genShiftBinOp for {}", .{
lhs_ty.fmt(pt),
});
@@ -9350,7 +9350,7 @@ fn genMulDivBinOp(
) !MCValue {
const pt = self.pt;
const zcu = pt.zcu;
- if (dst_ty.zigTypeTag(zcu) == .Vector or dst_ty.zigTypeTag(zcu) == .Float) return self.fail(
+ if (dst_ty.zigTypeTag(zcu) == .vector or dst_ty.zigTypeTag(zcu) == .float) return self.fail(
"TODO implement genMulDivBinOp for {s} from {} to {}",
.{ @tagName(tag), src_ty.fmt(pt), dst_ty.fmt(pt) },
);
@@ -9938,8 +9938,8 @@ fn genBinOp(
const sse_op = switch (lhs_ty.zigTypeTag(zcu)) {
else => false,
- .Float => true,
- .Vector => switch (lhs_ty.childType(zcu).toIntern()) {
+ .float => true,
+ .vector => switch (lhs_ty.childType(zcu).toIntern()) {
.bool_type, .u1_type => false,
else => true,
},
@@ -9966,12 +9966,12 @@ fn genBinOp(
const ordered_air: [2]Air.Inst.Ref = if (lhs_ty.isVector(zcu) and
switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Bool => false,
- .Int => switch (air_tag) {
+ .bool => false,
+ .int => switch (air_tag) {
.cmp_lt, .cmp_gte => true,
else => false,
},
- .Float => switch (air_tag) {
+ .float => switch (air_tag) {
.cmp_gte, .cmp_gt => true,
else => false,
},
@@ -10337,7 +10337,7 @@ fn genBinOp(
const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
else => unreachable,
- .Float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target.*)) {
16 => {
assert(self.hasFeature(.f16c));
const tmp_reg =
@@ -10430,9 +10430,9 @@ fn genBinOp(
80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
else => null,
- .Int => switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
+ .int => switch (lhs_ty.childType(zcu).intInfo(zcu).bits) {
8 => switch (lhs_ty.vectorLen(zcu)) {
1...16 => switch (air_tag) {
.add,
@@ -10779,7 +10779,7 @@ fn genBinOp(
},
else => null,
},
- .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
16 => tag: {
assert(self.hasFeature(.f16c));
switch (lhs_ty.vectorLen(zcu)) {
@@ -11101,7 +11101,7 @@ fn genBinOp(
lhs_reg,
try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
- .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
+ .vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
) else try self.asmRegisterRegisterRegister(
mir_tag,
@@ -11119,7 +11119,7 @@ fn genBinOp(
dst_reg,
try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
- .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
+ .vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
) else try self.asmRegisterRegister(
mir_tag,
@@ -11147,7 +11147,7 @@ fn genBinOp(
lhs_reg,
try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
- .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
+ .vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
imm,
) else try self.asmRegisterRegisterRegisterImmediate(
@@ -11167,7 +11167,7 @@ fn genBinOp(
dst_reg,
try src_mcv.mem(self, switch (lhs_ty.zigTypeTag(zcu)) {
else => Memory.Size.fromSize(abi_size),
- .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
+ .vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
imm,
) else try self.asmRegisterRegisterImmediate(
@@ -11199,14 +11199,14 @@ fn genBinOp(
try self.asmRegisterRegisterRegisterImmediate(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .cmp },
64 => .{ .v_sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ .v_ss, .cmp },
2...8 => .{ .v_ps, .cmp },
@@ -11233,14 +11233,14 @@ fn genBinOp(
);
try self.asmRegisterRegisterRegisterRegister(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ps, .blendv },
64 => .{ .v_pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...8 => .{ .v_ps, .blendv },
else => null,
@@ -11267,14 +11267,14 @@ fn genBinOp(
const has_blend = self.hasFeature(.sse4_1);
try self.asmRegisterRegisterImmediate(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ss, .cmp },
64 => .{ ._sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ ._ss, .cmp },
2...4 => .{ ._ps, .cmp },
@@ -11300,14 +11300,14 @@ fn genBinOp(
);
if (has_blend) try self.asmRegisterRegisterRegister(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ps, .blendv },
64 => .{ ._pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => .{ ._ps, .blendv },
else => null,
@@ -11330,14 +11330,14 @@ fn genBinOp(
mask_reg,
) else {
const mir_fixes = @as(?Mir.Inst.Fixes, switch (lhs_ty.zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target.*)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => ._ps,
else => null,
@@ -11362,7 +11362,7 @@ fn genBinOp(
},
.cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => {
switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .Int => switch (air_tag) {
+ .int => switch (air_tag) {
.cmp_lt,
.cmp_eq,
.cmp_gt,
@@ -11396,7 +11396,7 @@ fn genBinOp(
},
else => unreachable,
},
- .Float => {},
+ .float => {},
else => unreachable,
}
@@ -12268,8 +12268,8 @@ fn genCall(self: *Self, info: union(enum) {
.air => |callee| fn_info: {
const callee_ty = self.typeOf(callee);
break :fn_info switch (callee_ty.zigTypeTag(zcu)) {
- .Fn => callee_ty,
- .Pointer => callee_ty.childType(zcu),
+ .@"fn" => callee_ty,
+ .pointer => callee_ty.childType(zcu),
else => unreachable,
};
},
@@ -12544,7 +12544,7 @@ fn genCall(self: *Self, info: union(enum) {
else => return self.fail("TODO implement calling bitcasted functions", .{}),
}
} else {
- assert(self.typeOf(callee).zigTypeTag(zcu) == .Pointer);
+ assert(self.typeOf(callee).zigTypeTag(zcu) == .pointer);
try self.genSetReg(.rax, Type.usize, .{ .air_ref = callee }, .{});
try self.asmRegister(.{ ._, .call }, .rax);
},
@@ -12650,7 +12650,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
defer for (rhs_locks) |rhs_lock| if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
switch (ty.zigTypeTag(zcu)) {
- .Float => {
+ .float => {
const float_bits = ty.floatBits(self.target.*);
if (switch (float_bits) {
16 => !self.hasFeature(.f16c),
@@ -12685,7 +12685,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
};
}
},
- .Optional => if (!ty.optionalReprIsPayload(zcu)) {
+ .optional => if (!ty.optionalReprIsPayload(zcu)) {
const opt_ty = ty;
const opt_abi_size: u31 = @intCast(opt_ty.abiSize(zcu));
ty = opt_ty.optionalChild(zcu);
@@ -12982,7 +12982,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
},
);
},
- .Float => {
+ .float => {
const flipped = switch (op) {
.lt, .lte => true,
.eq, .gte, .gt, .neq => false,
@@ -14659,7 +14659,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
else => {},
}
},
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
.insert = .{ .vp_w, .insr },
.extract = .{ .vp_w, .extr },
@@ -14680,15 +14680,15 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
else => {},
},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Bool => switch (ty.vectorLen(zcu)) {
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .bool => switch (ty.vectorLen(zcu)) {
33...64 => return .{ .move = if (self.hasFeature(.avx))
.{ .v_q, .mov }
else
.{ ._q, .mov } },
else => {},
},
- .Int => switch (ty.childType(zcu).intInfo(zcu).bits) {
+ .int => switch (ty.childType(zcu).intInfo(zcu).bits) {
1...8 => switch (ty.vectorLen(zcu)) {
1...16 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
@@ -14754,7 +14754,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
},
else => {},
},
- .Pointer, .Optional => if (ty.childType(zcu).isPtrAtRuntime(zcu))
+ .pointer, .optional => if (ty.childType(zcu).isPtrAtRuntime(zcu))
switch (ty.vectorLen(zcu)) {
1...2 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
@@ -14768,7 +14768,7 @@ fn moveStrategy(self: *Self, ty: Type, class: Register.Class, aligned: bool) !Mo
}
else
unreachable,
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
16 => switch (ty.vectorLen(zcu)) {
1...8 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
@@ -15072,7 +15072,7 @@ fn genSetReg(
17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null,
else => null,
},
- .Float => switch (ty.scalarType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.scalarType(zcu).floatBits(self.target.*)) {
16, 128 => switch (abi_size) {
2...16 => if (self.hasFeature(.avx))
.{ .v_, .movdqa }
@@ -15368,7 +15368,7 @@ fn genSetMem(
}
},
.register_overflow => |ro| switch (ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
try self.genSetMem(
base,
disp + @as(i32, @intCast(ty.structFieldOffset(0, zcu))),
@@ -15384,7 +15384,7 @@ fn genSetMem(
opts,
);
},
- .Optional => {
+ .optional => {
assert(!ty.optionalReprIsPayload(zcu));
const child_ty = ty.optionalChild(zcu);
try self.genSetMem(base, disp, child_ty, .{ .register = ro.reg }, opts);
@@ -15768,7 +15768,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(dst_lock);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(zcu)) {
- .Float => switch (dst_ty.floatBits(self.target.*)) {
+ .float => switch (dst_ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
16, 80, 128 => null,
@@ -16702,7 +16702,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
switch (scalar_ty.zigTypeTag(zcu)) {
else => {},
- .Bool => {
+ .bool => {
const regs =
try self.register_manager.allocRegs(2, .{ inst, null }, abi.RegisterClass.gp);
const reg_locks = self.register_manager.lockRegsAssumeUnused(2, regs);
@@ -16742,7 +16742,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
);
break :result .{ .register = regs[0] };
},
- .Int => if (self.hasFeature(.avx2)) avx2: {
+ .int => if (self.hasFeature(.avx2)) avx2: {
const mir_tag = @as(?Mir.Inst.FixedTag, switch (scalar_ty.intInfo(zcu).bits) {
else => null,
1...8 => switch (vector_len) {
@@ -16835,7 +16835,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
);
break :result .{ .register = dst_reg };
},
- .Float => switch (scalar_ty.floatBits(self.target.*)) {
+ .float => switch (scalar_ty.floatBits(self.target.*)) {
32 => switch (vector_len) {
1 => {
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -17264,7 +17264,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(zcu).zigTypeTag(zcu)) {
else => null,
- .Int => switch (abi_size) {
+ .int => switch (abi_size) {
0 => unreachable,
1...16 => if (has_avx)
.{ .vp_b, .blendv }
@@ -17278,7 +17278,7 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
null,
else => null,
},
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
else => unreachable,
16, 80, 128 => null,
32 => switch (vec_len) {
@@ -17334,8 +17334,8 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void {
) else {
const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(zcu)) {
else => null,
- .Int => .p_,
- .Float => switch (elem_ty.floatBits(self.target.*)) {
+ .int => .p_,
+ .float => switch (elem_ty.floatBits(self.target.*)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => null,
@@ -18132,8 +18132,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
if (has_avx) try self.asmRegisterRegisterRegister(
.{ switch (elem_ty.zigTypeTag(zcu)) {
else => break :result null,
- .Int => .vp_,
- .Float => switch (elem_ty.floatBits(self.target.*)) {
+ .int => .vp_,
+ .float => switch (elem_ty.floatBits(self.target.*)) {
32 => .v_ps,
64 => .v_pd,
16, 80, 128 => break :result null,
@@ -18146,8 +18146,8 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
) else try self.asmRegisterRegister(
.{ switch (elem_ty.zigTypeTag(zcu)) {
else => break :result null,
- .Int => .p_,
- .Float => switch (elem_ty.floatBits(self.target.*)) {
+ .int => .p_,
+ .float => switch (elem_ty.floatBits(self.target.*)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => break :result null,
@@ -18235,7 +18235,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = result: {
switch (result_ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, zcu));
if (result_ty.containerLayout(zcu) == .@"packed") {
const struct_obj = zcu.typeToStruct(result_ty).?;
@@ -18342,7 +18342,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
break :result .{ .load_frame = .{ .index = frame_index } };
},
- .Array, .Vector => {
+ .array, .vector => {
const elem_ty = result_ty.childType(zcu);
if (result_ty.isVector(zcu) and elem_ty.toIntern() == .bool_type) {
const result_size: u32 = @intCast(result_ty.abiSize(zcu));
@@ -18483,7 +18483,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
32, 64 => !self.hasFeature(.fma),
else => unreachable,
}) {
- if (ty.zigTypeTag(zcu) != .Float) return self.fail("TODO implement airMulAdd for {}", .{
+ if (ty.zigTypeTag(zcu) != .float) return self.fail("TODO implement airMulAdd for {}", .{
ty.fmt(pt),
});
@@ -18533,14 +18533,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
const mir_tag = @as(?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or
mem.eql(u2, &order, &.{ 3, 1, 2 }))
switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd132 },
64 => .{ .v_sd, .fmadd132 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd132 },
2...8 => .{ .v_ps, .fmadd132 },
@@ -18560,14 +18560,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 }))
switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd213 },
64 => .{ .v_sd, .fmadd213 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd213 },
2...8 => .{ .v_ps, .fmadd213 },
@@ -18587,14 +18587,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 }))
switch (ty.zigTypeTag(zcu)) {
- .Float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd231 },
64 => .{ .v_sd, .fmadd231 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .Float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd231 },
2...8 => .{ .v_ps, .fmadd231 },
@@ -18971,7 +18971,7 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking {
/// and as a register.
fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue {
const mcv = try self.resolveInst(operand);
- const ti = @typeInfo(T).Int;
+ const ti = @typeInfo(T).int;
switch (mcv) {
.immediate => |imm| {
// This immediate is unsigned.
@@ -19078,7 +19078,7 @@ fn resolveCallingConventionValues(
}
// Return values
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = InstTracking.init(.unreach);
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// TODO: is this even possible for C calling convention?
@@ -19266,7 +19266,7 @@ fn resolveCallingConventionValues(
result.stack_align = .@"16";
// Return values
- if (ret_ty.zigTypeTag(zcu) == .NoReturn) {
+ if (ret_ty.zigTypeTag(zcu) == .noreturn) {
result.return_value = InstTracking.init(.unreach);
} else if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
result.return_value = InstTracking.init(.none);
@@ -19372,7 +19372,7 @@ fn memSize(self: *Self, ty: Type) Memory.Size {
const pt = self.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)),
+ .float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)),
else => Memory.Size.fromSize(@intCast(ty.abiSize(zcu))),
};
}
@@ -19467,7 +19467,7 @@ fn regBitSize(self: *Self, ty: Type) u64 {
5...8 => 64,
else => unreachable,
},
- .Float => switch (abi_size) {
+ .float => switch (abi_size) {
1...16 => 128,
17...32 => 256,
else => unreachable,
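
The same rename applies to the payload names of std.builtin.Type, as the @typeInfo call sites above show (.Int becomes .int, .Array becomes .array, and keyword-colliding names gain @"..." quoting). A standalone illustration assuming nothing beyond the builtin itself:

    const std = @import("std");

    comptime {
        std.debug.assert(@typeInfo(u32).int.bits == 32); // was: .Int.bits
        std.debug.assert(@typeInfo([4]u8).array.len == 4); // was: .Array.len
        std.debug.assert(@typeInfo(enum { a }).@"enum".fields.len == 1); // was: .Enum.fields.len
    }
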
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
index 641008d5d8..1738a382f6 100644
--- a/src/arch/x86_64/Encoding.zig
+++ b/src/arch/x86_64/Encoding.zig
@@ -845,7 +845,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
const mnemonic_to_encodings_map = init: {
@setEvalBranchQuota(5_000);
- const mnemonic_count = @typeInfo(Mnemonic).Enum.fields.len;
+ const mnemonic_count = @typeInfo(Mnemonic).@"enum".fields.len;
var mnemonic_map: [mnemonic_count][]Data = .{&.{}} ** mnemonic_count;
const encodings = @import("encodings.zig");
for (encodings.table) |entry| mnemonic_map[@intFromEnum(entry[0])].len += 1;
@@ -856,8 +856,8 @@ const mnemonic_to_encodings_map = init: {
storage_i += value.len;
}
var mnemonic_i: [mnemonic_count]usize = .{0} ** mnemonic_count;
- const ops_len = @typeInfo(std.meta.FieldType(Data, .ops)).Array.len;
- const opc_len = @typeInfo(std.meta.FieldType(Data, .opc)).Array.len;
+ const ops_len = @typeInfo(std.meta.FieldType(Data, .ops)).array.len;
+ const opc_len = @typeInfo(std.meta.FieldType(Data, .opc)).array.len;
for (encodings.table) |entry| {
const i = &mnemonic_i[@intFromEnum(entry[0])];
mnemonic_map[@intFromEnum(entry[0])][i.*] = .{
diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig
index 2c5192f1c2..76e068ece9 100644
--- a/src/arch/x86_64/Lower.zig
+++ b/src/arch/x86_64/Lower.zig
@@ -571,7 +571,7 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
@setEvalBranchQuota(2_000);
comptime var max_len = 0;
- inline for (@typeInfo(Mnemonic).Enum.fields) |field| max_len = @max(field.name.len, max_len);
+ inline for (@typeInfo(Mnemonic).@"enum".fields) |field| max_len = @max(field.name.len, max_len);
var buf: [max_len]u8 = undefined;
const fixes_name = @tagName(fixes);
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index a42ec0635a..14de8ee69c 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -1185,8 +1185,8 @@ pub const Memory = struct {
extra: u32,
pub const Info = packed struct(u32) {
- base: @typeInfo(bits.Memory.Base).Union.tag_type.?,
- mod: @typeInfo(bits.Memory.Mod).Union.tag_type.?,
+ base: @typeInfo(bits.Memory.Base).@"union".tag_type.?,
+ mod: @typeInfo(bits.Memory.Mod).@"union".tag_type.?,
size: bits.Memory.Size,
index: Register,
scale: bits.Memory.Scale,
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 23fba0bb1c..2e411e0381 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -54,26 +54,26 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
switch (ty.zigTypeTag(zcu)) {
- .Pointer,
- .Int,
- .Bool,
- .Enum,
- .Void,
- .NoReturn,
- .ErrorSet,
- .Struct,
- .Union,
- .Optional,
- .Array,
- .ErrorUnion,
- .AnyFrame,
- .Frame,
+ .pointer,
+ .int,
+ .bool,
+ .@"enum",
+ .void,
+ .noreturn,
+ .error_set,
+ .@"struct",
+ .@"union",
+ .optional,
+ .array,
+ .error_union,
+ .@"anyframe",
+ .frame,
=> switch (ty.abiSize(zcu)) {
0 => unreachable,
1, 2, 4, 8 => return .integer,
else => switch (ty.zigTypeTag(zcu)) {
- .Int => return .win_i128,
- .Struct, .Union => if (ty.containerLayout(zcu) == .@"packed") {
+ .int => return .win_i128,
+ .@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed") {
return .win_i128;
} else {
return .memory;
@@ -82,16 +82,16 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
},
},
- .Float, .Vector => return .sse,
+ .float, .vector => return .sse,
- .Type,
- .ComptimeFloat,
- .ComptimeInt,
- .Undefined,
- .Null,
- .Fn,
- .Opaque,
- .EnumLiteral,
+ .type,
+ .comptime_float,
+ .comptime_int,
+ .undefined,
+ .null,
+ .@"fn",
+ .@"opaque",
+ .enum_literal,
=> unreachable,
}
}
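
To keep the Windows mapping above easy to scan, a hedged summary of what the visible switch arms imply (illustrative only, written as Zig comments; arms not shown in this hunk are omitted):

    // sizes of 1, 2, 4, or 8 bytes (ints, pointers, bools, enums,
    // structs, unions, ...)                    -> .integer
    // wider integers (e.g. i128)               -> .win_i128
    // wider packed structs/unions              -> .win_i128
    // wider structs/unions with other layouts  -> .memory
    // floats and vectors                       -> .sse
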
@@ -107,7 +107,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
};
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag(zcu)) {
- .Pointer => switch (ty.ptrSize(zcu)) {
+ .pointer => switch (ty.ptrSize(zcu)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@@ -118,7 +118,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
return result;
},
},
- .Int, .Enum, .ErrorSet => {
+ .int, .@"enum", .error_set => {
const bits = ty.intInfo(zcu).bits;
if (bits <= 64) {
result[0] = .integer;
@@ -144,11 +144,11 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
}
return memory_class;
},
- .Bool, .Void, .NoReturn => {
+ .bool, .void, .noreturn => {
result[0] = .integer;
return result;
},
- .Float => switch (ty.floatBits(target)) {
+ .float => switch (ty.floatBits(target)) {
16 => {
if (ctx == .field) {
result[0] = .memory;
@@ -184,7 +184,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
},
else => unreachable,
},
- .Vector => {
+ .vector => {
const elem_ty = ty.childType(zcu);
const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
if (elem_ty.toIntern() == .bool_type) {
@@ -249,14 +249,14 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
};
return memory_class;
},
- .Optional => {
+ .optional => {
if (ty.isPtrLikeOptional(zcu)) {
result[0] = .integer;
return result;
}
return memory_class;
},
- .Struct, .Union => {
+ .@"struct", .@"union" => {
// "If the size of an object is larger than eight eightbytes, or
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
@@ -305,7 +305,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: std.Target, ctx: Context) [8
}
return result;
},
- .Array => {
+ .array => {
const ty_size = ty.abiSize(zcu);
if (ty_size <= 8) {
result[0] = .integer;
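
The classify switches in abi.zig change only the spelling of the type-tag names. A rough standalone analogue of that switch shape, using plain @typeInfo and a stand-in Class enum instead of the compiler's Type/Zcu machinery:

const std = @import("std");

const Class = enum { integer, sse, memory };

// Loosely modeled on the classify switches above; not ABI-accurate.
fn classify(comptime T: type) Class {
    return switch (@typeInfo(T)) {
        .pointer, .int, .bool, .@"enum", .error_set => .integer,
        .float, .vector => .sse,
        .@"struct", .@"union", .array, .optional, .error_union => .memory,
        else => @compileError("cannot classify " ++ @typeName(T)),
    };
}

test "lower-case type tags" {
    try std.testing.expectEqual(Class.integer, classify(u32));
    try std.testing.expectEqual(Class.sse, classify(f64));
    try std.testing.expectEqual(Class.memory, classify(struct { a: u64, b: u64 }));
}
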
diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig
index e1f587d5a6..95397e8064 100644
--- a/src/arch/x86_64/bits.zig
+++ b/src/arch/x86_64/bits.zig
@@ -423,7 +423,7 @@ pub const FrameIndex = enum(u32) {
// Other indices are used for local variable stack slots
_,
- pub const named_count = @typeInfo(FrameIndex).Enum.fields.len;
+ pub const named_count = @typeInfo(FrameIndex).@"enum".fields.len;
pub fn isNamed(fi: FrameIndex) bool {
return @intFromEnum(fi) < named_count;
@@ -463,7 +463,7 @@ pub const Memory = struct {
frame: FrameIndex,
reloc: u32,
- pub const Tag = @typeInfo(Base).Union.tag_type.?;
+ pub const Tag = @typeInfo(Base).@"union".tag_type.?;
pub fn isExtended(self: Base) bool {
return switch (self) {
diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig
index 484b7fde26..0dc1dbb7c9 100644
--- a/src/arch/x86_64/encoder.zig
+++ b/src/arch/x86_64/encoder.zig
@@ -2262,7 +2262,7 @@ const Assembler = struct {
}
fn mnemonicFromString(bytes: []const u8) ?Instruction.Mnemonic {
- const ti = @typeInfo(Instruction.Mnemonic).Enum;
+ const ti = @typeInfo(Instruction.Mnemonic).@"enum";
inline for (ti.fields) |field| {
if (std.mem.eql(u8, bytes, field.name)) {
return @field(Instruction.Mnemonic, field.name);
@@ -2278,7 +2278,7 @@ const Assembler = struct {
_ = try as.expect(.comma);
try as.skip(1, .{.space});
}
- if (@typeInfo(@TypeOf(cond)) != .EnumLiteral) {
+ if (@typeInfo(@TypeOf(cond)) != .enum_literal) {
@compileError("invalid condition in the rule: " ++ @typeName(@TypeOf(cond)));
}
switch (cond) {
@@ -2315,7 +2315,7 @@ const Assembler = struct {
}
fn registerFromString(bytes: []const u8) ?Register {
- const ti = @typeInfo(Register).Enum;
+ const ti = @typeInfo(Register).@"enum";
inline for (ti.fields) |field| {
if (std.mem.eql(u8, bytes, field.name)) {
return @field(Register, field.name);
@@ -2405,7 +2405,7 @@ const Assembler = struct {
fn parseMemoryRule(as: *Assembler, rule: anytype) ParseError!MemoryParseResult {
var res: MemoryParseResult = .{};
inline for (rule, 0..) |cond, i| {
- if (@typeInfo(@TypeOf(cond)) != .EnumLiteral) {
+ if (@typeInfo(@TypeOf(cond)) != .enum_literal) {
@compileError("unsupported condition type in the rule: " ++ @typeName(@TypeOf(cond)));
}
switch (cond) {
diff --git a/src/codegen.zig b/src/codegen.zig
index 7e6a911f5f..67217c37e8 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -113,8 +113,8 @@ pub fn generateLazyFunction(
fn writeFloat(comptime F: type, f: F, target: std.Target, endian: std.builtin.Endian, code: []u8) void {
_ = target;
- const bits = @typeInfo(F).Float.bits;
- const Int = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = bits } });
+ const bits = @typeInfo(F).float.bits;
+ const Int = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
const int: Int = @bitCast(f);
mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian);
}
@@ -167,7 +167,7 @@ pub fn generateLazySymbol(
}
mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian);
return Result.ok;
- } else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(pt.zcu) == .Enum) {
+ } else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(pt.zcu) == .@"enum") {
alignment.* = .@"1";
const enum_ty = Type.fromInterned(lazy_sym.ty);
const tag_names = enum_ty.enumFields(pt.zcu);
@@ -519,7 +519,7 @@ pub fn generateSymbol(
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
- if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .Pointer) {
+ if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .pointer) {
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(zcu)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
@@ -678,7 +678,7 @@ fn lowerPtr(
const base_ptr = Value.fromInterned(field.base);
const base_ty = base_ptr.typeOf(zcu).childType(zcu);
const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
- .Pointer => off: {
+ .pointer => off: {
assert(base_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
@@ -686,7 +686,7 @@ fn lowerPtr(
else => unreachable,
};
},
- .Struct, .Union => switch (base_ty.containerLayout(zcu)) {
+ .@"struct", .@"union" => switch (base_ty.containerLayout(zcu)) {
.auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
@@ -721,7 +721,7 @@ fn lowerUavRef(
const uav_val = uav.val;
const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)});
- const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn;
+ const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, ptr_width_bytes);
return Result.ok;
@@ -768,7 +768,7 @@ fn lowerNavRef(
const ptr_width = target.ptrBitWidth();
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
- const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
+ const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
try code.appendNTimes(0xaa, @divExact(ptr_width, 8));
return Result.ok;
@@ -880,7 +880,7 @@ fn genNavRef(
if (zcu.typeToFunc(fn_ty).?.is_generic) {
return .{ .mcv = .{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? } };
}
- } else if (ty.zigTypeTag(zcu) == .Pointer) {
+ } else if (ty.zigTypeTag(zcu) == .pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(zcu)) {
return .{ .mcv = .{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? } };
@@ -955,8 +955,8 @@ pub fn genTypedValue(
if (val.isUndef(zcu)) return .{ .mcv = .undef };
switch (ty.zigTypeTag(zcu)) {
- .Void => return .{ .mcv = .none },
- .Pointer => switch (ty.ptrSize(zcu)) {
+ .void => return .{ .mcv = .none },
+ .pointer => switch (ty.ptrSize(zcu)) {
.Slice => {},
else => switch (val.toIntern()) {
.null_value => {
@@ -991,7 +991,7 @@ pub fn genTypedValue(
},
},
},
- .Int => {
+ .int => {
const info = ty.intInfo(zcu);
if (info.bits <= target.ptrBitWidth()) {
const unsigned: u64 = switch (info.signedness) {
@@ -1001,10 +1001,10 @@ pub fn genTypedValue(
return .{ .mcv = .{ .immediate = unsigned } };
}
},
- .Bool => {
+ .bool => {
return .{ .mcv = .{ .immediate = @intFromBool(val.toBool()) } };
},
- .Optional => {
+ .optional => {
if (ty.isPtrLikeOptional(zcu)) {
return genTypedValue(
lf,
@@ -1017,7 +1017,7 @@ pub fn genTypedValue(
return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } };
}
},
- .Enum => {
+ .@"enum" => {
const enum_tag = ip.indexToKey(val.toIntern()).enum_tag;
return genTypedValue(
lf,
@@ -1027,12 +1027,12 @@ pub fn genTypedValue(
target,
);
},
- .ErrorSet => {
+ .error_set => {
const err_name = ip.indexToKey(val.toIntern()).err.name;
const error_index = try pt.getErrorValue(err_name);
return .{ .mcv = .{ .immediate = error_index } };
},
- .ErrorUnion => {
+ .error_union => {
const err_type = ty.errorUnionSet(zcu);
const payload_type = ty.errorUnionPayload(zcu);
if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
@@ -1060,14 +1060,14 @@ pub fn genTypedValue(
}
},
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
+ .comptime_int => unreachable,
+ .comptime_float => unreachable,
+ .type => unreachable,
+ .enum_literal => unreachable,
+ .noreturn => unreachable,
+ .undefined => unreachable,
+ .null => unreachable,
+ .@"opaque" => unreachable,
else => {},
}
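
The writeFloat hunk near the top of codegen.zig also covers @Type construction: the initializer field is now .int rather than .Int, and float info is read through .float. A self-contained sketch under that assumption (FloatBits is a hypothetical helper, not part of codegen.zig):

const std = @import("std");

// Returns the unsigned integer type with the same bit width as F.
fn FloatBits(comptime F: type) type {
    const bits = @typeInfo(F).float.bits; // formerly .Float.bits
    return @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } }); // formerly .Int
}

test "reinterpret a float's bits" {
    const int: FloatBits(f32) = @bitCast(@as(f32, 1.0));
    try std.testing.expectEqual(@as(u32, 0x3f800000), int);
}
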
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 8703e9b124..f3b8c7e72a 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1401,7 +1401,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderCType(writer, ctype);
try writer.writeByte(')');
- } else if (field_ty.zigTypeTag(zcu) == .Float) {
+ } else if (field_ty.zigTypeTag(zcu) == .float) {
try writer.writeByte('(');
try dg.renderCType(writer, ctype);
try writer.writeByte(')');
@@ -4473,8 +4473,8 @@ fn airCall(
const callee_ty = f.typeOf(pl_op.operand);
const fn_info = zcu.typeToFunc(switch (callee_ty.zigTypeTag(zcu)) {
- .Fn => callee_ty,
- .Pointer => callee_ty.childType(zcu),
+ .@"fn" => callee_ty,
+ .pointer => callee_ty.childType(zcu),
else => unreachable,
}).?;
const ret_ty = Type.fromInterned(fn_info.return_type);
@@ -5848,7 +5848,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.typeOf(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const operand_is_ptr = operand_ty.zigTypeTag(zcu) == .Pointer;
+ const operand_is_ptr = operand_ty.zigTypeTag(zcu) == .pointer;
const error_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty;
const error_ty = error_union_ty.errorUnionSet(zcu);
const payload_ty = error_union_ty.errorUnionPayload(zcu);
@@ -7011,23 +7011,23 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
.Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
.Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
.Min => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => if (use_operator) .{ .ternary = " < " } else .{ .builtin = .{ .operation = "min" } },
- .Float => .{ .builtin = .{ .operation = "min" } },
+ .int => if (use_operator) .{ .ternary = " < " } else .{ .builtin = .{ .operation = "min" } },
+ .float => .{ .builtin = .{ .operation = "min" } },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => if (use_operator) .{ .ternary = " > " } else .{ .builtin = .{ .operation = "max" } },
- .Float => .{ .builtin = .{ .operation = "max" } },
+ .int => if (use_operator) .{ .ternary = " > " } else .{ .builtin = .{ .operation = "max" } },
+ .float => .{ .builtin = .{ .operation = "max" } },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => if (use_operator) .{ .infix = " += " } else .{ .builtin = .{ .operation = "addw", .info = .bits } },
- .Float => .{ .builtin = .{ .operation = "add" } },
+ .int => if (use_operator) .{ .infix = " += " } else .{ .builtin = .{ .operation = "addw", .info = .bits } },
+ .float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => if (use_operator) .{ .infix = " *= " } else .{ .builtin = .{ .operation = "mulw", .info = .bits } },
- .Float => .{ .builtin = .{ .operation = "mul" } },
+ .int => if (use_operator) .{ .infix = " *= " } else .{ .builtin = .{ .operation = "mulw", .info = .bits } },
+ .float => .{ .builtin = .{ .operation = "mul" } },
else => unreachable,
},
};
@@ -7050,38 +7050,38 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, switch (reduce.operation) {
.Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) {
- .Bool => Value.false,
- .Int => try pt.intValue(scalar_ty, 0),
+ .bool => Value.false,
+ .int => try pt.intValue(scalar_ty, 0),
else => unreachable,
},
.And => switch (scalar_ty.zigTypeTag(zcu)) {
- .Bool => Value.true,
- .Int => switch (scalar_ty.intInfo(zcu).signedness) {
+ .bool => Value.true,
+ .int => switch (scalar_ty.intInfo(zcu).signedness) {
.unsigned => try scalar_ty.maxIntScalar(pt, scalar_ty),
.signed => try pt.intValue(scalar_ty, -1),
},
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => try pt.intValue(scalar_ty, 0),
- .Float => try pt.floatValue(scalar_ty, 0.0),
+ .int => try pt.intValue(scalar_ty, 0),
+ .float => try pt.floatValue(scalar_ty, 0.0),
else => unreachable,
},
.Mul => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => try pt.intValue(scalar_ty, 1),
- .Float => try pt.floatValue(scalar_ty, 1.0),
+ .int => try pt.intValue(scalar_ty, 1),
+ .float => try pt.floatValue(scalar_ty, 1.0),
else => unreachable,
},
.Min => switch (scalar_ty.zigTypeTag(zcu)) {
- .Bool => Value.true,
- .Int => try scalar_ty.maxIntScalar(pt, scalar_ty),
- .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)),
+ .bool => Value.true,
+ .int => try scalar_ty.maxIntScalar(pt, scalar_ty),
+ .float => try pt.floatValue(scalar_ty, std.math.nan(f128)),
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag(zcu)) {
- .Bool => Value.false,
- .Int => try scalar_ty.minIntScalar(pt, scalar_ty),
- .Float => try pt.floatValue(scalar_ty, std.math.nan(f128)),
+ .bool => Value.false,
+ .int => try scalar_ty.minIntScalar(pt, scalar_ty),
+ .float => try pt.floatValue(scalar_ty, std.math.nan(f128)),
else => unreachable,
},
}, .Initializer);
@@ -7765,7 +7765,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri
}
fn undefPattern(comptime IntType: type) IntType {
- const int_info = @typeInfo(IntType).Int;
+ const int_info = @typeInfo(IntType).int;
const UnsignedType = std.meta.Int(.unsigned, int_info.bits);
return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3)));
}
@@ -8027,7 +8027,7 @@ const Vectorize = struct {
pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
- return if (ty.zigTypeTag(zcu) == .Vector) index: {
+ return if (ty.zigTypeTag(zcu) == .vector) index: {
const local = try f.allocLocal(inst, Type.usize);
try writer.writeAll("for (");
@@ -8063,7 +8063,7 @@ const Vectorize = struct {
fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .Array, .Vector => return true,
+ .array, .vector => return true,
else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
};
}
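
The undefPattern hunk shows the same rename on integer info: @typeInfo(IntType).int instead of .Int. The helper reads roughly as follows in standalone form; the (1 << (bits | 1)) / 3 expression produces the alternating 0b1010... fill used for undefined memory:

const std = @import("std");

fn undefPattern(comptime IntType: type) IntType {
    const int_info = @typeInfo(IntType).int; // formerly .Int
    const UnsignedType = std.meta.Int(.unsigned, int_info.bits);
    return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3)));
}

test "undef fill pattern" {
    try std.testing.expectEqual(@as(u8, 0xAA), undefPattern(u8));
    try std.testing.expectEqual(@as(i16, @bitCast(@as(u16, 0xAAAA))), undefPattern(i16));
}
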
diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
index 018b0586d0..1e0c23a96b 100644
--- a/src/codegen/c/Type.zig
+++ b/src/codegen/c/Type.zig
@@ -669,7 +669,7 @@ const Index = enum(u32) {
_,
- const first_pool_index: u32 = @typeInfo(CType.Index).Enum.fields.len;
+ const first_pool_index: u32 = @typeInfo(CType.Index).@"enum".fields.len;
const basic_hashes = init: {
@setEvalBranchQuota(1_600);
var basic_hashes_init: [first_pool_index]Pool.Map.Hash = undefined;
@@ -740,7 +740,7 @@ pub const Info = union(enum) {
aggregate: Aggregate,
function: Function,
- const Tag = @typeInfo(Info).Union.tag_type.?;
+ const Tag = @typeInfo(Info).@"union".tag_type.?;
pub const Pointer = struct {
elem_ctype: CType,
@@ -783,7 +783,7 @@ pub const Info = union(enum) {
pub fn at(slice: Field.Slice, index: usize, pool: *const Pool) Field {
assert(index < slice.len);
const extra = pool.getExtra(Pool.Field, @intCast(slice.extra_index +
- index * @typeInfo(Pool.Field).Struct.fields.len));
+ index * @typeInfo(Pool.Field).@"struct".fields.len));
return .{
.name = .{ .index = extra.name },
.ctype = .{ .index = extra.ctype },
@@ -991,7 +991,7 @@ pub const Pool = struct {
_,
const first_named_index: u32 = 1 << 31;
- const first_pool_index: u32 = first_named_index + @typeInfo(String.Index).Enum.fields.len;
+ const first_pool_index: u32 = first_named_index + @typeInfo(String.Index).@"enum".fields.len;
};
const Adapter = struct {
@@ -1127,7 +1127,7 @@ pub const Pool = struct {
allocator,
FwdDeclAnon,
extra,
- fields.len * @typeInfo(Field).Struct.fields.len,
+ fields.len * @typeInfo(Field).@"struct".fields.len,
);
for (fields, field_ctypes) |field, field_ctype| pool.addHashedExtraAssumeCapacity(
&hasher,
@@ -1184,7 +1184,7 @@ pub const Pool = struct {
allocator,
AggregateAnon,
extra,
- aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len,
+ aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len,
);
for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
.name = field.name.index,
@@ -1213,7 +1213,7 @@ pub const Pool = struct {
allocator,
Aggregate,
extra,
- aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len,
+ aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len,
);
for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
.name = field.name.index,
@@ -1672,7 +1672,7 @@ pub const Pool = struct {
defer scratch.shrinkRetainingCapacity(scratch_top);
try scratch.ensureUnusedCapacity(
allocator,
- loaded_struct.field_types.len * @typeInfo(Field).Struct.fields.len,
+ loaded_struct.field_types.len * @typeInfo(Field).@"struct".fields.len,
);
var hasher = Hasher.init;
var tag: Pool.Tag = .aggregate_struct;
@@ -1709,14 +1709,14 @@ pub const Pool = struct {
}
const fields_len: u32 = @intCast(@divExact(
scratch.items.len - scratch_top,
- @typeInfo(Field).Struct.fields.len,
+ @typeInfo(Field).@"struct".fields.len,
));
if (fields_len == 0) return CType.void;
try pool.ensureUnusedCapacity(allocator, 1);
const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
.fwd_decl = fwd_decl.index,
.fields_len = fields_len,
- }, fields_len * @typeInfo(Field).Struct.fields.len);
+ }, fields_len * @typeInfo(Field).@"struct".fields.len);
pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index);
},
@@ -1734,7 +1734,7 @@ pub const Pool = struct {
const scratch_top = scratch.items.len;
defer scratch.shrinkRetainingCapacity(scratch_top);
try scratch.ensureUnusedCapacity(allocator, anon_struct_info.types.len *
- @typeInfo(Field).Struct.fields.len);
+ @typeInfo(Field).@"struct".fields.len);
var hasher = Hasher.init;
for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
@@ -1765,7 +1765,7 @@ pub const Pool = struct {
}
const fields_len: u32 = @intCast(@divExact(
scratch.items.len - scratch_top,
- @typeInfo(Field).Struct.fields.len,
+ @typeInfo(Field).@"struct".fields.len,
));
if (fields_len == 0) return CType.void;
if (kind.isForward()) {
@@ -1775,7 +1775,7 @@ pub const Pool = struct {
&hasher,
FwdDeclAnon,
.{ .fields_len = fields_len },
- fields_len * @typeInfo(Field).Struct.fields.len,
+ fields_len * @typeInfo(Field).@"struct".fields.len,
);
pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
return pool.tagTrailingExtra(
@@ -1790,7 +1790,7 @@ pub const Pool = struct {
const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{
.fwd_decl = fwd_decl.index,
.fields_len = fields_len,
- }, fields_len * @typeInfo(Field).Struct.fields.len);
+ }, fields_len * @typeInfo(Field).@"struct".fields.len);
pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
return pool.tagTrailingExtraAssumeCapacity(hasher, .aggregate_struct, extra_index);
},
@@ -1812,7 +1812,7 @@ pub const Pool = struct {
defer scratch.shrinkRetainingCapacity(scratch_top);
try scratch.ensureUnusedCapacity(
allocator,
- loaded_union.field_types.len * @typeInfo(Field).Struct.fields.len,
+ loaded_union.field_types.len * @typeInfo(Field).@"struct".fields.len,
);
var hasher = Hasher.init;
var tag: Pool.Tag = .aggregate_union;
@@ -1850,7 +1850,7 @@ pub const Pool = struct {
}
const fields_len: u32 = @intCast(@divExact(
scratch.items.len - scratch_top,
- @typeInfo(Field).Struct.fields.len,
+ @typeInfo(Field).@"struct".fields.len,
));
if (!has_tag) {
if (fields_len == 0) return CType.void;
@@ -1860,7 +1860,7 @@ pub const Pool = struct {
&hasher,
Aggregate,
.{ .fwd_decl = fwd_decl.index, .fields_len = fields_len },
- fields_len * @typeInfo(Field).Struct.fields.len,
+ fields_len * @typeInfo(Field).@"struct".fields.len,
);
pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index);
@@ -1898,7 +1898,7 @@ pub const Pool = struct {
.id = 0,
.fields_len = fields_len,
},
- fields_len * @typeInfo(Field).Struct.fields.len,
+ fields_len * @typeInfo(Field).@"struct".fields.len,
);
pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
break :payload_ctype pool.tagTrailingExtraAssumeCapacity(
@@ -2087,7 +2087,7 @@ pub const Pool = struct {
.tag = tag,
.data = try pool.addExtra(allocator, FwdDeclAnon, .{
.fields_len = fields.len,
- }, fields.len * @typeInfo(Field).Struct.fields.len),
+ }, fields.len * @typeInfo(Field).@"struct".fields.len),
});
for (0..fields.len) |field_index| {
const field = fields.at(field_index, source_pool);
@@ -2115,11 +2115,11 @@ pub const Pool = struct {
.index = anon.index,
.id = anon.id,
.fields_len = aggregate_info.fields.len,
- }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+ }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len),
.fwd_decl => |fwd_decl| try pool.addExtra(allocator, Aggregate, .{
.fwd_decl = pool_adapter.copy(fwd_decl).index,
.fields_len = aggregate_info.fields.len,
- }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+ }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len),
},
});
for (0..aggregate_info.fields.len) |field_index| {
@@ -2182,7 +2182,7 @@ pub const Pool = struct {
const init: Hasher = .{ .impl = Impl.init(0) };
fn updateExtra(hasher: *Hasher, comptime Extra: type, extra: Extra, pool: *const Pool) void {
- inline for (@typeInfo(Extra).Struct.fields) |field| {
+ inline for (@typeInfo(Extra).@"struct".fields) |field| {
const value = @field(extra, field.name);
switch (field.type) {
Pool.Tag, String, CType => unreachable,
@@ -2429,7 +2429,7 @@ pub const Pool = struct {
) !ExtraIndex {
try pool.extra.ensureUnusedCapacity(
allocator,
- @typeInfo(Extra).Struct.fields.len + trailing_len,
+ @typeInfo(Extra).@"struct".fields.len + trailing_len,
);
defer pool.addExtraAssumeCapacity(Extra, extra);
return @intCast(pool.extra.items.len);
@@ -2442,7 +2442,7 @@ pub const Pool = struct {
comptime Extra: type,
extra: Extra,
) void {
- inline for (@typeInfo(Extra).Struct.fields) |field| {
+ inline for (@typeInfo(Extra).@"struct".fields) |field| {
const value = @field(extra, field.name);
array.appendAssumeCapacity(switch (field.type) {
u32 => value,
@@ -2505,7 +2505,7 @@ pub const Pool = struct {
extra_index: ExtraIndex,
) struct { extra: Extra, trail: ExtraTrail } {
var extra: Extra = undefined;
- const fields = @typeInfo(Extra).Struct.fields;
+ const fields = @typeInfo(Extra).@"struct".fields;
inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value|
@field(extra, field.name) = switch (field.type) {
u32 => value,
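
The Pool code above leans on @typeInfo(Extra).@"struct".fields (formerly .Struct.fields) to serialize typed records into a flat u32 array. A small sketch of that idiom with the renamed accessor; Extra and addExtra here are illustrative stand-ins, not the compiler's Pool types:

const std = @import("std");

const Extra = struct { a: u32, b: u32, c: u32 };

// Appends each field of `extra` to the flat array and returns its start index.
fn addExtra(list: *std.ArrayList(u32), extra: Extra) !u32 {
    const index: u32 = @intCast(list.items.len);
    inline for (@typeInfo(Extra).@"struct".fields) |field| {
        try list.append(@field(extra, field.name));
    }
    return index;
}

test "flattening a record through @typeInfo" {
    var list = std.ArrayList(u32).init(std.testing.allocator);
    defer list.deinit();
    const index = try addExtra(&list, .{ .a = 1, .b = 2, .c = 3 });
    try std.testing.expectEqual(@as(u32, 0), index);
    try std.testing.expectEqualSlices(u32, &.{ 1, 2, 3 }, list.items);
}
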
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 1352e6e3a1..e7cb57d76e 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1538,7 +1538,7 @@ pub const Object = struct {
try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder);
}
}
- if (param_ty.zigTypeTag(zcu) != .Optional) {
+ if (param_ty.zigTypeTag(zcu) != .optional) {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
}
if (ptr_info.flags.is_const) {
@@ -1907,8 +1907,8 @@ pub const Object = struct {
if (o.debug_type_map.get(ty)) |debug_type| return debug_type;
switch (ty.zigTypeTag(zcu)) {
- .Void,
- .NoReturn,
+ .void,
+ .noreturn,
=> {
const debug_void_type = try o.builder.debugSignedType(
try o.builder.metadataString("void"),
@@ -1917,7 +1917,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_void_type);
return debug_void_type;
},
- .Int => {
+ .int => {
const info = ty.intInfo(zcu);
assert(info.bits != 0);
const name = try o.allocTypeName(ty);
@@ -1931,7 +1931,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_int_type);
return debug_int_type;
},
- .Enum => {
+ .@"enum" => {
if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) {
const debug_enum_type = try o.makeEmptyNamespaceDebugType(ty);
try o.debug_type_map.put(gpa, ty, debug_enum_type);
@@ -1985,7 +1985,7 @@ pub const Object = struct {
try o.debug_enums.append(gpa, debug_enum_type);
return debug_enum_type;
},
- .Float => {
+ .float => {
const bits = ty.floatBits(target);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
@@ -1996,7 +1996,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_float_type);
return debug_float_type;
},
- .Bool => {
+ .bool => {
const debug_bool_type = try o.builder.debugBoolType(
try o.builder.metadataString("bool"),
8, // lldb cannot handle non-byte sized types
@@ -2004,7 +2004,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_bool_type);
return debug_bool_type;
},
- .Pointer => {
+ .pointer => {
// Normalize everything that the debug info does not represent.
const ptr_info = ty.ptrInfo(zcu);
@@ -2126,7 +2126,7 @@ pub const Object = struct {
return debug_ptr_type;
},
- .Opaque => {
+ .@"opaque" => {
if (ty.toIntern() == .anyopaque_type) {
const debug_opaque_type = try o.builder.debugSignedType(
try o.builder.metadataString("anyopaque"),
@@ -2158,7 +2158,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_opaque_type);
return debug_opaque_type;
},
- .Array => {
+ .array => {
const debug_array_type = try o.builder.debugArrayType(
.none, // Name
.none, // File
@@ -2177,14 +2177,14 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_array_type);
return debug_array_type;
},
- .Vector => {
+ .vector => {
const elem_ty = ty.elemType2(zcu);
// Vector elements cannot be padded since that would make
// @bitSizOf(elem) * len > @bitSizOf(vec).
// Neither gdb nor lldb seem to be able to display non-byte sized
// vectors properly.
const debug_elem_type = switch (elem_ty.zigTypeTag(zcu)) {
- .Int => blk: {
+ .int => blk: {
const info = elem_ty.intInfo(zcu);
assert(info.bits != 0);
const name = try o.allocTypeName(ty);
@@ -2195,7 +2195,7 @@ pub const Object = struct {
.unsigned => try o.builder.debugUnsignedType(builder_name, info.bits),
};
},
- .Bool => try o.builder.debugBoolType(
+ .bool => try o.builder.debugBoolType(
try o.builder.metadataString("bool"),
1,
),
@@ -2221,7 +2221,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_vector_type);
return debug_vector_type;
},
- .Optional => {
+ .optional => {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const child_ty = ty.optionalChild(zcu);
@@ -2302,7 +2302,7 @@ pub const Object = struct {
return debug_optional_type;
},
- .ErrorUnion => {
+ .error_union => {
const payload_ty = ty.errorUnionPayload(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// TODO: Maybe remove?
@@ -2375,7 +2375,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_error_union_type);
return debug_error_union_type;
},
- .ErrorSet => {
+ .error_set => {
const debug_error_set = try o.builder.debugUnsignedType(
try o.builder.metadataString("anyerror"),
16,
@@ -2383,7 +2383,7 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_error_set);
return debug_error_set;
},
- .Struct => {
+ .@"struct" => {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
@@ -2531,7 +2531,7 @@ pub const Object = struct {
return debug_struct_type;
},
- .Union => {
+ .@"union" => {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
@@ -2693,7 +2693,7 @@ pub const Object = struct {
return debug_tagged_union_type;
},
- .Fn => {
+ .@"fn" => {
const fn_info = zcu.typeToFunc(ty).?;
var debug_param_types = std.ArrayList(Builder.Metadata).init(gpa);
@@ -2741,15 +2741,15 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_function_type);
return debug_function_type;
},
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .EnumLiteral => unreachable,
+ .comptime_int => unreachable,
+ .comptime_float => unreachable,
+ .type => unreachable,
+ .undefined => unreachable,
+ .null => unreachable,
+ .enum_literal => unreachable,
- .Frame => @panic("TODO implement lowerDebugType for Frame types"),
- .AnyFrame => @panic("TODO implement lowerDebugType for AnyFrame types"),
+ .frame => @panic("TODO implement lowerDebugType for Frame types"),
+ .@"anyframe" => @panic("TODO implement lowerDebugType for AnyFrame types"),
}
}
@@ -3539,9 +3539,9 @@ pub const Object = struct {
const pt = o.pt;
const zcu = pt.zcu;
const lower_elem_ty = switch (elem_ty.zigTypeTag(zcu)) {
- .Opaque => true,
- .Fn => !zcu.typeToFunc(elem_ty).?.is_generic,
- .Array => elem_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu),
+ .@"opaque" => true,
+ .@"fn" => !zcu.typeToFunc(elem_ty).?.is_generic,
+ .array => elem_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu),
else => elem_ty.hasRuntimeBitsIgnoreComptime(zcu),
};
return if (lower_elem_ty) try o.lowerType(elem_ty) else .i8;
@@ -3883,7 +3883,7 @@ pub const Object = struct {
},
else => |payload| try o.lowerValue(payload),
};
- assert(payload_ty.zigTypeTag(zcu) != .Fn);
+ assert(payload_ty.zigTypeTag(zcu) != .@"fn");
var fields: [3]Builder.Type = undefined;
var vals: [3]Builder.Constant = undefined;
@@ -4303,7 +4303,7 @@ pub const Object = struct {
.field => |field| {
const agg_ty = Value.fromInterned(field.base).typeOf(zcu).childType(zcu);
const field_off: u64 = switch (agg_ty.zigTypeTag(zcu)) {
- .Pointer => off: {
+ .pointer => off: {
assert(agg_ty.isSlice(zcu));
break :off switch (field.index) {
Value.slice_ptr_index => 0,
@@ -4311,7 +4311,7 @@ pub const Object = struct {
else => unreachable,
};
},
- .Struct, .Union => switch (agg_ty.containerLayout(zcu)) {
+ .@"struct", .@"union" => switch (agg_ty.containerLayout(zcu)) {
.auto => agg_ty.structFieldOffset(@intCast(field.index), zcu),
.@"extern", .@"packed" => unreachable,
},
@@ -4344,7 +4344,7 @@ pub const Object = struct {
const ptr_ty = Type.fromInterned(uav.orig_ty);
- const is_fn_body = uav_ty.zigTypeTag(zcu) == .Fn;
+ const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";
if ((!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) or
(is_fn_body and zcu.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty);
@@ -4383,7 +4383,7 @@ pub const Object = struct {
const nav_ty = Type.fromInterned(owner_nav.typeOf(ip));
const ptr_ty = try pt.navPtrType(owner_nav_index);
- const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn;
+ const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
if ((!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) or
(is_fn_body and zcu.typeToFunc(nav_ty).?.is_generic))
{
@@ -4435,13 +4435,13 @@ pub const Object = struct {
const pt = o.pt;
const zcu = pt.zcu;
const int_ty = switch (ty.zigTypeTag(zcu)) {
- .Int => ty,
- .Enum => ty.intTagType(zcu),
- .Float => {
+ .int => ty,
+ .@"enum" => ty.intTagType(zcu),
+ .float => {
if (!is_rmw_xchg) return .none;
return o.builder.intType(@intCast(ty.abiSize(zcu) * 8));
},
- .Bool => return .i8,
+ .bool => return .i8,
else => return .none,
};
const bit_count = int_ty.intInfo(zcu).bits;
@@ -4693,7 +4693,7 @@ pub const NavGen = struct {
const global_index = o.nav_map.get(nav_index).?;
const decl_name = decl_name: {
- if (zcu.getTarget().isWasm() and ty.zigTypeTag(zcu) == .Fn) {
+ if (zcu.getTarget().isWasm() and ty.zigTypeTag(zcu) == .@"fn") {
if (lib_name.toSlice(ip)) |lib_name_slice| {
if (!std.mem.eql(u8, lib_name_slice, "c")) {
break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ nav.name.fmt(ip), lib_name_slice });
@@ -5192,8 +5192,8 @@ pub const FuncGen = struct {
const ip = &zcu.intern_pool;
const callee_ty = self.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
- .Fn => callee_ty,
- .Pointer => callee_ty.childType(zcu),
+ .@"fn" => callee_ty,
+ .pointer => callee_ty.childType(zcu),
else => unreachable,
};
const fn_info = zcu.typeToFunc(zig_fn_ty).?;
@@ -5410,7 +5410,7 @@ pub const FuncGen = struct {
try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder);
}
}
- if (param_ty.zigTypeTag(zcu) != .Optional) {
+ if (param_ty.zigTypeTag(zcu) != .optional) {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
}
if (ptr_info.flags.is_const) {
@@ -5773,9 +5773,9 @@ pub const FuncGen = struct {
const zcu = pt.zcu;
const scalar_ty = operand_ty.scalarType(zcu);
const int_ty = switch (scalar_ty.zigTypeTag(zcu)) {
- .Enum => scalar_ty.intTagType(zcu),
- .Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
- .Optional => blk: {
+ .@"enum" => scalar_ty.intTagType(zcu),
+ .int, .bool, .pointer, .error_set => scalar_ty,
+ .optional => blk: {
const payload_ty = operand_ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or
operand_ty.optionalReprIsPayload(zcu))
@@ -5848,7 +5848,7 @@ pub const FuncGen = struct {
);
return phi.toValue();
},
- .Float => return self.buildFloatCmp(fast, op, operand_ty, .{ lhs, rhs }),
+ .float => return self.buildFloatCmp(fast, op, operand_ty, .{ lhs, rhs }),
else => unreachable,
};
const is_signed = int_ty.isSignedInt(zcu);
@@ -5909,7 +5909,7 @@ pub const FuncGen = struct {
// a pointer to it. LLVM IR allows the call instruction to use function bodies instead
// of function pointers, however the phi makes it a runtime value and therefore
// the LLVM type has to be wrapped in a pointer.
- if (inst_ty.zigTypeTag(zcu) == .Fn or isByRef(inst_ty, zcu)) {
+ if (inst_ty.zigTypeTag(zcu) == .@"fn" or isByRef(inst_ty, zcu)) {
break :ty .ptr;
}
break :ty raw_llvm_ty;
@@ -6605,7 +6605,7 @@ pub const FuncGen = struct {
if (!isByRef(struct_ty, zcu)) {
assert(!isByRef(field_ty, zcu));
switch (struct_ty.zigTypeTag(zcu)) {
- .Struct => switch (struct_ty.containerLayout(zcu)) {
+ .@"struct" => switch (struct_ty.containerLayout(zcu)) {
.@"packed" => {
const struct_type = zcu.typeToStruct(struct_ty).?;
const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index);
@@ -6614,7 +6614,7 @@ pub const FuncGen = struct {
try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
const shifted_value = try self.wip.bin(.lshr, containing_int, shift_amt, "");
const elem_llvm_ty = try o.lowerType(field_ty);
- if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) {
+ if (field_ty.zigTypeTag(zcu) == .float or field_ty.zigTypeTag(zcu) == .vector) {
const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, shifted_value, same_size_int, "");
@@ -6632,11 +6632,11 @@ pub const FuncGen = struct {
return self.wip.extractValue(struct_llvm_val, &.{llvm_field_index}, "");
},
},
- .Union => {
+ .@"union" => {
assert(struct_ty.containerLayout(zcu) == .@"packed");
const containing_int = struct_llvm_val;
const elem_llvm_ty = try o.lowerType(field_ty);
- if (field_ty.zigTypeTag(zcu) == .Float or field_ty.zigTypeTag(zcu) == .Vector) {
+ if (field_ty.zigTypeTag(zcu) == .float or field_ty.zigTypeTag(zcu) == .vector) {
const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, containing_int, same_size_int, "");
@@ -6654,7 +6654,7 @@ pub const FuncGen = struct {
}
switch (struct_ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
const layout = struct_ty.containerLayout(zcu);
assert(layout != .@"packed");
const struct_llvm_ty = try o.lowerType(struct_ty);
@@ -6677,7 +6677,7 @@ pub const FuncGen = struct {
return self.load(field_ptr, field_ptr_ty);
}
},
- .Union => {
+ .@"union" => {
const union_llvm_ty = try o.lowerType(struct_ty);
const layout = struct_ty.unionGetLayout(zcu);
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
@@ -6934,7 +6934,7 @@ pub const FuncGen = struct {
if (output != .none) {
const output_inst = try self.resolveInst(output);
const output_ty = self.typeOf(output);
- assert(output_ty.zigTypeTag(zcu) == .Pointer);
+ assert(output_ty.zigTypeTag(zcu) == .pointer);
const elem_llvm_ty = try o.lowerPtrElemTy(output_ty.childType(zcu));
switch (constraint[0]) {
@@ -8280,7 +8280,7 @@ pub const FuncGen = struct {
.gte => .sge,
};
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const vec_len = ty.vectorLen(zcu);
const vector_result_ty = try o.builder.vectorType(.normal, vec_len, .i32);
@@ -8457,7 +8457,7 @@ pub const FuncGen = struct {
([1]Builder.Type{scalar_llvm_ty} ** 3)[0..params.len],
scalar_llvm_ty,
);
- if (ty.zigTypeTag(zcu) == .Vector) {
+ if (ty.zigTypeTag(zcu) == .vector) {
const result = try o.builder.poisonValue(llvm_ty);
return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(zcu));
}
@@ -8658,7 +8658,7 @@ pub const FuncGen = struct {
const scalar_ty = operand_ty.scalarType(zcu);
switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => return self.wip.callIntrinsic(
+ .int => return self.wip.callIntrinsic(
.normal,
.none,
.abs,
@@ -8666,7 +8666,7 @@ pub const FuncGen = struct {
&.{ operand, try o.builder.intValue(.i1, 0) },
"",
),
- .Float => return self.buildFloatOp(.fabs, .normal, operand_ty, 1, .{operand}),
+ .float => return self.buildFloatOp(.fabs, .normal, operand_ty, 1, .{operand}),
else => unreachable,
}
}
@@ -8806,11 +8806,11 @@ pub const FuncGen = struct {
return self.wip.conv(.unsigned, operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag(zcu) == .Int and inst_ty.isPtrAtRuntime(zcu)) {
+ if (operand_ty.zigTypeTag(zcu) == .int and inst_ty.isPtrAtRuntime(zcu)) {
return self.wip.cast(.inttoptr, operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag(zcu) == .Vector and inst_ty.zigTypeTag(zcu) == .Array) {
+ if (operand_ty.zigTypeTag(zcu) == .vector and inst_ty.zigTypeTag(zcu) == .array) {
const elem_ty = operand_ty.childType(zcu);
if (!result_is_ref) {
return self.ng.todo("implement bitcast vector to non-ref array", .{});
@@ -8837,7 +8837,7 @@ pub const FuncGen = struct {
}
}
return array_ptr;
- } else if (operand_ty.zigTypeTag(zcu) == .Array and inst_ty.zigTypeTag(zcu) == .Vector) {
+ } else if (operand_ty.zigTypeTag(zcu) == .array and inst_ty.zigTypeTag(zcu) == .vector) {
const elem_ty = operand_ty.childType(zcu);
const llvm_vector_ty = try o.lowerType(inst_ty);
if (!operand_is_ref) return self.ng.todo("implement bitcast non-ref array to vector", .{});
@@ -8883,7 +8883,7 @@ pub const FuncGen = struct {
}
if (llvm_dest_ty.isStruct(&o.builder) or
- ((operand_ty.zigTypeTag(zcu) == .Vector or inst_ty.zigTypeTag(zcu) == .Vector) and
+ ((operand_ty.zigTypeTag(zcu) == .vector or inst_ty.zigTypeTag(zcu) == .vector) and
operand_ty.bitSize(zcu) != inst_ty.bitSize(zcu)))
{
// Both our operand and our result are values, not pointers,
@@ -9687,7 +9687,7 @@ pub const FuncGen = struct {
// If not an even byte-multiple, we need zero-extend + shift-left 1 byte
// The truncated result at the end will be the correct bswap
const scalar_ty = try o.builder.intType(@intCast(bits + 8));
- if (operand_ty.zigTypeTag(zcu) == .Vector) {
+ if (operand_ty.zigTypeTag(zcu) == .vector) {
const vec_len = operand_ty.vectorLen(zcu);
llvm_operand_ty = try o.builder.vectorType(.normal, vec_len, scalar_ty);
} else llvm_operand_ty = scalar_ty;
@@ -9993,7 +9993,7 @@ pub const FuncGen = struct {
else => unreachable,
}, &.{llvm_operand_ty}, &.{operand}, ""),
.Min, .Max => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
+ .int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
.Min => if (scalar_ty.isSignedInt(zcu))
.@"vector.reduce.smin"
else
@@ -10004,7 +10004,7 @@ pub const FuncGen = struct {
.@"vector.reduce.umax",
else => unreachable,
}, &.{llvm_operand_ty}, &.{operand}, ""),
- .Float => if (intrinsicsAllowed(scalar_ty, target))
+ .float => if (intrinsicsAllowed(scalar_ty, target))
return self.wip.callIntrinsic(fast, .none, switch (reduce.operation) {
.Min => .@"vector.reduce.fmin",
.Max => .@"vector.reduce.fmax",
@@ -10013,12 +10013,12 @@ pub const FuncGen = struct {
else => unreachable,
},
.Add, .Mul => switch (scalar_ty.zigTypeTag(zcu)) {
- .Int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
+ .int => return self.wip.callIntrinsic(.normal, .none, switch (reduce.operation) {
.Add => .@"vector.reduce.add",
.Mul => .@"vector.reduce.mul",
else => unreachable,
}, &.{llvm_operand_ty}, &.{operand}, ""),
- .Float => if (intrinsicsAllowed(scalar_ty, target))
+ .float => if (intrinsicsAllowed(scalar_ty, target))
return self.wip.callIntrinsic(fast, .none, switch (reduce.operation) {
.Add => .@"vector.reduce.fadd",
.Mul => .@"vector.reduce.fmul",
@@ -10095,7 +10095,7 @@ pub const FuncGen = struct {
const llvm_result_ty = try o.lowerType(result_ty);
switch (result_ty.zigTypeTag(zcu)) {
- .Vector => {
+ .vector => {
var vector = try o.builder.poisonValue(llvm_result_ty);
for (elements, 0..) |elem, i| {
const index_u32 = try o.builder.intValue(.i32, i);
@@ -10104,7 +10104,7 @@ pub const FuncGen = struct {
}
return vector;
},
- .Struct => {
+ .@"struct" => {
if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
assert(backing_int_ty != .none);
@@ -10170,7 +10170,7 @@ pub const FuncGen = struct {
return result;
}
},
- .Array => {
+ .array => {
assert(isByRef(result_ty, zcu));
const llvm_usize = try o.lowerType(Type.usize);
@@ -10577,7 +10577,7 @@ pub const FuncGen = struct {
const zcu = pt.zcu;
const struct_ty = struct_ptr_ty.childType(zcu);
switch (struct_ty.zigTypeTag(zcu)) {
- .Struct => switch (struct_ty.containerLayout(zcu)) {
+ .@"struct" => switch (struct_ty.containerLayout(zcu)) {
.@"packed" => {
const result_ty = self.typeOfIndex(inst);
const result_ty_info = result_ty.ptrInfo(zcu);
@@ -10618,7 +10618,7 @@ pub const FuncGen = struct {
}
},
},
- .Union => {
+ .@"union" => {
const layout = struct_ty.unionGetLayout(zcu);
if (layout.payload_size == 0 or struct_ty.containerLayout(zcu) == .@"packed") return struct_ptr;
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
@@ -10761,7 +10761,7 @@ pub const FuncGen = struct {
return result_ptr;
}
- if (elem_ty.zigTypeTag(zcu) == .Float or elem_ty.zigTypeTag(zcu) == .Vector) {
+ if (elem_ty.zigTypeTag(zcu) == .float or elem_ty.zigTypeTag(zcu) == .vector) {
const same_size_int = try o.builder.intType(@intCast(elem_bits));
const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
@@ -11432,7 +11432,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.isSlice(zcu) or
- (ty.zigTypeTag(zcu) == .Optional and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu)))
+ (ty.zigTypeTag(zcu) == .optional and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu)))
{
it.llvm_index += 1;
return .slice;
@@ -11707,8 +11707,8 @@ fn ccAbiPromoteInt(
else => {},
}
const int_info = switch (ty.zigTypeTag(zcu)) {
- .Bool => Type.u1.intInfo(zcu),
- .Int, .Enum, .ErrorSet => ty.intInfo(zcu),
+ .bool => Type.u1.intInfo(zcu),
+ .int, .@"enum", .error_set => ty.intInfo(zcu),
else => return null,
};
return switch (target.os.tag) {
@@ -11753,30 +11753,30 @@ fn isByRef(ty: Type, zcu: *Zcu) bool {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
- .Type,
- .ComptimeInt,
- .ComptimeFloat,
- .EnumLiteral,
- .Undefined,
- .Null,
- .Opaque,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .enum_literal,
+ .undefined,
+ .null,
+ .@"opaque",
=> unreachable,
- .NoReturn,
- .Void,
- .Bool,
- .Int,
- .Float,
- .Pointer,
- .ErrorSet,
- .Fn,
- .Enum,
- .Vector,
- .AnyFrame,
+ .noreturn,
+ .void,
+ .bool,
+ .int,
+ .float,
+ .pointer,
+ .error_set,
+ .@"fn",
+ .@"enum",
+ .vector,
+ .@"anyframe",
=> return false,
- .Array, .Frame => return ty.hasRuntimeBits(zcu),
- .Struct => {
+ .array, .frame => return ty.hasRuntimeBits(zcu),
+ .@"struct" => {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var count: usize = 0;
@@ -11807,18 +11807,18 @@ fn isByRef(ty: Type, zcu: *Zcu) bool {
}
return false;
},
- .Union => switch (ty.containerLayout(zcu)) {
+ .@"union" => switch (ty.containerLayout(zcu)) {
.@"packed" => return false,
else => return ty.hasRuntimeBits(zcu),
},
- .ErrorUnion => {
+ .error_union => {
const payload_ty = ty.errorUnionPayload(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return false;
}
return true;
},
- .Optional => {
+ .optional => {
const payload_ty = ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
return false;
@@ -11833,21 +11833,21 @@ fn isByRef(ty: Type, zcu: *Zcu) bool {
fn isScalar(zcu: *Zcu, ty: Type) bool {
return switch (ty.zigTypeTag(zcu)) {
- .Void,
- .Bool,
- .NoReturn,
- .Int,
- .Float,
- .Pointer,
- .Optional,
- .ErrorSet,
- .Enum,
- .AnyFrame,
- .Vector,
+ .void,
+ .bool,
+ .noreturn,
+ .int,
+ .float,
+ .pointer,
+ .optional,
+ .error_set,
+ .@"enum",
+ .@"anyframe",
+ .vector,
=> true,
- .Struct => ty.containerLayout(zcu) == .@"packed",
- .Union => ty.containerLayout(zcu) == .@"packed",
+ .@"struct" => ty.containerLayout(zcu) == .@"packed",
+ .@"union" => ty.containerLayout(zcu) == .@"packed",
else => false,
};
}
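
One detail visible throughout the llvm.zig hunks: tags whose new lower-case names collide with keywords (fn, struct, union, enum, opaque, anyframe) must be written as quoted identifiers, while names such as .int or .pointer stay plain. The quoting is spelling only; the tags behave like any other enum values. A small sketch of that rule, with a hypothetical helper:

const std = @import("std");

fn isFnOrContainer(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .@"fn", .@"struct", .@"union", .@"enum", .@"opaque" => true,
        else => false,
    };
}

test "quoted tag names" {
    const F = fn () void;
    try std.testing.expect(isFnOrContainer(F));
    try std.testing.expect(isFnOrContainer(enum { a }));
    try std.testing.expect(!isFnOrContainer(u8));
}
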
diff --git a/src/codegen/llvm/BitcodeReader.zig b/src/codegen/llvm/BitcodeReader.zig
index 668e610a69..940b965340 100644
--- a/src/codegen/llvm/BitcodeReader.zig
+++ b/src/codegen/llvm/BitcodeReader.zig
@@ -273,7 +273,7 @@ fn startBlock(bc: *BitcodeReader, block_id: ?u32, new_abbrev_len: u6) !void {
};
try state.abbrevs.abbrevs.ensureTotalCapacity(
bc.allocator,
- @typeInfo(Abbrev.Builtin).Enum.fields.len + abbrevs.len,
+ @typeInfo(Abbrev.Builtin).@"enum".fields.len + abbrevs.len,
);
assert(state.abbrevs.abbrevs.items.len == @intFromEnum(Abbrev.Builtin.end_block));
@@ -309,7 +309,7 @@ fn startBlock(bc: *BitcodeReader, block_id: ?u32, new_abbrev_len: u6) !void {
.{ .encoding = .{ .vbr = 6 } }, // ops
},
});
- assert(state.abbrevs.abbrevs.items.len == @typeInfo(Abbrev.Builtin).Enum.fields.len);
+ assert(state.abbrevs.abbrevs.items.len == @typeInfo(Abbrev.Builtin).@"enum".fields.len);
for (abbrevs) |abbrev| try state.abbrevs.addAbbrevAssumeCapacity(bc.allocator, abbrev);
}
@@ -448,7 +448,7 @@ const Abbrev = struct {
define_abbrev,
unabbrev_record,
- const first_record_id: u32 = std.math.maxInt(u32) - @typeInfo(Builtin).Enum.fields.len + 1;
+ const first_record_id: u32 = std.math.maxInt(u32) - @typeInfo(Builtin).@"enum".fields.len + 1;
fn toRecordId(builtin: Builtin) u32 {
return first_record_id + @intFromEnum(builtin);
}
diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig
index 9ada51acad..50b43319da 100644
--- a/src/codegen/llvm/Builder.zig
+++ b/src/codegen/llvm/Builder.zig
@@ -1115,7 +1115,7 @@ pub const Attribute = union(Kind) {
=> |kind| {
const field = comptime blk: {
@setEvalBranchQuota(10_000);
- for (@typeInfo(Attribute).Union.fields) |field| {
+ for (@typeInfo(Attribute).@"union".fields) |field| {
if (std.mem.eql(u8, field.name, @tagName(kind))) break :blk field;
}
unreachable;
@@ -1232,11 +1232,11 @@ pub const Attribute = union(Kind) {
.dereferenceable_or_null,
=> |size| try writer.print(" {s}({d})", .{ @tagName(attribute), size }),
.nofpclass => |fpclass| {
- const Int = @typeInfo(FpClass).Struct.backing_integer.?;
+ const Int = @typeInfo(FpClass).@"struct".backing_integer.?;
try writer.print(" {s}(", .{@tagName(attribute)});
var any = false;
var remaining: Int = @bitCast(fpclass);
- inline for (@typeInfo(FpClass).Struct.decls) |decl| {
+ inline for (@typeInfo(FpClass).@"struct".decls) |decl| {
const pattern: Int = @bitCast(@field(FpClass, decl.name));
if (remaining & pattern == pattern) {
if (!any) {
@@ -1259,7 +1259,7 @@ pub const Attribute = union(Kind) {
.allockind => |allockind| {
try writer.print(" {s}(\"", .{@tagName(attribute)});
var any = false;
- inline for (@typeInfo(AllocKind).Struct.fields) |field| {
+ inline for (@typeInfo(AllocKind).@"struct".fields) |field| {
if (comptime std.mem.eql(u8, field.name, "_")) continue;
if (@field(allockind, field.name)) {
if (!any) {
@@ -1418,7 +1418,7 @@ pub const Attribute = union(Kind) {
none = std.math.maxInt(u32),
_,
- pub const len = @typeInfo(Kind).Enum.fields.len - 2;
+ pub const len = @typeInfo(Kind).@"enum".fields.len - 2;
pub fn fromString(str: String) Kind {
assert(!str.isAnon());
@@ -5037,7 +5037,7 @@ pub const Function = struct {
index: Instruction.ExtraIndex,
) struct { data: T, trail: ExtraDataTrail } {
var result: T = undefined;
- const fields = @typeInfo(T).Struct.fields;
+ const fields = @typeInfo(T).@"struct".fields;
inline for (fields, self.extra[index..][0..fields.len]) |field, value|
@field(result, field.name) = switch (field.type) {
u32 => value,
@@ -6151,7 +6151,7 @@ pub const WipFunction = struct {
fn addExtra(wip_extra: *@This(), extra: anytype) Instruction.ExtraIndex {
const result = wip_extra.index;
- inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| {
const value = @field(extra, field.name);
wip_extra.items[wip_extra.index] = switch (field.type) {
u32 => value,
@@ -6175,7 +6175,7 @@ pub const WipFunction = struct {
}
fn appendSlice(wip_extra: *@This(), slice: anytype) void {
- if (@typeInfo(@TypeOf(slice)).Pointer.child == Value)
+ if (@typeInfo(@TypeOf(slice)).pointer.child == Value)
@compileError("use appendMappedValues");
const data: []const u32 = @ptrCast(slice);
@memcpy(wip_extra.items[wip_extra.index..][0..data.len], data);
@@ -6760,7 +6760,7 @@ pub const WipFunction = struct {
) Allocator.Error!void {
try self.extra.ensureUnusedCapacity(
self.builder.gpa,
- count * (@typeInfo(Extra).Struct.fields.len + trail_len),
+ count * (@typeInfo(Extra).@"struct".fields.len + trail_len),
);
}
@@ -6799,7 +6799,7 @@ pub const WipFunction = struct {
fn addExtraAssumeCapacity(self: *WipFunction, extra: anytype) Instruction.ExtraIndex {
const result: Instruction.ExtraIndex = @intCast(self.extra.items.len);
- inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| {
const value = @field(extra, field.name);
self.extra.appendAssumeCapacity(switch (field.type) {
u32 => value,
@@ -6848,7 +6848,7 @@ pub const WipFunction = struct {
index: Instruction.ExtraIndex,
) struct { data: T, trail: ExtraDataTrail } {
var result: T = undefined;
- const fields = @typeInfo(T).Struct.fields;
+ const fields = @typeInfo(T).@"struct".fields;
inline for (fields, self.extra.items[index..][0..fields.len]) |field, value|
@field(result, field.name) = switch (field.type) {
u32 => value,
@@ -7926,17 +7926,17 @@ pub const Metadata = enum(u32) {
writer: anytype,
) @TypeOf(writer).Error!void {
var need_pipe = false;
- inline for (@typeInfo(DIFlags).Struct.fields) |field| {
+ inline for (@typeInfo(DIFlags).@"struct".fields) |field| {
switch (@typeInfo(field.type)) {
- .Bool => if (@field(self, field.name)) {
+ .bool => if (@field(self, field.name)) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DIFlag{s}", .{field.name});
},
- .Enum => if (@field(self, field.name) != .Zero) {
+ .@"enum" => if (@field(self, field.name) != .Zero) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DIFlag{s}", .{@tagName(@field(self, field.name))});
},
- .Int => assert(@field(self, field.name) == 0),
+ .int => assert(@field(self, field.name) == 0),
else => @compileError("bad field type: " ++ field.name ++ ": " ++
@typeName(field.type)),
}
@@ -7988,17 +7988,17 @@ pub const Metadata = enum(u32) {
writer: anytype,
) @TypeOf(writer).Error!void {
var need_pipe = false;
- inline for (@typeInfo(DISPFlags).Struct.fields) |field| {
+ inline for (@typeInfo(DISPFlags).@"struct".fields) |field| {
switch (@typeInfo(field.type)) {
- .Bool => if (@field(self, field.name)) {
+ .bool => if (@field(self, field.name)) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DISPFlag{s}", .{field.name});
},
- .Enum => if (@field(self, field.name) != .Zero) {
+ .@"enum" => if (@field(self, field.name) != .Zero) {
if (need_pipe) try writer.writeAll(" | ") else need_pipe = true;
try writer.print("DISPFlag{s}", .{@tagName(@field(self, field.name))});
},
- .Int => assert(@field(self, field.name) == 0),
+ .int => assert(@field(self, field.name) == 0),
else => @compileError("bad field type: " ++ field.name ++ ": " ++
@typeName(field.type)),
}
@@ -8281,16 +8281,16 @@ pub const Metadata = enum(u32) {
}!std.fmt.Formatter(format) {
const Node = @TypeOf(node);
const MaybeNode = switch (@typeInfo(Node)) {
- .Optional => Node,
- .Null => ?noreturn,
+ .optional => Node,
+ .null => ?noreturn,
else => ?Node,
};
- const Some = @typeInfo(MaybeNode).Optional.child;
+ const Some = @typeInfo(MaybeNode).optional.child;
return .{ .data = .{
.formatter = formatter,
.prefix = prefix,
.node = if (@as(MaybeNode, node)) |some| switch (@typeInfo(Some)) {
- .Enum => |enum_info| switch (Some) {
+ .@"enum" => |enum_info| switch (Some) {
Metadata => switch (some) {
.none => .none,
else => try formatter.refUnwrapped(some.unwrap(formatter.builder)),
@@ -8301,18 +8301,18 @@ pub const Metadata = enum(u32) {
else
@compileError("unknown type to format: " ++ @typeName(Node)),
},
- .EnumLiteral => .{ .raw = @tagName(some) },
- .Bool => .{ .bool = some },
- .Struct => switch (Some) {
+ .enum_literal => .{ .raw = @tagName(some) },
+ .bool => .{ .bool = some },
+ .@"struct" => switch (Some) {
DIFlags => .{ .di_flags = some },
Subprogram.DISPFlags => .{ .sp_flags = some },
else => @compileError("unknown type to format: " ++ @typeName(Node)),
},
- .Int, .ComptimeInt => .{ .u64 = some },
- .Pointer => .{ .raw = some },
+ .int, .comptime_int => .{ .u64 = some },
+ .pointer => .{ .raw = some },
else => @compileError("unknown type to format: " ++ @typeName(Node)),
} else switch (@typeInfo(Node)) {
- .Optional, .Null => .none,
+ .optional, .null => .none,
else => unreachable,
},
} };
@@ -8414,7 +8414,7 @@ pub const Metadata = enum(u32) {
}
fmt_str = fmt_str ++ ")\n";
- var fmt_args: @Type(.{ .Struct = .{
+ var fmt_args: @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &fields,
.decls = &.{},
@@ -8501,10 +8501,10 @@ pub fn init(options: Options) Allocator.Error!Builder {
}
{
- const static_len = @typeInfo(Type).Enum.fields.len - 1;
+ const static_len = @typeInfo(Type).@"enum".fields.len - 1;
try self.type_map.ensureTotalCapacity(self.gpa, static_len);
try self.type_items.ensureTotalCapacity(self.gpa, static_len);
- inline for (@typeInfo(Type.Simple).Enum.fields) |simple_field| {
+ inline for (@typeInfo(Type.Simple).@"enum".fields) |simple_field| {
const result = self.getOrPutTypeNoExtraAssumeCapacity(
.{ .tag = .simple, .data = simple_field.value },
);
@@ -9031,14 +9031,14 @@ pub fn getIntrinsic(
pub fn intConst(self: *Builder, ty: Type, value: anytype) Allocator.Error!Constant {
const int_value = switch (@typeInfo(@TypeOf(value))) {
- .Int, .ComptimeInt => value,
- .Enum => @intFromEnum(value),
+ .int, .comptime_int => value,
+ .@"enum" => @intFromEnum(value),
else => @compileError("intConst expected an integral value, got " ++ @typeName(@TypeOf(value))),
};
var limbs: [
switch (@typeInfo(@TypeOf(int_value))) {
- .Int => |info| std.math.big.int.calcTwosCompLimbCount(info.bits),
- .ComptimeInt => std.math.big.int.calcLimbLen(int_value),
+ .int => |info| std.math.big.int.calcTwosCompLimbCount(info.bits),
+ .comptime_int => std.math.big.int.calcLimbLen(int_value),
else => unreachable,
}
]std.math.big.Limb = undefined;
@@ -10759,7 +10759,7 @@ fn ensureUnusedTypeCapacity(
try self.type_items.ensureUnusedCapacity(self.gpa, count);
try self.type_extra.ensureUnusedCapacity(
self.gpa,
- count * (@typeInfo(Extra).Struct.fields.len + trail_len),
+ count * (@typeInfo(Extra).@"struct".fields.len + trail_len),
);
}
@@ -10789,7 +10789,7 @@ fn getOrPutTypeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { n
fn addTypeExtraAssumeCapacity(self: *Builder, extra: anytype) Type.Item.ExtraIndex {
const result: Type.Item.ExtraIndex = @intCast(self.type_extra.items.len);
- inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| {
const value = @field(extra, field.name);
self.type_extra.appendAssumeCapacity(switch (field.type) {
u32 => value,
@@ -10827,7 +10827,7 @@ fn typeExtraDataTrail(
index: Type.Item.ExtraIndex,
) struct { data: T, trail: TypeExtraDataTrail } {
var result: T = undefined;
- const fields = @typeInfo(T).Struct.fields;
+ const fields = @typeInfo(T).@"struct".fields;
inline for (fields, self.type_extra.items[index..][0..fields.len]) |field, value|
@field(result, field.name) = switch (field.type) {
u32 => value,
@@ -11642,7 +11642,7 @@ fn ensureUnusedConstantCapacity(
try self.constant_items.ensureUnusedCapacity(self.gpa, count);
try self.constant_extra.ensureUnusedCapacity(
self.gpa,
- count * (@typeInfo(Extra).Struct.fields.len + trail_len),
+ count * (@typeInfo(Extra).@"struct".fields.len + trail_len),
);
}
@@ -11717,7 +11717,7 @@ fn getOrPutConstantAggregateAssumeCapacity(
fn addConstantExtraAssumeCapacity(self: *Builder, extra: anytype) Constant.Item.ExtraIndex {
const result: Constant.Item.ExtraIndex = @intCast(self.constant_extra.items.len);
- inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| {
const value = @field(extra, field.name);
self.constant_extra.appendAssumeCapacity(switch (field.type) {
u32 => value,
@@ -11756,7 +11756,7 @@ fn constantExtraDataTrail(
index: Constant.Item.ExtraIndex,
) struct { data: T, trail: ConstantExtraDataTrail } {
var result: T = undefined;
- const fields = @typeInfo(T).Struct.fields;
+ const fields = @typeInfo(T).@"struct".fields;
inline for (fields, self.constant_extra.items[index..][0..fields.len]) |field, value|
@field(result, field.name) = switch (field.type) {
u32 => value,
@@ -11784,13 +11784,13 @@ fn ensureUnusedMetadataCapacity(
try self.metadata_items.ensureUnusedCapacity(self.gpa, count);
try self.metadata_extra.ensureUnusedCapacity(
self.gpa,
- count * (@typeInfo(Extra).Struct.fields.len + trail_len),
+ count * (@typeInfo(Extra).@"struct".fields.len + trail_len),
);
}
fn addMetadataExtraAssumeCapacity(self: *Builder, extra: anytype) Metadata.Item.ExtraIndex {
const result: Metadata.Item.ExtraIndex = @intCast(self.metadata_extra.items.len);
- inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| {
const value = @field(extra, field.name);
self.metadata_extra.appendAssumeCapacity(switch (field.type) {
u32 => value,
@@ -11829,7 +11829,7 @@ fn metadataExtraDataTrail(
index: Metadata.Item.ExtraIndex,
) struct { data: T, trail: MetadataExtraDataTrail } {
var result: T = undefined;
- const fields = @typeInfo(T).Struct.fields;
+ const fields = @typeInfo(T).@"struct".fields;
inline for (fields, self.metadata_extra.items[index..][0..fields.len]) |field, value|
@field(result, field.name) = switch (field.type) {
u32 => value,
@@ -13921,7 +13921,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co
const MetadataKindBlock = ir.MetadataKindBlock;
var metadata_kind_block = try module_block.enterSubBlock(MetadataKindBlock, true);
- inline for (@typeInfo(ir.FixedMetadataKind).Enum.fields) |field| {
+ inline for (@typeInfo(ir.FixedMetadataKind).@"enum".fields) |field| {
// don't include `dbg` in stripped functions
if (!(self.strip and std.mem.eql(u8, field.name, "dbg"))) {
try metadata_kind_block.writeAbbrev(MetadataKindBlock.Kind{
@@ -14197,7 +14197,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co
const limbs_len = std.math.divCeil(u32, extra.bit_width, 64) catch unreachable;
try record.ensureTotalCapacity(self.gpa, 3 + limbs_len);
record.appendAssumeCapacity(@as(
- @typeInfo(MetadataBlock.Enumerator.Flags).Struct.backing_integer.?,
+ @typeInfo(MetadataBlock.Enumerator.Flags).@"struct".backing_integer.?,
@bitCast(flags),
));
record.appendAssumeCapacity(extra.bit_width);
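
The intConst hunk above shows the general shape of this rename: switches over @typeInfo now use the lowercase tag names of std.builtin.Type, with keyword names escaped as .@"enum", .@"struct", and so on. As a minimal standalone sketch of that dispatch (the toU64 helper below is illustrative and not part of this patch):

const std = @import("std");

// Illustrative helper, not part of this patch: the same dispatch shape as
// intConst above, using the renamed lowercase std.builtin.Type tag names.
fn toU64(value: anytype) u64 {
    return switch (@typeInfo(@TypeOf(value))) {
        .int, .comptime_int => @intCast(value),
        .@"enum" => @intFromEnum(value),
        else => @compileError("expected an integral value, got " ++ @typeName(@TypeOf(value))),
    };
}

test "toU64" {
    const E = enum(u8) { a = 3 };
    try std.testing.expectEqual(@as(u64, 42), toU64(@as(u32, 42)));
    try std.testing.expectEqual(@as(u64, 3), toU64(E.a));
}
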
diff --git a/src/codegen/llvm/bitcode_writer.zig b/src/codegen/llvm/bitcode_writer.zig
index 0b821a32e7..049a15fe17 100644
--- a/src/codegen/llvm/bitcode_writer.zig
+++ b/src/codegen/llvm/bitcode_writer.zig
@@ -407,14 +407,14 @@ fn charTo6Bit(c: u8) u8 {
fn BufType(comptime T: type, comptime min_len: usize) type {
return std.meta.Int(.unsigned, @max(min_len, @bitSizeOf(switch (@typeInfo(T)) {
- .ComptimeInt => u32,
- .Int => |info| if (info.signedness == .unsigned)
+ .comptime_int => u32,
+ .int => |info| if (info.signedness == .unsigned)
T
else
@compileError("Unsupported type: " ++ @typeName(T)),
- .Enum => |info| info.tag_type,
- .Bool => u1,
- .Struct => |info| switch (info.layout) {
+ .@"enum" => |info| info.tag_type,
+ .bool => u1,
+ .@"struct" => |info| switch (info.layout) {
.auto, .@"extern" => @compileError("Unsupported type: " ++ @typeName(T)),
.@"packed" => std.meta.Int(.unsigned, @bitSizeOf(T)),
},
@@ -424,10 +424,10 @@ fn BufType(comptime T: type, comptime min_len: usize) type {
fn bufValue(value: anytype, comptime min_len: usize) BufType(@TypeOf(value), min_len) {
return switch (@typeInfo(@TypeOf(value))) {
- .ComptimeInt, .Int => @intCast(value),
- .Enum => @intFromEnum(value),
- .Bool => @intFromBool(value),
- .Struct => @intCast(@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(value))), @bitCast(value))),
+ .comptime_int, .int => @intCast(value),
+ .@"enum" => @intFromEnum(value),
+ .bool => @intFromBool(value),
+ .@"struct" => @intCast(@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(value))), @bitCast(value))),
else => unreachable,
};
}
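
BufType and bufValue follow the same pattern at the type level: the @typeInfo switch now matches .comptime_int, .int, .@"enum", .bool, and .@"struct". A self-contained sketch of that kind of type-returning dispatch, where the BackingInt name and supported type set are chosen only for illustration:

const std = @import("std");

// Hypothetical helper mirroring the BufType dispatch above; the name and the
// supported set of types are chosen only for illustration.
fn BackingInt(comptime T: type) type {
    return switch (@typeInfo(T)) {
        .int => T,
        .@"enum" => |info| info.tag_type,
        .bool => u1,
        .@"struct" => |info| switch (info.layout) {
            .@"packed" => std.meta.Int(.unsigned, @bitSizeOf(T)),
            else => @compileError("unsupported type: " ++ @typeName(T)),
        },
        else => @compileError("unsupported type: " ++ @typeName(T)),
    };
}

test "BackingInt" {
    const Flags = packed struct { a: bool = false, b: u3 = 0 };
    try std.testing.expect(BackingInt(Flags) == u4);
    try std.testing.expect(BackingInt(bool) == u1);
    try std.testing.expect(BackingInt(enum(u2) { x, y }) == u2);
}
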
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index d1df1ba77e..345e80a23c 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -439,7 +439,7 @@ const NavGen = struct {
const zcu = pt.zcu;
if (try self.air.value(inst, pt)) |val| {
const ty = self.typeOf(inst);
- if (ty.zigTypeTag(zcu) == .Fn) {
+ if (ty.zigTypeTag(zcu) == .@"fn") {
const fn_nav = switch (zcu.intern_pool.indexToKey(val.ip_index)) {
.@"extern" => |@"extern"| @"extern".owner_nav,
.func => |func| func.owner_nav,
@@ -641,16 +641,16 @@ const NavGen = struct {
fn isSpvVector(self: *NavGen, ty: Type) bool {
const zcu = self.pt.zcu;
const target = self.getTarget();
- if (ty.zigTypeTag(zcu) != .Vector) return false;
+ if (ty.zigTypeTag(zcu) != .vector) return false;
// TODO: This check must be expanded for types that can be represented
// as integers (enums / packed structs?) and types that are represented
// by multiple SPIR-V values.
const scalar_ty = ty.scalarType(zcu);
switch (scalar_ty.zigTypeTag(zcu)) {
- .Bool,
- .Int,
- .Float,
+ .bool,
+ .int,
+ .float,
=> {},
else => return false,
}
@@ -668,26 +668,26 @@ const NavGen = struct {
const zcu = self.pt.zcu;
const target = self.getTarget();
var scalar_ty = ty.scalarType(zcu);
- if (scalar_ty.zigTypeTag(zcu) == .Enum) {
+ if (scalar_ty.zigTypeTag(zcu) == .@"enum") {
scalar_ty = scalar_ty.intTagType(zcu);
}
const vector_len = if (ty.isVector(zcu)) ty.vectorLen(zcu) else null;
return switch (scalar_ty.zigTypeTag(zcu)) {
- .Bool => ArithmeticTypeInfo{
+ .bool => ArithmeticTypeInfo{
.bits = 1, // Doesn't matter for this class.
.backing_bits = self.backingIntBits(1).?,
.vector_len = vector_len,
.signedness = .unsigned, // Technically, but doesn't matter for this class.
.class = .bool,
},
- .Float => ArithmeticTypeInfo{
+ .float => ArithmeticTypeInfo{
.bits = scalar_ty.floatBits(target),
.backing_bits = scalar_ty.floatBits(target), // TODO: F80?
.vector_len = vector_len,
.signedness = .signed, // Technically, but doesn't matter for this class.
.class = .float,
},
- .Int => blk: {
+ .int => blk: {
const int_info = scalar_ty.intInfo(zcu);
// TODO: Maybe it's useful to also return this value.
const maybe_backing_bits = self.backingIntBits(int_info.bits);
@@ -705,8 +705,8 @@ const NavGen = struct {
.composite_integer,
};
},
- .Enum => unreachable,
- .Vector => unreachable,
+ .@"enum" => unreachable,
+ .vector => unreachable,
else => unreachable, // Unhandled arithmetic type
};
}
@@ -748,8 +748,8 @@ const NavGen = struct {
const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
const signedness: Signedness = switch (@typeInfo(@TypeOf(value))) {
- .Int => |int| int.signedness,
- .ComptimeInt => if (value < 0) .signed else .unsigned,
+ .int => |int| int.signedness,
+ .comptime_int => if (value < 0) .signed else .unsigned,
else => unreachable,
};
@@ -1243,7 +1243,7 @@ const NavGen = struct {
else => {},
}
- // const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn;
+ // const is_fn_body = decl_ty.zigTypeTag(zcu) == .@"fn";
if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
// Pointer to nothing - return undefined
return self.spv.constUndef(ty_id);
@@ -1539,11 +1539,11 @@ const NavGen = struct {
const section = &self.spv.sections.types_globals_constants;
switch (ty.zigTypeTag(zcu)) {
- .NoReturn => {
+ .noreturn => {
assert(repr == .direct);
return try self.spv.voidType();
},
- .Void => switch (repr) {
+ .void => switch (repr) {
.direct => {
return try self.spv.voidType();
},
@@ -1557,11 +1557,11 @@ const NavGen = struct {
return result_id;
},
},
- .Bool => switch (repr) {
+ .bool => switch (repr) {
.direct => return try self.spv.boolType(),
.indirect => return try self.resolveType(Type.u1, .indirect),
},
- .Int => {
+ .int => {
const int_info = ty.intInfo(zcu);
if (int_info.bits == 0) {
// Some times, the backend will be asked to generate a pointer to i0. OpTypeInt
@@ -1576,11 +1576,11 @@ const NavGen = struct {
}
return try self.intType(int_info.signedness, int_info.bits);
},
- .Enum => {
+ .@"enum" => {
const tag_ty = ty.intTagType(zcu);
return try self.resolveType(tag_ty, repr);
},
- .Float => {
+ .float => {
// We can (and want) not really emulate floating points with other floating point types like with the integer types,
// so if the float is not supported, just return an error.
const bits = ty.floatBits(target);
@@ -1598,7 +1598,7 @@ const NavGen = struct {
return try self.spv.floatType(bits);
},
- .Array => {
+ .array => {
const elem_ty = ty.childType(zcu);
const elem_ty_id = try self.resolveType(elem_ty, .indirect);
const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(zcu)) orelse {
@@ -1633,7 +1633,7 @@ const NavGen = struct {
return try self.arrayType(total_len, elem_ty_id);
}
},
- .Fn => switch (repr) {
+ .@"fn" => switch (repr) {
.direct => {
const fn_info = zcu.typeToFunc(ty).?;
@@ -1676,7 +1676,7 @@ const NavGen = struct {
return try self.resolveType(Type.usize, .indirect);
},
},
- .Pointer => {
+ .pointer => {
const ptr_info = ty.ptrInfo(zcu);
const storage_class = self.spvStorageClass(ptr_info.flags.address_space);
@@ -1692,7 +1692,7 @@ const NavGen = struct {
&.{ "ptr", "len" },
);
},
- .Vector => {
+ .vector => {
const elem_ty = ty.childType(zcu);
const elem_ty_id = try self.resolveType(elem_ty, repr);
const len = ty.vectorLen(zcu);
@@ -1703,7 +1703,7 @@ const NavGen = struct {
return try self.arrayType(len, elem_ty_id);
}
},
- .Struct => {
+ .@"struct" => {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
const member_types = try self.gpa.alloc(IdRef, tuple.values.len);
@@ -1757,7 +1757,7 @@ const NavGen = struct {
try self.spv.debugName(result_id, type_name);
return result_id;
},
- .Optional => {
+ .optional => {
const payload_ty = ty.optionalChild(zcu);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
// Just use a bool.
@@ -1779,9 +1779,9 @@ const NavGen = struct {
&.{ "payload", "valid" },
);
},
- .Union => return try self.resolveUnionType(ty),
- .ErrorSet => return try self.resolveType(Type.u16, repr),
- .ErrorUnion => {
+ .@"union" => return try self.resolveUnionType(ty),
+ .error_set => return try self.resolveType(Type.u16, repr),
+ .error_union => {
const payload_ty = ty.errorUnionPayload(zcu);
const error_ty_id = try self.resolveType(Type.anyerror, .indirect);
@@ -1808,7 +1808,7 @@ const NavGen = struct {
return try self.spv.structType(&member_types, &member_names);
},
- .Opaque => {
+ .@"opaque" => {
const type_name = try self.resolveTypeName(ty);
defer self.gpa.free(type_name);
@@ -1820,15 +1820,15 @@ const NavGen = struct {
return result_id;
},
- .Null,
- .Undefined,
- .EnumLiteral,
- .ComptimeFloat,
- .ComptimeInt,
- .Type,
+ .null,
+ .undefined,
+ .enum_literal,
+ .comptime_float,
+ .comptime_int,
+ .type,
=> unreachable, // Must be comptime.
- .Frame, .AnyFrame => unreachable, // TODO
+ .frame, .@"anyframe" => unreachable, // TODO
}
}
@@ -2429,7 +2429,7 @@ const NavGen = struct {
const op_result_ty_id = try self.resolveType(op_result_ty, .direct);
const result_ty = try v.resultType(self, lhs.ty);
- assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .Bool);
+ assert(condition.ty.scalarType(zcu).zigTypeTag(zcu) == .bool);
const cond = try v.prepare(self, condition);
const object_1 = try v.prepare(self, lhs);
@@ -3119,7 +3119,7 @@ const NavGen = struct {
fn convertToDirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
const zcu = self.pt.zcu;
switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
const false_id = try self.constBool(false, .indirect);
// The operation below requires inputs in direct representation, but the operand
// is actually in indirect representation.
@@ -3145,7 +3145,7 @@ const NavGen = struct {
fn convertToIndirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef {
const zcu = self.pt.zcu;
switch (ty.scalarType(zcu).zigTypeTag(zcu)) {
- .Bool => {
+ .bool => {
const result = try self.intFromBool(Temporary.init(ty, operand_id));
return try result.materialize(self);
},
@@ -4281,17 +4281,17 @@ const NavGen = struct {
const is_vector = lhs.ty.isVector(zcu);
switch (scalar_ty.zigTypeTag(zcu)) {
- .Int, .Bool, .Float => {},
- .Enum => {
+ .int, .bool, .float => {},
+ .@"enum" => {
assert(!is_vector);
const ty = lhs.ty.intTagType(zcu);
return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
},
- .ErrorSet => {
+ .error_set => {
assert(!is_vector);
return try self.cmp(op, lhs.pun(Type.u16), rhs.pun(Type.u16));
},
- .Pointer => {
+ .pointer => {
assert(!is_vector);
// Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
// currently not implemented in the SPIR-V LLVM translator. Thus, we emit these using
@@ -4317,7 +4317,7 @@ const NavGen = struct {
const rhs_int = Temporary.init(Type.usize, rhs_int_id);
return try self.cmp(op, lhs_int, rhs_int);
},
- .Optional => {
+ .optional => {
assert(!is_vector);
const ty = lhs.ty;
@@ -4478,7 +4478,7 @@ const NavGen = struct {
// TODO: Some more cases are missing here
// See fn bitCast in llvm.zig
- if (src_ty.zigTypeTag(zcu) == .Int and dst_ty.isPtrAtRuntime(zcu)) {
+ if (src_ty.zigTypeTag(zcu) == .int and dst_ty.isPtrAtRuntime(zcu)) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = dst_ty_id,
@@ -4520,7 +4520,7 @@ const NavGen = struct {
// the result here.
// TODO: This detail could cause stuff like @as(*const i1, @ptrCast(&@as(u1, 1))) to break
// should we change the representation of strange integers?
- if (dst_ty.zigTypeTag(zcu) == .Int) {
+ if (dst_ty.zigTypeTag(zcu) == .int) {
const info = self.arithmeticTypeInfo(dst_ty);
const result = try self.normalize(Temporary.init(dst_ty, result_id), info);
return try result.materialize(self);
@@ -4729,7 +4729,7 @@ const NavGen = struct {
const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
switch (result_ty.zigTypeTag(zcu)) {
- .Struct => {
+ .@"struct" => {
if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
_ = struct_type;
unreachable; // TODO
@@ -4777,7 +4777,7 @@ const NavGen = struct {
constituents[0..index],
);
},
- .Vector => {
+ .vector => {
const n_elems = result_ty.vectorLen(zcu);
const elem_ids = try self.gpa.alloc(IdRef, n_elems);
defer self.gpa.free(elem_ids);
@@ -4788,7 +4788,7 @@ const NavGen = struct {
return try self.constructVector(result_ty, elem_ids);
},
- .Array => {
+ .array => {
const array_info = result_ty.arrayInfo(zcu);
const n_elems: usize = @intCast(result_ty.arrayLenIncludingSentinel(zcu));
const elem_ids = try self.gpa.alloc(IdRef, n_elems);
@@ -5153,11 +5153,11 @@ const NavGen = struct {
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
switch (object_ty.zigTypeTag(zcu)) {
- .Struct => switch (object_ty.containerLayout(zcu)) {
+ .@"struct" => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => return try self.extractField(field_ty, object_id, field_index),
},
- .Union => switch (object_ty.containerLayout(zcu)) {
+ .@"union" => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => {
// Store, ptr-elem-ptr, pointer-cast, load
@@ -5229,17 +5229,17 @@ const NavGen = struct {
const zcu = self.pt.zcu;
const object_ty = object_ptr_ty.childType(zcu);
switch (object_ty.zigTypeTag(zcu)) {
- .Pointer => {
+ .pointer => {
assert(object_ty.isSlice(zcu));
return self.accessChain(result_ty_id, object_ptr, &.{field_index});
},
- .Struct => switch (object_ty.containerLayout(zcu)) {
+ .@"struct" => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => {
return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
},
},
- .Union => switch (object_ty.containerLayout(zcu)) {
+ .@"union" => switch (object_ty.containerLayout(zcu)) {
.@"packed" => unreachable, // TODO
else => {
const layout = self.unionLayout(object_ty);
@@ -6179,15 +6179,15 @@ const NavGen = struct {
var cond_indirect = try self.convertToIndirect(cond_ty, cond);
const cond_words: u32 = switch (cond_ty.zigTypeTag(zcu)) {
- .Bool, .ErrorSet => 1,
- .Int => blk: {
+ .bool, .error_set => 1,
+ .int => blk: {
const bits = cond_ty.intInfo(zcu).bits;
const backing_bits = self.backingIntBits(bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) 1 else 2;
},
- .Enum => blk: {
+ .@"enum" => blk: {
const int_ty = cond_ty.intTagType(zcu);
const int_info = int_ty.intInfo(zcu);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
@@ -6195,7 +6195,7 @@ const NavGen = struct {
};
break :blk if (backing_bits <= 32) 1 else 2;
},
- .Pointer => blk: {
+ .pointer => blk: {
cond_indirect = try self.intFromPtr(cond_indirect);
break :blk target.ptrBitWidth() / 32;
},
@@ -6248,13 +6248,13 @@ const NavGen = struct {
for (case.items) |item| {
const value = (try self.air.value(item, pt)) orelse unreachable;
const int_val: u64 = switch (cond_ty.zigTypeTag(zcu)) {
- .Bool, .Int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu),
- .Enum => blk: {
+ .bool, .int => if (cond_ty.isSignedInt(zcu)) @bitCast(value.toSignedInt(zcu)) else value.toUnsignedInt(zcu),
+ .@"enum" => blk: {
// TODO: figure out of cond_ty is correct (something with enum literals)
break :blk (try value.intFromEnum(cond_ty, pt)).toUnsignedInt(zcu); // TODO: composite integer constants
},
- .ErrorSet => value.getErrorInt(zcu),
- .Pointer => value.toUnsignedInt(zcu),
+ .error_set => value.getErrorInt(zcu),
+ .pointer => value.toUnsignedInt(zcu),
else => unreachable,
};
const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
@@ -6496,8 +6496,8 @@ const NavGen = struct {
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
- .Fn => callee_ty,
- .Pointer => return self.fail("cannot call function pointers", .{}),
+ .@"fn" => callee_ty,
+ .pointer => return self.fail("cannot call function pointers", .{}),
else => unreachable,
};
const fn_info = zcu.typeToFunc(zig_fn_ty).?;
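
The SPIR-V backend changes are the same mechanical rename applied to zigTypeTag switches; the tags it matches (.int, .float, .bool, .@"enum", .pointer, ...) are the field names of std.builtin.Type. The equivalent userland switch, shown here as a hypothetical classifier that is not part of the patch:

const std = @import("std");

// Hypothetical classifier demonstrating the lowercase tag names on a plain
// @typeInfo switch; it is not taken from the SPIR-V backend.
fn isScalarArithmetic(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .bool, .int, .float, .comptime_int, .comptime_float => true,
        .@"enum" => |info| isScalarArithmetic(info.tag_type),
        else => false,
    };
}

test "isScalarArithmetic" {
    try std.testing.expect(isScalarArithmetic(u32));
    try std.testing.expect(isScalarArithmetic(enum(u8) { a, b }));
    try std.testing.expect(!isScalarArithmetic([]const u8));
}
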
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index d857ce7f46..20abf8ab70 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -98,7 +98,7 @@ pub fn emitSpecConstantOp(
section.writeOperand(spec.IdRef, operands.id_result);
section.writeOperand(Opcode, opcode);
- const fields = @typeInfo(opcode.Operands()).Struct.fields;
+ const fields = @typeInfo(opcode.Operands()).@"struct".fields;
// First 2 fields are always id_result_type and id_result.
inline for (fields[2..]) |field| {
section.writeOperand(field.type, @field(operands, field.name));
@@ -122,8 +122,8 @@ pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
fn writeOperands(section: *Section, comptime Operands: type, operands: Operands) void {
const fields = switch (@typeInfo(Operands)) {
- .Struct => |info| info.fields,
- .Void => return,
+ .@"struct" => |info| info.fields,
+ .void => return,
else => unreachable,
};
@@ -154,24 +154,24 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand)
spec.PairIdRefIdRef => section.writeWords(&.{ @intFromEnum(operand[0]), @intFromEnum(operand[1]) }),
else => switch (@typeInfo(Operand)) {
- .Enum => section.writeWord(@intFromEnum(operand)),
- .Optional => |info| if (operand) |child| {
+ .@"enum" => section.writeWord(@intFromEnum(operand)),
+ .optional => |info| if (operand) |child| {
section.writeOperand(info.child, child);
},
- .Pointer => |info| {
+ .pointer => |info| {
std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
for (operand) |item| {
section.writeOperand(info.child, item);
}
},
- .Struct => |info| {
+ .@"struct" => |info| {
if (info.layout == .@"packed") {
section.writeWord(@as(Word, @bitCast(operand)));
} else {
section.writeExtendedMask(Operand, operand);
}
},
- .Union => section.writeExtendedUnion(Operand, operand),
+ .@"union" => section.writeExtendedUnion(Operand, operand),
else => unreachable,
},
}
@@ -207,12 +207,12 @@ fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDe
fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
var mask: Word = 0;
- inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
+ inline for (@typeInfo(Operand).@"struct".fields, 0..) |field, bit| {
switch (@typeInfo(field.type)) {
- .Optional => if (@field(operand, field.name) != null) {
+ .optional => if (@field(operand, field.name) != null) {
mask |= 1 << @as(u5, @intCast(bit));
},
- .Bool => if (@field(operand, field.name)) {
+ .bool => if (@field(operand, field.name)) {
mask |= 1 << @as(u5, @intCast(bit));
},
else => unreachable,
@@ -221,12 +221,12 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand
section.writeWord(mask);
- inline for (@typeInfo(Operand).Struct.fields) |field| {
+ inline for (@typeInfo(Operand).@"struct".fields) |field| {
switch (@typeInfo(field.type)) {
- .Optional => |info| if (@field(operand, field.name)) |child| {
+ .optional => |info| if (@field(operand, field.name)) |child| {
section.writeOperands(info.child, child);
},
- .Bool => {},
+ .bool => {},
else => unreachable,
}
}
@@ -236,7 +236,7 @@ fn writeExtendedUnion(section: *Section, comptime Operand: type, operand: Operan
const tag = std.meta.activeTag(operand);
section.writeWord(@intFromEnum(tag));
- inline for (@typeInfo(Operand).Union.fields) |field| {
+ inline for (@typeInfo(Operand).@"union".fields) |field| {
if (@field(Operand, field.name) == tag) {
section.writeOperands(field.type, @field(operand, field.name));
return;
@@ -251,8 +251,8 @@ fn instructionSize(comptime opcode: spec.Opcode, operands: opcode.Operands()) us
fn operandsSize(comptime Operands: type, operands: Operands) usize {
const fields = switch (@typeInfo(Operands)) {
- .Struct => |info| info.fields,
- .Void => return 0,
+ .@"struct" => |info| info.fields,
+ .void => return 0,
else => unreachable,
};
@@ -289,9 +289,9 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
=> 2,
else => switch (@typeInfo(Operand)) {
- .Enum => 1,
- .Optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
- .Pointer => |info| blk: {
+ .@"enum" => 1,
+ .optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
+ .pointer => |info| blk: {
std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
var total: usize = 0;
for (operand) |item| {
@@ -299,8 +299,8 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
}
break :blk total;
},
- .Struct => |info| if (info.layout == .@"packed") 1 else extendedMaskSize(Operand, operand),
- .Union => extendedUnionSize(Operand, operand),
+ .@"struct" => |info| if (info.layout == .@"packed") 1 else extendedMaskSize(Operand, operand),
+ .@"union" => extendedUnionSize(Operand, operand),
else => unreachable,
},
};
@@ -309,13 +309,13 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
var total: usize = 0;
var any_set = false;
- inline for (@typeInfo(Operand).Struct.fields) |field| {
+ inline for (@typeInfo(Operand).@"struct".fields) |field| {
switch (@typeInfo(field.type)) {
- .Optional => |info| if (@field(operand, field.name)) |child| {
+ .optional => |info| if (@field(operand, field.name)) |child| {
total += operandsSize(info.child, child);
any_set = true;
},
- .Bool => if (@field(operand, field.name)) {
+ .bool => if (@field(operand, field.name)) {
any_set = true;
},
else => unreachable,
@@ -326,7 +326,7 @@ fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
fn extendedUnionSize(comptime Operand: type, operand: Operand) usize {
const tag = std.meta.activeTag(operand);
- inline for (@typeInfo(Operand).Union.fields) |field| {
+ inline for (@typeInfo(Operand).@"union".fields) |field| {
if (@field(Operand, field.name) == tag) {
// Add one for the tag itself.
return 1 + operandsSize(field.type, @field(operand, field.name));
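
The writeExtendedMask and extendedMaskSize hunks iterate a struct's fields and branch on whether each field's type is .optional or .bool. A minimal standalone version of that bit-mask computation; the Operand type in the test is made up, not a real SPIR-V operand:

const std = @import("std");

// Minimal sketch of the writeExtendedMask bit computation above: optional
// fields set their bit when non-null, bool fields when true.
fn maskBits(operand: anytype) u32 {
    var mask: u32 = 0;
    inline for (@typeInfo(@TypeOf(operand)).@"struct".fields, 0..) |field, bit| {
        switch (@typeInfo(field.type)) {
            .optional => if (@field(operand, field.name) != null) {
                mask |= 1 << @as(u5, @intCast(bit));
            },
            .bool => if (@field(operand, field.name)) {
                mask |= 1 << @as(u5, @intCast(bit));
            },
            else => @compileError("unsupported operand field: " ++ field.name),
        }
    }
    return mask;
}

test "maskBits" {
    const Operand = struct { aligned: bool, alignment: ?u32 };
    try std.testing.expectEqual(@as(u32, 0b10), maskBits(Operand{ .aligned = false, .alignment = 16 }));
    try std.testing.expectEqual(@as(u32, 0b01), maskBits(Operand{ .aligned = true, .alignment = null }));
}
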
diff --git a/src/link.zig b/src/link.zig
index 409a504073..508bc81352 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -936,13 +936,11 @@ pub const File = struct {
missing_libc: bool = false,
const Int = blk: {
- const bits = @typeInfo(@This()).Struct.fields.len;
- break :blk @Type(.{
- .Int = .{
- .signedness = .unsigned,
- .bits = bits,
- },
- });
+ const bits = @typeInfo(@This()).@"struct".fields.len;
+ break :blk @Type(.{ .int = .{
+ .signedness = .unsigned,
+ .bits = bits,
+ } });
};
fn isSet(ef: ErrorFlags) bool {
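
The ErrorFlags.Int block above is the @Type counterpart of the rename: the initializer field is now .int rather than .Int. The same construction in isolation, with an illustrative FlagsInt helper that is not part of the linker:

const std = @import("std");

// Illustrative: build an unsigned integer with one bit per flag field, the
// same @Type(.{ .int = ... }) shape as the ErrorFlags.Int block above.
fn FlagsInt(comptime Flags: type) type {
    return @Type(.{ .int = .{
        .signedness = .unsigned,
        .bits = @typeInfo(Flags).@"struct".fields.len,
    } });
}

test "FlagsInt" {
    const Flags = struct { a: bool = false, b: bool = false, c: bool = false };
    try std.testing.expect(FlagsInt(Flags) == u3);
}
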
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index fffa412f03..81f9a29830 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1368,7 +1368,7 @@ fn getNavOutputSection(self: *Coff, nav_index: InternPool.Nav.Index) u16 {
switch (zig_ty) {
// TODO: what if this is a function pointer?
- .Fn => break :blk self.text_section_index.?,
+ .@"fn" => break :blk self.text_section_index.?,
else => {
if (val.getVariable(zcu)) |_| {
break :blk self.data_section_index.?;
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index e9bcc573dc..ab59dd2a2e 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1756,7 +1756,7 @@ pub const WipNav = struct {
var bit: usize = 0;
var carry: u1 = 1;
while (bit < bits) : (bit += 7) {
- const limb_bits = @typeInfo(std.math.big.Limb).Int.bits;
+ const limb_bits = @typeInfo(std.math.big.Limb).int.bits;
const limb_index = bit / limb_bits;
const limb_shift: std.math.Log2Int(std.math.big.Limb) = @intCast(bit % limb_bits);
const low_abs_part: u7 = @truncate(big_int.limbs[limb_index] >> limb_shift);
@@ -2477,6 +2477,18 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
assert(file.zir_loaded);
const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
assert(decl_inst.tag == .declaration);
+ const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
+
+ const is_test = switch (decl_extra.data.name) {
+ .unnamed_test, .decltest => true,
+ .@"comptime", .@"usingnamespace" => false,
+ _ => decl_extra.data.name.isNamedTest(file.zir),
+ };
+ if (is_test) {
+ // This isn't actually a comptime Nav! It's a test, so it'll definitely never be referenced at comptime.
+ return;
+ }
+
const tree = try file.getTree(dwarf.gpa);
const loc = tree.tokenLocation(0, tree.nodes.items(.main_token)[decl_inst.data.declaration.src_node]);
assert(loc.line == zcu.navSrcLine(nav_index));
@@ -2515,7 +2527,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
) != abbrev_code_buf.len) return error.InputOutput;
var abbrev_code_fbs = std.io.fixedBufferStream(&abbrev_code_buf);
const abbrev_code: AbbrevCode = @enumFromInt(
- std.leb.readUleb128(@typeInfo(AbbrevCode).Enum.tag_type, abbrev_code_fbs.reader()) catch unreachable,
+ std.leb.readUleb128(@typeInfo(AbbrevCode).@"enum".tag_type, abbrev_code_fbs.reader()) catch unreachable,
);
switch (abbrev_code) {
else => unreachable,
@@ -2582,7 +2594,6 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
if (loaded_struct.zir_index == .none) break :decl_struct;
const value_inst = value_inst: {
- const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
@@ -2704,7 +2715,6 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
if (loaded_enum.zir_index == .none) break :decl_enum;
const value_inst = value_inst: {
- const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
@@ -2788,7 +2798,6 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
decl_union: {
const value_inst = value_inst: {
- const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
@@ -2911,7 +2920,6 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
decl_opaque: {
const value_inst = value_inst: {
- const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
@@ -3685,7 +3693,7 @@ pub fn freeNav(dwarf: *Dwarf, nav_index: InternPool.Nav.Index) void {
_ = nav_index;
}
-fn refAbbrevCode(dwarf: *Dwarf, abbrev_code: AbbrevCode) UpdateError!@typeInfo(AbbrevCode).Enum.tag_type {
+fn refAbbrevCode(dwarf: *Dwarf, abbrev_code: AbbrevCode) UpdateError!@typeInfo(AbbrevCode).@"enum".tag_type {
assert(abbrev_code != .null);
const entry: Entry.Index = @enumFromInt(@intFromEnum(abbrev_code));
if (dwarf.debug_abbrev.section.getUnit(DebugAbbrev.unit).getEntry(entry).len > 0) return @intFromEnum(abbrev_code);
@@ -4087,7 +4095,7 @@ pub fn resolveRelocs(dwarf: *Dwarf) RelocError!void {
}
fn DeclValEnum(comptime T: type) type {
- const decls = @typeInfo(T).Struct.decls;
+ const decls = @typeInfo(T).@"struct".decls;
@setEvalBranchQuota(7 * decls.len);
var fields: [decls.len]std.builtin.Type.EnumField = undefined;
var fields_len = 0;
@@ -4101,7 +4109,7 @@ fn DeclValEnum(comptime T: type) type {
if (min_value == null or min_value.? > value) min_value = value;
if (max_value == null or max_value.? < value) max_value = value;
}
- return @Type(.{ .Enum = .{
+ return @Type(.{ .@"enum" = .{
.tag_type = std.math.IntFittingRange(min_value orelse 0, max_value orelse 0),
.fields = fields[0..fields_len],
.decls = &.{},
@@ -4665,7 +4673,7 @@ fn addCommonEntry(dwarf: *Dwarf, unit: Unit.Index) UpdateError!Entry.Index {
fn writeInt(dwarf: *Dwarf, buf: []u8, int: u64) void {
switch (buf.len) {
- inline 0...8 => |len| std.mem.writeInt(@Type(.{ .Int = .{
+ inline 0...8 => |len| std.mem.writeInt(@Type(.{ .int = .{
.signedness = .unsigned,
.bits = len * 8,
} }), buf[0..len], @intCast(int), dwarf.endian),
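
DeclValEnum and writeInt show the other two @Type call sites touched here: .@"enum" and .int. A reduced sketch of the enum construction, using hypothetical field names rather than DWARF declarations:

const std = @import("std");

// Reduced sketch of the DeclValEnum construction above, with hypothetical
// field names; only the @Type(.{ .@"enum" = ... }) shape matters here.
fn TwoValueEnum(comptime a: comptime_int, comptime b: comptime_int) type {
    return @Type(.{ .@"enum" = .{
        .tag_type = std.math.IntFittingRange(@min(a, b), @max(a, b)),
        .fields = &.{
            .{ .name = "first", .value = a },
            .{ .name = "second", .value = b },
        },
        .decls = &.{},
        .is_exhaustive = true,
    } });
}

test "TwoValueEnum" {
    const E = TwoValueEnum(1, 7);
    try std.testing.expectEqual(@as(u3, 7), @intFromEnum(E.second));
    try std.testing.expect(@typeInfo(E).@"enum".tag_type == u3);
}
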
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index 5981b6ef2c..10a7aa2b79 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -1008,7 +1008,7 @@ const AddExtraOpts = struct {
pub fn addExtra(atom: *Atom, opts: AddExtraOpts, elf_file: *Elf) void {
const file_ptr = atom.file(elf_file).?;
var extras = file_ptr.atomExtra(atom.extra_index);
- inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
if (@field(opts, field.name)) |x| {
@field(extras, field.name) = x;
}
diff --git a/src/link/Elf/LdScript.zig b/src/link/Elf/LdScript.zig
index 2b5c64b50e..414ce035a4 100644
--- a/src/link/Elf/LdScript.zig
+++ b/src/link/Elf/LdScript.zig
@@ -108,7 +108,7 @@ const Command = enum {
as_needed,
fn fromString(s: []const u8) ?Command {
- inline for (@typeInfo(Command).Enum.fields) |field| {
+ inline for (@typeInfo(Command).@"enum".fields) |field| {
const upper_name = n: {
comptime var buf: [field.name.len]u8 = undefined;
inline for (field.name, 0..) |c, i| {
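
Command.fromString is the inline-for form of the rename: the loop is now over @typeInfo(Command).@"enum".fields. A standalone sketch of the same name lookup, minus the uppercase conversion; the Command enum in the test is a stand-in, not the linker-script one. (In application code, std.meta.stringToEnum does the same thing.)

const std = @import("std");

// Sketch only: scan the enum's fields by name, the same loop shape as
// Command.fromString above.
fn fromString(comptime E: type, s: []const u8) ?E {
    inline for (@typeInfo(E).@"enum".fields) |field| {
        if (std.mem.eql(u8, field.name, s)) return @field(E, field.name);
    }
    return null;
}

test "fromString" {
    const Command = enum { entry, include, input };
    try std.testing.expectEqual(@as(?Command, .include), fromString(Command, "include"));
    try std.testing.expectEqual(@as(?Command, null), fromString(Command, "group"));
}
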
diff --git a/src/link/Elf/LinkerDefined.zig b/src/link/Elf/LinkerDefined.zig
index a754539882..131ed6ad71 100644
--- a/src/link/Elf/LinkerDefined.zig
+++ b/src/link/Elf/LinkerDefined.zig
@@ -394,14 +394,14 @@ fn addSymbolAssumeCapacity(self: *LinkerDefined) Symbol.Index {
}
pub fn addSymbolExtra(self: *LinkerDefined, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
pub fn addSymbolExtraAssumeCapacity(self: *LinkerDefined, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -412,7 +412,7 @@ pub fn addSymbolExtraAssumeCapacity(self: *LinkerDefined, extra: Symbol.Extra) u
}
pub fn symbolExtra(self: *LinkerDefined, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -426,7 +426,7 @@ pub fn symbolExtra(self: *LinkerDefined, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *LinkerDefined, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
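
The remaining ELF and MachO hunks are all instances of the same two helpers: packing an Extra struct into consecutive u32 slots and reading it back, field by field. A condensed, self-contained version of that round trip; the Extra type and field names here are stand-ins, not the linker's:

const std = @import("std");

// Stand-in Extra type and round-trip helpers mirroring addSymbolExtraAssumeCapacity
// and symbolExtra above: each u32 field occupies one slot, in declaration order.
const Extra = struct { got: u32 = 0, plt: u32 = 0, dynamic: u32 = 0 };
const extra_len = @typeInfo(Extra).@"struct".fields.len;

fn packExtra(extra: Extra) [extra_len]u32 {
    var out: [extra_len]u32 = undefined;
    inline for (@typeInfo(Extra).@"struct".fields, 0..) |field, i| {
        out[i] = switch (field.type) {
            u32 => @field(extra, field.name),
            else => @compileError("bad field type: " ++ field.name),
        };
    }
    return out;
}

fn unpackExtra(items: []const u32) Extra {
    var result: Extra = undefined;
    inline for (@typeInfo(Extra).@"struct".fields, 0..) |field, i| {
        @field(result, field.name) = switch (field.type) {
            u32 => items[i],
            else => @compileError("bad field type: " ++ field.name),
        };
    }
    return result;
}

test "extra round trip" {
    const words = packExtra(.{ .got = 1, .plt = 2, .dynamic = 3 });
    try std.testing.expectEqual(@as(u32, 2), unpackExtra(&words).plt);
}
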
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index a19d327fcc..40d09c8ec4 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -1215,14 +1215,14 @@ fn addSymbolAssumeCapacity(self: *Object) Symbol.Index {
}
pub fn addSymbolExtra(self: *Object, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
pub fn addSymbolExtraAssumeCapacity(self: *Object, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -1233,7 +1233,7 @@ pub fn addSymbolExtraAssumeCapacity(self: *Object, extra: Symbol.Extra) u32 {
}
pub fn symbolExtra(self: *Object, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -1247,7 +1247,7 @@ pub fn symbolExtra(self: *Object, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *Object, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
@@ -1325,14 +1325,14 @@ pub fn atom(self: *Object, atom_index: Atom.Index) ?*Atom {
}
pub fn addAtomExtra(self: *Object, allocator: Allocator, extra: Atom.Extra) !u32 {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
pub fn addAtomExtraAssumeCapacity(self: *Object, extra: Atom.Extra) u32 {
const index = @as(u32, @intCast(self.atoms_extra.items.len));
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields) |field| {
self.atoms_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -1343,7 +1343,7 @@ pub fn addAtomExtraAssumeCapacity(self: *Object, extra: Atom.Extra) u32 {
}
pub fn atomExtra(self: *Object, index: u32) Atom.Extra {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
var i: usize = index;
var result: Atom.Extra = undefined;
inline for (fields) |field| {
@@ -1357,7 +1357,7 @@ pub fn atomExtra(self: *Object, index: u32) Atom.Extra {
}
pub fn setAtomExtra(self: *Object, index: u32, extra: Atom.Extra) void {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.atoms_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
diff --git a/src/link/Elf/SharedObject.zig b/src/link/Elf/SharedObject.zig
index 9463cad75a..677e63ebaf 100644
--- a/src/link/Elf/SharedObject.zig
+++ b/src/link/Elf/SharedObject.zig
@@ -423,14 +423,14 @@ fn addSymbolAssumeCapacity(self: *SharedObject) Symbol.Index {
}
pub fn addSymbolExtra(self: *SharedObject, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
pub fn addSymbolExtraAssumeCapacity(self: *SharedObject, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -441,7 +441,7 @@ pub fn addSymbolExtraAssumeCapacity(self: *SharedObject, extra: Symbol.Extra) u3
}
pub fn symbolExtra(self: *SharedObject, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -455,7 +455,7 @@ pub fn symbolExtra(self: *SharedObject, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *SharedObject, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
diff --git a/src/link/Elf/Symbol.zig b/src/link/Elf/Symbol.zig
index 1b1c35b645..821f9e0bb9 100644
--- a/src/link/Elf/Symbol.zig
+++ b/src/link/Elf/Symbol.zig
@@ -255,7 +255,7 @@ const AddExtraOpts = struct {
pub fn addExtra(symbol: *Symbol, opts: AddExtraOpts, elf_file: *Elf) void {
var extras = symbol.extra(elf_file);
- inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
if (@field(opts, field.name)) |x| {
@field(extras, field.name) = x;
}
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index df811dcfb0..549657800c 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -2034,14 +2034,14 @@ pub fn atom(self: *ZigObject, atom_index: Atom.Index) ?*Atom {
}
fn addAtomExtra(self: *ZigObject, allocator: Allocator, extra: Atom.Extra) !u32 {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
fn addAtomExtraAssumeCapacity(self: *ZigObject, extra: Atom.Extra) u32 {
const index = @as(u32, @intCast(self.atoms_extra.items.len));
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields) |field| {
self.atoms_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -2052,7 +2052,7 @@ fn addAtomExtraAssumeCapacity(self: *ZigObject, extra: Atom.Extra) u32 {
}
pub fn atomExtra(self: ZigObject, index: u32) Atom.Extra {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
var i: usize = index;
var result: Atom.Extra = undefined;
inline for (fields) |field| {
@@ -2067,7 +2067,7 @@ pub fn atomExtra(self: ZigObject, index: u32) Atom.Extra {
pub fn setAtomExtra(self: *ZigObject, index: u32, extra: Atom.Extra) void {
assert(index > 0);
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.atoms_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
@@ -2106,14 +2106,14 @@ fn addSymbolAssumeCapacity(self: *ZigObject) Symbol.Index {
}
pub fn addSymbolExtra(self: *ZigObject, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
pub fn addSymbolExtraAssumeCapacity(self: *ZigObject, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -2124,7 +2124,7 @@ pub fn addSymbolExtraAssumeCapacity(self: *ZigObject, extra: Symbol.Extra) u32 {
}
pub fn symbolExtra(self: *ZigObject, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -2138,7 +2138,7 @@ pub fn symbolExtra(self: *ZigObject, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *ZigObject, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index 8cb28b5f29..a1c8b84deb 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -90,7 +90,7 @@ pub fn writeHeader(
.ar_fmag = undefined,
};
@memset(mem.asBytes(&hdr), 0x20);
- inline for (@typeInfo(ar_hdr).Struct.fields) |field| {
+ inline for (@typeInfo(ar_hdr).@"struct".fields) |field| {
var stream = std.io.fixedBufferStream(&@field(hdr, field.name));
stream.writer().print("0", .{}) catch unreachable;
}
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index d2072278a9..a0193c20be 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -129,7 +129,7 @@ const AddExtraOpts = struct {
pub fn addExtra(atom: *Atom, opts: AddExtraOpts, macho_file: *MachO) void {
const file = atom.getFile(macho_file);
var extra = file.getAtomExtra(atom.extra);
- inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
if (@field(opts, field.name)) |x| {
@field(extra, field.name) = x;
}
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 7c52f69376..f5ed166ee0 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -650,14 +650,14 @@ pub fn getSymbolRef(self: Dylib, index: Symbol.Index, macho_file: *MachO) MachO.
}
pub fn addSymbolExtra(self: *Dylib, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
fn addSymbolExtraAssumeCapacity(self: *Dylib, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -668,7 +668,7 @@ fn addSymbolExtraAssumeCapacity(self: *Dylib, extra: Symbol.Extra) u32 {
}
pub fn getSymbolExtra(self: Dylib, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -682,7 +682,7 @@ pub fn getSymbolExtra(self: Dylib, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *Dylib, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
diff --git a/src/link/MachO/InternalObject.zig b/src/link/MachO/InternalObject.zig
index 7e0f375ec1..4054429ef8 100644
--- a/src/link/MachO/InternalObject.zig
+++ b/src/link/MachO/InternalObject.zig
@@ -669,14 +669,14 @@ pub fn getAtoms(self: InternalObject) []const Atom.Index {
}
fn addAtomExtra(self: *InternalObject, allocator: Allocator, extra: Atom.Extra) !u32 {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
fn addAtomExtraAssumeCapacity(self: *InternalObject, extra: Atom.Extra) u32 {
const index = @as(u32, @intCast(self.atoms_extra.items.len));
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields) |field| {
self.atoms_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -687,7 +687,7 @@ fn addAtomExtraAssumeCapacity(self: *InternalObject, extra: Atom.Extra) u32 {
}
pub fn getAtomExtra(self: InternalObject, index: u32) Atom.Extra {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
var i: usize = index;
var result: Atom.Extra = undefined;
inline for (fields) |field| {
@@ -702,7 +702,7 @@ pub fn getAtomExtra(self: InternalObject, index: u32) Atom.Extra {
pub fn setAtomExtra(self: *InternalObject, index: u32, extra: Atom.Extra) void {
assert(index > 0);
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.atoms_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
@@ -750,14 +750,14 @@ pub fn getSymbolRef(self: InternalObject, index: Symbol.Index, macho_file: *Mach
}
pub fn addSymbolExtra(self: *InternalObject, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
fn addSymbolExtraAssumeCapacity(self: *InternalObject, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -768,7 +768,7 @@ fn addSymbolExtraAssumeCapacity(self: *InternalObject, extra: Symbol.Extra) u32
}
pub fn getSymbolExtra(self: InternalObject, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -782,7 +782,7 @@ pub fn getSymbolExtra(self: InternalObject, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *InternalObject, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index ad83777906..4d2662a838 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -2386,14 +2386,14 @@ pub fn getAtoms(self: *Object) []const Atom.Index {
}
fn addAtomExtra(self: *Object, allocator: Allocator, extra: Atom.Extra) !u32 {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
fn addAtomExtraAssumeCapacity(self: *Object, extra: Atom.Extra) u32 {
const index = @as(u32, @intCast(self.atoms_extra.items.len));
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields) |field| {
self.atoms_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -2404,7 +2404,7 @@ fn addAtomExtraAssumeCapacity(self: *Object, extra: Atom.Extra) u32 {
}
pub fn getAtomExtra(self: Object, index: u32) Atom.Extra {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
var i: usize = index;
var result: Atom.Extra = undefined;
inline for (fields) |field| {
@@ -2419,7 +2419,7 @@ pub fn getAtomExtra(self: Object, index: u32) Atom.Extra {
pub fn setAtomExtra(self: *Object, index: u32, extra: Atom.Extra) void {
assert(index > 0);
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.atoms_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
@@ -2447,14 +2447,14 @@ pub fn getSymbolRef(self: Object, index: Symbol.Index, macho_file: *MachO) MachO
}
pub fn addSymbolExtra(self: *Object, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
fn addSymbolExtraAssumeCapacity(self: *Object, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -2465,7 +2465,7 @@ fn addSymbolExtraAssumeCapacity(self: *Object, extra: Symbol.Extra) u32 {
}
pub fn getSymbolExtra(self: Object, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -2479,7 +2479,7 @@ pub fn getSymbolExtra(self: Object, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *Object, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
diff --git a/src/link/MachO/Symbol.zig b/src/link/MachO/Symbol.zig
index 7d8dc9c046..7493d3ceab 100644
--- a/src/link/MachO/Symbol.zig
+++ b/src/link/MachO/Symbol.zig
@@ -211,7 +211,7 @@ const AddExtraOpts = struct {
pub fn addExtra(symbol: *Symbol, opts: AddExtraOpts, macho_file: *MachO) void {
var extra = symbol.getExtra(macho_file);
- inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
+ inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
if (@field(opts, field.name)) |x| {
@field(extra, field.name) = x;
}
diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig
index fb1630fcd5..6a771a3b84 100644
--- a/src/link/MachO/ZigObject.zig
+++ b/src/link/MachO/ZigObject.zig
@@ -1581,14 +1581,14 @@ pub fn getAtoms(self: *ZigObject) []const Atom.Index {
}
fn addAtomExtra(self: *ZigObject, allocator: Allocator, extra: Atom.Extra) !u32 {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
fn addAtomExtraAssumeCapacity(self: *ZigObject, extra: Atom.Extra) u32 {
const index = @as(u32, @intCast(self.atoms_extra.items.len));
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields) |field| {
self.atoms_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -1599,7 +1599,7 @@ fn addAtomExtraAssumeCapacity(self: *ZigObject, extra: Atom.Extra) u32 {
}
pub fn getAtomExtra(self: ZigObject, index: u32) Atom.Extra {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
var i: usize = index;
var result: Atom.Extra = undefined;
inline for (fields) |field| {
@@ -1614,7 +1614,7 @@ pub fn getAtomExtra(self: ZigObject, index: u32) Atom.Extra {
pub fn setAtomExtra(self: *ZigObject, index: u32, extra: Atom.Extra) void {
assert(index > 0);
- const fields = @typeInfo(Atom.Extra).Struct.fields;
+ const fields = @typeInfo(Atom.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.atoms_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
@@ -1642,14 +1642,14 @@ pub fn getSymbolRef(self: ZigObject, index: Symbol.Index, macho_file: *MachO) Ma
}
pub fn addSymbolExtra(self: *ZigObject, allocator: Allocator, extra: Symbol.Extra) !u32 {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
fn addSymbolExtraAssumeCapacity(self: *ZigObject, extra: Symbol.Extra) u32 {
const index = @as(u32, @intCast(self.symbols_extra.items.len));
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields) |field| {
self.symbols_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
@@ -1660,7 +1660,7 @@ fn addSymbolExtraAssumeCapacity(self: *ZigObject, extra: Symbol.Extra) u32 {
}
pub fn getSymbolExtra(self: ZigObject, index: u32) Symbol.Extra {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
var i: usize = index;
var result: Symbol.Extra = undefined;
inline for (fields) |field| {
@@ -1674,7 +1674,7 @@ pub fn getSymbolExtra(self: ZigObject, index: u32) Symbol.Extra {
}
pub fn setSymbolExtra(self: *ZigObject, index: u32, extra: Symbol.Extra) void {
- const fields = @typeInfo(Symbol.Extra).Struct.fields;
+ const fields = @typeInfo(Symbol.Extra).@"struct".fields;
inline for (fields, 0..) |field, i| {
self.symbols_extra.items[index + i] = switch (field.type) {
u32 => @field(extra, field.name),
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 749719d5fc..23425a2e7c 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -1169,7 +1169,7 @@ fn setupTLSRelocationsFunction(wasm: *Wasm) !void {
fn validateFeatures(
wasm: *const Wasm,
- to_emit: *[@typeInfo(types.Feature.Tag).Enum.fields.len]bool,
+ to_emit: *[@typeInfo(types.Feature.Tag).@"enum".fields.len]bool,
emit_features_count: *u32,
) !void {
const comp = wasm.base.comp;
@@ -1177,7 +1177,7 @@ fn validateFeatures(
const shared_memory = comp.config.shared_memory;
const cpu_features = target.cpu.features;
const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects.
- const known_features_count = @typeInfo(types.Feature.Tag).Enum.fields.len;
+ const known_features_count = @typeInfo(types.Feature.Tag).@"enum".fields.len;
var allowed = [_]bool{false} ** known_features_count;
var used = [_]u17{0} ** known_features_count;
@@ -1192,7 +1192,7 @@ fn validateFeatures(
// When the user has given an explicit list of features to enable,
// we extract them and insert each into the 'allowed' list.
if (!infer) {
- inline for (@typeInfo(std.Target.wasm.Feature).Enum.fields) |feature_field| {
+ inline for (@typeInfo(std.Target.wasm.Feature).@"enum".fields) |feature_field| {
if (cpu_features.isEnabled(feature_field.value)) {
allowed[feature_field.value] = true;
emit_features_count.* += 1;
@@ -2576,7 +2576,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
if (wasm.base.hasErrors()) return error.FlushFailure;
var emit_features_count: u32 = 0;
- var enabled_features: [@typeInfo(types.Feature.Tag).Enum.fields.len]bool = undefined;
+ var enabled_features: [@typeInfo(types.Feature.Tag).@"enum".fields.len]bool = undefined;
try wasm.validateFeatures(&enabled_features, &emit_features_count);
try wasm.resolveSymbolsInArchives();
if (wasm.base.hasErrors()) return error.FlushFailure;
@@ -2610,7 +2610,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
/// Writes the WebAssembly in-memory module to the file
fn writeToFile(
wasm: *Wasm,
- enabled_features: [@typeInfo(types.Feature.Tag).Enum.fields.len]bool,
+ enabled_features: [@typeInfo(types.Feature.Tag).@"enum".fields.len]bool,
feature_count: u32,
arena: Allocator,
) !void {
@@ -3867,7 +3867,7 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void {
pub fn getUleb128Size(uint_value: anytype) u32 {
const T = @TypeOf(uint_value);
- const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
+ const U = if (@typeInfo(T).int.bits < 8) u8 else T;
var value = @as(U, @intCast(uint_value));
var size: u32 = 0;
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 06512ae97e..fa46a1fea4 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -871,7 +871,7 @@ fn ElementType(comptime ptr: type) type {
/// signedness of the given type `T`.
/// Asserts `T` is an integer.
fn readLeb(comptime T: type, reader: anytype) !T {
- return switch (@typeInfo(T).Int.signedness) {
+ return switch (@typeInfo(T).int.signedness) {
.signed => try leb.readIleb128(T, reader),
.unsigned => try leb.readUleb128(T, reader),
};
@@ -881,7 +881,7 @@ fn readLeb(comptime T: type, reader: anytype) !T {
/// Asserts `T` is an enum
fn readEnum(comptime T: type, reader: anytype) !T {
switch (@typeInfo(T)) {
- .Enum => |enum_type| return @as(T, @enumFromInt(try readLeb(enum_type.tag_type, reader))),
+ .@"enum" => |enum_type| return @as(T, @enumFromInt(try readLeb(enum_type.tag_type, reader))),
else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)),
}
}
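A compilable sketch of the readEnum pattern above with the renamed tag (readEnumSketch and Section are made-up names, not part of the wasm linker): the .@"enum" payload exposes tag_type, which is what the LEB128 value is decoded as before converting to the enum.

const std = @import("std");
const leb = std.leb;

// Decode a LEB128-encoded integer and reinterpret it as the enum T.
fn readEnumSketch(comptime T: type, reader: anytype) !T {
    switch (@typeInfo(T)) {
        .@"enum" => |enum_type| return @as(T, @enumFromInt(try leb.readUleb128(enum_type.tag_type, reader))),
        else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)),
    }
}

test "readEnumSketch" {
    // Toy enum; 11 happens to be the wasm "data" section id.
    const Section = enum(u8) { custom = 0, data = 11 };
    var fbs = std.io.fixedBufferStream(&[_]u8{11});
    try std.testing.expectEqual(Section.data, try readEnumSketch(Section, fbs.reader()));
}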
diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig
index eff9bbf638..c0d531582f 100644
--- a/src/link/Wasm/ZigObject.zig
+++ b/src/link/Wasm/ZigObject.zig
@@ -805,7 +805,7 @@ pub fn getUavVAddr(
const is_wasm32 = target.cpu.arch == .wasm32;
const zcu = wasm_file.base.comp.zcu.?;
const ty = Type.fromInterned(zcu.intern_pool.typeOf(uav));
- if (ty.zigTypeTag(zcu) == .Fn) {
+ if (ty.zigTypeTag(zcu) == .@"fn") {
std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations
try parent_atom.relocs.append(gpa, .{
.index = target_symbol_index,
diff --git a/src/link/riscv.zig b/src/link/riscv.zig
index bf23010c80..7c0282ef3b 100644
--- a/src/link/riscv.zig
+++ b/src/link/riscv.zig
@@ -12,7 +12,7 @@ pub fn writeSetSub6(comptime op: enum { set, sub }, code: *[1]u8, addend: anytyp
pub fn writeAddend(
comptime Int: type,
comptime op: enum { add, sub },
- code: *[@typeInfo(Int).Int.bits / 8]u8,
+ code: *[@typeInfo(Int).int.bits / 8]u8,
value: anytype,
) void {
var V: Int = mem.readInt(Int, code, .little);
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 7afa229401..557ba8a91c 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -204,13 +204,13 @@ pub const Value = union(enum) {
fn encode(arena: Allocator, input: anytype) YamlError!?Value {
switch (@typeInfo(@TypeOf(input))) {
- .ComptimeInt,
- .Int,
+ .comptime_int,
+ .int,
=> return Value{ .int = math.cast(i64, input) orelse return error.Overflow },
- .Float => return Value{ .float = math.lossyCast(f64, input) },
+ .float => return Value{ .float = math.lossyCast(f64, input) },
- .Struct => |info| if (info.is_tuple) {
+ .@"struct" => |info| if (info.is_tuple) {
var list = std.ArrayList(Value).init(arena);
errdefer list.deinit();
try list.ensureTotalCapacityPrecise(info.fields.len);
@@ -237,7 +237,7 @@ pub const Value = union(enum) {
return Value{ .map = map };
},
- .Union => |info| if (info.tag_type) |tag_type| {
+ .@"union" => |info| if (info.tag_type) |tag_type| {
inline for (info.fields) |field| {
if (@field(tag_type, field.name) == input) {
return try encode(arena, @field(input, field.name));
@@ -245,11 +245,11 @@ pub const Value = union(enum) {
} else unreachable;
} else return error.UntaggedUnion,
- .Array => return encode(arena, &input),
+ .array => return encode(arena, &input),
- .Pointer => |info| switch (info.size) {
+ .pointer => |info| switch (info.size) {
.One => switch (@typeInfo(info.child)) {
- .Array => |child_info| {
+ .array => |child_info| {
const Slice = []const child_info.child;
return encode(arena, @as(Slice, input));
},
@@ -284,9 +284,9 @@ pub const Value = union(enum) {
// TODO we should probably have an option to encode `null` and also
// allow for some default value too.
- .Optional => return if (input) |val| encode(arena, val) else null,
+ .optional => return if (input) |val| encode(arena, val) else null,
- .Null => return null,
+ .null => return null,
else => {
@compileError("Unhandled type: {s}" ++ @typeName(@TypeOf(input)));
@@ -339,7 +339,7 @@ pub const Yaml = struct {
pub fn parse(self: *Yaml, comptime T: type) Error!T {
if (self.docs.items.len == 0) {
- if (@typeInfo(T) == .Void) return {};
+ if (@typeInfo(T) == .void) return {};
return error.TypeMismatch;
}
@@ -348,14 +348,14 @@ pub const Yaml = struct {
}
switch (@typeInfo(T)) {
- .Array => |info| {
+ .array => |info| {
var parsed: T = undefined;
for (self.docs.items, 0..) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
return parsed;
},
- .Pointer => |info| {
+ .pointer => |info| {
switch (info.size) {
.Slice => {
var parsed = try self.arena.allocator().alloc(info.child, self.docs.items.len);
@@ -367,35 +367,35 @@ pub const Yaml = struct {
else => return error.TypeMismatch,
}
},
- .Union => return error.Unimplemented,
+ .@"union" => return error.Unimplemented,
else => return error.TypeMismatch,
}
}
fn parseValue(self: *Yaml, comptime T: type, value: Value) Error!T {
return switch (@typeInfo(T)) {
- .Int => math.cast(T, try value.asInt()) orelse return error.Overflow,
- .Float => if (value.asFloat()) |float| {
+ .int => math.cast(T, try value.asInt()) orelse return error.Overflow,
+ .float => if (value.asFloat()) |float| {
return math.lossyCast(T, float);
} else |_| {
return math.lossyCast(T, try value.asInt());
},
- .Struct => self.parseStruct(T, try value.asMap()),
- .Union => self.parseUnion(T, value),
- .Array => self.parseArray(T, try value.asList()),
- .Pointer => if (value.asList()) |list| {
+ .@"struct" => self.parseStruct(T, try value.asMap()),
+ .@"union" => self.parseUnion(T, value),
+ .array => self.parseArray(T, try value.asList()),
+ .pointer => if (value.asList()) |list| {
return self.parsePointer(T, .{ .list = list });
} else |_| {
return self.parsePointer(T, .{ .string = try value.asString() });
},
- .Void => error.TypeMismatch,
- .Optional => unreachable,
+ .void => error.TypeMismatch,
+ .optional => unreachable,
else => error.Unimplemented,
};
}
fn parseUnion(self: *Yaml, comptime T: type, value: Value) Error!T {
- const union_info = @typeInfo(T).Union;
+ const union_info = @typeInfo(T).@"union";
if (union_info.tag_type) |_| {
inline for (union_info.fields) |field| {
@@ -412,12 +412,12 @@ pub const Yaml = struct {
fn parseOptional(self: *Yaml, comptime T: type, value: ?Value) Error!T {
const unwrapped = value orelse return null;
- const opt_info = @typeInfo(T).Optional;
+ const opt_info = @typeInfo(T).optional;
return @as(T, try self.parseValue(opt_info.child, unwrapped));
}
fn parseStruct(self: *Yaml, comptime T: type, map: Map) Error!T {
- const struct_info = @typeInfo(T).Struct;
+ const struct_info = @typeInfo(T).@"struct";
var parsed: T = undefined;
inline for (struct_info.fields) |field| {
@@ -426,7 +426,7 @@ pub const Yaml = struct {
break :blk map.get(field_name);
};
- if (@typeInfo(field.type) == .Optional) {
+ if (@typeInfo(field.type) == .optional) {
@field(parsed, field.name) = try self.parseOptional(field.type, value);
continue;
}
@@ -442,7 +442,7 @@ pub const Yaml = struct {
}
fn parsePointer(self: *Yaml, comptime T: type, value: Value) Error!T {
- const ptr_info = @typeInfo(T).Pointer;
+ const ptr_info = @typeInfo(T).pointer;
const arena = self.arena.allocator();
switch (ptr_info.size) {
@@ -462,7 +462,7 @@ pub const Yaml = struct {
}
fn parseArray(self: *Yaml, comptime T: type, list: List) Error!T {
- const array_info = @typeInfo(T).Array;
+ const array_info = @typeInfo(T).array;
if (array_info.len != list.len) return error.ArraySizeMismatch;
var parsed: T = undefined;
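The yaml.zig changes above are all the same kind of comptime dispatch over @typeInfo; a small self-contained sketch with the lower-case tags follows (describe is a toy classifier written for this note, not the tapi encoder itself).

const std = @import("std");

// Route a type to a human-readable category by switching on @typeInfo.
fn describe(comptime T: type) []const u8 {
    return switch (@typeInfo(T)) {
        .int, .comptime_int => "integer",
        .float => "float",
        .@"struct" => |info| if (info.is_tuple) "tuple" else "struct",
        .pointer => |info| if (info.size == .Slice) "slice" else "pointer",
        .optional => "optional",
        else => "other",
    };
}

test "describe" {
    try std.testing.expectEqualStrings("integer", describe(u32));
    try std.testing.expectEqualStrings("struct", describe(struct { x: u8 }));
    try std.testing.expectEqualStrings("slice", describe([]const u8));
}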
diff --git a/src/main.zig b/src/main.zig
index 226ec79467..1db2be6f25 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -130,7 +130,7 @@ var log_scopes: std.ArrayListUnmanaged([]const u8) = .{};
pub fn log(
comptime level: std.log.Level,
- comptime scope: @TypeOf(.EnumLiteral),
+ comptime scope: @Type(.enum_literal),
comptime format: []const u8,
args: anytype,
) void {
@@ -5315,7 +5315,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var any_errors = false;
while (it.next()) |hash| {
if (hash.len == 0) continue;
- const digest_len = @typeInfo(Package.Manifest.MultiHashHexDigest).Array.len;
+ const digest_len = @typeInfo(Package.Manifest.MultiHashHexDigest).array.len;
if (hash.len != digest_len) {
std.log.err("invalid digest (length {d} instead of {d}): '{s}'", .{
hash.len, digest_len, hash,
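A brief sketch of the scope-parameter change above: the enum-literal type is now spelled @Type(.enum_literal) rather than @TypeOf(.EnumLiteral). scopeName here is a hypothetical helper used only to show the parameter type, not compiler code.

const std = @import("std");

// Accept any enum literal at comptime and return its name.
fn scopeName(comptime scope: @Type(.enum_literal)) []const u8 {
    return @tagName(scope);
}

test "scopeName" {
    try std.testing.expectEqualStrings("gpa", scopeName(.gpa));
}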
diff --git a/src/mutable_value.zig b/src/mutable_value.zig
index 9fcac259df..49826c2fb2 100644
--- a/src/mutable_value.zig
+++ b/src/mutable_value.zig
@@ -210,19 +210,19 @@ pub const MutableValue = union(enum) {
},
},
.undef => |ty_ip| switch (Type.fromInterned(ty_ip).zigTypeTag(zcu)) {
- .Struct, .Array, .Vector => |type_tag| {
+ .@"struct", .array, .vector => |type_tag| {
const ty = Type.fromInterned(ty_ip);
const opt_sent = ty.sentinel(zcu);
- if (type_tag == .Struct or opt_sent != null or !allow_repeated) {
+ if (type_tag == .@"struct" or opt_sent != null or !allow_repeated) {
const len_no_sent = ip.aggregateTypeLen(ty_ip);
const elems = try arena.alloc(MutableValue, @intCast(len_no_sent + @intFromBool(opt_sent != null)));
switch (type_tag) {
- .Array, .Vector => {
+ .array, .vector => {
const elem_ty = ip.childType(ty_ip);
const undef_elem = try pt.intern(.{ .undef = elem_ty });
@memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
},
- .Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
+ .@"struct" => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
const field_ty = ty.fieldType(i, zcu).toIntern();
mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) };
},
@@ -244,7 +244,7 @@ pub const MutableValue = union(enum) {
} };
}
},
- .Union => {
+ .@"union" => {
const payload = try arena.create(MutableValue);
const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(pt);
payload.* = .{ .interned = try pt.intern(.{ .undef = backing_ty.toIntern() }) };
@@ -254,7 +254,7 @@ pub const MutableValue = union(enum) {
.payload = payload,
} };
},
- .Pointer => {
+ .pointer => {
const ptr_ty = ip.indexToKey(ty_ip).ptr_type;
if (ptr_ty.flags.size != .Slice) return;
const ptr = try arena.create(MutableValue);
@@ -375,7 +375,7 @@ pub const MutableValue = union(enum) {
if (field_val.eqlTrivial(r.child.*)) return;
// We must switch to either the `aggregate` or the `bytes` representation.
const len_inc_sent = ip.aggregateTypeLenIncludingSentinel(r.ty);
- if (Type.fromInterned(r.ty).zigTypeTag(zcu) != .Struct and
+ if (Type.fromInterned(r.ty).zigTypeTag(zcu) != .@"struct" and
is_trivial_int and
Type.fromInterned(r.ty).childType(zcu).toIntern() == .u8_type and
r.child.isTrivialInt(zcu))
@@ -402,7 +402,7 @@ pub const MutableValue = union(enum) {
},
.aggregate => |a| {
a.elems[field_idx] = field_val;
- const is_struct = Type.fromInterned(a.ty).zigTypeTag(zcu) == .Struct;
+ const is_struct = Type.fromInterned(a.ty).zigTypeTag(zcu) == .@"struct";
// Attempt to switch to a more efficient representation.
const is_repeated = for (a.elems) |e| {
if (!e.eqlTrivial(field_val)) break false;
@@ -457,9 +457,9 @@ pub const MutableValue = union(enum) {
.interned => |ip_index| {
const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(ip_index));
switch (ty.zigTypeTag(pt.zcu)) {
- .Array, .Vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(pt, field_idx)).toIntern() },
- .Struct, .Union => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(pt, field_idx)).toIntern() },
- .Pointer => {
+ .array, .vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(pt, field_idx)).toIntern() },
+ .@"struct", .@"union" => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(pt, field_idx)).toIntern() },
+ .pointer => {
assert(ty.isSlice(pt.zcu));
return switch (field_idx) {
Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
@@ -551,7 +551,7 @@ pub const MutableValue = union(enum) {
/// Used for deciding when to switch aggregate representations without fully
/// interning many values.
fn eqlTrivial(a: MutableValue, b: MutableValue) bool {
- const Tag = @typeInfo(MutableValue).Union.tag_type.?;
+ const Tag = @typeInfo(MutableValue).@"union".tag_type.?;
if (@as(Tag, a) != @as(Tag, b)) return false;
return switch (a) {
.interned => |a_ip| a_ip == b.interned,
diff --git a/src/print_env.zig b/src/print_env.zig
index b51423656b..c68c267af4 100644
--- a/src/print_env.zig
+++ b/src/print_env.zig
@@ -47,7 +47,7 @@ pub fn cmdEnv(arena: Allocator, args: []const []const u8, stdout: std.fs.File.Wr
try jws.objectField("env");
try jws.beginObject();
- inline for (@typeInfo(std.zig.EnvVar).Enum.fields) |field| {
+ inline for (@typeInfo(std.zig.EnvVar).@"enum".fields) |field| {
try jws.objectField(field.name);
try jws.write(try @field(std.zig.EnvVar, field.name).get(arena));
}
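A compilable sketch of the loop above under the new naming (Color is a stand-in for std.zig.EnvVar): enumerating an enum's declared fields now goes through .@"enum".fields.

const std = @import("std");

test "list enum field names" {
    const Color = enum { red, green, blue };
    var names: [3][]const u8 = undefined;
    // Collect every field name of the enum at comptime.
    inline for (@typeInfo(Color).@"enum".fields, 0..) |field, i| {
        names[i] = field.name;
    }
    try std.testing.expectEqualStrings("green", names[1]);
}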
diff --git a/src/print_value.zig b/src/print_value.zig
index 89e7750369..9c06c6bcd8 100644
--- a/src/print_value.zig
+++ b/src/print_value.zig
@@ -207,7 +207,7 @@ fn printAggregate(
const ip = &zcu.intern_pool;
const ty = Type.fromInterned(aggregate.ty);
switch (ty.zigTypeTag(zcu)) {
- .Struct => if (!ty.isTuple(zcu)) {
+ .@"struct" => if (!ty.isTuple(zcu)) {
if (is_ref) try writer.writeByte('&');
if (ty.structFieldCount(zcu) == 0) {
return writer.writeAll(".{}");
@@ -223,7 +223,7 @@ fn printAggregate(
try writer.writeAll(" }");
return;
},
- .Array => {
+ .array => {
switch (aggregate.storage) {
.bytes => |bytes| string: {
const len = ty.arrayLenIncludingSentinel(zcu);
@@ -253,7 +253,7 @@ fn printAggregate(
else => {},
}
},
- .Vector => if (ty.arrayLen(zcu) == 0) {
+ .vector => if (ty.arrayLen(zcu) == 0) {
if (is_ref) try writer.writeByte('&');
return writer.writeAll(".{}");
},
@@ -362,17 +362,17 @@ fn printPtrDerivation(
try printPtrDerivation(field.parent.*, writer, level, pt, have_sema, sema);
const agg_ty = (try field.parent.ptrType(pt)).childType(zcu);
switch (agg_ty.zigTypeTag(zcu)) {
- .Struct => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| {
+ .@"struct" => if (agg_ty.structFieldName(field.field_idx, zcu).unwrap()) |field_name| {
try writer.print(".{i}", .{field_name.fmt(ip)});
} else {
try writer.print("[{d}]", .{field.field_idx});
},
- .Union => {
+ .@"union" => {
const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
const field_name = tag_ty.enumFieldName(field.field_idx, zcu);
try writer.print(".{i}", .{field_name.fmt(ip)});
},
- .Pointer => switch (field.field_idx) {
+ .pointer => switch (field.field_idx) {
Value.slice_ptr_index => try writer.writeAll(".ptr"),
Value.slice_len_index => try writer.writeAll(".len"),
else => unreachable,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 80f1500d0e..8d70af5f3c 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -907,7 +907,7 @@ const Writer = struct {
fn writeFieldParentPtr(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const extra = self.code.extraData(Zir.Inst.FieldParentPtr, extended.operand).data;
- const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).@"struct".backing_integer.?;
const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
if (flags.align_cast) try stream.writeAll("align_cast, ");
if (flags.addrspace_cast) try stream.writeAll("addrspace_cast, ");
@@ -1065,7 +1065,7 @@ const Writer = struct {
}
fn writePtrCastFull(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
- const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).@"struct".backing_integer.?;
const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
if (flags.ptr_cast) try stream.writeAll("ptr_cast, ");
@@ -1081,7 +1081,7 @@ const Writer = struct {
}
fn writePtrCastNoDest(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
- const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).Struct.backing_integer.?;
+ const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).@"struct".backing_integer.?;
const flags: Zir.Inst.FullPtrCastFlags = @bitCast(@as(FlagsInt, @truncate(extended.small)));
const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
if (flags.const_cast) try stream.writeAll("const_cast, ");
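The three hunks above use the same packed-struct idiom; a minimal sketch with the renamed access path (Flags is a stand-in for Zir.Inst.FullPtrCastFlags): the backing integer of a packed struct is reached via @typeInfo(...).@"struct".backing_integer.

const std = @import("std");

// Stand-in flag set; the real FullPtrCastFlags is defined in Zir.zig.
const Flags = packed struct(u5) {
    ptr_cast: bool = false,
    align_cast: bool = false,
    addrspace_cast: bool = false,
    const_cast: bool = false,
    volatile_cast: bool = false,
};

test "flags round trip through the backing integer" {
    const FlagsInt = @typeInfo(Flags).@"struct".backing_integer.?;
    // Bit 1 corresponds to the second field, align_cast.
    const flags: Flags = @bitCast(@as(FlagsInt, 0b00010));
    try std.testing.expect(flags.align_cast);
    try std.testing.expect(!flags.ptr_cast);
}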
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 835121e0cc..ef81a5dfda 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -161,7 +161,7 @@ pub fn translate(
context.pattern_list.deinit(gpa);
}
- inline for (@typeInfo(std.zig.c_builtins).Struct.decls) |decl| {
+ inline for (@typeInfo(std.zig.c_builtins).@"struct".decls) |decl| {
const builtin = try Tag.pub_var_simple.create(arena, .{
.name = decl.name,
.init = try Tag.import_c_builtin.create(arena, decl.name),
@@ -1324,7 +1324,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
fn vectorTypeInfo(arena: mem.Allocator, vec_node: Node, field: []const u8) TransError!Node {
const typeof_call = try Tag.typeof.create(arena, vec_node);
const typeinfo_call = try Tag.typeinfo.create(arena, typeof_call);
- const vector_type_info = try Tag.field_access.create(arena, .{ .lhs = typeinfo_call, .field_name = "Vector" });
+ const vector_type_info = try Tag.field_access.create(arena, .{ .lhs = typeinfo_call, .field_name = "vector" });
return Tag.field_access.create(arena, .{ .lhs = vector_type_info, .field_name = field });
}
@@ -2008,7 +2008,7 @@ fn transImplicitCastExpr(
}
fn isBuiltinDefined(name: []const u8) bool {
- inline for (@typeInfo(std.zig.c_builtins).Struct.decls) |decl| {
+ inline for (@typeInfo(std.zig.c_builtins).@"struct".decls) |decl| {
if (std.mem.eql(u8, name, decl.name)) return true;
}
return false;
@@ -4655,7 +4655,7 @@ fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node {
fn transCreateNodeNumber(c: *Context, num: anytype, num_kind: enum { int, float }) !Node {
const fmt_s = switch (@typeInfo(@TypeOf(num))) {
- .Int, .ComptimeInt => "{d}",
+ .int, .comptime_int => "{d}",
else => "{s}",
};
const str = try std.fmt.allocPrint(c.arena, fmt_s, .{num});