author     Veikka Tuominen <git@vexu.eu>  2024-03-11 22:42:32 +0200
committer  GitHub <noreply@github.com>    2024-03-11 22:42:32 +0200
commit     4f782d1e853accbe1c4bfab2617c3813d4b1e59f (patch)
tree       0eb768171ecfb058fba72d199afc951af206f8fb /src
parent     d0c06ca7127110a8afeb0ef524a197049892db21 (diff)
parent     6067d39522f939c08dd3f3ea4fb5889ff0024e72 (diff)
download   zig-4f782d1e853accbe1c4bfab2617c3813d4b1e59f.tar.gz
           zig-4f782d1e853accbe1c4bfab2617c3813d4b1e59f.zip
Merge pull request #18994 from ExpidusOS/feat/container-layout-rename-fields
std.builtin: make enum fields lowercase
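
For readers tracking the rename, here is a minimal sketch (not part of this diff) of how the lowercased std.builtin fields are written after this change. Since `extern` and `packed` are Zig keywords, those fields use the `@"..."` quoted-identifier syntax:

    const std = @import("std");

    // Before #18994              ->  after #18994
    //   LinkMode.Static              LinkMode.static
    //   LinkMode.Dynamic             LinkMode.dynamic
    //   FloatMode.Strict             FloatMode.strict
    //   FloatMode.Optimized          FloatMode.optimized
    //   GlobalLinkage.Strong         GlobalLinkage.strong
    //   AtomicOrder.SeqCst           AtomicOrder.seq_cst
    //   ContainerLayout.Auto         ContainerLayout.auto
    //   ContainerLayout.Extern       ContainerLayout.@"extern" (keyword, so quoted)
    //   ContainerLayout.Packed       ContainerLayout.@"packed" (keyword, so quoted)

    fn describeLayout(layout: std.builtin.Type.ContainerLayout) []const u8 {
        return switch (layout) {
            .auto => "auto",
            .@"extern" => "extern",
            .@"packed" => "packed",
        };
    }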
Diffstat (limited to 'src')
-rw-r--r--  src/Compilation.zig                   12
-rw-r--r--  src/Compilation/Config.zig            24
-rw-r--r--  src/InternPool.zig                    71
-rw-r--r--  src/Module.zig                        18
-rw-r--r--  src/Package/Module.zig                 2
-rw-r--r--  src/Sema.zig                         234
-rw-r--r--  src/Value.zig                         36
-rw-r--r--  src/arch/aarch64/CodeGen.zig           8
-rw-r--r--  src/arch/aarch64/abi.zig               4
-rw-r--r--  src/arch/arm/CodeGen.zig               8
-rw-r--r--  src/arch/arm/abi.zig                   4
-rw-r--r--  src/arch/riscv64/CodeGen.zig           8
-rw-r--r--  src/arch/riscv64/abi.zig               4
-rw-r--r--  src/arch/wasm/CodeGen.zig             14
-rw-r--r--  src/arch/wasm/abi.zig                  6
-rw-r--r--  src/arch/x86_64/CodeGen.zig           26
-rw-r--r--  src/arch/x86_64/Emit.zig               4
-rw-r--r--  src/arch/x86_64/Lower.zig              2
-rw-r--r--  src/arch/x86_64/abi.zig                6
-rw-r--r--  src/codegen.zig                        8
-rw-r--r--  src/codegen/c.zig                     56
-rw-r--r--  src/codegen/c/type.zig                 2
-rw-r--r--  src/codegen/llvm.zig                  56
-rw-r--r--  src/codegen/llvm/Builder.zig           2
-rw-r--r--  src/codegen/llvm/bitcode_writer.zig    4
-rw-r--r--  src/codegen/spirv.zig                 16
-rw-r--r--  src/codegen/spirv/Section.zig          4
-rw-r--r--  src/crash_report.zig                   4
-rw-r--r--  src/glibc.zig                          2
-rw-r--r--  src/libcxx.zig                         4
-rw-r--r--  src/libtsan.zig                        2
-rw-r--r--  src/libunwind.zig                      2
-rw-r--r--  src/link.zig                          12
-rw-r--r--  src/link/Coff.zig                     12
-rw-r--r--  src/link/Coff/lld.zig                 10
-rw-r--r--  src/link/Dwarf.zig                     2
-rw-r--r--  src/link/Elf.zig                      52
-rw-r--r--  src/link/Elf/ZigObject.zig             8
-rw-r--r--  src/link/MachO/ZigObject.zig          10
-rw-r--r--  src/link/MachO/load_commands.zig       2
-rw-r--r--  src/link/Wasm.zig                      6
-rw-r--r--  src/link/Wasm/ZigObject.zig            8
-rw-r--r--  src/main.zig                          70
-rw-r--r--  src/musl.zig                           2
-rw-r--r--  src/print_air.zig                      8
-rw-r--r--  src/print_zir.zig                      2
-rw-r--r--  src/type.zig                          38
47 files changed, 447 insertions(+), 448 deletions(-)
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 13fb672042..0fdfb6038f 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1201,7 +1201,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const output_mode = options.config.output_mode;
const is_dyn_lib = switch (output_mode) {
.Obj, .Exe => false,
- .Lib => options.config.link_mode == .Dynamic,
+ .Lib => options.config.link_mode == .dynamic,
};
const is_exe_or_dyn_lib = switch (output_mode) {
.Obj => false,
@@ -1806,8 +1806,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.{ .musl_crt_file = .scrt1_o },
.{ .musl_crt_file = .rcrt1_o },
switch (comp.config.link_mode) {
- .Static => .{ .musl_crt_file = .libc_a },
- .Dynamic => .{ .musl_crt_file = .libc_so },
+ .static => .{ .musl_crt_file = .libc_a },
+ .dynamic => .{ .musl_crt_file = .libc_so },
},
});
}
@@ -6087,7 +6087,7 @@ pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const
fn wantBuildLibCFromSource(comp: Compilation) bool {
const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
.Obj => false,
- .Lib => comp.config.link_mode == .Dynamic,
+ .Lib => comp.config.link_mode == .dynamic,
.Exe => true,
};
const ofmt = comp.root_mod.resolved_target.result.ofmt;
@@ -6116,7 +6116,7 @@ fn wantBuildMinGWFromSource(comp: Compilation) bool {
fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
.Obj => false,
- .Lib => comp.config.link_mode == .Dynamic,
+ .Lib => comp.config.link_mode == .dynamic,
.Exe => true,
};
const ofmt = comp.root_mod.resolved_target.result.ofmt;
@@ -6310,7 +6310,7 @@ fn buildOutputFromZig(
const config = try Config.resolve(.{
.output_mode = output_mode,
- .link_mode = .Static,
+ .link_mode = .static,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
.have_zcu = true,
diff --git a/src/Compilation/Config.zig b/src/Compilation/Config.zig
index 2f6422b28a..d692fe5623 100644
--- a/src/Compilation/Config.zig
+++ b/src/Compilation/Config.zig
@@ -348,26 +348,26 @@ pub fn resolve(options: Options) ResolveError!Config {
const link_mode = b: {
const explicitly_exe_or_dyn_lib = switch (options.output_mode) {
.Obj => false,
- .Lib => (options.link_mode orelse .Static) == .Dynamic,
+ .Lib => (options.link_mode orelse .static) == .dynamic,
.Exe => true,
};
if (target_util.cannotDynamicLink(target)) {
- if (options.link_mode == .Dynamic) return error.TargetCannotDynamicLink;
- break :b .Static;
+ if (options.link_mode == .dynamic) return error.TargetCannotDynamicLink;
+ break :b .static;
}
if (explicitly_exe_or_dyn_lib and link_libc and
(target.isGnuLibC() or target_util.osRequiresLibC(target)))
{
- if (options.link_mode == .Static) return error.LibCRequiresDynamicLinking;
- break :b .Dynamic;
+ if (options.link_mode == .static) return error.LibCRequiresDynamicLinking;
+ break :b .dynamic;
}
// When creating an executable that links to system libraries, we
// require dynamic linking, but we must not link static libraries
// or object files dynamically!
if (options.any_dyn_libs and options.output_mode == .Exe) {
- if (options.link_mode == .Static) return error.SharedLibrariesRequireDynamicLinking;
- break :b .Dynamic;
+ if (options.link_mode == .static) return error.SharedLibrariesRequireDynamicLinking;
+ break :b .dynamic;
}
if (options.link_mode) |link_mode| break :b link_mode;
@@ -377,16 +377,16 @@ pub fn resolve(options: Options) ResolveError!Config {
{
// If targeting the system's native ABI and the system's libc is
// musl, link dynamically by default.
- break :b .Dynamic;
+ break :b .dynamic;
}
// Static is generally a better default. Fight me.
- break :b .Static;
+ break :b .static;
};
const import_memory = options.import_memory orelse (options.output_mode == .Obj);
const export_memory = b: {
- if (link_mode == .Dynamic) {
+ if (link_mode == .dynamic) {
if (options.export_memory == true) return error.ExportMemoryAndDynamicIncompatible;
break :b false;
}
@@ -397,7 +397,7 @@ pub fn resolve(options: Options) ResolveError!Config {
const pie: bool = b: {
switch (options.output_mode) {
.Obj, .Exe => {},
- .Lib => if (link_mode == .Dynamic) {
+ .Lib => if (link_mode == .dynamic) {
if (options.pie == true) return error.DynamicLibraryPrecludesPie;
break :b false;
},
@@ -467,7 +467,7 @@ pub fn resolve(options: Options) ResolveError!Config {
if (rdynamic) break :b true;
break :b switch (options.output_mode) {
.Obj, .Exe => false,
- .Lib => link_mode == .Dynamic,
+ .Lib => link_mode == .dynamic,
};
};
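
The Config.zig hunks above resolve std.builtin.LinkMode with the new lowercase fields. A minimal sketch of the same two-way switch, assuming only the public LinkMode enum (libcBasename is a hypothetical helper for illustration, not the compiler's API):

    const std = @import("std");

    // Picks a libc artifact name the way Compilation.zig selects
    // libc_a vs libc_so above.
    fn libcBasename(link_mode: std.builtin.LinkMode) []const u8 {
        return switch (link_mode) {
            .static => "libc.a",
            .dynamic => "libc.so",
        };
    }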
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 6a82abedce..64da7fc043 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -2025,15 +2025,15 @@ pub const LoadedStructType = struct {
/// complicated logic.
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
- .Packed => false,
- .Auto, .Extern => s.flagsPtr(ip).known_non_opv,
+ .@"packed" => false,
+ .auto, .@"extern" => s.flagsPtr(ip).known_non_opv,
};
}
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
- assert(self.layout != .Packed);
+ assert(self.layout != .@"packed");
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
@@ -2041,13 +2041,13 @@ pub const LoadedStructType = struct {
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts that the struct is packed.
pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
- assert(self.layout == .Packed);
+ assert(self.layout == .@"packed");
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
+ if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) {
flags_ptr.assumed_runtime_bits = true;
@@ -2057,7 +2057,7 @@ pub const LoadedStructType = struct {
}
pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
+ if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) return true;
flags_ptr.field_types_wip = true;
@@ -2065,12 +2065,12 @@ pub const LoadedStructType = struct {
}
pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
+ if (s.layout == .@"packed") return;
s.flagsPtr(ip).field_types_wip = false;
}
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
+ if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.layout_wip) return true;
flags_ptr.layout_wip = true;
@@ -2078,12 +2078,12 @@ pub const LoadedStructType = struct {
}
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
+ if (s.layout == .@"packed") return;
s.flagsPtr(ip).layout_wip = false;
}
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
+ if (s.layout == .@"packed") return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.alignment_wip) return true;
flags_ptr.alignment_wip = true;
@@ -2091,19 +2091,19 @@ pub const LoadedStructType = struct {
}
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
+ if (s.layout == .@"packed") return;
s.flagsPtr(ip).alignment_wip = false;
}
pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
switch (s.layout) {
- .Packed => {
+ .@"packed" => {
const flag = &s.packedFlagsPtr(ip).field_inits_wip;
if (flag.*) return true;
flag.* = true;
return false;
},
- .Auto, .Extern => {
+ .auto, .@"extern" => {
const flag = &s.flagsPtr(ip).field_inits_wip;
if (flag.*) return true;
flag.* = true;
@@ -2114,13 +2114,13 @@ pub const LoadedStructType = struct {
pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
- .Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
+ .@"packed" => s.packedFlagsPtr(ip).field_inits_wip = false,
+ .auto, .@"extern" => s.flagsPtr(ip).field_inits_wip = false,
}
}
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return true;
+ if (s.layout == .@"packed") return true;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.fully_resolved) return true;
flags_ptr.fully_resolved = true;
@@ -2134,7 +2134,7 @@ pub const LoadedStructType = struct {
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn size(self: @This(), ip: *InternPool) *u32 {
- assert(self.layout != .Packed);
+ assert(self.layout != .@"packed");
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
}
@@ -2144,14 +2144,14 @@ pub const LoadedStructType = struct {
/// set to `none` until the layout is resolved.
/// Asserts the struct is packed.
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
- assert(s.layout == .Packed);
+ assert(s.layout == .@"packed");
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
}
/// Asserts the struct is not packed.
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
- assert(s.layout != .Packed);
+ assert(s.layout != .@"packed");
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
}
@@ -2163,31 +2163,31 @@ pub const LoadedStructType = struct {
pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
return switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).inits_resolved,
- .Auto, .Extern => s.flagsPtr(ip).inits_resolved,
+ .@"packed" => s.packedFlagsPtr(ip).inits_resolved,
+ .auto, .@"extern" => s.flagsPtr(ip).inits_resolved,
};
}
pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).inits_resolved = true,
- .Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
+ .@"packed" => s.packedFlagsPtr(ip).inits_resolved = true,
+ .auto, .@"extern" => s.flagsPtr(ip).inits_resolved = true,
}
}
pub fn haveLayout(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
- .Packed => s.backingIntType(ip).* != .none,
- .Auto, .Extern => s.flagsPtr(ip).layout_resolved,
+ .@"packed" => s.backingIntType(ip).* != .none,
+ .auto, .@"extern" => s.flagsPtr(ip).layout_resolved,
};
}
pub fn isTuple(s: @This(), ip: *InternPool) bool {
- return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
+ return s.layout != .@"packed" and s.flagsPtr(ip).is_tuple;
}
pub fn hasReorderedFields(s: @This()) bool {
- return s.layout == .Auto;
+ return s.layout == .auto;
}
pub const RuntimeOrderIterator = struct {
@@ -2221,7 +2221,7 @@ pub const LoadedStructType = struct {
/// May or may not include zero-bit fields.
/// Asserts the struct is not packed.
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
- assert(s.layout != .Packed);
+ assert(s.layout != .@"packed");
return .{
.ip = ip,
.field_index = 0,
@@ -2239,7 +2239,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.decl = .none,
.namespace = .none,
.zir_index = .none,
- .layout = .Auto,
+ .layout = .auto,
.field_names = .{ .start = 0, .len = 0 },
.field_types = .{ .start = 0, .len = 0 },
.field_inits = .{ .start = 0, .len = 0 },
@@ -2314,7 +2314,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.decl = extra.data.decl.toOptional(),
.namespace = namespace,
.zir_index = extra.data.zir_index.toOptional(),
- .layout = if (extra.data.flags.is_extern) .Extern else .Auto,
+ .layout = if (extra.data.flags.is_extern) .@"extern" else .auto,
.field_names = names,
.field_types = field_types,
.field_inits = inits,
@@ -2367,7 +2367,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.decl = extra.data.decl.toOptional(),
.namespace = extra.data.namespace,
.zir_index = extra.data.zir_index.toOptional(),
- .layout = .Packed,
+ .layout = .@"packed",
.field_names = field_names,
.field_types = field_types,
.field_inits = field_inits,
@@ -4455,7 +4455,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
} else .{ .start = 0, .len = 0 } },
} };
} },
-
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
.type_union => .{ .union_type = ns: {
@@ -6009,9 +6008,9 @@ pub fn getStructType(
};
const is_extern = switch (ini.layout) {
- .Auto => false,
- .Extern => true,
- .Packed => {
+ .auto => false,
+ .@"extern" => true,
+ .@"packed" => {
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len +
// TODO: fmt bug
// zig fmt: off
@@ -6140,7 +6139,7 @@ pub fn getStructType(
if (ini.any_comptime_fields) {
ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len);
}
- if (ini.layout == .Auto) {
+ if (ini.layout == .auto) {
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(LoadedStructType.RuntimeOrder.unresolved), ini.fields_len);
}
ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len);
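
The InternPool changes hinge on ContainerLayout comparisons: packed structs keep their flags in a separate TypeStructPacked path, and only auto layout permits field reordering. A hedged sketch of that guard pattern, using hypothetical helpers outside the compiler:

    const std = @import("std");
    const Layout = std.builtin.Type.ContainerLayout;

    // Mirrors hasReorderedFields above: only auto layout lets the
    // compiler reorder fields.
    fn canReorderFields(layout: Layout) bool {
        return layout == .auto;
    }

    // Mirrors the flagsPtr/packedFlagsPtr split: packed layouts take
    // a different code path because they are bit-packed.
    fn isByteAddressable(layout: Layout) bool {
        return switch (layout) {
            .@"packed" => false,
            .auto, .@"extern" => true,
        };
    }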
diff --git a/src/Module.zig b/src/Module.zig
index 4ad760063b..7d20c0f023 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -279,7 +279,7 @@ pub const Export = struct {
pub const Options = struct {
name: InternPool.NullTerminatedString,
- linkage: std.builtin.GlobalLinkage = .Strong,
+ linkage: std.builtin.GlobalLinkage = .strong,
section: InternPool.OptionalNullTerminatedString = .none,
visibility: std.builtin.SymbolVisibility = .default,
};
@@ -3310,7 +3310,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
assert(!small.has_captures_len);
assert(!small.has_backing_int);
- assert(small.layout == .Auto);
+ assert(small.layout == .auto);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = file.zir.extra[extra_index];
@@ -3327,7 +3327,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
const tracked_inst = try ip.trackZir(gpa, file, .main_struct_inst);
const wip_ty = switch (try ip.getStructType(gpa, .{
- .layout = .Auto,
+ .layout = .auto,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
@@ -5969,7 +5969,7 @@ pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
const s = mod.typeToStruct(ty) orelse return null;
- if (s.layout != .Packed) return null;
+ if (s.layout != .@"packed") return null;
return s;
}
@@ -6185,18 +6185,18 @@ pub fn structFieldAlignment(
field_ty: Type,
layout: std.builtin.Type.ContainerLayout,
) Alignment {
- assert(layout != .Packed);
+ assert(layout != .@"packed");
if (explicit_alignment != .none) return explicit_alignment;
switch (layout) {
- .Packed => unreachable,
- .Auto => {
+ .@"packed" => unreachable,
+ .auto => {
if (mod.getTarget().ofmt == .c) {
return structFieldAlignmentExtern(mod, field_ty);
} else {
return field_ty.abiAlignment(mod);
}
},
- .Extern => return structFieldAlignmentExtern(mod, field_ty),
+ .@"extern" => return structFieldAlignmentExtern(mod, field_ty),
}
}
@@ -6224,7 +6224,7 @@ pub fn structPackedFieldBitOffset(
field_index: u32,
) u16 {
const ip = &mod.intern_pool;
- assert(struct_type.layout == .Packed);
+ assert(struct_type.layout == .@"packed");
assert(struct_type.haveLayout(ip));
var bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |i| {
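
Module.zig's export options now default to .strong linkage. A small sketch of the renamed GlobalLinkage fields (isExported is a hypothetical helper, not the compiler's):

    const std = @import("std");

    // GlobalLinkage fields after the rename:
    // .internal, .strong, .weak, .link_once.
    fn isExported(linkage: std.builtin.GlobalLinkage) bool {
        return switch (linkage) {
            .internal => false,
            .strong, .weak, .link_once => true,
        };
    }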
diff --git a/src/Package/Module.zig b/src/Package/Module.zig
index c6eb1e8c90..d6b89efb41 100644
--- a/src/Package/Module.zig
+++ b/src/Package/Module.zig
@@ -178,7 +178,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
return error.PieRequiresPic;
break :b true;
}
- if (options.global.link_mode == .Dynamic) {
+ if (options.global.link_mode == .dynamic) {
if (options.inherited.pic == false)
return error.DynamicLinkingRequiresPic;
break :b true;
diff --git a/src/Sema.zig b/src/Sema.zig
index 1ac6d28bf0..35adfdb25b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -357,7 +357,7 @@ pub const Block = struct {
want_safety: ?bool = null,
/// What mode to generate float operations in, set by @setFloatMode
- float_mode: std.builtin.FloatMode = .Strict,
+ float_mode: std.builtin.FloatMode = .strict,
c_import_buf: ?*std.ArrayList(u8) = null,
@@ -686,7 +686,7 @@ pub const Block = struct {
const sema = block.sema;
const mod = sema.mod;
return block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
+ .tag = if (block.float_mode == .optimized) .cmp_vector_optimized else .cmp_vector,
.data = .{ .ty_pl = .{
.ty = Air.internedToRef((try mod.vectorType(.{
.len = sema.typeOf(lhs).vectorLen(mod),
@@ -1020,10 +1020,10 @@ fn analyzeBodyInner(
.field_call => try sema.zirCall(block, inst, .field),
.cmp_lt => try sema.zirCmp(block, inst, .lt),
.cmp_lte => try sema.zirCmp(block, inst, .lte),
- .cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .Optimized)),
+ .cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .optimized)),
.cmp_gte => try sema.zirCmp(block, inst, .gte),
.cmp_gt => try sema.zirCmp(block, inst, .gt),
- .cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .Optimized)),
+ .cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .optimized)),
.decl_ref => try sema.zirDeclRef(block, inst),
.decl_val => try sema.zirDeclVal(block, inst),
.load => try sema.zirLoad(block, inst),
@@ -3236,7 +3236,7 @@ fn zirUnionDecl(
.status = .none,
.runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
.tagged
- else if (small.layout != .Auto)
+ else if (small.layout != .auto)
.none
else switch (block.wantSafety()) {
true => .safety,
@@ -6274,7 +6274,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.needed_comptime_reason = "export target must be comptime-known",
});
const options = try sema.resolveExportOptions(block, options_src, extra.options);
- if (options.linkage == .Internal)
+ if (options.linkage == .internal)
return;
if (operand.val.getFunction(mod)) |function| {
const decl_index = function.owner_decl;
@@ -6301,7 +6301,7 @@ pub fn analyzeExport(
const gpa = sema.gpa;
const mod = sema.mod;
- if (options.linkage == .Internal)
+ if (options.linkage == .internal)
return;
try mod.ensureDeclAnalyzed(exported_decl_index);
@@ -6450,8 +6450,8 @@ fn zirFence(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) Co
.needed_comptime_reason = "atomic order of @fence must be comptime-known",
});
- if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.Acquire)) {
- return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{});
+ if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.acquire)) {
+ return sema.fail(block, order_src, "atomic ordering must be acquire or stricter", .{});
}
_ = try block.addInst(.{
@@ -10264,7 +10264,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
const all_in_range = try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
@@ -10281,7 +10281,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
const all_in_range = try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
@@ -10303,7 +10303,7 @@ fn intCast(
const zero_inst = Air.internedToRef(zero_val.toIntern());
const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
const all_in_range = try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
@@ -10380,7 +10380,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) {
+ .Struct, .Union => if (dest_ty.containerLayout(mod) == .auto) {
const container = switch (dest_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
@@ -10443,7 +10443,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
},
- .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) {
+ .Struct, .Union => if (operand_ty.containerLayout(mod) == .auto) {
const container = switch (operand_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
@@ -12530,7 +12530,7 @@ fn analyzeSwitchRuntimeBlock(
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
} else {
for (items) |item| {
- const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, item);
+ const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, item);
if (any_ok != .none) {
any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok);
} else {
@@ -12549,12 +12549,12 @@ fn analyzeSwitchRuntimeBlock(
// operand >= first and operand <= last
const range_first_ok = try case_block.addBinOp(
- if (case_block.float_mode == .Optimized) .cmp_gte_optimized else .cmp_gte,
+ if (case_block.float_mode == .optimized) .cmp_gte_optimized else .cmp_gte,
operand,
item_first,
);
const range_last_ok = try case_block.addBinOp(
- if (case_block.float_mode == .Optimized) .cmp_lte_optimized else .cmp_lte,
+ if (case_block.float_mode == .optimized) .cmp_lte_optimized else .cmp_lte,
operand,
item_last,
);
@@ -13904,7 +13904,7 @@ fn zirShl(
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
@@ -14044,7 +14044,7 @@ fn zirShr(
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
@@ -14811,7 +14811,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, mod)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
- return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
+ return block.addUnOp(if (block.float_mode == .optimized) .neg_optimized else .neg, rhs);
}
const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern());
@@ -15018,8 +15018,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
break :blk Air.Inst.Tag.div_trunc;
} else switch (block.float_mode) {
- .Optimized => Air.Inst.Tag.div_float_optimized,
- .Strict => Air.Inst.Tag.div_float,
+ .optimized => Air.Inst.Tag.div_float_optimized,
+ .strict => Air.Inst.Tag.div_float,
};
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
@@ -15142,8 +15142,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
- .Strict => .reduce,
- .Optimized => .reduce_optimized,
+ .strict => .reduce,
+ .optimized => .reduce_optimized,
},
.data = .{ .reduce = .{
.operand = eql,
@@ -15152,8 +15152,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
});
} else {
const is_in_range = try block.addBinOp(switch (block.float_mode) {
- .Strict => .cmp_eq,
- .Optimized => .cmp_eq_optimized,
+ .strict => .cmp_eq,
+ .optimized => .cmp_eq_optimized,
}, result, floored);
break :ok is_in_range;
}
@@ -15503,7 +15503,7 @@ fn addDivByZeroSafety(
is_int: bool,
) CompileError!void {
// Strict IEEE floats have well-defined division by zero.
- if (!is_int and block.float_mode == .Strict) return;
+ if (!is_int and block.float_mode == .strict) return;
// If rhs was comptime-known to be zero a compile error would have been
// emitted above.
@@ -15535,8 +15535,8 @@ fn addDivByZeroSafety(
fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst.Tag) Air.Inst.Tag {
if (is_int) return normal;
return switch (block.float_mode) {
- .Strict => normal,
- .Optimized => optimized,
+ .strict => normal,
+ .optimized => optimized,
};
}
@@ -16228,7 +16228,7 @@ fn analyzeArithmetic(
return casted_lhs;
}
}
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add;
+ const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .add_optimized else .add;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (is_int) {
@@ -16330,7 +16330,7 @@ fn analyzeArithmetic(
return casted_lhs;
}
}
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub;
+ const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .sub_optimized else .sub;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (is_int) {
@@ -16448,7 +16448,7 @@ fn analyzeArithmetic(
}
}
}
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul;
+ const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .mul_optimized else .mul;
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
if (is_int) {
@@ -16625,7 +16625,7 @@ fn analyzeArithmetic(
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
try block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
@@ -17168,7 +17168,7 @@ fn cmpSelf(
if (resolved_type.zigTypeTag(mod) == .Vector) {
return block.addCmpVector(casted_lhs, casted_rhs, op);
}
- const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
+ const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized);
return block.addBinOp(tag, casted_lhs, casted_rhs);
}
@@ -18131,8 +18131,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
const alignment = switch (layout) {
- .Auto, .Extern => try sema.unionFieldAlignment(union_obj, @intCast(i)),
- .Packed => .none,
+ .auto, .@"extern" => try sema.unionFieldAlignment(union_obj, @intCast(i)),
+ .@"packed" => .none,
};
const field_ty = union_obj.field_types.get(ip)[i];
@@ -18350,7 +18350,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const opt_default_val = if (field_init == .none) null else Value.fromInterned(field_init);
const default_val_ptr = try sema.optRefValue(opt_default_val);
const alignment = switch (struct_type.layout) {
- .Packed => .none,
+ .@"packed" => .none,
else => try sema.structFieldAlignment(
struct_type.fieldAlign(ip, i),
field_ty,
@@ -19906,7 +19906,7 @@ fn zirStructInit(
var field_i: u32 = 0;
var extra_index = extra.end;
- const is_packed = resolved_ty.containerLayout(mod) == .Packed;
+ const is_packed = resolved_ty.containerLayout(mod) == .@"packed";
while (field_i < extra.data.fields_len) : (field_i += 1) {
const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
extra_index = item.end;
@@ -21302,7 +21302,7 @@ fn zirReify(
return sema.fail(block, src, "reified structs must have no decls", .{});
}
- if (layout != .Packed and !backing_integer_val.isNull(mod)) {
+ if (layout != .@"packed" and !backing_integer_val.isNull(mod)) {
return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
}
@@ -21665,7 +21665,7 @@ fn reifyUnion(
.status = .none,
.runtime_tag = if (opt_tag_type_val.optionalValue(mod) != null)
.tagged
- else if (layout != .Auto)
+ else if (layout != .auto)
.none
else switch (block.wantSafety()) {
true => .safety,
@@ -21804,7 +21804,7 @@ fn reifyUnion(
break :msg msg;
});
}
- if (layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
+ if (layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
@@ -21815,7 +21815,7 @@ fn reifyUnion(
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
});
- } else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
+ } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
@@ -21938,9 +21938,9 @@ fn reifyStruct(
errdefer wip_ty.cancel(ip);
if (is_tuple) switch (layout) {
- .Extern => return sema.fail(block, src, "extern tuples are not supported", .{}),
- .Packed => return sema.fail(block, src, "packed tuples are not supported", .{}),
- .Auto => {},
+ .@"extern" => return sema.fail(block, src, "extern tuples are not supported", .{}),
+ .@"packed" => return sema.fail(block, src, "packed tuples are not supported", .{}),
+ .auto => {},
};
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
@@ -21990,11 +21990,11 @@ fn reifyStruct(
const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema);
if (byte_align == 0) {
- if (layout != .Packed) {
+ if (layout != .@"packed") {
struct_type.field_aligns.get(ip)[field_idx] = .none;
}
} else {
- if (layout == .Packed) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
+ if (layout == .@"packed") return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (!math.isPowerOfTwo(byte_align)) return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
struct_type.field_aligns.get(ip)[field_idx] = Alignment.fromNonzeroByteUnits(byte_align);
}
@@ -22004,9 +22004,9 @@ fn reifyStruct(
if (field_is_comptime) {
assert(any_comptime_fields);
switch (layout) {
- .Extern => return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}),
- .Packed => return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}),
- .Auto => struct_type.setFieldComptime(ip, field_idx),
+ .@"extern" => return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}),
+ .@"packed" => return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}),
+ .auto => struct_type.setFieldComptime(ip, field_idx),
}
}
@@ -22047,7 +22047,7 @@ fn reifyStruct(
break :msg msg;
});
}
- if (layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
+ if (layout == .@"extern" and !try sema.validateExternType(field_ty, .struct_field)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
@@ -22058,7 +22058,7 @@ fn reifyStruct(
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
});
- } else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
+ } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
@@ -22072,7 +22072,7 @@ fn reifyStruct(
}
}
- if (layout == .Packed) {
+ if (layout == .@"packed") {
var fields_bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |field_idx| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
@@ -22226,7 +22226,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (dest_scalar_ty.intInfo(mod).bits == 0) {
if (!is_vector) {
if (block.wantSafety()) {
- const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern()));
+ const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern()));
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
return Air.internedToRef((try mod.intValue(dest_ty, 0)).toIntern());
@@ -22236,7 +22236,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
for (0..len) |i| {
const idx_ref = try mod.intRef(Type.usize, i);
const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref);
- const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 0.0)).toIntern()));
+ const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 0.0)).toIntern()));
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
}
@@ -22246,12 +22246,12 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
} }));
}
if (!is_vector) {
- const result = try block.addTyOp(if (block.float_mode == .Optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand);
+ const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand);
if (block.wantSafety()) {
const back = try block.addTyOp(.float_from_int, operand_ty, result);
const diff = try block.addBinOp(.sub, operand, back);
- const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern()));
- const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern()));
+ const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern()));
+ const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern()));
const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
@@ -22262,12 +22262,12 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
for (new_elems, 0..) |*new_elem, i| {
const idx_ref = try mod.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
- const result = try block.addTyOp(if (block.float_mode == .Optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
+ const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
if (block.wantSafety()) {
const back = try block.addTyOp(.float_from_int, operand_scalar_ty, result);
const diff = try block.addBinOp(.sub, old_elem, back);
- const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 1.0)).toIntern()));
- const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, -1.0)).toIntern()));
+ const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 1.0)).toIntern()));
+ const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, -1.0)).toIntern()));
const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
@@ -23311,7 +23311,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
}
switch (ty.containerLayout(mod)) {
- .Packed => {
+ .@"packed" => {
var bit_sum: u64 = 0;
const struct_type = ip.loadStructType(ty.toIntern());
for (0..struct_type.field_types.len) |i| {
@@ -23802,7 +23802,7 @@ fn resolveExportOptions(
return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
}
- if (visibility != .default and linkage == .Internal) {
+ if (visibility != .default and linkage == .internal) {
return sema.fail(block, visibility_src, "symbol '{s}' exported with internal linkage has non-default visibility {s}", .{
name, @tagName(visibility),
});
@@ -23894,17 +23894,17 @@ fn zirCmpxchg(
.needed_comptime_reason = "atomic order of cmpxchg failure must be comptime-known",
});
- if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
- return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{});
+ if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) {
+ return sema.fail(block, success_order_src, "success atomic ordering must be monotonic or stricter", .{});
}
- if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
- return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{});
+ if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) {
+ return sema.fail(block, failure_order_src, "failure atomic ordering must be monotonic or stricter", .{});
}
if (@intFromEnum(failure_order) > @intFromEnum(success_order)) {
return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{});
}
- if (failure_order == .Release or failure_order == .AcqRel) {
- return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
+ if (failure_order == .release or failure_order == .acq_rel) {
+ return sema.fail(block, failure_order_src, "failure atomic ordering must not be release or acq_rel", .{});
}
const result_ty = try mod.optionalType(elem_ty.toIntern());
@@ -24042,7 +24042,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
return block.addInst(.{
- .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
+ .tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = operand,
.operation = operation,
@@ -24346,11 +24346,11 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
});
switch (order) {
- .Release, .AcqRel => {
+ .release, .acq_rel => {
return sema.fail(
block,
order_src,
- "@atomicLoad atomic ordering must not be Release or AcqRel",
+ "@atomicLoad atomic ordering must not be release or acq_rel",
.{},
);
},
@@ -24412,8 +24412,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.needed_comptime_reason = "atomic order of @atomicRmW must be comptime-known",
});
- if (order == .Unordered) {
- return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
+ if (order == .unordered) {
+ return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be unordered", .{});
}
// special case zero bit types
@@ -24482,18 +24482,18 @@ fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
});
const air_tag: Air.Inst.Tag = switch (order) {
- .Acquire, .AcqRel => {
+ .acquire, .acq_rel => {
return sema.fail(
block,
order_src,
- "@atomicStore atomic ordering must not be Acquire or AcqRel",
+ "@atomicStore atomic ordering must not be acquire or acq_rel",
.{},
);
},
- .Unordered => .atomic_store_unordered,
- .Monotonic => .atomic_store_monotonic,
- .Release => .atomic_store_release,
- .SeqCst => .atomic_store_seq_cst,
+ .unordered => .atomic_store_unordered,
+ .monotonic => .atomic_store_monotonic,
+ .release => .atomic_store_release,
+ .seq_cst => .atomic_store_seq_cst,
};
return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
@@ -24710,7 +24710,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
},
};
- if (parent_ty.containerLayout(mod) == .Packed) {
+ if (parent_ty.containerLayout(mod) == .@"packed") {
return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
} else {
ptr_ty_data.flags.alignment = blk: {
@@ -25888,7 +25888,7 @@ fn resolveExternOptions(
) CompileError!struct {
name: InternPool.NullTerminatedString,
library_name: InternPool.OptionalNullTerminatedString = .none,
- linkage: std.builtin.GlobalLinkage = .Strong,
+ linkage: std.builtin.GlobalLinkage = .strong,
is_thread_local: bool = false,
} {
const mod = sema.mod;
@@ -25938,7 +25938,7 @@ fn resolveExternOptions(
return sema.fail(block, name_src, "extern symbol name cannot be empty", .{});
}
- if (linkage != .Weak and linkage != .Strong) {
+ if (linkage != .weak and linkage != .strong) {
return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{});
}
@@ -25984,7 +25984,7 @@ fn zirBuiltinExtern(
else => |e| return e,
};
- if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
+ if (options.linkage == .weak and !ty.ptrAllowsZero(mod)) {
ty = try mod.optionalType(ty.toIntern());
}
const ptr_info = ty.ptrInfo(mod);
@@ -26010,7 +26010,7 @@ fn zirBuiltinExtern(
.is_extern = true,
.is_const = ptr_info.flags.is_const,
.is_threadlocal = options.is_thread_local,
- .is_weak_linkage = options.linkage == .Weak,
+ .is_weak_linkage = options.linkage == .weak,
} }),
),
}, options.name);
@@ -26328,15 +26328,15 @@ fn validateExternType(
return sema.validateExternType(ty.intTagType(mod), position);
},
.Struct, .Union => switch (ty.containerLayout(mod)) {
- .Extern => return true,
- .Packed => {
+ .@"extern" => return true,
+ .@"packed" => {
const bit_size = try ty.bitSizeAdvanced(mod, sema);
switch (bit_size) {
0, 8, 16, 32, 64, 128 => return true,
else => return false,
}
},
- .Auto => return !(try sema.typeHasRuntimeBits(ty)),
+ .auto => return !(try sema.typeHasRuntimeBits(ty)),
},
.Array => {
if (position == .ret_ty or position == .param_ty) return false;
@@ -26456,7 +26456,7 @@ fn validatePackedType(sema: *Sema, ty: Type) !bool {
.Enum,
=> return true,
.Pointer => return !ty.isSlice(mod) and !try sema.typeRequiresComptime(ty),
- .Struct, .Union => return ty.containerLayout(mod) == .Packed,
+ .Struct, .Union => return ty.containerLayout(mod) == .@"packed",
}
}
@@ -27596,7 +27596,7 @@ fn structFieldPtrByIndex(
else
try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child));
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
comptime assert(Type.packed_struct_layout_version == 2);
var running_bits: u16 = 0;
@@ -27641,7 +27641,7 @@ fn structFieldPtrByIndex(
ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
}
}
- } else if (struct_type.layout == .Extern) {
+ } else if (struct_type.layout == .@"extern") {
// For extern structs, field alignment might be bigger than type's
// natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the
// second field is aligned as u32.
@@ -27846,7 +27846,7 @@ fn unionFieldPtr(
.is_const = union_ptr_info.flags.is_const,
.is_volatile = union_ptr_info.flags.is_volatile,
.address_space = union_ptr_info.flags.address_space,
- .alignment = if (union_obj.getLayout(ip) == .Auto) blk: {
+ .alignment = if (union_obj.getLayout(ip) == .auto) blk: {
const union_align = if (union_ptr_info.flags.alignment != .none)
union_ptr_info.flags.alignment
else
@@ -27875,7 +27875,7 @@ fn unionFieldPtr(
if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
switch (union_obj.getLayout(ip)) {
- .Auto => if (!initializing) {
+ .auto => if (!initializing) {
const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
break :ct;
if (union_val.isUndef(mod)) {
@@ -27899,7 +27899,7 @@ fn unionFieldPtr(
return sema.failWithOwnedErrorMsg(block, msg);
}
},
- .Packed, .Extern => {},
+ .@"packed", .@"extern" => {},
}
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
@@ -27911,7 +27911,7 @@ fn unionFieldPtr(
}
try sema.requireRuntimeBlock(block, src, null);
- if (!initializing and union_obj.getLayout(ip) == .Auto and block.wantSafety() and
+ if (!initializing and union_obj.getLayout(ip) == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -27954,7 +27954,7 @@ fn unionFieldVal(
const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
switch (union_obj.getLayout(ip)) {
- .Auto => {
+ .auto => {
if (tag_matches) {
return Air.internedToRef(un.val);
} else {
@@ -27971,7 +27971,7 @@ fn unionFieldVal(
return sema.failWithOwnedErrorMsg(block, msg);
}
},
- .Packed, .Extern => |layout| {
+ .@"packed", .@"extern" => |layout| {
if (tag_matches) {
return Air.internedToRef(un.val);
} else {
@@ -27989,7 +27989,7 @@ fn unionFieldVal(
}
try sema.requireRuntimeBlock(block, src, null);
- if (union_obj.getLayout(ip) == .Auto and block.wantSafety() and
+ if (union_obj.getLayout(ip) == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
@@ -30961,7 +30961,7 @@ fn beginComptimePtrMutation(
const tag_type = base_child_ty.unionTagTypeHypothetical(mod);
const hypothetical_tag = try mod.enumValueFieldIndex(tag_type, field_index);
- if (layout == .Auto or (payload.tag != null and hypothetical_tag.eql(payload.tag.?, tag_type, mod))) {
+ if (layout == .auto or (payload.tag != null and hypothetical_tag.eql(payload.tag.?, tag_type, mod))) {
// We need to set the active field of the union.
payload.tag = hypothetical_tag;
@@ -30988,7 +30988,7 @@ fn beginComptimePtrMutation(
.pointee = .{ .reinterpret = .{
.val_ptr = val_ptr,
.byte_offset = 0,
- .write_packed = layout == .Packed,
+ .write_packed = layout == .@"packed",
} },
.ty = parent.ty,
};
@@ -31395,7 +31395,7 @@ fn beginComptimePtrLoad(
if (container_ty.hasWellDefinedLayout(mod)) {
const struct_obj = mod.typeToStruct(container_ty);
- if (struct_obj != null and struct_obj.?.layout == .Packed) {
+ if (struct_obj != null and struct_obj.?.layout == .@"packed") {
// packed structs are not byte addressable
deref.parent = null;
} else if (deref.parent) |*parent| {
@@ -31551,7 +31551,7 @@ fn bitCastUnionFieldVal(
// Reading a larger value means we need to reinterpret from undefined bytes.
const offset = switch (layout) {
- .Extern => offset: {
+ .@"extern" => offset: {
if (field_size > old_size) @memset(buffer[old_size..], 0xaa);
val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -31561,7 +31561,7 @@ fn bitCastUnionFieldVal(
};
break :offset 0;
},
- .Packed => offset: {
+ .@"packed" => offset: {
if (field_size > old_size) {
const min_size = @max(old_size, 1);
switch (endian) {
@@ -31577,7 +31577,7 @@ fn bitCastUnionFieldVal(
break :offset if (endian == .big) buffer.len - field_size else 0;
},
- .Auto => unreachable,
+ .auto => unreachable,
};
return Value.readFromMemory(field_ty, mod, buffer[offset..], sema.arena) catch |err| switch (err) {
@@ -33506,7 +33506,7 @@ fn cmpNumeric(
};
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
- return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
+ return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized), casted_lhs, casted_rhs);
}
// For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
// For mixed signed and unsigned integers, implicit cast both operands to a signed
@@ -33651,7 +33651,7 @@ fn cmpNumeric(
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
- return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
+ return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized), casted_lhs, casted_rhs);
}
/// Asserts that LHS value is an int or comptime int and not undefined, and
@@ -35608,7 +35608,7 @@ pub fn resolveStructAlignment(
const target = mod.getTarget();
assert(struct_type.flagsPtr(ip).alignment == .none);
- assert(struct_type.layout != .Packed);
+ assert(struct_type.layout != .@"packed");
if (struct_type.flagsPtr(ip).field_types_wip) {
// We'll guess "pointer-aligned", if the struct has an
@@ -35661,7 +35661,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
try sema.resolveTypeFields(ty);
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
try semaBackingIntType(mod, struct_type);
return;
}
@@ -36625,11 +36625,11 @@ fn semaStructFields(
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
if (fields_len == 0) switch (struct_type.layout) {
- .Packed => {
+ .@"packed" => {
try semaBackingIntType(mod, struct_type);
return;
},
- .Auto, .Extern => {
+ .auto, .@"extern" => {
struct_type.size(ip).* = 0;
struct_type.flagsPtr(ip).layout_resolved = true;
return;
@@ -36810,7 +36810,7 @@ fn semaStructFields(
return sema.failWithOwnedErrorMsg(&block_scope, msg);
}
switch (struct_type.layout) {
- .Extern => if (!try sema.validateExternType(field_ty, .struct_field)) {
+ .@"extern" => if (!try sema.validateExternType(field_ty, .struct_field)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
@@ -36826,7 +36826,7 @@ fn semaStructFields(
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
},
- .Packed => if (!try sema.validatePackedType(field_ty)) {
+ .@"packed" => if (!try sema.validatePackedType(field_ty)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
@@ -37350,7 +37350,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded
return sema.failWithOwnedErrorMsg(&block_scope, msg);
}
const layout = union_type.getLayout(ip);
- if (layout == .Extern and
+ if (layout == .@"extern" and
!try sema.validateExternType(field_ty, .union_field))
{
const msg = msg: {
@@ -37367,7 +37367,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
- } else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
+ } else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(union_type.decl, .{
.index = field_i,
@@ -38286,9 +38286,9 @@ fn structFieldAlignment(
return explicit_alignment;
const mod = sema.mod;
switch (layout) {
- .Packed => return .none,
- .Auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
- .Extern => {},
+ .@"packed" => return .none,
+ .auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
+ .@"extern" => {},
}
// extern
const ty_abi_align = try sema.typeAbiAlignment(field_ty);
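
On the user side, the Sema renames above surface in the atomic builtins and @setFloatMode, which now take lowercase enum literals. A hedged usage sketch:

    const std = @import("std");

    var flag: u32 = 0;

    fn publish() void {
        @atomicStore(u32, &flag, 1, .release); // was .Release
    }

    fn consume() u32 {
        return @atomicLoad(u32, &flag, .acquire); // was .Acquire
    }

    fn fastMath(x: f64) f64 {
        @setFloatMode(.optimized); // was .Optimized
        return x * 0.5 + 1.0;
    }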
diff --git a/src/Value.zig b/src/Value.zig
index 3468ae4f10..a9f80635c7 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -676,8 +676,8 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
.Struct => {
const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
switch (struct_type.layout) {
- .Auto => return error.IllDefinedMemoryLayout,
- .Extern => for (0..struct_type.field_types.len) |i| {
+ .auto => return error.IllDefinedMemoryLayout,
+ .@"extern" => for (0..struct_type.field_types.len) |i| {
const off: usize = @intCast(ty.structFieldOffset(i, mod));
const field_val = switch (val.ip_index) {
.none => switch (val.tag()) {
@@ -701,7 +701,7 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
try writeToMemory(field_val, field_ty, mod, buffer[off..]);
},
- .Packed => {
+ .@"packed" => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
@@ -724,8 +724,8 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
bigint.writeTwosComplement(buffer[0..byte_count], endian);
},
.Union => switch (ty.containerLayout(mod)) {
- .Auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already
- .Extern => {
+ .auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already
+ .@"extern" => {
if (val.unionTag(mod)) |union_tag| {
const union_obj = mod.typeToUnion(ty).?;
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
@@ -739,7 +739,7 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
return writeToMemory(val.unionValue(mod), backing_ty, mod, buffer[0..byte_count]);
}
},
- .Packed => {
+ .@"packed" => {
const backing_ty = try ty.unionBackingType(mod);
const byte_count: usize = @intCast(backing_ty.abiSize(mod));
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
@@ -841,7 +841,7 @@ pub fn writeToPackedMemory(
const struct_type = ip.loadStructType(ty.toIntern());
// Sema is supposed to have emitted a compile error already in the case of Auto,
// and Extern is handled in non-packed writeToMemory.
- assert(struct_type.layout == .Packed);
+ assert(struct_type.layout == .@"packed");
var bits: u16 = 0;
for (0..struct_type.field_types.len) |i| {
const field_val = switch (val.ip_index) {
@@ -866,8 +866,8 @@ pub fn writeToPackedMemory(
.Union => {
const union_obj = mod.typeToUnion(ty).?;
switch (union_obj.getLayout(ip)) {
- .Auto, .Extern => unreachable, // Handled in non-packed writeToMemory
- .Packed => {
+ .auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory
+ .@"packed" => {
if (val.unionTag(mod)) |union_tag| {
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
@@ -991,8 +991,8 @@ pub fn readFromMemory(
.Struct => {
const struct_type = mod.typeToStruct(ty).?;
switch (struct_type.layout) {
- .Auto => unreachable, // Sema is supposed to have emitted a compile error already
- .Extern => {
+ .auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .@"extern" => {
const field_types = struct_type.field_types;
const field_vals = try arena.alloc(InternPool.Index, field_types.len);
for (field_vals, 0..) |*field_val, i| {
@@ -1006,7 +1006,7 @@ pub fn readFromMemory(
.storage = .{ .elems = field_vals },
} })));
},
- .Packed => {
+ .@"packed" => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
@@ -1025,8 +1025,8 @@ pub fn readFromMemory(
} })));
},
.Union => switch (ty.containerLayout(mod)) {
- .Auto => return error.IllDefinedMemoryLayout,
- .Extern => {
+ .auto => return error.IllDefinedMemoryLayout,
+ .@"extern" => {
const union_size = ty.abiSize(mod);
const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type });
const val = try (try readFromMemory(array_ty, mod, buffer, arena)).intern(array_ty, mod);
@@ -1036,7 +1036,7 @@ pub fn readFromMemory(
.val = val,
} })));
},
- .Packed => {
+ .@"packed" => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
@@ -1177,8 +1177,8 @@ pub fn readFromPackedMemory(
} })));
},
.Union => switch (ty.containerLayout(mod)) {
- .Auto, .Extern => unreachable, // Handled by non-packed readFromMemory
- .Packed => {
+ .auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
+ .@"packed" => {
const backing_ty = try ty.unionBackingType(mod);
const val = (try readFromPackedMemory(backing_ty, mod, buffer, bit_offset, arena)).toIntern();
return Value.fromInterned((try mod.intern(.{ .un = .{
@@ -4064,7 +4064,7 @@ fn dbHelper(self: *Value, tag_to_payload_map: *map: {
.alignment = 0,
};
break :map @Type(.{ .Struct = .{
- .layout = .Extern,
+ .layout = .@"extern",
.fields = &fields,
.decls = &.{},
.is_tuple = false,
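
The same spelling applies when reifying types with @Type, as in dbHelper above. An illustrative sketch under that assumption (Pair and its fields are invented for the example):

    const std = @import("std");

    const Pair = @Type(.{ .Struct = .{
        .layout = .@"extern", // previously .Extern
        .fields = &[_]std.builtin.Type.StructField{
            .{ .name = "a", .type = u32, .default_value = null, .is_comptime = false, .alignment = 0 },
            .{ .name = "b", .type = u32, .default_value = null, .is_comptime = false, .alignment = 0 },
        },
        .decls = &.{},
        .is_tuple = false,
    } });
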
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 4cd065d61a..59a9fb31aa 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -815,10 +815,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.call_never_tail => try self.airCall(inst, .never_tail),
.call_never_inline => try self.airCall(inst, .never_inline),
- .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
- .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
- .atomic_store_release => try self.airAtomicStore(inst, .Release),
- .atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
+ .atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
+ .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
+ .atomic_store_release => try self.airAtomicStore(inst, .release),
+ .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
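
std.builtin.AtomicOrder follows the same convention: Unordered/Monotonic/Acquire/Release/AcqRel/SeqCst become unordered/monotonic/acquire/release/acq_rel/seq_cst. A small sketch, assuming a free-standing counter that is not part of this diff:

    const std = @import("std");

    var hits = std.atomic.Value(u32).init(0);

    fn record() u32 {
        return hits.fetchAdd(1, .seq_cst); // was .SeqCst
    }

    fn peek() u32 {
        return hits.load(.monotonic); // was .Monotonic
    }
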
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index d85f67aaa9..5bc452a493 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -21,7 +21,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag(mod)) {
.Struct => {
- if (ty.containerLayout(mod) == .Packed) return .byval;
+ if (ty.containerLayout(mod) == .@"packed") return .byval;
const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
@@ -31,7 +31,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
return .integer;
},
.Union => {
- if (ty.containerLayout(mod) == .Packed) return .byval;
+ if (ty.containerLayout(mod) == .@"packed") return .byval;
const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 638a8543fb..43ffd11097 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -801,10 +801,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.call_never_tail => try self.airCall(inst, .never_tail),
.call_never_inline => try self.airCall(inst, .never_inline),
- .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
- .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
- .atomic_store_release => try self.airAtomicStore(inst, .Release),
- .atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
+ .atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
+ .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
+ .atomic_store_release => try self.airAtomicStore(inst, .release),
+ .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index ffb5c7ae3a..a6581c8dd8 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -33,7 +33,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
switch (ty.zigTypeTag(mod)) {
.Struct => {
const bit_size = ty.bitSize(mod);
- if (ty.containerLayout(mod) == .Packed) {
+ if (ty.containerLayout(mod) == .@"packed") {
if (bit_size > 64) return .memory;
return .byval;
}
@@ -56,7 +56,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
.Union => {
const bit_size = ty.bitSize(mod);
const union_obj = mod.typeToUnion(ty).?;
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
if (bit_size > 64) return .memory;
return .byval;
}
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 78a3638e19..9e4870222d 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -634,10 +634,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.call_never_tail => try self.airCall(inst, .never_tail),
.call_never_inline => try self.airCall(inst, .never_inline),
- .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
- .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
- .atomic_store_release => try self.airAtomicStore(inst, .Release),
- .atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
+ .atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
+ .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
+ .atomic_store_release => try self.airAtomicStore(inst, .release),
+ .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index da74734de4..be3ac590a2 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -15,7 +15,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
switch (ty.zigTypeTag(mod)) {
.Struct => {
const bit_size = ty.bitSize(mod);
- if (ty.containerLayout(mod) == .Packed) {
+ if (ty.containerLayout(mod) == .@"packed") {
if (bit_size > max_byval_size) return .memory;
return .byval;
}
@@ -44,7 +44,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
},
.Union => {
const bit_size = ty.bitSize(mod);
- if (ty.containerLayout(mod) == .Packed) {
+ if (ty.containerLayout(mod) == .@"packed") {
if (bit_size > max_byval_size) return .memory;
return .byval;
}
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index e940e37619..a2bcb1cf6a 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1018,7 +1018,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
.unrolled => wasm.Valtype.i32,
},
.Union => switch (ty.containerLayout(mod)) {
- .Packed => {
+ .@"packed" => {
const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory");
return typeToValtype(int_ty, mod);
},
@@ -1737,7 +1737,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
=> return ty.hasRuntimeBitsIgnoreComptime(mod),
.Union => {
if (mod.typeToUnion(ty)) |union_obj| {
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
return ty.abiSize(mod) > 8;
}
}
@@ -3097,7 +3097,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
break :blk parent_ty.structFieldOffset(field_index, mod);
},
.Union => switch (parent_ty.containerLayout(mod)) {
- .Packed => 0,
+ .@"packed" => 0,
else => blk: {
const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) break :blk 0;
@@ -3358,7 +3358,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const struct_type = ip.loadStructType(ty.toIntern());
// non-packed structs are not handled in this function because they
// are by-ref types.
- assert(struct_type.layout == .Packed);
+ assert(struct_type.layout == .@"packed");
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*);
@@ -3890,7 +3890,7 @@ fn structFieldPtr(
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
const offset = switch (struct_ty.containerLayout(mod)) {
- .Packed => switch (struct_ty.zigTypeTag(mod)) {
+ .@"packed" => switch (struct_ty.zigTypeTag(mod)) {
.Struct => offset: {
if (result_ty.ptrInfo(mod).packed_offset.host_size != 0) {
break :offset @as(u32, 0);
@@ -3928,7 +3928,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
const result = switch (struct_ty.containerLayout(mod)) {
- .Packed => switch (struct_ty.zigTypeTag(mod)) {
+ .@"packed" => switch (struct_ty.zigTypeTag(mod)) {
.Struct => result: {
const packed_struct = mod.typeToPackedStruct(struct_ty).?;
const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
@@ -5321,7 +5321,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result_value result;
},
.Struct => switch (result_ty.containerLayout(mod)) {
- .Packed => {
+ .@"packed" => {
if (isByRef(result_ty, mod)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
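
Packed layout is why several backends in this diff lower whole containers as integers: a packed struct is ABI-equivalent to its backing integer. Flags below is an illustrative type, not taken from the compiler:

    const std = @import("std");

    const Flags = packed struct(u8) {
        ready: bool = false,
        mode: u2 = 0,
        _reserved: u5 = 0,
    };

    comptime {
        // The backing integer is exactly the declared u8.
        std.debug.assert(@typeInfo(Flags).Struct.backing_integer.? == u8);
        std.debug.assert(@bitSizeOf(Flags) == 8);
    }
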
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index 9c3fd8260d..7ca0f9f245 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -29,7 +29,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
switch (ty.zigTypeTag(mod)) {
.Struct => {
const struct_type = mod.typeToStruct(ty).?;
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct };
}
@@ -70,7 +70,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct };
}
@@ -113,7 +113,7 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
- if (union_obj.getLayout(ip) != .Packed) {
+ if (union_obj.getLayout(ip) != .@"packed") {
const layout = mod.getUnionLayout(union_obj);
if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagTypeSafety(mod).?, mod);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 397dd3ab5f..7db294a37b 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2111,10 +2111,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.call_never_tail => try self.airCall(inst, .never_tail),
.call_never_inline => try self.airCall(inst, .never_inline),
- .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
- .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
- .atomic_store_release => try self.airAtomicStore(inst, .Release),
- .atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
+ .atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
+ .atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
+ .atomic_store_release => try self.airAtomicStore(inst, .release),
+ .atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
@@ -7962,8 +7962,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const src_mcv = try self.resolveInst(operand);
const field_off: u32 = switch (container_ty.containerLayout(mod)) {
- .Auto, .Extern => @intCast(container_ty.structFieldOffset(index, mod) * 8),
- .Packed => if (mod.typeToStruct(container_ty)) |struct_type|
+ .auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, mod) * 8),
+ .@"packed" => if (mod.typeToStruct(container_ty)) |struct_type|
mod.structPackedFieldBitOffset(struct_type, index)
else
0,
@@ -11977,9 +11977,9 @@ fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
fn airFence(self: *Self, inst: Air.Inst.Index) !void {
const order = self.air.instructions.items(.data)[@intFromEnum(inst)].fence;
switch (order) {
- .Unordered, .Monotonic => unreachable,
- .Acquire, .Release, .AcqRel => {},
- .SeqCst => try self.asmOpOnly(.{ ._, .mfence }),
+ .unordered, .monotonic => unreachable,
+ .acquire, .release, .acq_rel => {},
+ .seq_cst => try self.asmOpOnly(.{ ._, .mfence }),
}
self.finishAirBookkeeping();
}
@@ -15747,9 +15747,9 @@ fn atomicOp(
.Xor => .xor,
else => unreachable,
} else switch (order) {
- .Unordered, .Monotonic, .Release, .AcqRel => .mov,
- .Acquire => unreachable,
- .SeqCst => .xchg,
+ .unordered, .monotonic, .release, .acq_rel => .mov,
+ .acquire => unreachable,
+ .seq_cst => .xchg,
};
const dst_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
@@ -17979,7 +17979,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
switch (result_ty.zigTypeTag(mod)) {
.Struct => {
const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, mod));
- if (result_ty.containerLayout(mod) == .Packed) {
+ if (result_ty.containerLayout(mod) == .@"packed") {
const struct_type = mod.typeToStruct(result_ty).?;
try self.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } },
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 0975104db3..ae54904aaf 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -110,7 +110,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const is_obj_or_static_lib = switch (emit.lower.output_mode) {
.Exe => false,
.Obj => true,
- .Lib => emit.lower.link_mode == .Static,
+ .Lib => emit.lower.link_mode == .static,
};
const atom = elf_file.symbol(data.atom_index).atom(elf_file).?;
const sym_index = elf_file.zigObjectPtr().?.symbol(data.sym_index);
@@ -158,7 +158,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const is_obj_or_static_lib = switch (emit.lower.output_mode) {
.Exe => false,
.Obj => true,
- .Lib => emit.lower.link_mode == .Static,
+ .Lib => emit.lower.link_mode == .static,
};
const atom = macho_file.getSymbol(data.atom_index).getAtom(macho_file).?;
const sym_index = macho_file.getZigObject().?.symbols.items[data.sym_index];
diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig
index 13b97b551a..578fc022a0 100644
--- a/src/arch/x86_64/Lower.zig
+++ b/src/arch/x86_64/Lower.zig
@@ -329,7 +329,7 @@ fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand)
const is_obj_or_static_lib = switch (lower.output_mode) {
.Exe => false,
.Obj => true,
- .Lib => lower.link_mode == .Static,
+ .Lib => lower.link_mode == .static,
};
const emit_prefix = prefix;
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 601fc1933e..f54a98e08f 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -42,7 +42,7 @@ pub fn classifyWindows(ty: Type, mod: *Module) Class {
1, 2, 4, 8 => return .integer,
else => switch (ty.zigTypeTag(mod)) {
.Int => return .win_i128,
- .Struct, .Union => if (ty.containerLayout(mod) == .Packed) {
+ .Struct, .Union => if (ty.containerLayout(mod) == .@"packed") {
return .win_i128;
} else {
return .memory;
@@ -238,7 +238,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
// separately.".
const struct_type = mod.typeToStruct(ty).?;
const ty_size = ty.abiSize(mod);
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
@@ -356,7 +356,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
// separately.".
const union_obj = mod.typeToUnion(ty).?;
const ty_size = mod.unionAbiSize(union_obj);
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
diff --git a/src/codegen.zig b/src/codegen.zig
index 2412b50cc1..c18bdca433 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -513,7 +513,7 @@ pub fn generateSymbol(
.struct_type => {
const struct_type = ip.loadStructType(typed_value.ty.toIntern());
switch (struct_type.layout) {
- .Packed => {
+ .@"packed" => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
return error.Overflow;
const current_pos = code.items.len;
@@ -550,7 +550,7 @@ pub fn generateSymbol(
bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
}
},
- .Auto, .Extern => {
+ .auto, .@"extern" => {
const struct_begin = code.items.len;
const field_types = struct_type.field_types.get(ip);
const offsets = struct_type.offsets.get(ip);
@@ -736,11 +736,11 @@ fn lowerParentPtr(
.anon_struct_type,
.union_type,
=> switch (Type.fromInterned(base_ty).containerLayout(mod)) {
- .Auto, .Extern => @intCast(Type.fromInterned(base_ty).structFieldOffset(
+ .auto, .@"extern" => @intCast(Type.fromInterned(base_ty).structFieldOffset(
@intCast(field.index),
mod,
)),
- .Packed => if (mod.typeToStruct(Type.fromInterned(base_ty))) |struct_obj|
+ .@"packed" => if (mod.typeToStruct(Type.fromInterned(base_ty))) |struct_obj|
if (Type.fromInterned(ptr.ty).ptrInfo(mod).packed_offset.host_size == 0)
@divExact(Type.fromInterned(base_ptr_ty).ptrInfo(mod)
.packed_offset.bit_offset + mod.structPackedFieldBitOffset(
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index abbf7501fc..8d630480e2 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -890,7 +890,7 @@ pub const DeclGen = struct {
return writer.writeAll(" }");
},
.Struct => switch (ty.containerLayout(mod)) {
- .Auto, .Extern => {
+ .auto, .@"extern" => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
@@ -912,7 +912,7 @@ pub const DeclGen = struct {
return writer.writeByte('}');
},
- .Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
+ .@"packed" => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
},
.Union => {
if (!location.isInitializer()) {
@@ -1379,7 +1379,7 @@ pub const DeclGen = struct {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (struct_type.layout) {
- .Auto, .Extern => {
+ .auto, .@"extern" => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
@@ -1408,7 +1408,7 @@ pub const DeclGen = struct {
}
try writer.writeByte('}');
},
- .Packed => {
+ .@"packed" => {
const int_info = ty.intInfo(mod);
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
@@ -1517,7 +1517,7 @@ pub const DeclGen = struct {
if (un.tag == .none) {
const backing_ty = try ty.unionBackingType(mod);
switch (union_obj.getLayout(ip)) {
- .Packed => {
+ .@"packed" => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, backing_ty);
@@ -1525,7 +1525,7 @@ pub const DeclGen = struct {
}
try dg.renderValue(writer, backing_ty, Value.fromInterned(un.val), initializer_type);
},
- .Extern => {
+ .@"extern" => {
if (location == .StaticInitializer) {
return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
}
@@ -1551,7 +1551,7 @@ pub const DeclGen = struct {
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
if (field_ty.hasRuntimeBits(mod)) {
if (field_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
@@ -1999,7 +1999,7 @@ pub const DeclGen = struct {
try fwd.writeAll(if (is_global) "zig_extern " else "static ");
const maybe_exports = dg.module.decl_exports.get(decl_index);
const export_weak_linkage = if (maybe_exports) |exports|
- exports.items[0].opts.linkage == .Weak
+ exports.items[0].opts.linkage == .weak
else
false;
if (variable.is_weak_linkage or export_weak_linkage) try fwd.writeAll("zig_weak_linkage ");
@@ -2689,7 +2689,7 @@ fn genExports(o: *Object) !void {
const is_variable_const = switch (ip.indexToKey(tv.val.toIntern())) {
.func => return for (exports.items[1..], 1..) |@"export", i| {
try fwd.writeAll("zig_extern ");
- if (@"export".opts.linkage == .Weak) try fwd.writeAll("zig_weak_linkage_fn ");
+ if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
try o.dg.renderFunctionSignature(
fwd,
decl_index,
@@ -2707,7 +2707,7 @@ fn genExports(o: *Object) !void {
};
for (exports.items[1..]) |@"export"| {
try fwd.writeAll("zig_extern ");
- if (@"export".opts.linkage == .Weak) try fwd.writeAll("zig_weak_linkage ");
+ if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage ");
const export_name = ip.stringToSlice(@"export".opts.name);
try o.dg.renderTypeAndName(
fwd,
@@ -2842,7 +2842,7 @@ pub fn genFunc(f: *Function) !void {
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
if (mod.decl_exports.get(decl_index)) |exports|
- if (exports.items[0].opts.linkage == .Weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn ");
+ if (exports.items[0].opts.linkage == .weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn ");
try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 });
try fwd_decl_writer.writeAll(";\n");
try genExports(o);
@@ -3278,10 +3278,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.int_from_ptr => try airIntFromPtr(f, inst),
- .atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.Unordered)),
- .atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.Monotonic)),
- .atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.Release)),
- .atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.SeqCst)),
+ .atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.unordered)),
+ .atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.monotonic)),
+ .atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.release)),
+ .atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.seq_cst)),
.struct_field_ptr_index_0 => try airStructFieldPtrIndex(f, inst, 0),
.struct_field_ptr_index_1 => try airStructFieldPtrIndex(f, inst, 1),
@@ -5497,7 +5497,7 @@ fn fieldLocation(
.Union => {
const union_obj = mod.typeToUnion(container_ty).?;
return switch (union_obj.getLayout(ip)) {
- .Auto, .Extern => {
+ .auto, .@"extern" => {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
return if (container_ty.unionTagTypeSafety(mod) != null and
@@ -5511,7 +5511,7 @@ fn fieldLocation(
else
.{ .identifier = ip.stringToSlice(field_name) } };
},
- .Packed => .begin,
+ .@"packed" => .begin,
};
},
.Pointer => switch (container_ty.ptrSize(mod)) {
@@ -5671,11 +5671,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.struct_type => switch (struct_ty.containerLayout(mod)) {
- .Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
+ .auto, .@"extern" => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
else
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
- .Packed => {
+ .@"packed" => {
const struct_type = mod.typeToStruct(struct_ty).?;
const int_info = struct_ty.intInfo(mod);
@@ -5740,7 +5740,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
.union_type => field_name: {
const union_obj = ip.loadUnionType(struct_ty.toIntern());
- if (union_obj.flagsPtr(ip).layout == .Packed) {
+ if (union_obj.flagsPtr(ip).layout == .@"packed") {
const operand_lval = if (struct_byval == .constant) blk: {
const operand_local = try f.allocLocal(inst, struct_ty);
try f.writeCValue(writer, operand_local, .Other);
@@ -7081,7 +7081,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
},
.Struct => switch (inst_ty.containerLayout(mod)) {
- .Auto, .Extern => for (resolved_elements, 0..) |element, field_index| {
+ .auto, .@"extern" => for (resolved_elements, 0..) |element, field_index| {
if (inst_ty.structFieldIsComptime(field_index, mod)) continue;
const field_ty = inst_ty.structFieldType(field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -7095,7 +7095,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, element, .Other);
try a.end(f, writer);
},
- .Packed => {
+ .@"packed" => {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
const int_info = inst_ty.intInfo(mod);
@@ -7181,7 +7181,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const local = try f.allocLocal(inst, union_ty);
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, payload, .Initializer);
@@ -7482,11 +7482,11 @@ fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue {
fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
return switch (order) {
// Note: unordered is actually even less atomic than relaxed
- .Unordered, .Monotonic => "zig_memory_order_relaxed",
- .Acquire => "zig_memory_order_acquire",
- .Release => "zig_memory_order_release",
- .AcqRel => "zig_memory_order_acq_rel",
- .SeqCst => "zig_memory_order_seq_cst",
+ .unordered, .monotonic => "zig_memory_order_relaxed",
+ .acquire => "zig_memory_order_acquire",
+ .release => "zig_memory_order_release",
+ .acq_rel => "zig_memory_order_acq_rel",
+ .seq_cst => "zig_memory_order_seq_cst",
};
}
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index b85daddbda..66f92c930d 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -1495,7 +1495,7 @@ pub const CType = extern union {
}
},
- .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
+ .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .@"packed") {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
try self.initType(Type.fromInterned(packed_struct.backingIntType(ip).*), kind, lookup);
} else {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 8f69dbc7e9..2a5a187e0d 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1278,7 +1278,7 @@ pub const Object = struct {
const reloc_mode: llvm.RelocMode = if (pic)
.PIC
- else if (self.module.comp.config.link_mode == .Dynamic)
+ else if (self.module.comp.config.link_mode == .dynamic)
llvm.RelocMode.DynamicNoPIC
else
.Static;
@@ -1873,10 +1873,10 @@ pub const Object = struct {
if (comp.config.dll_export_fns)
global_index.setDllStorageClass(.dllexport, &o.builder);
global_index.setLinkage(switch (exports[0].opts.linkage) {
- .Internal => unreachable,
- .Strong => .external,
- .Weak => .weak_odr,
- .LinkOnce => .linkonce_odr,
+ .internal => unreachable,
+ .strong => .external,
+ .weak => .weak_odr,
+ .link_once => .linkonce_odr,
}, &o.builder);
global_index.setVisibility(switch (exports[0].opts.visibility) {
.default => .default,
@@ -3327,7 +3327,7 @@ pub const Object = struct {
const struct_type = ip.loadStructType(t.toIntern());
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
@@ -3477,7 +3477,7 @@ pub const Object = struct {
const union_obj = ip.loadUnionType(t.toIntern());
const layout = mod.getUnionLayout(union_obj);
- if (union_obj.flagsPtr(ip).layout == .Packed) {
+ if (union_obj.flagsPtr(ip).layout == .@"packed") {
const int_ty = try o.builder.intType(@intCast(t.bitSize(mod)));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
return int_ty;
@@ -4038,7 +4038,7 @@ pub const Object = struct {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const struct_ty = try o.lowerType(ty);
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int = try o.builder.intConst(struct_ty, 0);
var running_bits: u16 = 0;
@@ -4154,7 +4154,7 @@ pub const Object = struct {
const payload = if (un.tag != .none) p: {
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- if (container_layout == .Packed) {
+ if (container_layout == .@"packed") {
if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0);
const small_int_val = try o.builder.castConst(
if (field_ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
@@ -4190,7 +4190,7 @@ pub const Object = struct {
} else p: {
assert(layout.tag_size == 0);
const union_val = try o.lowerValue(un.val);
- if (container_layout == .Packed) {
+ if (container_layout == .@"packed") {
const bitcast_val = try o.builder.castConst(
.bitcast,
union_val,
@@ -4324,7 +4324,7 @@ pub const Object = struct {
const field_index: u32 = @intCast(field_ptr.index);
switch (parent_ty.zigTypeTag(mod)) {
.Union => {
- if (parent_ty.containerLayout(mod) == .Packed) {
+ if (parent_ty.containerLayout(mod) == .@"packed") {
return parent_ptr;
}
@@ -6531,7 +6531,7 @@ pub const FuncGen = struct {
assert(!isByRef(field_ty, mod));
switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout(mod)) {
- .Packed => {
+ .@"packed" => {
const struct_type = mod.typeToStruct(struct_ty).?;
const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index);
const containing_int = struct_llvm_val;
@@ -6558,7 +6558,7 @@ pub const FuncGen = struct {
},
},
.Union => {
- assert(struct_ty.containerLayout(mod) == .Packed);
+ assert(struct_ty.containerLayout(mod) == .@"packed");
const containing_int = struct_llvm_val;
const elem_llvm_ty = try o.lowerType(field_ty);
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
@@ -6581,7 +6581,7 @@ pub const FuncGen = struct {
switch (struct_ty.zigTypeTag(mod)) {
.Struct => {
const layout = struct_ty.containerLayout(mod);
- assert(layout != .Packed);
+ assert(layout != .@"packed");
const struct_llvm_ty = try o.lowerType(struct_ty);
const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
const field_ptr =
@@ -9995,7 +9995,7 @@ pub const FuncGen = struct {
return running_int;
}
- assert(result_ty.containerLayout(mod) != .Packed);
+ assert(result_ty.containerLayout(mod) != .@"packed");
if (isByRef(result_ty, mod)) {
// TODO in debug builds init to undef so that the padding will be 0xaa
@@ -10080,7 +10080,7 @@ pub const FuncGen = struct {
const layout = union_ty.unionGetLayout(mod);
const union_obj = mod.typeToUnion(union_ty).?;
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
const big_bits = union_ty.bitSize(mod);
const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
@@ -10420,7 +10420,7 @@ pub const FuncGen = struct {
const struct_ty = struct_ptr_ty.childType(mod);
switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout(mod)) {
- .Packed => {
+ .@"packed" => {
const result_ty = self.typeOfIndex(inst);
const result_ty_info = result_ty.ptrInfo(mod);
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
@@ -10462,7 +10462,7 @@ pub const FuncGen = struct {
},
.Union => {
const layout = struct_ty.unionGetLayout(mod);
- if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
+ if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr;
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const union_llvm_ty = try o.lowerType(struct_ty);
return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, "");
@@ -10801,12 +10801,12 @@ pub const FuncGen = struct {
fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) Builder.AtomicOrdering {
return switch (atomic_order) {
- .Unordered => .unordered,
- .Monotonic => .monotonic,
- .Acquire => .acquire,
- .Release => .release,
- .AcqRel => .acq_rel,
- .SeqCst => .seq_cst,
+ .unordered => .unordered,
+ .monotonic => .monotonic,
+ .acquire => .acquire,
+ .release => .release,
+ .acq_rel => .acq_rel,
+ .seq_cst => .seq_cst,
};
}
@@ -11572,7 +11572,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
};
// Packed structs are represented to LLVM as integers.
- if (struct_type.layout == .Packed) return false;
+ if (struct_type.layout == .@"packed") return false;
const field_types = struct_type.field_types.get(ip);
var it = struct_type.iterateRuntimeOrder(ip);
@@ -11586,7 +11586,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
return false;
},
.Union => switch (ty.containerLayout(mod)) {
- .Packed => return false,
+ .@"packed" => return false,
else => return ty.hasRuntimeBits(mod),
},
.ErrorUnion => {
@@ -11624,8 +11624,8 @@ fn isScalar(mod: *Module, ty: Type) bool {
.Vector,
=> true,
- .Struct => ty.containerLayout(mod) == .Packed,
- .Union => ty.containerLayout(mod) == .Packed,
+ .Struct => ty.containerLayout(mod) == .@"packed",
+ .Union => ty.containerLayout(mod) == .@"packed",
else => false,
};
}
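
std.builtin.GlobalLinkage fields are likewise lowercase (internal/strong/weak/link_once). A sketch in the style of the setLinkage mapping above — LlvmLinkage here is a stand-in enum for the example, not the actual Builder type:

    const std = @import("std");

    const LlvmLinkage = enum { external, weak_odr, linkonce_odr };

    fn mapLinkage(linkage: std.builtin.GlobalLinkage) LlvmLinkage {
        return switch (linkage) {
            .internal => unreachable, // internal symbols are not exported
            .strong => .external,
            .weak => .weak_odr,
            .link_once => .linkonce_odr,
        };
    }
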
diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig
index 859d02ce2e..c885261a24 100644
--- a/src/codegen/llvm/Builder.zig
+++ b/src/codegen/llvm/Builder.zig
@@ -8398,7 +8398,7 @@ pub const Metadata = enum(u32) {
fmt_str = fmt_str ++ ")\n";
var fmt_args: @Type(.{ .Struct = .{
- .layout = .Auto,
+ .layout = .auto,
.fields = &fields,
.decls = &.{},
.is_tuple = false,
diff --git a/src/codegen/llvm/bitcode_writer.zig b/src/codegen/llvm/bitcode_writer.zig
index 414cdea682..0b821a32e7 100644
--- a/src/codegen/llvm/bitcode_writer.zig
+++ b/src/codegen/llvm/bitcode_writer.zig
@@ -415,8 +415,8 @@ fn BufType(comptime T: type, comptime min_len: usize) type {
.Enum => |info| info.tag_type,
.Bool => u1,
.Struct => |info| switch (info.layout) {
- .Auto, .Extern => @compileError("Unsupported type: " ++ @typeName(T)),
- .Packed => std.meta.Int(.unsigned, @bitSizeOf(T)),
+ .auto, .@"extern" => @compileError("Unsupported type: " ++ @typeName(T)),
+ .@"packed" => std.meta.Int(.unsigned, @bitSizeOf(T)),
},
else => @compileError("Unsupported type: " ++ @typeName(T)),
})));
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e95ffee5c1..0b6951ab4f 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -979,7 +979,7 @@ const DeclGen = struct {
},
.struct_type => {
const struct_type = mod.typeToStruct(ty).?;
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
return self.todo("packed struct constants", .{});
}
@@ -1275,7 +1275,7 @@ const DeclGen = struct {
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
- if (union_obj.getLayout(ip) == .Packed) {
+ if (union_obj.getLayout(ip) == .@"packed") {
return self.todo("packed union types", .{});
}
@@ -1532,7 +1532,7 @@ const DeclGen = struct {
else => unreachable,
};
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
}
@@ -3904,7 +3904,7 @@ const DeclGen = struct {
const union_ty = mod.typeToUnion(ty).?;
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
- if (union_ty.getLayout(ip) == .Packed) {
+ if (union_ty.getLayout(ip) == .@"packed") {
unreachable; // TODO
}
@@ -3984,11 +3984,11 @@ const DeclGen = struct {
switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout(mod)) {
- .Packed => unreachable, // TODO
+ .@"packed" => unreachable, // TODO
else => return try self.extractField(field_ty, object_id, field_index),
},
.Union => switch (object_ty.containerLayout(mod)) {
- .Packed => unreachable, // TODO
+ .@"packed" => unreachable, // TODO
else => {
// Store, ptr-elem-ptr, pointer-cast, load
const layout = self.unionLayout(object_ty);
@@ -4058,13 +4058,13 @@ const DeclGen = struct {
const object_ty = object_ptr_ty.childType(mod);
switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout(mod)) {
- .Packed => unreachable, // TODO
+ .@"packed" => unreachable, // TODO
else => {
return try self.accessChain(result_ty_ref, object_ptr, &.{field_index});
},
},
.Union => switch (object_ty.containerLayout(mod)) {
- .Packed => unreachable, // TODO
+ .@"packed" => unreachable, // TODO
else => {
const layout = self.unionLayout(object_ty);
if (!layout.has_payload) {
diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig
index 2ce11111a7..002dad2510 100644
--- a/src/codegen/spirv/Section.zig
+++ b/src/codegen/spirv/Section.zig
@@ -154,7 +154,7 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand)
}
},
.Struct => |info| {
- if (info.layout == .Packed) {
+ if (info.layout == .@"packed") {
section.writeWord(@as(Word, @bitCast(operand)));
} else {
section.writeExtendedMask(Operand, operand);
@@ -288,7 +288,7 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
}
break :blk total;
},
- .Struct => |info| if (info.layout == .Packed) 1 else extendedMaskSize(Operand, operand),
+ .Struct => |info| if (info.layout == .@"packed") 1 else extendedMaskSize(Operand, operand),
.Union => extendedUnionSize(Operand, operand),
else => unreachable,
},
diff --git a/src/crash_report.zig b/src/crash_report.zig
index d11897ec14..d05a950a46 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -376,7 +376,7 @@ const PanicSwitch = struct {
};
state.* = new_state;
- _ = panicking.fetchAdd(1, .SeqCst);
+ _ = panicking.fetchAdd(1, .seq_cst);
state.recover_stage = .release_ref_count;
@@ -458,7 +458,7 @@ const PanicSwitch = struct {
noinline fn releaseRefCount(state: *volatile PanicState) noreturn {
state.recover_stage = .abort;
- if (panicking.fetchSub(1, .SeqCst) != 1) {
+ if (panicking.fetchSub(1, .seq_cst) != 1) {
// Another thread is panicking, wait for the last one to finish
// and call abort()
diff --git a/src/glibc.zig b/src/glibc.zig
index 5a2caa6809..9765e0ad78 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -1084,7 +1084,7 @@ fn buildSharedLib(
const strip = comp.compilerRtStrip();
const config = try Compilation.Config.resolve(.{
.output_mode = .Lib,
- .link_mode = .Dynamic,
+ .link_mode = .dynamic,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
.have_zcu = false,
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 980836b682..dc1930be4d 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -115,7 +115,7 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) !void {
const root_name = "c++";
const output_mode = .Lib;
- const link_mode = .Static;
+ const link_mode = .static;
const target = comp.root_mod.resolved_target.result;
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
@@ -327,7 +327,7 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) !void {
const root_name = "c++abi";
const output_mode = .Lib;
- const link_mode = .Static;
+ const link_mode = .static;
const target = comp.root_mod.resolved_target.result;
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 95b44dad76..8f8ecce67d 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -27,7 +27,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!v
const root_name = "tsan";
const output_mode = .Lib;
- const link_mode = .Static;
+ const link_mode = .static;
const target = comp.getTarget();
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
diff --git a/src/libunwind.zig b/src/libunwind.zig
index 8de6f5a99a..2fae6a65f9 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -62,7 +62,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) !void {
});
const root_name = "unwind";
- const link_mode = .Static;
+ const link_mode = .static;
const target = comp.root_mod.resolved_target.result;
const basename = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
diff --git a/src/link.zig b/src/link.zig
index be64325f30..631768a4de 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -287,8 +287,8 @@ pub const File = struct {
switch (output_mode) {
.Obj => return,
.Lib => switch (link_mode) {
- .Static => return,
- .Dynamic => {},
+ .static => return,
+ .dynamic => {},
},
.Exe => {},
}
@@ -582,7 +582,7 @@ pub const File = struct {
const use_lld = build_options.have_llvm and comp.config.use_lld;
const output_mode = comp.config.output_mode;
const link_mode = comp.config.link_mode;
- if (use_lld and output_mode == .Lib and link_mode == .Static) {
+ if (use_lld and output_mode == .Lib and link_mode == .static) {
return base.linkAsArchive(arena, prog_node);
}
switch (base.tag) {
@@ -957,8 +957,8 @@ pub const File = struct {
const executable_mode = if (builtin.target.os.tag == .windows) 0 else 0o777;
switch (effectiveOutputMode(use_lld, output_mode)) {
.Lib => return switch (link_mode) {
- .Dynamic => executable_mode,
- .Static => fs.File.default_mode,
+ .dynamic => executable_mode,
+ .static => fs.File.default_mode,
},
.Exe => return executable_mode,
.Obj => return fs.File.default_mode,
@@ -966,7 +966,7 @@ pub const File = struct {
}
pub fn isStatic(self: File) bool {
- return self.comp.config.link_mode == .Static;
+ return self.comp.config.link_mode == .static;
}
pub fn isObject(self: File) bool {
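
std.builtin.LinkMode is now static/dynamic rather than Static/Dynamic. A minimal sketch of switching on the renamed fields (artifactSuffix is illustrative and assumes ELF-style library names):

    const std = @import("std");

    fn artifactSuffix(mode: std.builtin.LinkMode) []const u8 {
        return switch (mode) {
            .static => ".a",
            .dynamic => ".so",
        };
    }
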
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 5bf83b52ea..206cf7348c 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1599,11 +1599,11 @@ pub fn updateExports(
}
}
- if (exp.opts.linkage == .LinkOnce) {
+ if (exp.opts.linkage == .link_once) {
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
gpa,
exp.getSrcLoc(mod),
- "Unimplemented: GlobalLinkage.LinkOnce",
+ "Unimplemented: GlobalLinkage.link_once",
.{},
));
continue;
@@ -1633,11 +1633,11 @@ pub fn updateExports(
sym.type = atom.getSymbol(self).type;
switch (exp.opts.linkage) {
- .Strong => {
+ .strong => {
sym.storage_class = .EXTERNAL;
},
- .Internal => @panic("TODO Internal"),
- .Weak => @panic("TODO WeakExternal"),
+ .internal => @panic("TODO Internal"),
+ .weak => @panic("TODO WeakExternal"),
else => unreachable,
}
@@ -2275,7 +2275,7 @@ fn writeHeader(self: *Coff) !void {
.p32 => flags.@"32BIT_MACHINE" = 1,
.p64 => flags.LARGE_ADDRESS_AWARE = 1,
}
- if (self.base.comp.config.output_mode == .Lib and self.base.comp.config.link_mode == .Dynamic) {
+ if (self.base.comp.config.output_mode == .Lib and self.base.comp.config.link_mode == .dynamic) {
flags.DLL = 1;
}
diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig
index 475090c31d..405c107628 100644
--- a/src/link/Coff/lld.zig
+++ b/src/link/Coff/lld.zig
@@ -45,7 +45,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node)
defer sub_prog_node.end();
const is_lib = comp.config.output_mode == .Lib;
- const is_dyn_lib = comp.config.link_mode == .Dynamic and is_lib;
+ const is_dyn_lib = comp.config.link_mode == .dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or comp.config.output_mode == .Exe;
const link_in_crt = comp.config.link_libc and is_exe_or_dyn_lib;
const target = comp.root_mod.resolved_target.result;
@@ -411,16 +411,16 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node)
try argv.append(try comp.get_libc_crt_file(arena, "mingwex.lib"));
} else {
const lib_str = switch (comp.config.link_mode) {
- .Dynamic => "",
- .Static => "lib",
+ .dynamic => "",
+ .static => "lib",
};
const d_str = switch (optimize_mode) {
.Debug => "d",
else => "",
};
switch (comp.config.link_mode) {
- .Static => try argv.append(try allocPrint(arena, "libcmt{s}.lib", .{d_str})),
- .Dynamic => try argv.append(try allocPrint(arena, "msvcrt{s}.lib", .{d_str})),
+ .static => try argv.append(try allocPrint(arena, "libcmt{s}.lib", .{d_str})),
+ .dynamic => try argv.append(try allocPrint(arena, "msvcrt{s}.lib", .{d_str})),
}
try argv.append(try allocPrint(arena, "{s}vcruntime{s}.lib", .{ lib_str, d_str }));
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index ae14d16be2..9b6ff5d243 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -317,7 +317,7 @@ pub const DeclState = struct {
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
log.debug("TODO implement .debug_info for packed structs", .{});
break :blk;
}
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 85ef62b4bc..d26f49ca09 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -262,7 +262,7 @@ pub fn createEmpty(
.sparc64 => 0x2000,
else => 0x1000,
};
- const is_dyn_lib = output_mode == .Lib and link_mode == .Dynamic;
+ const is_dyn_lib = output_mode == .Lib and link_mode == .dynamic;
const default_sym_version: elf.Elf64_Versym = if (is_dyn_lib or comp.config.rdynamic)
elf.VER_NDX_GLOBAL
else
@@ -349,7 +349,7 @@ pub fn createEmpty(
}
const is_obj = output_mode == .Obj;
- const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .Static);
+ const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
// What path should this ELF linker code output to?
// If using LLD to link, this code should produce an object file so that it
@@ -1180,10 +1180,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
success: {
if (!self.base.isStatic()) {
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .Dynamic))
+ if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .dynamic))
break :success;
}
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .Static))
+ if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .static))
break :success;
try self.reportMissingLibraryError(
@@ -1211,8 +1211,8 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
});
} else if (target.isMusl()) {
const path = try comp.get_libc_crt_file(arena, switch (link_mode) {
- .Static => "libc.a",
- .Dynamic => "libc.so",
+ .static => "libc.a",
+ .dynamic => "libc.so",
});
try system_libs.append(.{ .path = path });
} else {
@@ -1628,7 +1628,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
// libc dep
if (comp.config.link_libc) {
if (self.base.comp.libc_installation != null) {
- const needs_grouping = link_mode == .Static;
+ const needs_grouping = link_mode == .static;
if (needs_grouping) try argv.append("--start-group");
try argv.appendSlice(target_util.libcFullLinkFlags(target));
if (needs_grouping) try argv.append("--end-group");
@@ -1642,8 +1642,8 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
} else if (target.isMusl()) {
try argv.append(try comp.get_libc_crt_file(arena, switch (link_mode) {
- .Static => "libc.a",
- .Dynamic => "libc.so",
+ .static => "libc.a",
+ .dynamic => "libc.so",
}));
}
}
@@ -1797,10 +1797,10 @@ fn parseLdScript(self: *Elf, lib: SystemLib) ParseError!void {
// Maybe we should hoist search-strategy all the way here?
for (self.lib_dirs) |lib_dir| {
if (!self.base.isStatic()) {
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .Dynamic))
+ if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .dynamic))
break :success;
}
- if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .Static))
+ if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .static))
break :success;
}
} else {
@@ -1858,8 +1858,8 @@ fn accessLibPath(
test_path.clearRetainingCapacity();
const prefix = if (link_mode != null) "lib" else "";
const suffix = if (link_mode) |mode| switch (mode) {
- .Static => target.staticLibSuffix(),
- .Dynamic => target.dynamicLibSuffix(),
+ .static => target.staticLibSuffix(),
+ .dynamic => target.dynamicLibSuffix(),
} else "";
try test_path.writer().print("{s}" ++ sep ++ "{s}{s}{s}", .{
lib_dir_path,
@@ -2150,10 +2150,10 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
const is_obj = output_mode == .Obj;
const is_lib = output_mode == .Lib;
const link_mode = comp.config.link_mode;
- const is_dyn_lib = link_mode == .Dynamic and is_lib;
+ const is_dyn_lib = link_mode == .dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
const have_dynamic_linker = comp.config.link_libc and
- link_mode == .Dynamic and is_exe_or_dyn_lib;
+ link_mode == .dynamic and is_exe_or_dyn_lib;
const target = comp.root_mod.resolved_target.result;
const compiler_rt_path: ?[]const u8 = blk: {
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
@@ -2463,7 +2463,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
try argv.append(arg);
}
- if (link_mode == .Static) {
+ if (link_mode == .static) {
if (target.cpu.arch.isArmOrThumb()) {
try argv.append("-Bstatic");
} else {
@@ -2647,7 +2647,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
comp.link_error_flags.missing_libc = false;
if (comp.config.link_libc) {
if (comp.libc_installation != null) {
- const needs_grouping = link_mode == .Static;
+ const needs_grouping = link_mode == .static;
if (needs_grouping) try argv.append("--start-group");
try argv.appendSlice(target_util.libcFullLinkFlags(target));
if (needs_grouping) try argv.append("--end-group");
@@ -2661,8 +2661,8 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
} else if (target.isMusl()) {
try argv.append(try comp.get_libc_crt_file(arena, switch (link_mode) {
- .Static => "libc.a",
- .Dynamic => "libc.so",
+ .static => "libc.a",
+ .dynamic => "libc.so",
}));
} else {
comp.link_error_flags.missing_libc = true;
@@ -2928,8 +2928,8 @@ pub fn writeElfHeader(self: *Elf) !void {
.Exe => if (comp.config.pie) .DYN else .EXEC,
.Obj => .REL,
.Lib => switch (link_mode) {
- .Static => @as(elf.ET, .REL),
- .Dynamic => .DYN,
+ .static => @as(elf.ET, .REL),
+ .dynamic => .DYN,
},
};
mem.writeInt(u16, hdr_buf[index..][0..2], @intFromEnum(elf_type), endian);
@@ -3216,7 +3216,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
// __rela_iplt_start, __rela_iplt_end
if (self.rela_dyn_section_index) |shndx| blk: {
- if (link_mode != .Static or comp.config.pie) break :blk;
+ if (link_mode != .static or comp.config.pie) break :blk;
const shdr = &self.shdrs.items[shndx];
const end_addr = shdr.sh_addr + shdr.sh_size;
const start_addr = end_addr - self.calcNumIRelativeRelocs() * @sizeOf(elf.Elf64_Rela);
@@ -5061,12 +5061,12 @@ const CsuObjects = struct {
} = switch (comp.config.output_mode) {
.Obj => return CsuObjects{},
.Lib => switch (comp.config.link_mode) {
- .Dynamic => .dynamic_lib,
- .Static => return CsuObjects{},
+ .dynamic => .dynamic_lib,
+ .static => return CsuObjects{},
},
.Exe => switch (comp.config.link_mode) {
- .Dynamic => if (comp.config.pie) .dynamic_pie else .dynamic_exe,
- .Static => if (comp.config.pie) .static_pie else .static_exe,
+ .dynamic => if (comp.config.pie) .dynamic_pie else .dynamic_exe,
+ .static => if (comp.config.pie) .static_pie else .static_exe,
},
};
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index b6413f7d45..28b8acb51f 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -1436,10 +1436,10 @@ pub fn updateExports(
}
}
const stb_bits: u8 = switch (exp.opts.linkage) {
- .Internal => elf.STB_LOCAL,
- .Strong => elf.STB_GLOBAL,
- .Weak => elf.STB_WEAK,
- .LinkOnce => {
+ .internal => elf.STB_LOCAL,
+ .strong => elf.STB_GLOBAL,
+ .weak => elf.STB_WEAK,
+ .link_once => {
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create(
gpa,
diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig
index fadf80b2c0..bfd02fbd78 100644
--- a/src/link/MachO/ZigObject.zig
+++ b/src/link/MachO/ZigObject.zig
@@ -1216,11 +1216,11 @@ pub fn updateExports(
continue;
}
}
- if (exp.opts.linkage == .LinkOnce) {
+ if (exp.opts.linkage == .link_once) {
try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create(
gpa,
exp.getSrcLoc(mod),
- "Unimplemented: GlobalLinkage.LinkOnce",
+ "Unimplemented: GlobalLinkage.link_once",
.{},
));
continue;
@@ -1242,12 +1242,12 @@ pub fn updateExports(
self.symtab.items(.atom)[global_nlist_index] = self.symtab.items(.atom)[nlist_idx];
switch (exp.opts.linkage) {
- .Internal => {
+ .internal => {
// Symbol should be hidden, or in MachO lingo, private extern.
global_nlist.n_type |= macho.N_PEXT;
},
- .Strong => {},
- .Weak => {
+ .strong => {},
+ .weak => {
// Weak linkage is specified as part of n_desc field.
// Symbol's n_type is like for a symbol with strong linkage.
global_nlist.n_desc |= macho.N_WEAK_DEF;
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
index 778fdd74c7..394253db48 100644
--- a/src/link/MachO/load_commands.zig
+++ b/src/link/MachO/load_commands.zig
@@ -222,7 +222,7 @@ pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: anytype) !void {
pub fn writeDylibIdLC(macho_file: *MachO, writer: anytype) !void {
const comp = macho_file.base.comp;
const gpa = comp.gpa;
- assert(comp.config.output_mode == .Lib and comp.config.link_mode == .Dynamic);
+ assert(comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);
const emit = macho_file.base.emit;
const install_name = macho_file.install_name orelse
try emit.directory.join(gpa, &.{emit.sub_path});
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index c32a472213..128cec5b6e 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2518,7 +2518,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node)
// When the target os is WASI, we allow linking with WASI-LIBC
if (target.os.tag == .wasi) {
const is_exe_or_dyn_lib = output_mode == .Exe or
- (output_mode == .Lib and link_mode == .Dynamic);
+ (output_mode == .Lib and link_mode == .dynamic);
if (is_exe_or_dyn_lib) {
for (comp.wasi_emulated_libs) |crt_file| {
try positionals.append(try comp.get_libc_crt_file(
@@ -3549,7 +3549,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !vo
try argv.append("--allow-undefined");
}
- if (comp.config.output_mode == .Lib and comp.config.link_mode == .Dynamic) {
+ if (comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic) {
try argv.append("--shared");
}
if (comp.config.pie) {
@@ -3569,7 +3569,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !vo
if (target.os.tag == .wasi) {
const is_exe_or_dyn_lib = comp.config.output_mode == .Exe or
- (comp.config.output_mode == .Lib and comp.config.link_mode == .Dynamic);
+ (comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);
if (is_exe_or_dyn_lib) {
for (comp.wasi_emulated_libs) |crt_file| {
try argv.append(try comp.get_libc_crt_file(
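The is_exe_or_dyn_lib predicate recurs across the linkers: executables and dynamic libraries pull in CRT files, while objects and static libraries do not. A standalone sketch (isExeOrDynLib is an illustrative name):

    const std = @import("std");

    fn isExeOrDynLib(output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode) bool {
        return switch (output_mode) {
            .Exe => true,
            .Lib => link_mode == .dynamic,
            .Obj => false,
        };
    }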
diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig
index 5b1c587a74..30aab49cb1 100644
--- a/src/link/Wasm/ZigObject.zig
+++ b/src/link/Wasm/ZigObject.zig
@@ -896,14 +896,14 @@ pub fn updateExports(
sym.name = export_name;
switch (exp.opts.linkage) {
- .Internal => {
+ .internal => {
sym.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
},
- .Weak => {
+ .weak => {
sym.setFlag(.WASM_SYM_BINDING_WEAK);
},
- .Strong => {}, // symbols are strong by default
- .LinkOnce => {
+ .strong => {}, // symbols are strong by default
+ .link_once => {
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(mod),
diff --git a/src/main.zig b/src/main.zig
index 5a187c65e9..74c78bf089 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -755,8 +755,8 @@ const SystemLib = struct {
fn fallbackMode(this: SystemLib) std.builtin.LinkMode {
assert(this.search_strategy != .no_fallback);
return switch (this.preferred_mode) {
- .Dynamic => .Static,
- .Static => .Dynamic,
+ .dynamic => .static,
+ .static => .dynamic,
};
}
};
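fallbackMode simply flips the preferred mode, which is trivial with a two-field enum. As a sketch of the post-rename shape (assuming LinkMode keeps exactly these two fields):

    const std = @import("std");

    // Post-rename shape of the enum (sketch).
    const LinkMode = enum { static, dynamic };

    fn fallback(mode: LinkMode) LinkMode {
        return switch (mode) {
            .static => .dynamic,
            .dynamic => .static,
        };
    }

    test "fallback flips the mode" {
        try std.testing.expectEqual(LinkMode.static, fallback(.dynamic));
    }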
@@ -892,7 +892,7 @@ fn buildOutputType(
var entitlements: ?[]const u8 = null;
var pagezero_size: ?u64 = null;
var lib_search_strategy: SystemLib.SearchStrategy = .paths_first;
- var lib_preferred_mode: std.builtin.LinkMode = .Dynamic;
+ var lib_preferred_mode: std.builtin.LinkMode = .dynamic;
var headerpad_size: ?u32 = null;
var headerpad_max_install_names: bool = false;
var dead_strip_dylibs: bool = false;
@@ -1166,22 +1166,22 @@ fn buildOutputType(
};
} else if (mem.eql(u8, arg, "-search_paths_first")) {
lib_search_strategy = .paths_first;
- lib_preferred_mode = .Dynamic;
+ lib_preferred_mode = .dynamic;
} else if (mem.eql(u8, arg, "-search_paths_first_static")) {
lib_search_strategy = .paths_first;
- lib_preferred_mode = .Static;
+ lib_preferred_mode = .static;
} else if (mem.eql(u8, arg, "-search_dylibs_first")) {
lib_search_strategy = .mode_first;
- lib_preferred_mode = .Dynamic;
+ lib_preferred_mode = .dynamic;
} else if (mem.eql(u8, arg, "-search_static_first")) {
lib_search_strategy = .mode_first;
- lib_preferred_mode = .Static;
+ lib_preferred_mode = .static;
} else if (mem.eql(u8, arg, "-search_dylibs_only")) {
lib_search_strategy = .no_fallback;
- lib_preferred_mode = .Dynamic;
+ lib_preferred_mode = .dynamic;
} else if (mem.eql(u8, arg, "-search_static_only")) {
lib_search_strategy = .no_fallback;
- lib_preferred_mode = .Static;
+ lib_preferred_mode = .static;
} else if (mem.eql(u8, arg, "-headerpad")) {
const next_arg = args_iter.nextOrFatal();
headerpad_size = std.fmt.parseUnsigned(u32, eatIntPrefix(next_arg, 16), 16) catch |err| {
@@ -1478,12 +1478,12 @@ fn buildOutputType(
emit_implib = .no;
emit_implib_arg_provided = true;
} else if (mem.eql(u8, arg, "-dynamic")) {
- create_module.opts.link_mode = .Dynamic;
- lib_preferred_mode = .Dynamic;
+ create_module.opts.link_mode = .dynamic;
+ lib_preferred_mode = .dynamic;
lib_search_strategy = .mode_first;
} else if (mem.eql(u8, arg, "-static")) {
- create_module.opts.link_mode = .Static;
- lib_preferred_mode = .Static;
+ create_module.opts.link_mode = .static;
+ lib_preferred_mode = .static;
lib_search_strategy = .no_fallback;
} else if (mem.eql(u8, arg, "-fdll-export-fns")) {
create_module.opts.dll_export_fns = true;
@@ -1904,7 +1904,7 @@ fn buildOutputType(
},
.nostdlib_cpp => create_module.opts.ensure_libcpp_on_non_freestanding = false,
.shared => {
- create_module.opts.link_mode = .Dynamic;
+ create_module.opts.link_mode = .dynamic;
is_shared_lib = true;
},
.rdynamic => create_module.opts.rdynamic = true,
@@ -1961,20 +1961,20 @@ fn buildOutputType(
mem.eql(u8, linker_arg, "-call_shared"))
{
lib_search_strategy = .no_fallback;
- lib_preferred_mode = .Dynamic;
+ lib_preferred_mode = .dynamic;
} else if (mem.eql(u8, linker_arg, "-Bstatic") or
mem.eql(u8, linker_arg, "-dn") or
mem.eql(u8, linker_arg, "-non_shared") or
mem.eql(u8, linker_arg, "-static"))
{
lib_search_strategy = .no_fallback;
- lib_preferred_mode = .Static;
+ lib_preferred_mode = .static;
} else if (mem.eql(u8, linker_arg, "-search_paths_first")) {
lib_search_strategy = .paths_first;
- lib_preferred_mode = .Dynamic;
+ lib_preferred_mode = .dynamic;
} else if (mem.eql(u8, linker_arg, "-search_dylibs_first")) {
lib_search_strategy = .mode_first;
- lib_preferred_mode = .Dynamic;
+ lib_preferred_mode = .dynamic;
} else {
try linker_args.append(linker_arg);
}
@@ -3033,7 +3033,7 @@ fn buildOutputType(
const is_exe_or_dyn_lib = switch (create_module.resolved_options.output_mode) {
.Obj => false,
- .Lib => create_module.resolved_options.link_mode == .Dynamic,
+ .Lib => create_module.resolved_options.link_mode == .dynamic,
.Exe => true,
};
// Note that cmake when targeting Windows will try to execute
@@ -3770,8 +3770,8 @@ fn createModule(
)) {
const path = try arena.dupe(u8, test_path.items);
switch (info.preferred_mode) {
- .Static => try create_module.link_objects.append(arena, .{ .path = path }),
- .Dynamic => try create_module.resolved_system_libs.append(arena, .{
+ .static => try create_module.link_objects.append(arena, .{ .path = path }),
+ .dynamic => try create_module.resolved_system_libs.append(arena, .{
.name = lib_name,
.lib = .{
.needed = info.needed,
@@ -3804,8 +3804,8 @@ fn createModule(
)) {
const path = try arena.dupe(u8, test_path.items);
switch (info.fallbackMode()) {
- .Static => try create_module.link_objects.append(arena, .{ .path = path }),
- .Dynamic => try create_module.resolved_system_libs.append(arena, .{
+ .static => try create_module.link_objects.append(arena, .{ .path = path }),
+ .dynamic => try create_module.resolved_system_libs.append(arena, .{
.name = lib_name,
.lib = .{
.needed = info.needed,
@@ -3838,8 +3838,8 @@ fn createModule(
)) {
const path = try arena.dupe(u8, test_path.items);
switch (info.preferred_mode) {
- .Static => try create_module.link_objects.append(arena, .{ .path = path }),
- .Dynamic => try create_module.resolved_system_libs.append(arena, .{
+ .static => try create_module.link_objects.append(arena, .{ .path = path }),
+ .dynamic => try create_module.resolved_system_libs.append(arena, .{
.name = lib_name,
.lib = .{
.needed = info.needed,
@@ -3862,8 +3862,8 @@ fn createModule(
)) {
const path = try arena.dupe(u8, test_path.items);
switch (info.fallbackMode()) {
- .Static => try create_module.link_objects.append(arena, .{ .path = path }),
- .Dynamic => try create_module.resolved_system_libs.append(arena, .{
+ .static => try create_module.link_objects.append(arena, .{ .path = path }),
+ .dynamic => try create_module.resolved_system_libs.append(arena, .{
.name = lib_name,
.lib = .{
.needed = info.needed,
@@ -4145,8 +4145,8 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th
buf.appendSlice("... ") catch {};
}
need_ellipse = false;
- const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
- const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
+ const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic);
+ const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic);
const current_item = completed_items + 1;
if (node.name.len != 0 or eti > 0) {
if (node.name.len != 0) {
@@ -4163,7 +4163,7 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th
need_ellipse = false;
}
}
- maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .Acquire);
+ maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .acquire);
}
}
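std.builtin.AtomicOrder gets the same lowercase treatment, so the atomic builtins now take lowercase ordering tags. A minimal sketch of the renamed orderings in both load and store position:

    var done: bool = false;
    var items_done: usize = 0;

    // Sketch: post-rename lowercase AtomicOrder tags.
    fn publish(count: usize) void {
        @atomicStore(usize, &items_done, count, .monotonic);
        @atomicStore(bool, &done, true, .release);
    }

    fn poll() ?usize {
        if (!@atomicLoad(bool, &done, .acquire)) return null;
        return @atomicLoad(usize, &items_done, .monotonic);
    }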
@@ -6842,7 +6842,7 @@ fn accessLibPath(
) !bool {
const sep = fs.path.sep_str;
- if (target.isDarwin() and link_mode == .Dynamic) tbd: {
+ if (target.isDarwin() and link_mode == .dynamic) tbd: {
// Prefer .tbd over .dylib.
test_path.clearRetainingCapacity();
try test_path.writer().print("{s}" ++ sep ++ "lib{s}.tbd", .{ lib_dir_path, lib_name });
@@ -6863,8 +6863,8 @@ fn accessLibPath(
target.libPrefix(),
lib_name,
switch (link_mode) {
- .Static => target.staticLibSuffix(),
- .Dynamic => target.dynamicLibSuffix(),
+ .static => target.staticLibSuffix(),
+ .dynamic => target.dynamicLibSuffix(),
},
});
try checked_paths.writer().print("\n {s}", .{test_path.items});
@@ -6879,7 +6879,7 @@ fn accessLibPath(
// In the case of Darwin, the main check will be .dylib, so here we
// additionally check for .so files.
- if (target.isDarwin() and link_mode == .Dynamic) so: {
+ if (target.isDarwin() and link_mode == .dynamic) so: {
test_path.clearRetainingCapacity();
try test_path.writer().print("{s}" ++ sep ++ "lib{s}.so", .{ lib_dir_path, lib_name });
try checked_paths.writer().print("\n {s}", .{test_path.items});
@@ -6894,7 +6894,7 @@ fn accessLibPath(
// In the case of MinGW, the main check will be .lib but we also need to
// look for `libfoo.a`.
- if (target.isMinGW() and link_mode == .Static) mingw: {
+ if (target.isMinGW() and link_mode == .static) mingw: {
test_path.clearRetainingCapacity();
try test_path.writer().print("{s}" ++ sep ++ "lib{s}.a", .{
lib_dir_path, lib_name,
diff --git a/src/musl.zig b/src/musl.zig
index ed943b7bf5..3228faf271 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -207,7 +207,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
const strip = comp.compilerRtStrip();
const config = try Compilation.Config.resolve(.{
.output_mode = .Lib,
- .link_mode = .Dynamic,
+ .link_mode = .dynamic,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
.have_zcu = false,
diff --git a/src/print_air.zig b/src/print_air.zig
index 8af0301c1a..0c1beac3a8 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -303,10 +303,10 @@ const Writer = struct {
.fence => try w.writeFence(s, inst),
.atomic_load => try w.writeAtomicLoad(s, inst),
.prefetch => try w.writePrefetch(s, inst),
- .atomic_store_unordered => try w.writeAtomicStore(s, inst, .Unordered),
- .atomic_store_monotonic => try w.writeAtomicStore(s, inst, .Monotonic),
- .atomic_store_release => try w.writeAtomicStore(s, inst, .Release),
- .atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst),
+ .atomic_store_unordered => try w.writeAtomicStore(s, inst, .unordered),
+ .atomic_store_monotonic => try w.writeAtomicStore(s, inst, .monotonic),
+ .atomic_store_release => try w.writeAtomicStore(s, inst, .release),
+ .atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .seq_cst),
.atomic_rmw => try w.writeAtomicRmw(s, inst),
.field_parent_ptr => try w.writeFieldParentPtr(s, inst),
.wasm_memory_size => try w.writeWasmMemorySize(s, inst),
diff --git a/src/print_zir.zig b/src/print_zir.zig
index d96fe4f6c9..8c858591eb 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -1440,7 +1440,7 @@ const Writer = struct {
if (small.has_backing_int) {
const backing_int_body_len = self.code.extra[extra_index];
extra_index += 1;
- try stream.writeAll("Packed(");
+ try stream.writeAll("packed(");
if (backing_int_body_len == 0) {
const backing_int_ref: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
diff --git a/src/type.zig b/src/type.zig
index 8b2c6f2a1e..664498e353 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -741,12 +741,12 @@ pub const Type = struct {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// Structs with no fields have a well-defined layout of no bits.
- return struct_type.layout != .Auto or struct_type.field_types.len == 0;
+ return struct_type.layout != .auto or struct_type.field_types.len == 0;
},
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
return switch (union_type.flagsPtr(ip).runtime_tag) {
- .none, .safety => union_type.flagsPtr(ip).layout != .Auto,
+ .none, .safety => union_type.flagsPtr(ip).layout != .auto,
.tagged => false,
};
},
@@ -1027,7 +1027,7 @@ pub const Type = struct {
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
- if (struct_type.layout == .Packed) {
+ if (struct_type.layout == .@"packed") {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{
@@ -1407,7 +1407,7 @@ pub const Type = struct {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => switch (struct_type.layout) {
- .Packed => {
+ .@"packed" => {
if (struct_type.backingIntType(ip).* == .none) return .{
.val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
@@ -1415,7 +1415,7 @@ pub const Type = struct {
} }))),
};
},
- .Auto, .Extern => {
+ .auto, .@"extern" => {
if (!struct_type.haveLayout(ip)) return .{
.val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
@@ -1427,10 +1427,10 @@ pub const Type = struct {
.eager => {},
}
switch (struct_type.layout) {
- .Packed => return .{
+ .@"packed" => return .{
.scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod),
},
- .Auto, .Extern => {
+ .auto, .@"extern" => {
assert(struct_type.haveLayout(ip));
return .{ .scalar = struct_type.size(ip).* };
},
@@ -1656,7 +1656,7 @@ pub const Type = struct {
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
- const is_packed = struct_type.layout == .Packed;
+ const is_packed = struct_type.layout == .@"packed";
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
if (is_packed) try sema.resolveTypeLayout(ty);
@@ -1674,7 +1674,7 @@ pub const Type = struct {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
- const is_packed = ty.containerLayout(mod) == .Packed;
+ const is_packed = ty.containerLayout(mod) == .@"packed";
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
if (is_packed) try sema.resolveTypeLayout(ty);
@@ -1987,9 +1987,9 @@ pub const Type = struct {
/// Asserts the type is either an extern or packed union.
pub fn unionBackingType(ty: Type, mod: *Module) !Type {
return switch (ty.containerLayout(mod)) {
- .Extern => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }),
- .Packed => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))),
- .Auto => unreachable,
+ .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }),
+ .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))),
+ .auto => unreachable,
};
}
@@ -2003,7 +2003,7 @@ pub const Type = struct {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).layout,
- .anon_struct_type => .Auto,
+ .anon_struct_type => .auto,
.union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
else => unreachable,
};
@@ -2177,7 +2177,7 @@ pub const Type = struct {
pub fn isAbiInt(ty: Type, mod: *Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Int, .Enum, .ErrorSet => true,
- .Struct => ty.containerLayout(mod) == .Packed,
+ .Struct => ty.containerLayout(mod) == .@"packed",
else => false,
};
}
@@ -2690,7 +2690,7 @@ pub const Type = struct {
const struct_type = ip.loadStructType(ty.toIntern());
// packed structs cannot be comptime-only because they have a well-defined
// memory layout and every field has a well-defined bit pattern.
- if (struct_type.layout == .Packed)
+ if (struct_type.layout == .@"packed")
return false;
// A struct with no fields is not comptime-only.
@@ -3051,7 +3051,7 @@ pub const Type = struct {
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
- assert(struct_type.layout != .Packed);
+ assert(struct_type.layout != .@"packed");
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
@@ -3132,7 +3132,7 @@ pub const Type = struct {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
- assert(struct_type.layout != .Packed);
+ assert(struct_type.layout != .@"packed");
return struct_type.offsets.get(ip)[index];
},
@@ -3208,7 +3208,7 @@ pub const Type = struct {
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
- if (struct_type.layout == .Packed) return false;
+ if (struct_type.layout == .@"packed") return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
},
@@ -3230,7 +3230,7 @@ pub const Type = struct {
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
- if (struct_type.layout == .Packed) return false;
+ if (struct_type.layout == .@"packed") return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
},
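One wrinkle in the type.zig hunks: `packed` and `extern` are Zig keywords, so the lowercase ContainerLayout fields must be written with the @"..." identifier syntax. A sketch of the post-rename enum and a predicate in the spirit of hasWellDefinedLayout above (hasGuaranteedLayout is an illustrative name):

    // Sketch: keyword-named fields require the @"..." syntax.
    const ContainerLayout = enum { auto, @"extern", @"packed" };

    fn hasGuaranteedLayout(layout: ContainerLayout) bool {
        return switch (layout) {
            .auto => false,
            .@"extern", .@"packed" => true,
        };
    }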