Diffstat (limited to 'src/Module.zig')
-rw-r--r--  src/Module.zig  566
1 file changed, 192 insertions(+), 374 deletions(-)
diff --git a/src/Module.zig b/src/Module.zig
index 9da3134917..a39d4da6c8 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -105,8 +105,6 @@ comptime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternP
/// To be eliminated in a future commit by moving more data into InternPool.
/// Current uses that must be eliminated:
-/// * Struct comptime_args
-/// * Struct optimized_order
/// * comptime pointer mutation
/// This memory lives until the Module is destroyed.
tmp_hack_arena: std.heap.ArenaAllocator,
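For context, a minimal self-contained sketch of the arena-lifetime pattern the comment above relies on (standalone types, not the compiler's): everything allocated through the arena is freed in one shot when the owner is destroyed, which is why data can legitimately live in `tmp_hack_arena` until Module teardown.

```zig
const std = @import("std");

fn example(gpa: std.mem.Allocator) !void {
    // Analogous to tmp_hack_arena: allocations live until deinit,
    // which Module performs when it is destroyed.
    var arena = std.heap.ArenaAllocator.init(gpa);
    defer arena.deinit();
    const buf = try arena.allocator().alloc(u8, 64);
    _ = buf; // freed wholesale by arena.deinit()
}
```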
@@ -678,14 +676,10 @@ pub const Decl = struct {
/// If the Decl owns its value and it is a struct, return it,
/// otherwise null.
- pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct {
- return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod));
- }
-
- pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
- if (!decl.owns_tv) return .none;
- if (decl.val.ip_index == .none) return .none;
- return mod.intern_pool.indexToStructType(decl.val.toIntern());
+ pub fn getOwnedStruct(decl: Decl, mod: *Module) ?InternPool.Key.StructType {
+ if (!decl.owns_tv) return null;
+ if (decl.val.ip_index == .none) return null;
+ return mod.typeToStruct(decl.val.toType());
}
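A hedged sketch of the new call pattern: callers now receive an `InternPool.Key.StructType` by value and unwrap an ordinary optional, instead of going through `Struct.OptionalIndex` and a heap-allocated `*Struct`.

```zig
// Sketch only; `decl` and `mod` as in the surrounding code.
if (decl.getOwnedStruct(mod)) |struct_type| {
    // Key fields are read directly, e.g. the container layout
    // (used the same way by typeToPackedStruct later in this diff).
    if (struct_type.layout == .Packed) {
        // ...
    }
}
```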
/// If the Decl owns its value and it is a union, return it,
@@ -795,9 +789,10 @@ pub const Decl = struct {
return decl.getExternDecl(mod) != .none;
}
- pub fn getAlignment(decl: Decl, mod: *Module) u32 {
+ pub fn getAlignment(decl: Decl, mod: *Module) Alignment {
assert(decl.has_tv);
- return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)));
+ if (decl.alignment != .none) return decl.alignment;
+ return decl.ty.abiAlignment(mod);
}
};
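A usage sketch under the new signature: the result is an `Alignment` enum rather than a byte count, and it is never `.none` because the ABI alignment is substituted; callers that still need bytes convert explicitly, as the `intBitsForValue` hunk below does with `.toByteUnits(0)`.

```zig
// Sketch only; `decl` and `mod` as above.
const a: Alignment = decl.getAlignment(mod);
const in_bytes: u64 = a.toByteUnits(0); // 0 is the fallback for .none, unreachable here
```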
@@ -806,218 +801,6 @@ pub const EmitH = struct {
fwd_decl: ArrayListUnmanaged(u8) = .{},
};
-pub const PropertyBoolean = enum { no, yes, unknown, wip };
-
-/// Represents the data that a struct declaration provides.
-pub const Struct = struct {
- /// Set of field names in declaration order.
- fields: Fields,
- /// Represents the declarations inside this struct.
- namespace: Namespace.Index,
- /// The Decl that corresponds to the struct itself.
- owner_decl: Decl.Index,
- /// Index of the struct_decl ZIR instruction.
- zir_index: Zir.Inst.Index,
- /// Indexes into `fields` sorted to be most memory efficient.
- optimized_order: ?[*]u32 = null,
- layout: std.builtin.Type.ContainerLayout,
- /// If the layout is not packed, this is the noreturn type.
- /// If the layout is packed, this is the backing integer type of the packed struct.
- /// Whether zig chooses this type or the user specifies it, it is stored here.
- /// This will be set to the noreturn type until status is `have_layout`.
- backing_int_ty: Type = Type.noreturn,
- status: enum {
- none,
- field_types_wip,
- have_field_types,
- layout_wip,
- have_layout,
- fully_resolved_wip,
- // The types and all its fields have had their layout resolved. Even through pointer,
- // which `have_layout` does not ensure.
- fully_resolved,
- },
- /// If true, has more than one possible value. However it may still be non-runtime type
- /// if it is a comptime-only type.
- /// If false, resolving the fields is necessary to determine whether the type has only
- /// one possible value.
- known_non_opv: bool,
- requires_comptime: PropertyBoolean = .unknown,
- have_field_inits: bool = false,
- is_tuple: bool,
- assumed_runtime_bits: bool = false,
-
- pub const Index = enum(u32) {
- _,
-
- pub fn toOptional(i: Index) OptionalIndex {
- return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
- }
- };
-
- pub const OptionalIndex = enum(u32) {
- none = std.math.maxInt(u32),
- _,
-
- pub fn init(oi: ?Index) OptionalIndex {
- return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
- }
-
- pub fn unwrap(oi: OptionalIndex) ?Index {
- if (oi == .none) return null;
- return @as(Index, @enumFromInt(@intFromEnum(oi)));
- }
- };
-
- pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
-
- /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
- pub const Field = struct {
- /// Uses `noreturn` to indicate `anytype`.
- /// undefined until `status` is >= `have_field_types`.
- ty: Type,
- /// Uses `none` to indicate no default.
- default_val: InternPool.Index,
- /// Zero means to use the ABI alignment of the type.
- abi_align: Alignment,
- /// undefined until `status` is `have_layout`.
- offset: u32,
- /// If true then `default_val` is the comptime field value.
- is_comptime: bool,
-
- /// Returns the field alignment. If the struct is packed, returns 0.
- /// Keep implementation in sync with `Sema.structFieldAlignment`.
- pub fn alignment(
- field: Field,
- mod: *Module,
- layout: std.builtin.Type.ContainerLayout,
- ) u32 {
- if (field.abi_align.toByteUnitsOptional()) |abi_align| {
- assert(layout != .Packed);
- return @as(u32, @intCast(abi_align));
- }
-
- const target = mod.getTarget();
-
- switch (layout) {
- .Packed => return 0,
- .Auto => {
- if (target.ofmt == .c) {
- return alignmentExtern(field, mod);
- } else {
- return field.ty.abiAlignment(mod);
- }
- },
- .Extern => return alignmentExtern(field, mod),
- }
- }
-
- pub fn alignmentExtern(field: Field, mod: *Module) u32 {
- // This logic is duplicated in Type.abiAlignmentAdvanced.
- const ty_abi_align = field.ty.abiAlignment(mod);
-
- if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
- // The C ABI requires 128 bit integer fields of structs
- // to be 16-bytes aligned.
- return @max(ty_abi_align, 16);
- }
-
- return ty_abi_align;
- }
- };
-
- /// Used in `optimized_order` to indicate field that is not present in the
- /// runtime version of the struct.
- pub const omitted_field = std.math.maxInt(u32);
-
- pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString {
- return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
- }
-
- pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
- return mod.declPtr(s.owner_decl).srcLoc(mod);
- }
-
- pub fn haveFieldTypes(s: Struct) bool {
- return switch (s.status) {
- .none,
- .field_types_wip,
- => false,
- .have_field_types,
- .layout_wip,
- .have_layout,
- .fully_resolved_wip,
- .fully_resolved,
- => true,
- };
- }
-
- pub fn haveLayout(s: Struct) bool {
- return switch (s.status) {
- .none,
- .field_types_wip,
- .have_field_types,
- .layout_wip,
- => false,
- .have_layout,
- .fully_resolved_wip,
- .fully_resolved,
- => true,
- };
- }
-
- pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
- assert(s.layout == .Packed);
- assert(s.haveLayout());
- var bit_sum: u64 = 0;
- for (s.fields.values(), 0..) |field, i| {
- if (i == index) {
- return @as(u16, @intCast(bit_sum));
- }
- bit_sum += field.ty.bitSize(mod);
- }
- unreachable; // index out of bounds
- }
-
- pub const RuntimeFieldIterator = struct {
- module: *Module,
- struct_obj: *const Struct,
- index: u32 = 0,
-
- pub const FieldAndIndex = struct {
- field: Field,
- index: u32,
- };
-
- pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex {
- const mod = it.module;
- while (true) {
- var i = it.index;
- it.index += 1;
- if (it.struct_obj.fields.count() <= i)
- return null;
-
- if (it.struct_obj.optimized_order) |some| {
- i = some[i];
- if (i == Module.Struct.omitted_field) return null;
- }
- const field = it.struct_obj.fields.values()[i];
-
- if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) {
- return FieldAndIndex{ .index = i, .field = field };
- }
- }
- }
- };
-
- pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
- return .{
- .struct_obj = s,
- .module = module,
- };
- }
-};
-
pub const DeclAdapter = struct {
mod: *Module,
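The deleted `Struct.OptionalIndex` above illustrates a pattern worth noting: an optional encoded in the same 32 bits as the index by sacrificing one sentinel value, so the optional costs no extra storage. A self-contained sketch of just that idiom:

```zig
const std = @import("std");

const Index = enum(u32) { _ };
const OptionalIndex = enum(u32) {
    none = std.math.maxInt(u32),
    _,

    fn unwrap(oi: OptionalIndex) ?Index {
        if (oi == .none) return null;
        return @as(Index, @enumFromInt(@intFromEnum(oi)));
    }
};

test "sentinel-encoded optional costs no extra storage" {
    try std.testing.expect(@sizeOf(OptionalIndex) == @sizeOf(Index));
    try std.testing.expect(OptionalIndex.none.unwrap() == null);
}
```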
@@ -2893,20 +2676,10 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
return mod.intern_pool.namespacePtr(index);
}
-pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
- return mod.intern_pool.structPtr(index);
-}
-
pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
return mod.namespacePtr(index.unwrap() orelse return null);
}
-/// This one accepts an index from the InternPool and asserts that it is not
-/// the anonymous empty struct type.
-pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
- return mod.structPtr(index.unwrap() orelse return null);
-}
-
/// Returns true if and only if the Decl is the top level struct associated with a File.
pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
const decl = mod.declPtr(decl_index);
@@ -3351,11 +3124,11 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
if (!decl.owns_tv) continue;
- if (decl.getOwnedStruct(mod)) |struct_obj| {
- struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
+ if (decl.getOwnedStruct(mod)) |struct_type| {
+ struct_type.setZirIndex(ip, inst_map.get(struct_type.zir_index) orelse {
try file.deleted_decls.append(gpa, decl_index);
continue;
- };
+ });
}
if (decl.getOwnedUnion(mod)) |union_type| {
@@ -3870,36 +3643,16 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
const new_decl = mod.declPtr(new_decl_index);
errdefer @panic("TODO error handling");
- const struct_index = try mod.createStruct(.{
- .owner_decl = new_decl_index,
- .fields = .{},
- .zir_index = undefined, // set below
- .layout = .Auto,
- .status = .none,
- .known_non_opv = undefined,
- .is_tuple = undefined, // set below
- .namespace = new_namespace_index,
- });
- errdefer mod.destroyStruct(struct_index);
-
- const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
- .index = struct_index.toOptional(),
- .namespace = new_namespace_index.toOptional(),
- } });
- // TODO: figure out InternPool removals for incremental compilation
- //errdefer mod.intern_pool.remove(struct_ty);
-
- new_namespace.ty = struct_ty.toType();
file.root_decl = new_decl_index.toOptional();
new_decl.name = try file.fullyQualifiedName(mod);
+ new_decl.name_fully_qualified = true;
new_decl.src_line = 0;
new_decl.is_pub = true;
new_decl.is_exported = false;
new_decl.has_align = false;
new_decl.has_linksection_or_addrspace = false;
new_decl.ty = Type.type;
- new_decl.val = struct_ty.toValue();
new_decl.alignment = .none;
new_decl.@"linksection" = .none;
new_decl.has_tv = true;
@@ -3907,75 +3660,76 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive.
new_decl.analysis = .in_progress;
new_decl.generation = mod.generation;
- new_decl.name_fully_qualified = true;
- if (file.status == .success_zir) {
- assert(file.zir_loaded);
- const main_struct_inst = Zir.main_struct_inst;
- const struct_obj = mod.structPtr(struct_index);
- struct_obj.zir_index = main_struct_inst;
- const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
- const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
- struct_obj.is_tuple = small.is_tuple;
-
- var sema_arena = std.heap.ArenaAllocator.init(gpa);
- defer sema_arena.deinit();
- const sema_arena_allocator = sema_arena.allocator();
-
- var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
- defer comptime_mutable_decls.deinit();
-
- var sema: Sema = .{
- .mod = mod,
- .gpa = gpa,
- .arena = sema_arena_allocator,
- .code = file.zir,
- .owner_decl = new_decl,
- .owner_decl_index = new_decl_index,
- .func_index = .none,
- .func_is_naked = false,
- .fn_ret_ty = Type.void,
- .fn_ret_ty_ies = null,
- .owner_func_index = .none,
- .comptime_mutable_decls = &comptime_mutable_decls,
- };
- defer sema.deinit();
+ if (file.status != .success_zir) {
+ new_decl.analysis = .file_failure;
+ return;
+ }
+ assert(file.zir_loaded);
- if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| {
- for (comptime_mutable_decls.items) |decl_index| {
- const decl = mod.declPtr(decl_index);
- _ = try decl.internValue(mod);
- }
- new_decl.analysis = .complete;
- } else |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => {},
- }
+ var sema_arena = std.heap.ArenaAllocator.init(gpa);
+ defer sema_arena.deinit();
+ const sema_arena_allocator = sema_arena.allocator();
- if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
- const source = file.getSource(gpa) catch |err| {
- try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
- return error.AnalysisFail;
- };
+ var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
+ defer comptime_mutable_decls.deinit();
- const resolved_path = std.fs.path.resolve(
- gpa,
- if (file.pkg.root_src_directory.path) |pkg_path|
- &[_][]const u8{ pkg_path, file.sub_file_path }
- else
- &[_][]const u8{file.sub_file_path},
- ) catch |err| {
- try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
- return error.AnalysisFail;
- };
- errdefer gpa.free(resolved_path);
+ var sema: Sema = .{
+ .mod = mod,
+ .gpa = gpa,
+ .arena = sema_arena_allocator,
+ .code = file.zir,
+ .owner_decl = new_decl,
+ .owner_decl_index = new_decl_index,
+ .func_index = .none,
+ .func_is_naked = false,
+ .fn_ret_ty = Type.void,
+ .fn_ret_ty_ies = null,
+ .owner_func_index = .none,
+ .comptime_mutable_decls = &comptime_mutable_decls,
+ };
+ defer sema.deinit();
- mod.comp.whole_cache_manifest_mutex.lock();
- defer mod.comp.whole_cache_manifest_mutex.unlock();
- try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
- }
- } else {
- new_decl.analysis = .file_failure;
+ const main_struct_inst = Zir.main_struct_inst;
+ const struct_ty = sema.getStructType(
+ new_decl_index,
+ new_namespace_index,
+ main_struct_inst,
+ ) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ // TODO: figure out InternPool removals for incremental compilation
+ //errdefer ip.remove(struct_ty);
+ for (comptime_mutable_decls.items) |decl_index| {
+ const decl = mod.declPtr(decl_index);
+ _ = try decl.internValue(mod);
+ }
+
+ new_namespace.ty = struct_ty.toType();
+ new_decl.val = struct_ty.toValue();
+ new_decl.analysis = .complete;
+
+ if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
+ const source = file.getSource(gpa) catch |err| {
+ try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
+ return error.AnalysisFail;
+ };
+
+ const resolved_path = std.fs.path.resolve(
+ gpa,
+ if (file.pkg.root_src_directory.path) |pkg_path|
+ &[_][]const u8{ pkg_path, file.sub_file_path }
+ else
+ &[_][]const u8{file.sub_file_path},
+ ) catch |err| {
+ try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
+ return error.AnalysisFail;
+ };
+ errdefer gpa.free(resolved_path);
+
+ mod.comp.whole_cache_manifest_mutex.lock();
+ defer mod.comp.whole_cache_manifest_mutex.unlock();
+ try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
}
}
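The restructuring above is a guard-clause inversion: the old code wrapped the entire analysis in `if (file.status == .success_zir) { ... } else { new_decl.analysis = .file_failure; }`, while the new code fails fast and lets the main path run unindented. A minimal standalone sketch of the shape (hypothetical types, not the compiler's):

```zig
const Status = enum { success_zir, parse_failure };
const FileLike = struct { status: Status, failed: bool = false };

fn analyze(file: *FileLike) void {
    if (file.status != .success_zir) {
        file.failed = true; // the real code sets new_decl.analysis = .file_failure
        return;
    }
    // main analysis continues at top-level indentation
}
```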
@@ -4057,12 +3811,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
if (mod.declIsRoot(decl_index)) {
const main_struct_inst = Zir.main_struct_inst;
- const struct_index = decl.getOwnedStructIndex(mod).unwrap().?;
- const struct_obj = mod.structPtr(struct_index);
- // This might not have gotten set in `semaFile` if the first time had
- // a ZIR failure, so we set it here in case.
- struct_obj.zir_index = main_struct_inst;
- try sema.analyzeStructDecl(decl, main_struct_inst, struct_index);
+ const struct_type = decl.getOwnedStruct(mod).?;
+ assert(struct_type.zir_index == main_struct_inst);
+ if (true) @panic("TODO");
+ // Why did the old code have this? I don't see how struct_type could have
+ // been created without the analyzeStructDecl logic having already run on it.
+ //try sema.analyzeStructDecl(decl, main_struct_inst, struct_type);
decl.analysis = .complete;
decl.generation = mod.generation;
return false;
@@ -5241,14 +4995,6 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
return mod.intern_pool.destroyNamespace(mod.gpa, index);
}
-pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
- return mod.intern_pool.createStruct(mod.gpa, initialization);
-}
-
-pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
- return mod.intern_pool.destroyStruct(mod.gpa, index);
-}
-
pub fn allocateNewDecl(
mod: *Module,
namespace: Namespace.Index,
@@ -6210,10 +5956,10 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
// type, we change it to 0 here. If this causes an assertion trip because the
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
- if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
- if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
- canon_info.flags.alignment = .none;
- }
+ if (info.flags.alignment != .none and have_elem_layout and
+ info.flags.alignment == info.child.toType().abiAlignment(mod))
+ {
+ canon_info.flags.alignment = .none;
}
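The effect of the canonicalization above, illustrated with a hedged example (assumes a target where `u32` has 4-byte ABI alignment, and assumes `fromByteUnits` as the Alignment constructor): an explicit alignment equal to the pointee's ABI alignment interns to the same pointer type as no alignment at all.

```zig
// Hypothetical illustration, inside some function with error handling:
const plain = try mod.ptrType(.{ .child = .u32_type });
const aligned = try mod.ptrType(.{
    .child = .u32_type,
    .flags = .{ .alignment = Alignment.fromByteUnits(4) }, // assumed constructor
});
assert(plain.toIntern() == aligned.toIntern()); // canonicalized to the same key
```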
switch (info.flags.vector_index) {
@@ -6483,7 +6229,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
return @as(u16, @intCast(big.bitCountTwosComp()));
},
.lazy_align => |lazy_ty| {
- return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @intFromBool(sign);
+ return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
},
.lazy_size => |lazy_ty| {
return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @intFromBool(sign);
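A worked instance of the converted `lazy_align` expression above: `abiAlignment` now yields an `Alignment`, and `.toByteUnits(0)` recovers the integer byte count that `smallestUnsignedBits` expects.

```zig
const std = @import("std");

comptime {
    // For an 8-byte-aligned type: toByteUnits(0) == 8, and 8 = 0b1000
    // needs four bits, so a signed lazy_align value needs 4 + 1 bits.
    const bytes: u64 = 8;
    std.debug.assert(64 - @clz(bytes) == 4); // what smallestUnsignedBits computes for 8
}
```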
@@ -6639,20 +6385,30 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I
/// * `@TypeOf(.{})`
/// * A struct which has no fields (`struct {}`).
/// * Not a struct.
-pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
+pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
if (ty.ip_index == .none) return null;
- const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null;
- return mod.structPtr(struct_index);
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |t| t,
+ else => null,
+ };
+}
+
+pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
+ if (ty.ip_index == .none) return null;
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |t| if (t.layout == .Packed) t else null,
+ else => null,
+ };
}
/// This asserts that the union's enum tag type has been resolved.
pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.UnionType {
if (ty.ip_index == .none) return null;
const ip = &mod.intern_pool;
- switch (ip.indexToKey(ty.ip_index)) {
- .union_type => |k| return ip.loadUnionType(k),
- else => return null,
- }
+ return switch (ip.indexToKey(ty.ip_index)) {
+ .union_type => |k| ip.loadUnionType(k),
+ else => null,
+ };
}
pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
@@ -6741,13 +6497,13 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]
pub const UnionLayout = struct {
abi_size: u64,
- abi_align: u32,
+ abi_align: Alignment,
most_aligned_field: u32,
most_aligned_field_size: u64,
biggest_field: u32,
payload_size: u64,
- payload_align: u32,
- tag_align: u32,
+ payload_align: Alignment,
+ tag_align: Alignment,
tag_size: u64,
padding: u32,
};
@@ -6759,35 +6515,37 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
var most_aligned_field_size: u64 = undefined;
var biggest_field: u32 = undefined;
var payload_size: u64 = 0;
- var payload_align: u32 = 0;
+ var payload_align: Alignment = .@"1";
for (u.field_types.get(ip), 0..) |field_ty, i| {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_align = u.fieldAlign(ip, @intCast(i)).toByteUnitsOptional() orelse
+ const explicit_align = u.fieldAlign(ip, @intCast(i));
+ const field_align = if (explicit_align != .none)
+ explicit_align
+ else
field_ty.toType().abiAlignment(mod);
const field_size = field_ty.toType().abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(i);
}
- if (field_align > payload_align) {
- payload_align = @intCast(field_align);
+ if (field_align.compare(.gte, payload_align)) {
+ payload_align = field_align;
most_aligned_field = @intCast(i);
most_aligned_field_size = field_size;
}
}
- payload_align = @max(payload_align, 1);
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
if (!have_tag or !u.enum_tag_ty.toType().hasRuntimeBits(mod)) {
return .{
- .abi_size = std.mem.alignForward(u64, payload_size, payload_align),
+ .abi_size = payload_align.forward(payload_size),
.abi_align = payload_align,
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
.biggest_field = biggest_field,
.payload_size = payload_size,
.payload_align = payload_align,
- .tag_align = 0,
+ .tag_align = .none,
.tag_size = 0,
.padding = 0,
};
@@ -6795,29 +6553,29 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
// Put the tag before or after the payload depending on which one's
// alignment is greater.
const tag_size = u.enum_tag_ty.toType().abiSize(mod);
- const tag_align = @max(1, u.enum_tag_ty.toType().abiAlignment(mod));
+ const tag_align = u.enum_tag_ty.toType().abiAlignment(mod).max(.@"1");
var size: u64 = 0;
var padding: u32 = undefined;
- if (tag_align >= payload_align) {
+ if (tag_align.compare(.gte, payload_align)) {
// {Tag, Payload}
size += tag_size;
- size = std.mem.alignForward(u64, size, payload_align);
+ size = payload_align.forward(size);
size += payload_size;
const prev_size = size;
- size = std.mem.alignForward(u64, size, tag_align);
- padding = @as(u32, @intCast(size - prev_size));
+ size = tag_align.forward(size);
+ padding = @intCast(size - prev_size);
} else {
// {Payload, Tag}
size += payload_size;
- size = std.mem.alignForward(u64, size, tag_align);
+ size = tag_align.forward(size);
size += tag_size;
const prev_size = size;
- size = std.mem.alignForward(u64, size, payload_align);
- padding = @as(u32, @intCast(size - prev_size));
+ size = payload_align.forward(size);
+ padding = @intCast(size - prev_size);
}
return .{
.abi_size = size,
- .abi_align = @max(tag_align, payload_align),
+ .abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
.biggest_field = biggest_field,
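A worked instance of the `{Tag, Payload}` branch above, assuming a payload of 5 bytes with alignment 1 and a `u32` tag (size 4, alignment 4), using the same `std.mem.alignForward` arithmetic the old code spelled out:

```zig
const std = @import("std");

comptime {
    var size: u64 = 4; // tag first, since tag_align (4) >= payload_align (1)
    size = std.mem.alignForward(u64, size, 1) + 5; // payload => 9
    const prev = size;
    size = std.mem.alignForward(u64, size, 4); // round up to tag_align => 12
    std.debug.assert(size == 12 and size - prev == 3); // abi_size 12, padding 3
}
```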
@@ -6834,17 +6592,16 @@ pub fn unionAbiSize(mod: *Module, u: InternPool.UnionType) u64 {
}
/// Returns 0 if the union is represented with 0 bits at runtime.
-/// TODO: this returns alignment in byte units should should be a u64
-pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
+pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
- var max_align: u32 = 0;
+ var max_align: Alignment = .none;
if (have_tag) max_align = u.enum_tag_ty.toType().abiAlignment(mod);
for (u.field_types.get(ip), 0..) |field_ty, field_index| {
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
const field_align = mod.unionFieldNormalAlignment(u, @intCast(field_index));
- max_align = @max(max_align, field_align);
+ max_align = max_align.max(field_align);
}
return max_align;
}
@@ -6852,10 +6609,10 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
-/// TODO: this returns alignment in byte units should should be a u64
-pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) u32 {
+pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
- if (u.fieldAlign(ip, field_index).toByteUnitsOptional()) |a| return @intCast(a);
+ const field_align = u.fieldAlign(ip, field_index);
+ if (field_align != .none) return field_align;
const field_ty = u.field_types.get(ip)[field_index].toType();
return field_ty.abiAlignment(mod);
}
@@ -6866,3 +6623,64 @@ pub fn unionTagFieldIndex(mod: *Module, u: InternPool.UnionType, enum_tag: Value
const enum_type = ip.indexToKey(u.enum_tag_ty).enum_type;
return enum_type.tagValueIndex(ip, enum_tag.toIntern());
}
+
+/// Returns the field alignment of a non-packed struct.
+/// Keep implementation in sync with `Sema.structFieldAlignment`.
+/// Asserts the layout is not packed.
+pub fn structFieldAlignment(
+ mod: *Module,
+ explicit_alignment: InternPool.Alignment,
+ field_ty: Type,
+ layout: std.builtin.Type.ContainerLayout,
+) Alignment {
+ assert(layout != .Packed);
+ if (explicit_alignment != .none) return explicit_alignment;
+ switch (layout) {
+ .Packed => unreachable,
+ .Auto => {
+ if (mod.getTarget().ofmt == .c) {
+ return structFieldAlignmentExtern(mod, field_ty);
+ } else {
+ return field_ty.abiAlignment(mod);
+ }
+ },
+ .Extern => return structFieldAlignmentExtern(mod, field_ty),
+ }
+}
+
+/// Returns the alignment of a field of an extern struct.
+/// This logic is duplicated in Type.abiAlignmentAdvanced.
+pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
+ const ty_abi_align = field_ty.abiAlignment(mod);
+
+ if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
+ // The C ABI requires 128 bit integer fields of structs
+ // to be 16-bytes aligned.
+ return ty_abi_align.max(.@"16");
+ }
+
+ return ty_abi_align;
+}
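An illustration of the 128-bit rule above, hedged: for any ABI integer field of 128 or more bits, the result is clamped up to at least 16 regardless of the type's natural alignment, using the same `compare`/`.@"16"` Alignment API seen in this diff.

```zig
// Sketch only; `mod` as elsewhere. Type.u128 is assumed shorthand for the
// 128-bit unsigned integer type; the exact accessor may differ.
const al = mod.structFieldAlignmentExtern(Type.u128);
assert(al.compare(.gte, .@"16")); // C ABI: 128-bit int fields are 16-byte aligned
```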
+
+/// TODO: avoid the linear search by storing these in the trailing data of packed
+/// struct types; then packedStructFieldByteOffset can be expressed in terms of
+/// bits / 8, fixing that one too.
+/// https://github.com/ziglang/zig/issues/17178
+pub fn structPackedFieldBitOffset(
+ mod: *Module,
+ struct_type: InternPool.Key.StructType,
+ field_index: usize,
+) u16 {
+ const ip = &mod.intern_pool;
+ assert(struct_type.layout == .Packed);
+ assert(struct_type.haveLayout(ip));
+ var bit_sum: u64 = 0;
+ for (0..struct_type.field_types.len) |i| {
+ if (i == field_index) {
+ return @intCast(bit_sum);
+ }
+ const field_ty = struct_type.field_types.get(ip)[i].toType();
+ bit_sum += field_ty.bitSize(mod);
+ }
+ unreachable; // index out of bounds
+}
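The invariant the linear scan computes can be checked against the language itself; a self-contained test of the same packed-layout rule (each field's bit offset is the sum of the bit sizes of the fields before it):

```zig
const std = @import("std");

test "packed struct bit offsets are running bit sums" {
    const P = packed struct { a: u3, b: u7, c: u6 };
    try std.testing.expectEqual(0, @bitOffsetOf(P, "a"));
    try std.testing.expectEqual(3, @bitOffsetOf(P, "b")); // bits of a
    try std.testing.expectEqual(10, @bitOffsetOf(P, "c")); // bits of a + b
}
```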