path: root/src/codegen
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig                2712
-rw-r--r--  src/codegen/c/type.zig            336
-rw-r--r--  src/codegen/llvm.zig             4294
-rw-r--r--  src/codegen/spirv.zig            1906
-rw-r--r--  src/codegen/spirv/Assembler.zig   170
-rw-r--r--  src/codegen/spirv/Cache.zig      1046
-rw-r--r--  src/codegen/spirv/Module.zig      505
-rw-r--r--  src/codegen/spirv/spec.zig          2
-rw-r--r--  src/codegen/spirv/type.zig        567
9 files changed, 5843 insertions, 5695 deletions
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 86b74b1429..d06f01c2c3 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -16,6 +16,7 @@ const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
+const InternPool = @import("../InternPool.zig");
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
@@ -256,7 +257,7 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) {
return .{ .data = ident };
}
-/// This data is available when outputting .c code for a `*Module.Fn`.
+/// This data is available when outputting .c code for a `Module.Fn.Index`.
/// It is not available when generating .h file.
pub const Function = struct {
air: Air,
@@ -267,7 +268,7 @@ pub const Function = struct {
next_block_index: usize = 0,
object: Object,
lazy_fns: LazyFnMap,
- func: *Module.Fn,
+ func_index: Module.Fn.Index,
/// All the locals, to be emitted at the top of the function.
locals: std.ArrayListUnmanaged(Local) = .{},
/// Which locals are available for reuse, based on Type.
@@ -285,10 +286,11 @@ pub const Function = struct {
const gop = try f.value_map.getOrPut(inst);
if (gop.found_existing) return gop.value_ptr.*;
- const val = f.air.value(ref).?;
- const ty = f.air.typeOf(ref);
+ const mod = f.object.dg.module;
+ const val = (try f.air.value(ref, mod)).?;
+ const ty = f.typeOf(ref);
- const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: {
+ const result: CValue = if (lowersToArray(ty, mod)) result: {
const writer = f.object.code_header.writer();
const alignment = 0;
const decl_c_value = try f.allocLocalValue(ty, alignment);
@@ -318,11 +320,11 @@ pub const Function = struct {
/// those which go into `allocs`. This function does not add the resulting local into `allocs`;
/// that responsibility lies with the caller.
fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue {
+ const mod = f.object.dg.module;
const gpa = f.object.dg.gpa;
- const target = f.object.dg.module.getTarget();
try f.locals.append(gpa, .{
.cty_idx = try f.typeToIndex(ty, .complete),
- .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
+ .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
});
return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) };
}
@@ -336,10 +338,10 @@ pub const Function = struct {
/// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
/// not be used for persistent locals (i.e. those in `allocs`).
fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue {
- const target = f.object.dg.module.getTarget();
+ const mod = f.object.dg.module;
if (f.free_locals_map.getPtr(.{
.cty_idx = try f.typeToIndex(ty, .complete),
- .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
+ .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
})) |locals_list| {
if (locals_list.popOrNull()) |local_entry| {
return .{ .new_local = local_entry.key };
@@ -352,8 +354,9 @@ pub const Function = struct {
fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void {
switch (c_value) {
.constant => |inst| {
- const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const mod = f.object.dg.module;
+ const ty = f.typeOf(inst);
+ const val = (try f.air.value(inst, mod)).?;
return f.object.dg.renderValue(w, ty, val, location);
},
.undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location),
@@ -364,8 +367,9 @@ pub const Function = struct {
fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void {
switch (c_value) {
.constant => |inst| {
- const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const mod = f.object.dg.module;
+ const ty = f.typeOf(inst);
+ const val = (try f.air.value(inst, mod)).?;
try w.writeAll("(*");
try f.object.dg.renderValue(w, ty, val, .Other);
return w.writeByte(')');
@@ -377,8 +381,9 @@ pub const Function = struct {
fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
switch (c_value) {
.constant => |inst| {
- const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const mod = f.object.dg.module;
+ const ty = f.typeOf(inst);
+ const val = (try f.air.value(inst, mod)).?;
try f.object.dg.renderValue(w, ty, val, .Other);
try w.writeByte('.');
return f.writeCValue(w, member, .Other);
@@ -390,8 +395,9 @@ pub const Function = struct {
fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
switch (c_value) {
.constant => |inst| {
- const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const mod = f.object.dg.module;
+ const ty = f.typeOf(inst);
+ const val = (try f.air.value(inst, mod)).?;
try w.writeByte('(');
try f.object.dg.renderValue(w, ty, val, .Other);
try w.writeAll(")->");
@@ -446,6 +452,7 @@ pub const Function = struct {
var promoted = f.object.dg.ctypes.promote(gpa);
defer f.object.dg.ctypes.demote(promoted);
const arena = promoted.arena.allocator();
+ const mod = f.object.dg.module;
gop.value_ptr.* = .{
.fn_name = switch (key) {
@@ -454,12 +461,12 @@ pub const Function = struct {
.never_inline,
=> |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{
@tagName(key),
- fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)),
+ fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)),
@enumToInt(owner_decl),
}),
},
.data = switch (key) {
- .tag_name => .{ .tag_name = try data.tag_name.copy(arena) },
+ .tag_name => .{ .tag_name = data.tag_name },
.never_tail => .{ .never_tail = data.never_tail },
.never_inline => .{ .never_inline = data.never_inline },
},
@@ -480,6 +487,16 @@ pub const Function = struct {
f.object.dg.ctypes.deinit(gpa);
f.object.dg.fwd_decl.deinit();
}
+
+ fn typeOf(f: *Function, inst: Air.Inst.Ref) Type {
+ const mod = f.object.dg.module;
+ return f.air.typeOf(inst, &mod.intern_pool);
+ }
+
+ fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type {
+ const mod = f.object.dg.module;
+ return f.air.typeOfIndex(inst, &mod.intern_pool);
+ }
};
/// This data is available when outputting .c code for a `Module`.
@@ -508,8 +525,9 @@ pub const DeclGen = struct {
fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
+ const mod = dg.module;
const src = LazySrcLoc.nodeOffset(0);
- const src_loc = src.toSrcLoc(dg.decl.?);
+ const src_loc = src.toSrcLoc(dg.decl.?, mod);
dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args);
return error.AnalysisFail;
}
@@ -522,53 +540,28 @@ pub const DeclGen = struct {
decl_index: Decl.Index,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const decl = dg.module.declPtr(decl_index);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
assert(decl.has_tv);
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
- if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) {
+ if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) {
return dg.writeCValue(writer, .{ .undef = ty });
}
// Chase function values in order to be able to reference the original function.
- inline for (.{ .function, .extern_fn }) |tag|
- if (decl.val.castTag(tag)) |func|
- if (func.data.owner_decl != decl_index)
- return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location);
+ if (decl.val.getFunction(mod)) |func| if (func.owner_decl != decl_index)
+ return dg.renderDeclValue(writer, ty, val, func.owner_decl, location);
+ if (decl.val.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index)
+ return dg.renderDeclValue(writer, ty, val, extern_func.decl, location);
- if (decl.val.castTag(.variable)) |var_payload|
- try dg.renderFwdDecl(decl_index, var_payload.data);
-
- if (ty.isSlice()) {
- if (location == .StaticInitializer) {
- try writer.writeByte('{');
- } else {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeAll("){ .ptr = ");
- }
-
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr(), .Initializer);
-
- var len_pl: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = val.sliceLen(dg.module),
- };
- const len_val = Value.initPayload(&len_pl.base);
-
- if (location == .StaticInitializer) {
- return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
- } else {
- return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
- }
- }
+ if (decl.val.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable);
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
// pointers are compatible. If they are not, then there is a bug
// somewhere and we should let the C compiler tell us about it.
- const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module);
+ const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl.ty, mod);
if (need_typecast) {
try writer.writeAll("((");
try dg.renderType(writer, ty);
@@ -579,127 +572,126 @@ pub const DeclGen = struct {
if (need_typecast) try writer.writeByte(')');
}
- // Renders a "parent" pointer by recursing to the root decl/variable
- // that its contents are defined with respect to.
- //
- // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr
- fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void {
- if (!ptr_ty.isSlice()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ptr_ty);
- try writer.writeByte(')');
- }
- switch (ptr_val.tag()) {
- .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
- .decl_ref_mut, .decl_ref, .variable => {
- const decl_index = switch (ptr_val.tag()) {
- .decl_ref => ptr_val.castTag(.decl_ref).?.data,
- .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
- .variable => ptr_val.castTag(.variable).?.data.owner_decl,
+ /// Renders a "parent" pointer by recursing to the root decl/variable
+ /// that its contents are defined with respect to.
+ fn renderParentPtr(
+ dg: *DeclGen,
+ writer: anytype,
+ ptr_val: InternPool.Index,
+ location: ValueRenderLocation,
+ ) error{ OutOfMemory, AnalysisFail }!void {
+ const mod = dg.module;
+ const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType();
+ const ptr_cty = try dg.typeToIndex(ptr_ty, .complete);
+ const ptr = mod.intern_pool.indexToKey(ptr_val).ptr;
+ switch (ptr.addr) {
+ .decl, .mut_decl => try dg.renderDeclValue(
+ writer,
+ ptr_ty,
+ ptr_val.toValue(),
+ switch (ptr.addr) {
+ .decl => |decl| decl,
+ .mut_decl => |mut_decl| mut_decl.decl,
+ else => unreachable,
+ },
+ location,
+ ),
+ .int => |int| {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_cty);
+ try writer.print("){x}", .{try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other)});
+ },
+ .eu_payload, .opt_payload => |base| {
+ const ptr_base_ty = mod.intern_pool.typeOf(base).toType();
+ const base_ty = ptr_base_ty.childType(mod);
+ // Ensure complete type definition is visible before accessing fields.
+ _ = try dg.typeToIndex(base_ty, .complete);
+ const payload_ty = switch (ptr.addr) {
+ .eu_payload => base_ty.errorUnionPayload(mod),
+ .opt_payload => base_ty.optionalChild(mod),
else => unreachable,
};
- try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location);
+ const ptr_payload_ty = try mod.adjustPtrTypeChild(ptr_base_ty, payload_ty);
+ const ptr_payload_cty = try dg.typeToIndex(ptr_payload_ty, .complete);
+ if (ptr_cty != ptr_payload_cty) {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_cty);
+ try writer.writeByte(')');
+ }
+ try writer.writeAll("&(");
+ try dg.renderParentPtr(writer, base, location);
+ try writer.writeAll(")->payload");
},
- .field_ptr => {
- const target = dg.module.getTarget();
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
-
+ .elem => |elem| {
+ const ptr_base_ty = mod.intern_pool.typeOf(elem.base).toType();
+ const elem_ty = ptr_base_ty.elemType2(mod);
+ const ptr_elem_ty = try mod.adjustPtrTypeChild(ptr_base_ty, elem_ty);
+ const ptr_elem_cty = try dg.typeToIndex(ptr_elem_ty, .complete);
+ if (ptr_cty != ptr_elem_cty) {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_cty);
+ try writer.writeByte(')');
+ }
+ try writer.writeAll("&(");
+ if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
+ try writer.writeByte('*');
+ try dg.renderParentPtr(writer, elem.base, location);
+ try writer.print(")[{d}]", .{elem.index});
+ },
+ .field => |field| {
+ const ptr_base_ty = mod.intern_pool.typeOf(field.base).toType();
+ const base_ty = ptr_base_ty.childType(mod);
// Ensure complete type definition is visible before accessing fields.
- _ = try dg.typeToIndex(field_ptr.container_ty, .complete);
-
- var container_ptr_pl = ptr_ty.ptrInfo();
- container_ptr_pl.data.pointee_type = field_ptr.container_ty;
- const container_ptr_ty = Type.initPayload(&container_ptr_pl.base);
-
- switch (fieldLocation(
- field_ptr.container_ty,
- ptr_ty,
- @intCast(u32, field_ptr.field_index),
- target,
- )) {
- .begin => try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- ),
- .field => |field| {
+ _ = try dg.typeToIndex(base_ty, .complete);
+ const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) {
+ .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod),
+ .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+ .One, .Many, .C => unreachable,
+ .Slice => switch (field.index) {
+ Value.slice_ptr_index => base_ty.slicePtrFieldType(mod),
+ Value.slice_len_index => Type.usize,
+ else => unreachable,
+ },
+ },
+ else => unreachable,
+ };
+ const ptr_field_ty = try mod.adjustPtrTypeChild(ptr_base_ty, field_ty);
+ const ptr_field_cty = try dg.typeToIndex(ptr_field_ty, .complete);
+ if (ptr_cty != ptr_field_cty) {
+ try writer.writeByte('(');
+ try dg.renderCType(writer, ptr_cty);
+ try writer.writeByte(')');
+ }
+ switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) {
+ .begin => try dg.renderParentPtr(writer, field.base, location),
+ .field => |name| {
try writer.writeAll("&(");
- try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- );
+ try dg.renderParentPtr(writer, field.base, location);
try writer.writeAll(")->");
- try dg.writeCValue(writer, field);
+ try dg.writeCValue(writer, name);
},
.byte_offset => |byte_offset| {
- var u8_ptr_pl = ptr_ty.ptrInfo();
- u8_ptr_pl.data.pointee_type = Type.u8;
- const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
-
- var byte_offset_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = byte_offset,
- };
- const byte_offset_val = Value.initPayload(&byte_offset_pl.base);
+ const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8);
+ const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
try writer.writeAll("((");
try dg.renderType(writer, u8_ptr_ty);
try writer.writeByte(')');
- try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- );
+ try dg.renderParentPtr(writer, field.base, location);
try writer.print(" + {})", .{
try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other),
});
},
.end => {
try writer.writeAll("((");
- try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- );
+ try dg.renderParentPtr(writer, field.base, location);
try writer.print(") + {})", .{
- try dg.fmtIntLiteral(Type.usize, Value.one, .Other),
+ try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other),
});
},
}
},
- .elem_ptr => {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- var elem_ptr_ty_pl: Type.Payload.ElemType = .{
- .base = .{ .tag = .c_mut_pointer },
- .data = elem_ptr.elem_ty,
- };
- const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
-
- try writer.writeAll("&(");
- try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location);
- try writer.print(")[{d}]", .{elem_ptr.index});
- },
- .opt_payload_ptr, .eu_payload_ptr => {
- const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
- var container_ptr_ty_pl: Type.Payload.ElemType = .{
- .base = .{ .tag = .c_mut_pointer },
- .data = payload_ptr.container_ty,
- };
- const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base);
-
- // Ensure complete type definition is visible before accessing fields.
- _ = try dg.typeToIndex(payload_ptr.container_ty, .complete);
-
- try writer.writeAll("&(");
- try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location);
- try writer.writeAll(")->payload");
- },
- else => unreachable,
+ .comptime_field => unreachable,
}
}
@@ -710,23 +702,25 @@ pub const DeclGen = struct {
arg_val: Value,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
+ const mod = dg.module;
var val = arg_val;
- if (val.castTag(.runtime_value)) |rt| {
- val = rt.data;
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .runtime_value => |rt| val = rt.val.toValue(),
+ else => {},
}
- const target = dg.module.getTarget();
+ const target = mod.getTarget();
const initializer_type: ValueRenderLocation = switch (location) {
.StaticInitializer => .StaticInitializer,
else => .Initializer,
};
- const safety_on = switch (dg.module.optimizeMode()) {
+ const safety_on = switch (mod.optimizeMode()) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,
};
- if (val.isUndefDeep()) {
- switch (ty.zigTypeTag()) {
+ if (val.isUndefDeep(mod)) {
+ switch (ty.zigTypeTag(mod)) {
.Bool => {
if (safety_on) {
return writer.writeAll("0xaa");
@@ -737,8 +731,8 @@ pub const DeclGen = struct {
.Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}),
.Float => {
const bits = ty.floatBits(target);
- var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits };
- const repr_ty = Type.initPayload(&repr_pl.base);
+ // All unsigned ints matching float types are pre-allocated.
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
try writer.writeAll("zig_cast_");
try dg.renderTypeForBuiltinFnName(writer, ty);
@@ -757,7 +751,7 @@ pub const DeclGen = struct {
try dg.renderValue(writer, repr_ty, Value.undef, .FunctionArgument);
return writer.writeByte(')');
},
- .Pointer => if (ty.isSlice()) {
+ .Pointer => if (ty.isSlice(mod)) {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
@@ -765,8 +759,7 @@ pub const DeclGen = struct {
}
try writer.writeAll("{(");
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = ty.slicePtrFieldType(&buf);
+ const ptr_ty = ty.slicePtrFieldType(mod);
try dg.renderType(writer, ptr_ty);
return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
} else {
@@ -775,14 +768,13 @@ pub const DeclGen = struct {
return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.Optional => {
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&opt_buf);
+ const payload_ty = ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.renderValue(writer, Type.bool, val, location);
}
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
return dg.renderValue(writer, payload_ty, val, location);
}
@@ -798,7 +790,7 @@ pub const DeclGen = struct {
try dg.renderValue(writer, Type.bool, val, initializer_type);
return writer.writeAll(" }");
},
- .Struct => switch (ty.containerLayout()) {
+ .Struct => switch (ty.containerLayout(mod)) {
.Auto, .Extern => {
if (!location.isInitializer()) {
try writer.writeByte('(');
@@ -808,10 +800,10 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var empty = true;
- for (0..ty.structFieldCount()) |field_i| {
- if (ty.structFieldIsComptime(field_i)) continue;
- const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBits()) continue;
+ for (0..ty.structFieldCount(mod)) |field_i| {
+ if (ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBits(mod)) continue;
if (!empty) try writer.writeByte(',');
try dg.renderValue(writer, field_ty, val, initializer_type);
@@ -831,29 +823,29 @@ pub const DeclGen = struct {
}
try writer.writeByte('{');
- if (ty.unionTagTypeSafety()) |tag_ty| {
- const layout = ty.unionGetLayout(target);
+ if (ty.unionTagTypeSafety(mod)) |tag_ty| {
+ const layout = ty.unionGetLayout(mod);
if (layout.tag_size != 0) {
try writer.writeAll(" .tag = ");
try dg.renderValue(writer, tag_ty, val, initializer_type);
}
- if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}');
+ if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
if (layout.tag_size != 0) try writer.writeByte(',');
try writer.writeAll(" .payload = {");
}
- for (ty.unionFields().values()) |field| {
- if (!field.ty.hasRuntimeBits()) continue;
+ for (ty.unionFields(mod).values()) |field| {
+ if (!field.ty.hasRuntimeBits(mod)) continue;
try dg.renderValue(writer, field.ty, val, initializer_type);
break;
}
- if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
+ if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
return writer.writeByte('}');
},
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload();
- const error_ty = ty.errorUnionSet();
+ const payload_ty = ty.errorUnionPayload(mod);
+ const error_ty = ty.errorUnionSet(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.renderValue(writer, error_ty, val, location);
}
@@ -870,11 +862,11 @@ pub const DeclGen = struct {
return writer.writeAll(" }");
},
.Array, .Vector => {
- const ai = ty.arrayInfo();
- if (ai.elem_type.eql(Type.u8, dg.module)) {
+ const ai = ty.arrayInfo(mod);
+ if (ai.elem_type.eql(Type.u8, mod)) {
var literal = stringLiteral(writer);
try literal.start();
- const c_len = ty.arrayLenIncludingSentinel();
+ const c_len = ty.arrayLenIncludingSentinel(mod);
var index: u64 = 0;
while (index < c_len) : (index += 1)
try literal.writeChar(0xaa);
@@ -887,11 +879,11 @@ pub const DeclGen = struct {
}
try writer.writeByte('{');
- const c_len = ty.arrayLenIncludingSentinel();
+ const c_len = ty.arrayLenIncludingSentinel(mod);
var index: u64 = 0;
while (index < c_len) : (index += 1) {
if (index > 0) try writer.writeAll(", ");
- try dg.renderValue(writer, ty.childType(), val, initializer_type);
+ try dg.renderValue(writer, ty.childType(mod), val, initializer_type);
}
return writer.writeByte('}');
}
@@ -916,23 +908,129 @@ pub const DeclGen = struct {
}
unreachable;
}
- switch (ty.zigTypeTag()) {
- .Int => switch (val.tag()) {
- .field_ptr,
- .elem_ptr,
- .opt_payload_ptr,
- .eu_payload_ptr,
- .decl_ref_mut,
- .decl_ref,
- => try dg.renderParentPtr(writer, val, ty, location),
- else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
+
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ // types, not values
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ // memoization, not values
+ .memoized_call,
+ => unreachable,
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ // non-runtime values
+ .undefined => unreachable,
+ .void => unreachable,
+ .null => unreachable,
+ .empty_struct => unreachable,
+ .@"unreachable" => unreachable,
+ .generic_poison => unreachable,
+
+ .false => try writer.writeAll("false"),
+ .true => try writer.writeAll("true"),
+ },
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ .empty_enum_value,
+ => unreachable, // non-runtime values
+ .int => |int| switch (int.storage) {
+ .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
+ .lazy_align, .lazy_size => {
+ try writer.writeAll("((");
+ try dg.renderType(writer, ty);
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
+ },
},
- .Float => {
+ .err => |err| try writer.print("zig_error_{}", .{
+ fmtIdent(mod.intern_pool.stringToSlice(err.name)),
+ }),
+ .error_union => |error_union| {
+ const payload_ty = ty.errorUnionPayload(mod);
+ const error_ty = ty.errorUnionSet(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ switch (error_union.val) {
+ .err_name => |err_name| return dg.renderValue(
+ writer,
+ error_ty,
+ (try mod.intern(.{ .err = .{
+ .ty = error_ty.toIntern(),
+ .name = err_name,
+ } })).toValue(),
+ location,
+ ),
+ .payload => return dg.renderValue(
+ writer,
+ Type.err_int,
+ try mod.intValue(Type.err_int, 0),
+ location,
+ ),
+ }
+ }
+
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeAll("{ .payload = ");
+ try dg.renderValue(
+ writer,
+ payload_ty,
+ switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ .payload => |payload| payload,
+ }.toValue(),
+ initializer_type,
+ );
+ try writer.writeAll(", .error = ");
+ switch (error_union.val) {
+ .err_name => |err_name| try dg.renderValue(
+ writer,
+ error_ty,
+ (try mod.intern(.{ .err = .{
+ .ty = error_ty.toIntern(),
+ .name = err_name,
+ } })).toValue(),
+ location,
+ ),
+ .payload => try dg.renderValue(
+ writer,
+ Type.err_int,
+ try mod.intValue(Type.err_int, 0),
+ location,
+ ),
+ }
+ try writer.writeAll(" }");
+ },
+ .enum_tag => {
+ const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag;
+ const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+ try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location);
+ },
+ .float => {
const bits = ty.floatBits(target);
- const f128_val = val.toFloat(f128);
+ const f128_val = val.toFloat(f128, mod);
- var repr_ty_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits };
- const repr_ty = Type.initPayload(&repr_ty_pl.base);
+ // All unsigned ints matching float types are pre-allocated.
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
assert(bits <= 128);
var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
@@ -943,21 +1041,15 @@ pub const DeclGen = struct {
};
switch (bits) {
- 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16))),
- 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32))),
- 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64))),
- 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80))),
+ 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
+ 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
+ 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
+ 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
128 => repr_val_big.set(@bitCast(u128, f128_val)),
else => unreachable,
}
- var repr_val_pl = Value.Payload.BigInt{
- .base = .{
- .tag = if (repr_val_big.positive) .int_big_positive else .int_big_negative,
- },
- .data = repr_val_big.limbs[0..repr_val_big.len],
- };
- const repr_val = Value.initPayload(&repr_val_pl.base);
+ const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst());
try writer.writeAll("zig_cast_");
try dg.renderTypeForBuiltinFnName(writer, ty);
@@ -968,10 +1060,10 @@ pub const DeclGen = struct {
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
switch (bits) {
- 16 => try writer.print("{x}", .{val.toFloat(f16)}),
- 32 => try writer.print("{x}", .{val.toFloat(f32)}),
- 64 => try writer.print("{x}", .{val.toFloat(f64)}),
- 80 => try writer.print("{x}", .{val.toFloat(f80)}),
+ 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}),
+ 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}),
+ 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}),
+ 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}),
128 => try writer.print("{x}", .{f128_val}),
else => unreachable,
}
@@ -1011,10 +1103,10 @@ pub const DeclGen = struct {
if (std.math.isNan(f128_val)) switch (bits) {
// We only actually need to pass the significand, but it will get
// properly masked anyway, so just pass the whole value.
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16))}),
- 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32))}),
- 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64))}),
- 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80))}),
+ 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}),
+ 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}),
+ 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}),
+ 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}),
128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
else => unreachable,
};
@@ -1023,173 +1115,80 @@ pub const DeclGen = struct {
}
try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)});
if (!empty) try writer.writeByte(')');
- return;
},
- .Pointer => switch (val.tag()) {
- .null_value, .zero => if (ty.isSlice()) {
- var slice_pl = Value.Payload.Slice{
- .base = .{ .tag = .slice },
- .data = .{ .ptr = val, .len = Value.undef },
- };
- const slice_val = Value.initPayload(&slice_pl.base);
-
- return dg.renderValue(writer, ty, slice_val, location);
- } else {
- try writer.writeAll("((");
- try dg.renderType(writer, ty);
- try writer.writeAll(")NULL)");
- },
- .variable => {
- const decl = val.castTag(.variable).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl, location);
- },
- .slice => {
+ .ptr => |ptr| {
+ if (ptr.len != .none) {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
-
- const slice = val.castTag(.slice).?.data;
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-
try writer.writeByte('{');
- try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type);
- try writer.writeAll(", ");
- try dg.renderValue(writer, Type.usize, slice.len, initializer_type);
- try writer.writeByte('}');
- },
- .function => {
- const func = val.castTag(.function).?.data;
- try dg.renderDeclName(writer, func.owner_decl, 0);
- },
- .extern_fn => {
- const extern_fn = val.castTag(.extern_fn).?.data;
- try dg.renderDeclName(writer, extern_fn.owner_decl, 0);
- },
- .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
- try writer.writeAll("((");
- try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
- },
- .field_ptr,
- .elem_ptr,
- .opt_payload_ptr,
- .eu_payload_ptr,
- .decl_ref_mut,
- .decl_ref,
- => try dg.renderParentPtr(writer, val, ty, location),
- else => unreachable,
- },
- .Array, .Vector => {
- if (location == .FunctionArgument) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
}
-
- // First try specific tag representations for more efficiency.
- switch (val.tag()) {
- .undef, .empty_struct_value, .empty_array => {
- const ai = ty.arrayInfo();
- try writer.writeByte('{');
- if (ai.sentinel) |s| {
- try dg.renderValue(writer, ai.elem_type, s, initializer_type);
- } else {
- try writer.writeByte('0');
- }
- try writer.writeByte('}');
- },
- .bytes, .str_lit => |t| {
- const bytes = switch (t) {
- .bytes => val.castTag(.bytes).?.data,
- .str_lit => bytes: {
- const str_lit = val.castTag(.str_lit).?.data;
- break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- },
+ const ptr_location = switch (ptr.len) {
+ .none => location,
+ else => initializer_type,
+ };
+ const ptr_ty = switch (ptr.len) {
+ .none => ty,
+ else => ty.slicePtrFieldType(mod),
+ };
+ const ptr_val = switch (ptr.len) {
+ .none => val,
+ else => val.slicePtr(mod),
+ };
+ switch (ptr.addr) {
+ .decl, .mut_decl => try dg.renderDeclValue(
+ writer,
+ ptr_ty,
+ ptr_val,
+ switch (ptr.addr) {
+ .decl => |decl| decl,
+ .mut_decl => |mut_decl| mut_decl.decl,
else => unreachable,
- };
- const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null;
- try writer.print("{s}", .{
- fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel),
+ },
+ ptr_location,
+ ),
+ .int => |int| {
+ try writer.writeAll("((");
+ try dg.renderType(writer, ptr_ty);
+ try writer.print("){x})", .{
+ try dg.fmtIntLiteral(Type.usize, int.toValue(), ptr_location),
});
},
- else => {
- // Fall back to generic implementation.
- var arena = std.heap.ArenaAllocator.init(dg.gpa);
- defer arena.deinit();
- const arena_allocator = arena.allocator();
-
- // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
- const max_string_initializer_len = 65535;
-
- const ai = ty.arrayInfo();
- if (ai.elem_type.eql(Type.u8, dg.module)) {
- if (ai.len <= max_string_initializer_len) {
- var literal = stringLiteral(writer);
- try literal.start();
- var index: usize = 0;
- while (index < ai.len) : (index += 1) {
- const elem_val = try val.elemValue(dg.module, arena_allocator, index);
- const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
- try literal.writeChar(elem_val_u8);
- }
- if (ai.sentinel) |s| {
- const s_u8 = @intCast(u8, s.toUnsignedInt(target));
- if (s_u8 != 0) try literal.writeChar(s_u8);
- }
- try literal.end();
- } else {
- try writer.writeByte('{');
- var index: usize = 0;
- while (index < ai.len) : (index += 1) {
- if (index != 0) try writer.writeByte(',');
- const elem_val = try val.elemValue(dg.module, arena_allocator, index);
- const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
- try writer.print("'\\x{x}'", .{elem_val_u8});
- }
- if (ai.sentinel) |s| {
- if (index != 0) try writer.writeByte(',');
- try dg.renderValue(writer, ai.elem_type, s, initializer_type);
- }
- try writer.writeByte('}');
- }
- } else {
- try writer.writeByte('{');
- var index: usize = 0;
- while (index < ai.len) : (index += 1) {
- if (index != 0) try writer.writeByte(',');
- const elem_val = try val.elemValue(dg.module, arena_allocator, index);
- try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type);
- }
- if (ai.sentinel) |s| {
- if (index != 0) try writer.writeByte(',');
- try dg.renderValue(writer, ai.elem_type, s, initializer_type);
- }
- try writer.writeByte('}');
- }
- },
+ .eu_payload,
+ .opt_payload,
+ .elem,
+ .field,
+ => try dg.renderParentPtr(writer, ptr_val.ip_index, ptr_location),
+ .comptime_field => unreachable,
}
- },
- .Bool => {
- if (val.toBool()) {
- return writer.writeAll("true");
- } else {
- return writer.writeAll("false");
+ if (ptr.len != .none) {
+ try writer.writeAll(", ");
+ try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type);
+ try writer.writeByte('}');
}
},
- .Optional => {
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&opt_buf);
+ .opt => |opt| {
+ const payload_ty = ty.optionalChild(mod);
- const is_null_val = Value.makeBool(val.tag() == .null_value);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ const is_null_val = Value.makeBool(opt.val == .none);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
return dg.renderValue(writer, Type.bool, is_null_val, location);
- if (ty.optionalReprIsPayload()) {
- const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val;
- return dg.renderValue(writer, payload_ty, payload_val, location);
- }
+ if (ty.optionalReprIsPayload(mod)) return dg.renderValue(
+ writer,
+ payload_ty,
+ switch (opt.val) {
+ .none => switch (payload_ty.zigTypeTag(mod)) {
+ .ErrorSet => try mod.intValue(Type.err_int, 0),
+ .Pointer => try mod.getCoerced(val, payload_ty),
+ else => unreachable,
+ },
+ else => |payload| payload.toValue(),
+ },
+ location,
+ );
if (!location.isInitializer()) {
try writer.writeByte('(');
@@ -1197,93 +1196,74 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
- const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef;
-
try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
+ try dg.renderValue(writer, payload_ty, switch (opt.val) {
+ .none => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ else => |payload| payload,
+ }.toValue(), initializer_type);
try writer.writeAll(", .is_null = ");
try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
try writer.writeAll(" }");
},
- .ErrorSet => {
- if (val.castTag(.@"error")) |error_pl| {
- // Error values are already defined by genErrDecls.
- try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)});
- } else {
- try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)});
- }
- },
- .ErrorUnion => {
- const payload_ty = ty.errorUnionPayload();
- const error_ty = ty.errorUnionSet();
- const error_val = if (val.errorUnionIsPayload()) Value.zero else val;
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .array_type, .vector_type => {
+ if (location == .FunctionArgument) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+ // Fall back to generic implementation.
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- return dg.renderValue(writer, error_ty, error_val, location);
- }
+ // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
+ const max_string_initializer_len = 65535;
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
- try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
- try writer.writeAll(", .error = ");
- try dg.renderValue(writer, error_ty, error_val, initializer_type);
- try writer.writeAll(" }");
- },
- .Enum => {
- switch (val.tag()) {
- .enum_field_index => {
- const field_index = val.castTag(.enum_field_index).?.data;
- switch (ty.tag()) {
- .enum_simple => return writer.print("{d}", .{field_index}),
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- const tag_val = enum_full.values.keys()[field_index];
- return dg.renderValue(writer, enum_full.tag_ty, tag_val, location);
- } else {
- return writer.print("{d}", .{field_index});
- }
- },
- .enum_numbered => {
- const enum_obj = ty.castTag(.enum_numbered).?.data;
- if (enum_obj.values.count() != 0) {
- const tag_val = enum_obj.values.keys()[field_index];
- return dg.renderValue(writer, enum_obj.tag_ty, tag_val, location);
- } else {
- return writer.print("{d}", .{field_index});
- }
- },
- else => unreachable,
+ const ai = ty.arrayInfo(mod);
+ if (ai.elem_type.eql(Type.u8, mod)) {
+ if (ai.len <= max_string_initializer_len) {
+ var literal = stringLiteral(writer);
+ try literal.start();
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ const elem_val = try val.elemValue(mod, index);
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ try literal.writeChar(elem_val_u8);
+ }
+ if (ai.sentinel) |s| {
+ const s_u8 = @intCast(u8, s.toUnsignedInt(mod));
+ if (s_u8 != 0) try literal.writeChar(s_u8);
+ }
+ try literal.end();
+ } else {
+ try writer.writeByte('{');
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ if (index != 0) try writer.writeByte(',');
+ const elem_val = try val.elemValue(mod, index);
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ try writer.print("'\\x{x}'", .{elem_val_u8});
+ }
+ if (ai.sentinel) |s| {
+ if (index != 0) try writer.writeByte(',');
+ try dg.renderValue(writer, ai.elem_type, s, initializer_type);
+ }
+ try writer.writeByte('}');
}
- },
- else => {
- var int_tag_ty_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_ty_buffer);
- return dg.renderValue(writer, int_tag_ty, val, location);
- },
- }
- },
- .Fn => switch (val.tag()) {
- .function => {
- const decl = val.castTag(.function).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl, location);
- },
- .extern_fn => {
- const decl = val.castTag(.extern_fn).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl, location);
+ } else {
+ try writer.writeByte('{');
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ if (index != 0) try writer.writeByte(',');
+ const elem_val = try val.elemValue(mod, index);
+ try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type);
+ }
+ if (ai.sentinel) |s| {
+ if (index != 0) try writer.writeByte(',');
+ try dg.renderValue(writer, ai.elem_type, s, initializer_type);
+ }
+ try writer.writeByte('}');
+ }
},
- else => unreachable,
- },
- .Struct => switch (ty.containerLayout()) {
- .Auto, .Extern => {
- const field_vals = val.castTag(.aggregate).?.data;
-
+ .anon_struct_type => |tuple| {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
@@ -1292,133 +1272,184 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var empty = true;
- for (field_vals, 0..) |field_val, field_i| {
- if (ty.structFieldIsComptime(field_i)) continue;
- const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ for (tuple.types, tuple.values, 0..) |field_ty, comptime_ty, field_i| {
+ if (comptime_ty != .none) continue;
+ if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(',');
- try dg.renderValue(writer, field_ty, field_val, initializer_type);
+
+ const field_val = switch (aggregate.storage) {
+ .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
+ .ty = field_ty,
+ .storage = .{ .u64 = bytes[field_i] },
+ } }),
+ .elems => |elems| elems[field_i],
+ .repeated_elem => |elem| elem,
+ };
+ try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), initializer_type);
empty = false;
}
try writer.writeByte('}');
},
- .Packed => {
- const field_vals = val.castTag(.aggregate).?.data;
- const int_info = ty.intInfo(target);
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ switch (struct_obj.layout) {
+ .Auto, .Extern => {
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(int_info.bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ try writer.writeByte('{');
+ var empty = true;
+ for (struct_obj.fields.values(), 0..) |field, field_i| {
+ if (field.is_comptime) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ if (!empty) try writer.writeByte(',');
+ const field_val = switch (aggregate.storage) {
+ .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
+ .ty = field.ty.toIntern(),
+ .storage = .{ .u64 = bytes[field_i] },
+ } }),
+ .elems => |elems| elems[field_i],
+ .repeated_elem => |elem| elem,
+ };
+ try dg.renderValue(writer, field.ty, field_val.toValue(), initializer_type);
+
+ empty = false;
+ }
+ try writer.writeByte('}');
+ },
+ .Packed => {
+ const int_info = ty.intInfo(mod);
- var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+ const bits = Type.smallestUnsignedBits(int_info.bits - 1);
+ const bit_offset_ty = try mod.intType(.unsigned, bits);
- var eff_num_fields: usize = 0;
- for (0..field_vals.len) |field_i| {
- if (ty.structFieldIsComptime(field_i)) continue;
- const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ var bit_offset: u64 = 0;
+ var eff_num_fields: usize = 0;
- eff_num_fields += 1;
- }
+ for (struct_obj.fields.values()) |field| {
+ if (field.is_comptime) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- if (eff_num_fields == 0) {
- try writer.writeByte('(');
- try dg.renderValue(writer, ty, Value.undef, initializer_type);
- try writer.writeByte(')');
- } else if (ty.bitSize(target) > 64) {
- // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
- var num_or = eff_num_fields - 1;
- while (num_or > 0) : (num_or -= 1) {
- try writer.writeAll("zig_or_");
- try dg.renderTypeForBuiltinFnName(writer, ty);
- try writer.writeByte('(');
- }
+ eff_num_fields += 1;
+ }
- var eff_index: usize = 0;
- var needs_closing_paren = false;
- for (field_vals, 0..) |field_val, field_i| {
- if (ty.structFieldIsComptime(field_i)) continue;
- const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
-
- const cast_context = IntCastContext{ .value = .{ .value = field_val } };
- if (bit_offset_val_pl.data != 0) {
- try writer.writeAll("zig_shl_");
- try dg.renderTypeForBuiltinFnName(writer, ty);
+ if (eff_num_fields == 0) {
try writer.writeByte('(');
- try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
- try writer.writeAll(", ");
- try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
- } else {
- try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
- }
-
- if (needs_closing_paren) try writer.writeByte(')');
- if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
-
- bit_offset_val_pl.data += field_ty.bitSize(target);
- needs_closing_paren = true;
- eff_index += 1;
- }
- } else {
- try writer.writeByte('(');
- // a << a_off | b << b_off | c << c_off
- var empty = true;
- for (field_vals, 0..) |field_val, field_i| {
- if (ty.structFieldIsComptime(field_i)) continue;
- const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
-
- if (!empty) try writer.writeAll(" | ");
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
+ } else if (ty.bitSize(mod) > 64) {
+ // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
+ var num_or = eff_num_fields - 1;
+ while (num_or > 0) : (num_or -= 1) {
+ try writer.writeAll("zig_or_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ }
- if (bit_offset_val_pl.data != 0) {
- try dg.renderValue(writer, field_ty, field_val, .Other);
- try writer.writeAll(" << ");
- try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ var eff_index: usize = 0;
+ var needs_closing_paren = false;
+ for (struct_obj.fields.values(), 0..) |field, field_i| {
+ if (field.is_comptime) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ const field_val = switch (aggregate.storage) {
+ .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
+ .ty = field.ty.toIntern(),
+ .storage = .{ .u64 = bytes[field_i] },
+ } }),
+ .elems => |elems| elems[field_i],
+ .repeated_elem => |elem| elem,
+ };
+ const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
+ if (bit_offset != 0) {
+ try writer.writeAll("zig_shl_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
+ try writer.writeAll(", ");
+ const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+ try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ try writer.writeByte(')');
+ } else {
+ try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
+ }
+
+ if (needs_closing_paren) try writer.writeByte(')');
+ if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
+
+ bit_offset += field.ty.bitSize(mod);
+ needs_closing_paren = true;
+ eff_index += 1;
+ }
} else {
- try dg.renderValue(writer, field_ty, field_val, .Other);
+ try writer.writeByte('(');
+ // a << a_off | b << b_off | c << c_off
+ var empty = true;
+ for (struct_obj.fields.values(), 0..) |field, field_i| {
+ if (field.is_comptime) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ if (!empty) try writer.writeAll(" | ");
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+
+ const field_val = switch (aggregate.storage) {
+ .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
+ .ty = field.ty.toIntern(),
+ .storage = .{ .u64 = bytes[field_i] },
+ } }),
+ .elems => |elems| elems[field_i],
+ .repeated_elem => |elem| elem,
+ };
+
+ if (bit_offset != 0) {
+ try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
+ try writer.writeAll(" << ");
+ const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+ try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ } else {
+ try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
+ }
+
+ bit_offset += field.ty.bitSize(mod);
+ empty = false;
+ }
+ try writer.writeByte(')');
}
-
- bit_offset_val_pl.data += field_ty.bitSize(target);
- empty = false;
- }
- try writer.writeByte(')');
+ },
}
},
+ else => unreachable,
},
- .Union => {
- const union_obj = val.castTag(.@"union").?.data;
-
+ .un => |un| {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
- const field_i = ty.unionTagFieldIndex(union_obj.tag, dg.module).?;
- const field_ty = ty.unionFields().values()[field_i].ty;
- const field_name = ty.unionFields().keys()[field_i];
- if (ty.containerLayout() == .Packed) {
- if (field_ty.hasRuntimeBits()) {
- if (field_ty.isPtrAtRuntime()) {
+ const field_i = ty.unionTagFieldIndex(un.tag.toValue(), mod).?;
+ const field_ty = ty.unionFields(mod).values()[field_i].ty;
+ const field_name = ty.unionFields(mod).keys()[field_i];
+ if (ty.containerLayout(mod) == .Packed) {
+ if (field_ty.hasRuntimeBits(mod)) {
+ if (field_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
- } else if (field_ty.zigTypeTag() == .Float) {
+ } else if (field_ty.zigTypeTag(mod) == .Float) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
- try dg.renderValue(writer, field_ty, union_obj.val, initializer_type);
+ try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type);
} else {
try writer.writeAll("0");
}
@@ -1426,44 +1457,28 @@ pub const DeclGen = struct {
}
try writer.writeByte('{');
- if (ty.unionTagTypeSafety()) |tag_ty| {
- const layout = ty.unionGetLayout(target);
+ if (ty.unionTagTypeSafety(mod)) |tag_ty| {
+ const layout = ty.unionGetLayout(mod);
if (layout.tag_size != 0) {
try writer.writeAll(" .tag = ");
- try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type);
+ try dg.renderValue(writer, tag_ty, un.tag.toValue(), initializer_type);
}
- if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}');
+ if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
if (layout.tag_size != 0) try writer.writeByte(',');
try writer.writeAll(" .payload = {");
}
- if (field_ty.hasRuntimeBits()) {
- try writer.print(" .{ } = ", .{fmtIdent(field_name)});
- try dg.renderValue(writer, field_ty, union_obj.val, initializer_type);
+ if (field_ty.hasRuntimeBits(mod)) {
+ try writer.print(" .{ } = ", .{fmtIdent(mod.intern_pool.stringToSlice(field_name))});
+ try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type);
try writer.writeByte(' ');
- } else for (ty.unionFields().values()) |field| {
- if (!field.ty.hasRuntimeBits()) continue;
+ } else for (ty.unionFields(mod).values()) |field| {
+ if (!field.ty.hasRuntimeBits(mod)) continue;
try dg.renderValue(writer, field.ty, Value.undef, initializer_type);
break;
}
- if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
+ if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
try writer.writeByte('}');
},
-
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- .Frame,
- .AnyFrame,
- => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
- @tagName(tag),
- }),
}
}
@@ -1478,12 +1493,12 @@ pub const DeclGen = struct {
},
) !void {
const store = &dg.ctypes.set;
- const module = dg.module;
+ const mod = dg.module;
- const fn_decl = module.declPtr(fn_decl_index);
+ const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind);
- const fn_info = fn_decl.ty.fnInfo();
+ const fn_info = mod.typeToFunc(fn_decl.ty).?;
if (fn_info.cc == .Naked) {
switch (kind) {
.forward => try w.writeAll("zig_naked_decl "),
@@ -1491,14 +1506,13 @@ pub const DeclGen = struct {
else => unreachable,
}
}
- if (fn_decl.val.castTag(.function)) |func_payload|
- if (func_payload.data.is_cold) try w.writeAll("zig_cold ");
- if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn ");
+ if (fn_decl.val.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold ");
+ if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");
const trailing = try renderTypePrefix(
dg.decl_index,
store.*,
- module,
+ mod,
w,
fn_cty_idx,
.suffix,
@@ -1512,8 +1526,8 @@ pub const DeclGen = struct {
switch (kind) {
.forward => {},
- .complete => if (fn_info.alignment > 0)
- try w.print(" zig_align_fn({})", .{fn_info.alignment}),
+ .complete => if (fn_info.alignment.toByteUnitsOptional()) |a|
+ try w.print(" zig_align_fn({})", .{a}),
else => unreachable,
}
@@ -1525,7 +1539,7 @@ pub const DeclGen = struct {
try renderTypeSuffix(
dg.decl_index,
store.*,
- module,
+ mod,
w,
fn_cty_idx,
.suffix,
@@ -1537,8 +1551,8 @@ pub const DeclGen = struct {
);
switch (kind) {
- .forward => if (fn_info.alignment > 0)
- try w.print(" zig_align_fn({})", .{fn_info.alignment}),
+ .forward => if (fn_info.alignment.toByteUnitsOptional()) |a|
+ try w.print(" zig_align_fn({})", .{a}),
.complete => {},
else => unreachable,
}
@@ -1577,9 +1591,9 @@ pub const DeclGen = struct {
fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void {
const store = &dg.ctypes.set;
- const module = dg.module;
- _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
- try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
+ const mod = dg.module;
+ _ = try renderTypePrefix(dg.decl_index, store.*, mod, w, idx, .suffix, .{});
+ try renderTypeSuffix(dg.decl_index, store.*, mod, w, idx, .suffix, .{});
}
const IntCastContext = union(enum) {
@@ -1619,18 +1633,18 @@ pub const DeclGen = struct {
/// | > 64 bit integer | < 64 bit integer | zig_make_<dest_ty>(0, src)
/// | > 64 bit integer | > 64 bit integer | zig_make_<dest_ty>(zig_hi_<src_ty>(src), zig_lo_<src_ty>(src))
fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void {
- const target = dg.module.getTarget();
- const dest_bits = dest_ty.bitSize(target);
- const dest_int_info = dest_ty.intInfo(target);
+ const mod = dg.module;
+ const dest_bits = dest_ty.bitSize(mod);
+ const dest_int_info = dest_ty.intInfo(mod);
- const src_is_ptr = src_ty.isPtrAtRuntime();
+ const src_is_ptr = src_ty.isPtrAtRuntime(mod);
const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) {
.unsigned => Type.usize,
.signed => Type.isize,
} else src_ty;
- const src_bits = src_eff_ty.bitSize(target);
- const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null;
+ const src_bits = src_eff_ty.bitSize(mod);
+ const src_int_info = if (src_eff_ty.isAbiInt(mod)) src_eff_ty.intInfo(mod) else null;
if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or
(toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or
@@ -1703,8 +1717,8 @@ pub const DeclGen = struct {
alignment: u32,
kind: CType.Kind,
) error{ OutOfMemory, AnalysisFail }!void {
- const target = dg.module.getTarget();
- const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target));
+ const mod = dg.module;
+ const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod));
try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas);
}
@@ -1717,7 +1731,7 @@ pub const DeclGen = struct {
alignas: CType.AlignAs,
) error{ OutOfMemory, AnalysisFail }!void {
const store = &dg.ctypes.set;
- const module = dg.module;
+ const mod = dg.module;
switch (std.math.order(alignas.@"align", alignas.abi)) {
.lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}),
@@ -1726,25 +1740,20 @@ pub const DeclGen = struct {
}
const trailing =
- try renderTypePrefix(dg.decl_index, store.*, module, w, cty_idx, .suffix, qualifiers);
+ try renderTypePrefix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, qualifiers);
try w.print("{}", .{trailing});
try dg.writeCValue(w, name);
- try renderTypeSuffix(dg.decl_index, store.*, module, w, cty_idx, .suffix, .{});
+ try renderTypeSuffix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, .{});
}
fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool {
- switch (tv.val.tag()) {
- .extern_fn => return true,
- .function => {
- const func = tv.val.castTag(.function).?.data;
- return dg.module.decl_exports.contains(func.owner_decl);
- },
- .variable => {
- const variable = tv.val.castTag(.variable).?.data;
- return dg.module.decl_exports.contains(variable.owner_decl);
- },
+ const mod = dg.module;
+ return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+ .variable => |variable| mod.decl_exports.contains(variable.decl),
+ .extern_func => true,
+ .func => |func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl),
else => unreachable,
- }
+ };
}
fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void {
@@ -1819,7 +1828,7 @@ pub const DeclGen = struct {
try dg.writeCValue(writer, member);
}
- fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: *Module.Var) !void {
+ fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: InternPool.Key.Variable) !void {
const decl = dg.module.declPtr(decl_index);
const fwd_decl_writer = dg.fwd_decl.writer();
const is_global = dg.declIsGlobal(.{ .ty = decl.ty, .val = decl.val }) or variable.is_extern;
@@ -1830,7 +1839,7 @@ pub const DeclGen = struct {
fwd_decl_writer,
decl.ty,
.{ .decl = decl_index },
- CQualifiers.init(.{ .@"const" = !variable.is_mutable }),
+ CQualifiers.init(.{ .@"const" = variable.is_const }),
decl.@"align",
.complete,
);
@@ -1838,19 +1847,20 @@ pub const DeclGen = struct {
}
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void {
- const decl = dg.module.declPtr(decl_index);
- dg.module.markDeclAlive(decl);
-
- if (dg.module.decl_exports.get(decl_index)) |exports| {
- try writer.writeAll(exports.items[export_index].options.name);
- } else if (decl.isExtern()) {
- try writer.writeAll(mem.span(decl.name));
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
+ try mod.markDeclAlive(decl);
+
+ if (mod.decl_exports.get(decl_index)) |exports| {
+ try writer.print("{}", .{exports.items[export_index].opts.name.fmt(&mod.intern_pool)});
+ } else if (decl.isExtern(mod)) {
+ try writer.print("{}", .{decl.name.fmt(&mod.intern_pool)});
} else {
// MSVC has a 4095 character token length limit, and fmtIdent can (worst case)
// expand to 3x the length of its input, but let's cut it off at a much shorter limit.
var name: [100]u8 = undefined;
var name_stream = std.io.fixedBufferStream(&name);
- decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) {
+ decl.renderFullyQualifiedName(mod, name_stream.writer()) catch |err| switch (err) {
error.NoSpaceLeft => {},
};
try writer.print("{}__{d}", .{
@@ -1894,18 +1904,18 @@ pub const DeclGen = struct {
.bits => {},
}
- const target = dg.module.getTarget();
- const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{
+ const mod = dg.module;
+ const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(u16, ty.bitSize(target)),
+ .bits = @intCast(u16, ty.bitSize(mod)),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
- var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits };
+ const bits_ty = if (is_big) Type.u16 else Type.u8;
try writer.print(", {}", .{try dg.fmtIntLiteral(
- if (is_big) Type.u16 else Type.u8,
- Value.initPayload(&bits_pl.base),
+ bits_ty,
+ try mod.intValue(bits_ty, int_info.bits),
.FunctionArgument,
)});
}
@@ -1916,6 +1926,7 @@ pub const DeclGen = struct {
val: Value,
loc: ValueRenderLocation,
) !std.fmt.Formatter(formatIntLiteral) {
+ const mod = dg.module;
const kind: CType.Kind = switch (loc) {
.FunctionArgument => .parameter,
.Initializer, .Other => .complete,
@@ -1923,7 +1934,7 @@ pub const DeclGen = struct {
};
return std.fmt.Formatter(formatIntLiteral){ .data = .{
.dg = dg,
- .int_info = ty.intInfo(dg.module.getTarget()),
+ .int_info = ty.intInfo(mod),
.kind = kind,
.cty = try dg.typeToCType(ty, kind),
.val = val,
@@ -1979,7 +1990,7 @@ fn renderTypeName(
try w.print("{s} {s}{}__{d}", .{
@tagName(tag)["fwd_".len..],
attributes,
- fmtIdent(mem.span(mod.declPtr(owner_decl).name)),
+ fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)),
@enumToInt(owner_decl),
});
},
@@ -2392,15 +2403,20 @@ pub fn genGlobalAsm(mod: *Module, writer: anytype) !void {
}
pub fn genErrDecls(o: *Object) !void {
+ const mod = o.dg.module;
const writer = o.writer();
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
var max_name_len: usize = 0;
- for (o.dg.module.error_name_list.items, 0..) |name, value| {
- max_name_len = std.math.max(name.len, max_name_len);
- var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
- try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
+ for (mod.global_error_set.keys()[1..], 1..) |name_nts, value| {
+ const name = mod.intern_pool.stringToSlice(name_nts);
+ max_name_len = @max(name.len, max_name_len);
+ const err_val = try mod.intern(.{ .err = .{
+ .ty = .anyerror_type,
+ .name = name_nts,
+ } });
+ try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other);
try writer.print(" = {d}u,\n", .{value});
}
o.indent_writer.popIndent();
@@ -2412,40 +2428,44 @@ pub fn genErrDecls(o: *Object) !void {
defer o.dg.gpa.free(name_buf);
@memcpy(name_buf[0..name_prefix.len], name_prefix);
- for (o.dg.module.error_name_list.items) |name| {
+ for (mod.global_error_set.keys()) |name_nts| {
+ const name = mod.intern_pool.stringToSlice(name_nts);
@memcpy(name_buf[name_prefix.len..][0..name.len], name);
const identifier = name_buf[0 .. name_prefix.len + name.len];
- var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len };
- const name_ty = Type.initPayload(&name_ty_pl.base);
-
- var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
- const name_val = Value.initPayload(&name_pl.base);
+ const name_ty = try mod.arrayType(.{
+ .len = name.len,
+ .child = .u8_type,
+ .sentinel = .zero_u8,
+ });
+ const name_val = try mod.intern(.{ .aggregate = .{
+ .ty = name_ty.toIntern(),
+ .storage = .{ .bytes = name },
+ } });
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, 0, .complete);
try writer.writeAll(" = ");
- try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer);
+ try o.dg.renderValue(writer, name_ty, name_val.toValue(), .StaticInitializer);
try writer.writeAll(";\n");
}
- var name_array_ty_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{
- .len = o.dg.module.error_name_list.items.len,
- .elem_type = Type.initTag(.const_slice_u8_sentinel_0),
- } };
- const name_array_ty = Type.initPayload(&name_array_ty_pl.base);
+ const name_array_ty = try mod.arrayType(.{
+ .len = mod.global_error_set.count(),
+ .child = .slice_const_u8_sentinel_0_type,
+ });
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete);
try writer.writeAll(" = {");
- for (o.dg.module.error_name_list.items, 0..) |name, value| {
+ for (mod.global_error_set.keys(), 0..) |name_nts, value| {
+ const name = mod.intern_pool.stringToSlice(name_nts);
if (value != 0) try writer.writeByte(',');
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
- const len_val = Value.initPayload(&len_pl.base);
+ const len_val = try mod.intValue(Type.usize, name.len);
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
- fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other),
+ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .StaticInitializer),
});
}
try writer.writeAll("};\n");
@@ -2455,20 +2475,23 @@ fn genExports(o: *Object) !void {
const tracy = trace(@src());
defer tracy.end();
+ const mod = o.dg.module;
+ const ip = &mod.intern_pool;
const fwd_decl_writer = o.dg.fwd_decl.writer();
- if (o.dg.module.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| {
+ if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| {
for (exports.items[1..], 1..) |@"export", i| {
try fwd_decl_writer.writeAll("zig_export(");
try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) });
try fwd_decl_writer.print(", {s}, {s});\n", .{
- fmtStringLiteral(exports.items[0].options.name, null),
- fmtStringLiteral(@"export".options.name, null),
+ fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null),
+ fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null),
});
}
}
}
pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
+ const mod = o.dg.module;
const w = o.writer();
const key = lazy_fn.key_ptr.*;
const val = lazy_fn.value_ptr;
@@ -2477,7 +2500,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
.tag_name => {
const enum_ty = val.data.tag_name;
- const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const name_slice_ty = Type.slice_const_u8_sentinel_0;
try w.writeAll("static ");
try o.dg.renderType(w, name_slice_ty);
@@ -2486,34 +2509,30 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try w.writeByte('(');
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
try w.writeAll(") {\n switch (tag) {\n");
- for (enum_ty.enumFields().keys(), 0..) |name, index| {
- var tag_pl: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, index),
- };
- const tag_val = Value.initPayload(&tag_pl.base);
+ for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
+ const index = @intCast(u32, index_usize);
+ const name = mod.intern_pool.stringToSlice(name_ip);
+ const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
- var int_pl: Value.Payload.U64 = undefined;
- const int_val = tag_val.enumToInt(enum_ty, &int_pl);
+ const int_val = try tag_val.enumToInt(enum_ty, mod);
- var name_ty_pl = Type.Payload.Len{
- .base = .{ .tag = .array_u8_sentinel_0 },
- .data = name.len,
- };
- const name_ty = Type.initPayload(&name_ty_pl.base);
-
- var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
- const name_val = Value.initPayload(&name_pl.base);
-
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
- const len_val = Value.initPayload(&len_pl.base);
+ const name_ty = try mod.arrayType(.{
+ .len = name.len,
+ .child = .u8_type,
+ .sentinel = .zero_u8,
+ });
+ const name_val = try mod.intern(.{ .aggregate = .{
+ .ty = name_ty.toIntern(),
+ .storage = .{ .bytes = name },
+ } });
+ const len_val = try mod.intValue(Type.usize, name.len);
try w.print(" case {}: {{\n static ", .{
try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
});
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete);
try w.writeAll(" = ");
- try o.dg.renderValue(w, name_ty, name_val, .Initializer);
+ try o.dg.renderValue(w, name_ty, name_val.toValue(), .Initializer);
try w.writeAll(";\n return (");
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
@@ -2529,7 +2548,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try w.writeAll("}\n");
},
.never_tail, .never_inline => |fn_decl_index| {
- const fn_decl = o.dg.module.declPtr(fn_decl_index);
+ const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete);
const fn_info = fn_cty.cast(CType.Payload.Function).?.data;
@@ -2646,19 +2665,19 @@ pub fn genDecl(o: *Object) !void {
const tracy = trace(@src());
defer tracy.end();
+ const mod = o.dg.module;
const decl = o.dg.decl.?;
const decl_c_value = .{ .decl = o.dg.decl_index.unwrap().? };
- const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val };
+ const tv: TypedValue = .{ .ty = decl.ty, .val = (try decl.internValue(mod)).toValue() };
- if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return;
- if (tv.val.tag() == .extern_fn) {
+ if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return;
+ if (tv.val.getExternFunc(mod)) |_| {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.writeAll("zig_extern ");
try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 });
try fwd_decl_writer.writeAll(";\n");
try genExports(o);
- } else if (tv.val.castTag(.variable)) |var_payload| {
- const variable: *Module.Var = var_payload.data;
+ } else if (tv.val.getVariable(mod)) |variable| {
try o.dg.renderFwdDecl(decl_c_value.decl, variable);
try genExports(o);
@@ -2669,11 +2688,12 @@ pub fn genDecl(o: *Object) !void {
if (!is_global) try w.writeAll("static ");
if (variable.is_threadlocal) try w.writeAll("zig_threadlocal ");
if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
- if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
+ try w.print("zig_linksection(\"{s}\", ", .{s});
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete);
- if (decl.@"linksection" != null) try w.writeAll(", read, write)");
+ if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
try w.writeAll(" = ");
- try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer);
+ try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer);
try w.writeByte(';');
try o.indent_writer.insertNewline();
} else {
@@ -2686,9 +2706,10 @@ pub fn genDecl(o: *Object) !void {
const w = o.writer();
if (!is_global) try w.writeAll("static ");
- if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
+ try w.print("zig_linksection(\"{s}\", ", .{s});
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.@"align", .complete);
- if (decl.@"linksection" != null) try w.writeAll(", read)");
+ if (decl.@"linksection" != .none) try w.writeAll(", read)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
try w.writeAll(";\n");
@@ -2704,8 +2725,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
.val = dg.decl.?.val,
};
const writer = dg.fwd_decl.writer();
+ const mod = dg.module;
- switch (tv.ty.zigTypeTag()) {
+ switch (tv.ty.zigTypeTag(mod)) {
.Fn => {
const is_global = dg.declIsGlobal(tv);
if (is_global) {
@@ -2791,17 +2813,18 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
}
fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
+ const mod = f.object.dg.module;
+ const ip = &mod.intern_pool;
const air_tags = f.air.instructions.items(.tag);
for (body) |inst| {
- if (f.liveness.isUnused(inst) and !f.air.mustLower(inst)) {
+ if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip))
continue;
- }
const result_value = switch (air_tags[inst]) {
// zig fmt: off
- .constant => unreachable, // excluded from function bodies
- .const_ty => unreachable, // excluded from function bodies
+ .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
+
.arg => try airArg(f, inst),
.trap => try airTrap(f.object.writer()),
@@ -2826,10 +2849,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType();
+ const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(mod);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
- break :blk if (lhs_scalar_ty.isInt())
+ break :blk if (lhs_scalar_ty.isInt(mod))
try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
@@ -3077,7 +3100,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: []const u8) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -3095,9 +3118,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
}
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
+ const mod = f.object.dg.module;
+ const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3120,13 +3144,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
- const inst_ty = f.air.typeOfIndex(inst);
- const ptr_ty = f.air.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType();
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime();
+ const inst_ty = f.typeOfIndex(inst);
+ const ptr_ty = f.typeOf(bin_op.lhs);
+ const elem_ty = ptr_ty.childType(mod);
+ const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);
const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3141,7 +3166,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
if (elem_has_bits) try writer.writeByte('&');
- if (elem_has_bits and ptr_ty.ptrSize() == .One) {
+ if (elem_has_bits and ptr_ty.ptrSize(mod) == .One) {
// It's a pointer to an array, so we need to de-reference.
try f.writeCValueDeref(writer, ptr);
} else try f.writeCValue(writer, ptr, .Other);
@@ -3155,9 +3180,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
+ const mod = f.object.dg.module;
+ const inst_ty = f.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3180,13 +3206,14 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
- const inst_ty = f.air.typeOfIndex(inst);
- const slice_ty = f.air.typeOf(bin_op.lhs);
- const elem_ty = slice_ty.elemType2();
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime();
+ const inst_ty = f.typeOfIndex(inst);
+ const slice_ty = f.typeOf(bin_op.lhs);
+ const elem_ty = slice_ty.elemType2(mod);
+ const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);
const slice = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3209,9 +3236,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const inst_ty = f.air.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ const inst_ty = f.typeOfIndex(inst);
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3234,14 +3262,14 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- const elem_type = inst_ty.elemType();
- if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty };
+ const mod = f.object.dg.module;
+ const inst_ty = f.typeOfIndex(inst);
+ const elem_type = inst_ty.childType(mod);
+ if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
- const target = f.object.dg.module.getTarget();
const local = try f.allocLocalValue(
elem_type,
- inst_ty.ptrAlignment(target),
+ inst_ty.ptrAlignment(mod),
);
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
const gpa = f.object.dg.module.gpa;
@@ -3250,14 +3278,14 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
- const elem_ty = inst_ty.elemType();
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty };
+ const mod = f.object.dg.module;
+ const inst_ty = f.typeOfIndex(inst);
+ const elem_ty = inst_ty.childType(mod);
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
- const target = f.object.dg.module.getTarget();
const local = try f.allocLocalValue(
elem_ty,
- inst_ty.ptrAlignment(target),
+ inst_ty.ptrAlignment(mod),
);
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
const gpa = f.object.dg.module.gpa;
@@ -3266,7 +3294,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const inst_cty = try f.typeToIndex(inst_ty, .parameter);
const i = f.next_arg_index;
@@ -3290,14 +3318,15 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const ptr_ty = f.air.typeOf(ty_op.operand);
- const ptr_scalar_ty = ptr_ty.scalarType();
- const ptr_info = ptr_scalar_ty.ptrInfo().data;
+ const ptr_ty = f.typeOf(ty_op.operand);
+ const ptr_scalar_ty = ptr_ty.scalarType(mod);
+ const ptr_info = ptr_scalar_ty.ptrInfo(mod);
const src_ty = ptr_info.pointee_type;
- if (!src_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
@@ -3306,9 +3335,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
- const target = f.object.dg.module.getTarget();
- const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target);
- const is_array = lowersToArray(src_ty, target);
+ const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(mod);
+ const is_array = lowersToArray(src_ty, mod);
const need_memcpy = !is_aligned or is_array;
const writer = f.object.writer();
@@ -3327,29 +3355,13 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, src_ty);
try writer.writeAll("))");
} else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
- var host_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const host_ty = Type.initPayload(&host_pl.base);
+ const host_bits: u16 = ptr_info.host_size * 8;
+ const host_ty = try mod.intType(.unsigned, host_bits);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(host_pl.data - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
+ const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset);
- var bit_offset_val_pl: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = ptr_info.bit_offset,
- };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
-
- var field_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, src_ty.bitSize(target)),
- };
- const field_ty = Type.initPayload(&field_pl.base);
+ const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod)));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
@@ -3360,9 +3372,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("((");
try f.renderType(writer, field_ty);
try writer.writeByte(')');
- const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64;
+ const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64;
if (cant_cast) {
- if (field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3390,23 +3402,22 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
- const target = f.object.dg.module.getTarget();
const op_inst = Air.refToIndex(un_op);
- const op_ty = f.air.typeOf(un_op);
- const ret_ty = if (is_ptr) op_ty.childType() else op_ty;
- var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
- const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target);
+ const op_ty = f.typeOf(un_op);
+ const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty;
+ const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) {
try reap(f, inst, &.{un_op});
_ = try airCall(f, op_inst.?, .always_tail);
- } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
var deref = is_ptr;
- const is_array = lowersToArray(ret_ty, target);
+ const is_array = lowersToArray(ret_ty, mod);
const ret_val = if (is_array) ret_val: {
const array_local = try f.allocLocal(inst, lowered_ret_ty);
try writer.writeAll("memcpy(");
@@ -3435,22 +3446,23 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
} else {
try reap(f, inst, &.{un_op});
// Not even allowed to return void in a naked function.
- if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true)
+ if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention(mod) != .Naked else true)
try writer.writeAll("return;\n");
}
return .none;
}
fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const operand_ty = f.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3467,20 +3479,20 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const target = f.object.dg.module.getTarget();
- const dest_int_info = inst_scalar_ty.intInfo(target);
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const dest_int_info = inst_scalar_ty.intInfo(mod);
const dest_bits = dest_int_info.bits;
const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
- const scalar_int_info = scalar_ty.intInfo(target);
+ const operand_ty = f.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_int_info = scalar_ty.intInfo(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3508,14 +3520,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
try v.elem(f, writer);
} else switch (dest_int_info.signedness) {
.unsigned => {
- var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
- defer arena.deinit();
-
- const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
- const mask_val = try inst_scalar_ty.maxInt(stack.get(), target);
+ const mask_val = try inst_scalar_ty.maxIntScalar(mod, scalar_ty);
try writer.writeAll("zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
@@ -3526,11 +3531,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
.signed => {
const c_bits = toCIntBits(scalar_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- var shift_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = c_bits - dest_bits,
- };
- const shift_val = Value.initPayload(&shift_pl.base);
+ const shift_val = try mod.intValue(Type.u8, c_bits - dest_bits);
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
@@ -3566,7 +3567,7 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const local = try f.allocLocal(inst, inst_ty);
const a = try Assignment.start(f, writer, inst_ty);
try f.writeCValue(writer, local, .Other);
@@ -3577,17 +3578,18 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
+ const mod = f.object.dg.module;
// *a = b;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = f.air.typeOf(bin_op.lhs);
- const ptr_scalar_ty = ptr_ty.scalarType();
- const ptr_info = ptr_scalar_ty.ptrInfo().data;
+ const ptr_ty = f.typeOf(bin_op.lhs);
+ const ptr_scalar_ty = ptr_ty.scalarType(mod);
+ const ptr_info = ptr_scalar_ty.ptrInfo(mod);
const ptr_val = try f.resolveInst(bin_op.lhs);
- const src_ty = f.air.typeOf(bin_op.rhs);
+ const src_ty = f.typeOf(bin_op.rhs);
- const val_is_undef = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false;
+ const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep(mod) else false;
if (val_is_undef) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -3602,10 +3604,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
return .none;
}
- const target = f.object.dg.module.getTarget();
const is_aligned = ptr_info.@"align" == 0 or
- ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target);
- const is_array = lowersToArray(ptr_info.pointee_type, target);
+ ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(mod);
+ const is_array = lowersToArray(ptr_info.pointee_type, mod);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@@ -3647,22 +3648,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
} else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
const host_bits = ptr_info.host_size * 8;
- var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits };
- const host_ty = Type.initPayload(&host_pl.base);
+ const host_ty = try mod.intType(.unsigned, host_bits);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(host_bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
-
- var bit_offset_val_pl: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = ptr_info.bit_offset,
- };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
+ const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset);
- const src_bits = src_ty.bitSize(target);
+ const src_bits = src_ty.bitSize(mod);
const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
var stack align(@alignOf(ExpectedContents)) =
@@ -3675,11 +3666,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try mask.shiftLeft(&mask, ptr_info.bit_offset);
try mask.bitNotWrap(&mask, .unsigned, host_bits);
- var mask_pl = Value.Payload.BigInt{
- .base = .{ .tag = .int_big_positive },
- .data = mask.limbs[0..mask.len()],
- };
- const mask_val = Value.initPayload(&mask_pl.base);
+ const mask_val = try mod.intValue_big(host_ty, mask.toConst());
try f.writeCValueDeref(writer, ptr_val);
try v.elem(f, writer);
@@ -3693,9 +3680,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
- const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64;
+ const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64;
if (cant_cast) {
- if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (src_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_make_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(0, ");
@@ -3705,7 +3692,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeByte(')');
}
- if (src_ty.isPtrAtRuntime()) {
+ if (src_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
@@ -3728,6 +3715,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3735,9 +3723,9 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
- const operand_ty = f.air.typeOf(bin_op.lhs);
- const scalar_ty = operand_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const operand_ty = f.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType(mod);
const w = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3765,15 +3753,16 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
- if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits);
+ const operand_ty = f.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType(mod);
+ if (scalar_ty.ip_index != .bool_type) return try airUnBuiltinCall(f, inst, "not", .bits);
const op = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3797,18 +3786,18 @@ fn airBinOp(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const operand_ty = f.air.typeOf(bin_op.lhs);
- const scalar_ty = operand_ty.scalarType();
- const target = f.object.dg.module.getTarget();
- if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat())
+ const operand_ty = f.typeOf(bin_op.lhs);
+ const scalar_ty = operand_ty.scalarType(mod);
+ if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3835,12 +3824,12 @@ fn airCmpOp(
data: anytype,
operator: std.math.CompareOperator,
) !CValue {
- const lhs_ty = f.air.typeOf(data.lhs);
- const scalar_ty = lhs_ty.scalarType();
+ const mod = f.object.dg.module;
+ const lhs_ty = f.typeOf(data.lhs);
+ const scalar_ty = lhs_ty.scalarType(mod);
- const target = f.object.dg.module.getTarget();
- const scalar_bits = scalar_ty.bitSize(target);
- if (scalar_ty.isInt() and scalar_bits > 64)
+ const scalar_bits = scalar_ty.bitSize(mod);
+ if (scalar_ty.isInt(mod) and scalar_bits > 64)
return airCmpBuiltinCall(
f,
inst,
@@ -3852,13 +3841,13 @@ fn airCmpOp(
if (scalar_ty.isRuntimeFloat())
return airCmpBuiltinCall(f, inst, data, operator, .operator, .none);
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
- const rhs_ty = f.air.typeOf(data.rhs);
- const need_cast = lhs_ty.isSinglePointer() or rhs_ty.isSinglePointer();
+ const rhs_ty = f.typeOf(data.rhs);
+ const need_cast = lhs_ty.isSinglePointer(mod) or rhs_ty.isSinglePointer(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
const v = try Vectorize.start(f, inst, writer, lhs_ty);
@@ -3885,12 +3874,12 @@ fn airEquality(
inst: Air.Inst.Index,
operator: std.math.CompareOperator,
) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const operand_ty = f.air.typeOf(bin_op.lhs);
- const target = f.object.dg.module.getTarget();
- const operand_bits = operand_ty.bitSize(target);
- if (operand_ty.isInt() and operand_bits > 64)
+ const operand_ty = f.typeOf(bin_op.lhs);
+ const operand_bits = operand_ty.bitSize(mod);
+ if (operand_ty.isInt(mod) and operand_bits > 64)
return airCmpBuiltinCall(
f,
inst,
@@ -3907,12 +3896,12 @@ fn airEquality(
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (operand_ty.zigTypeTag() == .Optional and !operand_ty.optionalReprIsPayload()) {
+ if (operand_ty.zigTypeTag(mod) == .Optional and !operand_ty.optionalReprIsPayload(mod)) {
// (A && B) || (C && (A == B))
// A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload
@@ -3951,7 +3940,7 @@ fn airEquality(
fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
@@ -3965,6 +3954,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3972,9 +3962,9 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const elem_ty = inst_scalar_ty.elemType2();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const elem_ty = inst_scalar_ty.elemType2(mod);
const local = try f.allocLocal(inst, inst_ty);
const writer = f.object.writer();
@@ -3983,7 +3973,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try v.elem(f, writer);
try writer.writeAll(" = ");
- if (elem_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We must convert to and from integer types to prevent UB if the operation
// results in a NULL pointer, or if LHS is NULL. The operation is only UB
// if the result is NULL and then dereferenced.
@@ -4012,13 +4002,13 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
}
fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
- const target = f.object.dg.module.getTarget();
- if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64)
+ if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64)
return try airBinBuiltinCall(f, inst, operation[1..], .none);
if (inst_scalar_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
@@ -4054,6 +4044,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons
}
fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4061,9 +4052,8 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
const len = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = inst_ty.slicePtrFieldType(&buf);
+ const inst_ty = f.typeOfIndex(inst);
+ const ptr_ty = inst_ty.slicePtrFieldType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -4092,12 +4082,11 @@ fn airCall(
inst: Air.Inst.Index,
modifier: std.builtin.CallModifier,
) !CValue {
+ const mod = f.object.dg.module;
// Not even allowed to call panic in a naked function.
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
+ if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;
const gpa = f.object.dg.gpa;
- const module = f.object.dg.module;
- const target = module.getTarget();
const writer = f.object.writer();
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
@@ -4107,7 +4096,7 @@ fn airCall(
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
for (resolved_args, args) |*resolved_arg, arg| {
- const arg_ty = f.air.typeOf(arg);
+ const arg_ty = f.typeOf(arg);
const arg_cty = try f.typeToIndex(arg_ty, .parameter);
if (f.indexToCType(arg_cty).tag() == .void) {
resolved_arg.* = .none;
@@ -4115,8 +4104,7 @@ fn airCall(
}
resolved_arg.* = try f.resolveInst(arg);
if (arg_cty != try f.typeToIndex(arg_ty, .complete)) {
- var lowered_arg_buf: LowerFnRetTyBuffer = undefined;
- const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target);
+ const lowered_arg_ty = try lowerFnRetTy(arg_ty, mod);
const array_local = try f.allocLocal(inst, lowered_arg_ty);
try writer.writeAll("memcpy(");
@@ -4138,22 +4126,21 @@ fn airCall(
for (args) |arg| try bt.feed(arg);
}
- const callee_ty = f.air.typeOf(pl_op.operand);
- const fn_ty = switch (callee_ty.zigTypeTag()) {
+ const callee_ty = f.typeOf(pl_op.operand);
+ const fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
- .Pointer => callee_ty.childType(),
+ .Pointer => callee_ty.childType(mod),
else => unreachable,
};
- const ret_ty = fn_ty.fnReturnType();
- var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
- const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target);
+ const ret_ty = fn_ty.fnReturnType(mod);
+ const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
const result_local = result: {
if (modifier == .always_tail) {
try writer.writeAll("zig_always_tail return ");
break :result .none;
- } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result .none;
} else if (f.liveness.isUnused(inst)) {
try writer.writeByte('(');
@@ -4171,19 +4158,22 @@ fn airCall(
callee: {
known: {
const fn_decl = fn_decl: {
- const callee_val = f.air.value(pl_op.operand) orelse break :known;
- break :fn_decl switch (callee_val.tag()) {
- .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl,
- .function => callee_val.castTag(.function).?.data.owner_decl,
- .decl_ref => callee_val.castTag(.decl_ref).?.data,
+ const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known;
+ break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) {
+ .extern_func => |extern_func| extern_func.decl,
+ .func => |func| mod.funcPtr(func.index).owner_decl,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| decl,
+ else => break :known,
+ },
else => break :known,
};
};
switch (modifier) {
.auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0),
- inline .never_tail, .never_inline => |mod| try writer.writeAll(try f.getLazyFnName(
- @unionInit(LazyFnKey, @tagName(mod), fn_decl),
- @unionInit(LazyFnValue.Data, @tagName(mod), {}),
+ inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName(
+ @unionInit(LazyFnKey, @tagName(m), fn_decl),
+ @unionInit(LazyFnValue.Data, @tagName(m), {}),
)),
else => unreachable,
}
@@ -4211,7 +4201,7 @@ fn airCall(
try writer.writeAll(");\n");
const result = result: {
- if (result_local == .none or !lowersToArray(ret_ty, target))
+ if (result_local == .none or !lowersToArray(ret_ty, mod))
break :result result_local;
const array_local = try f.allocLocal(inst, ret_ty);
@@ -4245,18 +4235,21 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
- const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
- const writer = f.object.writer();
- const function = f.air.values[ty_pl.payload].castTag(.function).?.data;
+ const ty_fn = f.air.instructions.items(.data)[inst].ty_fn;
const mod = f.object.dg.module;
- try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name});
+ const writer = f.object.writer();
+ const function = mod.funcPtr(ty_fn.func);
+ try writer.print("/* dbg func:{s} */\n", .{
+ mod.intern_pool.stringToSlice(mod.declPtr(function.owner_decl).name),
+ });
return .none;
}
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const name = f.air.nullTerminatedString(pl_op.payload);
- const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false;
+ const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep(mod) else false;
if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -4266,6 +4259,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Block, ty_pl.payload);
const body = f.air.extra[extra.end..][0..extra.data.body_len];
@@ -4275,8 +4269,8 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
f.next_block_index += 1;
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
- const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst))
+ const inst_ty = f.typeOfIndex(inst);
+ const result = if (inst_ty.ip_index != .void_type and !f.liveness.isUnused(inst))
try f.allocLocal(inst, inst_ty)
else
.none;
@@ -4298,7 +4292,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.indent_writer.insertNewline();
// noreturn blocks have no `br` instructions reaching them, so we don't want a label
- if (!f.air.typeOfIndex(inst).isNoReturn()) {
+ if (!f.typeOfIndex(inst).isNoReturn(mod)) {
// a label must be followed by an expression, so include an empty one.
try writer.print("zig_block_{d}:;\n", .{block_id});
}
@@ -4310,15 +4304,16 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Try, pl_op.payload);
const body = f.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = f.air.typeOf(pl_op.operand);
+ const err_union_ty = f.typeOf(pl_op.operand);
return lowerTry(f, inst, pl_op.operand, body, err_union_ty, false);
}
fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.TryPtr, ty_pl.payload);
const body = f.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = f.air.typeOf(extra.data.ptr).childType();
+ const err_union_ty = f.typeOf(extra.data.ptr).childType(mod);
return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true);
}
@@ -4330,14 +4325,15 @@ fn lowerTry(
err_union_ty: Type,
is_ptr: bool,
) !CValue {
+ const mod = f.object.dg.module;
const err_union = try f.resolveInst(operand);
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
- const payload_ty = err_union_ty.errorUnionPayload();
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
- if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
try writer.writeAll("if (");
if (!payload_has_bits) {
if (is_ptr)
@@ -4399,7 +4395,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue {
// If result is .none then the value of the block is unused.
if (result != .none) {
- const operand_ty = f.air.typeOf(branch.operand);
+ const operand_ty = f.typeOf(branch.operand);
const operand = try f.resolveInst(branch.operand);
try reap(f, inst, &.{branch.operand});
@@ -4416,10 +4412,10 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue {
fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const dest_ty = f.air.typeOfIndex(inst);
+ const dest_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
- const operand_ty = f.air.typeOf(ty_op.operand);
+ const operand_ty = f.typeOf(ty_op.operand);
const bitcasted = try bitcast(f, dest_ty, operand, operand_ty);
try reap(f, inst, &.{ty_op.operand});
@@ -4431,6 +4427,8 @@ const LocalResult = struct {
need_free: bool,
fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue {
+ const mod = f.object.dg.module;
+
if (lr.need_free) {
// Move the freshly allocated local to be owned by this instruction,
// by returning it here instead of freeing it.
@@ -4441,7 +4439,7 @@ const LocalResult = struct {
try lr.free(f);
const writer = f.object.writer();
try f.writeCValue(writer, local, .Other);
- if (dest_ty.isAbiInt()) {
+ if (dest_ty.isAbiInt(mod)) {
try writer.writeAll(" = ");
} else {
try writer.writeAll(" = (");
@@ -4461,12 +4459,13 @@ const LocalResult = struct {
};
fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult {
- const target = f.object.dg.module.getTarget();
+ const mod = f.object.dg.module;
+ const target = mod.getTarget();
const writer = f.object.writer();
- if (operand_ty.isAbiInt() and dest_ty.isAbiInt()) {
- const src_info = dest_ty.intInfo(target);
- const dest_info = operand_ty.intInfo(target);
+ if (operand_ty.isAbiInt(mod) and dest_ty.isAbiInt(mod)) {
+ const src_info = dest_ty.intInfo(mod);
+ const dest_info = operand_ty.intInfo(mod);
if (src_info.signedness == dest_info.signedness and
src_info.bits == dest_info.bits)
{
@@ -4477,7 +4476,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
}
}
- if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) {
+ if (dest_ty.isPtrAtRuntime(mod) and operand_ty.isPtrAtRuntime(mod)) {
const local = try f.allocLocal(0, dest_ty);
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = (");
@@ -4494,7 +4493,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
const operand_lval = if (operand == .constant) blk: {
const operand_local = try f.allocLocal(0, operand_ty);
try f.writeCValue(writer, operand_local, .Other);
- if (operand_ty.isAbiInt()) {
+ if (operand_ty.isAbiInt(mod)) {
try writer.writeAll(" = ");
} else {
try writer.writeAll(" = (");
@@ -4516,13 +4515,10 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
try writer.writeAll("));\n");
// Ensure padding bits have the expected value.
- if (dest_ty.isAbiInt()) {
+ if (dest_ty.isAbiInt(mod)) {
const dest_cty = try f.typeToCType(dest_ty, .complete);
- const dest_info = dest_ty.intInfo(target);
- var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
- .unsigned => .int_unsigned,
- .signed => .int_signed,
- } }, .data = dest_info.bits };
+ const dest_info = dest_ty.intInfo(mod);
+ var bits: u16 = dest_info.bits;
var wrap_cty: ?CType = null;
var need_bitcasts = false;
@@ -4535,9 +4531,9 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
const elem_cty = f.indexToCType(pl.data.elem_type);
wrap_cty = elem_cty.toSignedness(dest_info.signedness);
need_bitcasts = wrap_cty.?.tag() == .zig_i128;
- info_ty_pl.data -= 1;
- info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8);
- info_ty_pl.data += 1;
+ bits -= 1;
+ bits %= @intCast(u16, f.byteSize(elem_cty) * 8);
+ bits += 1;
}
try writer.writeAll(" = ");
if (need_bitcasts) {
@@ -4546,7 +4542,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
try writer.writeByte('(');
}
try writer.writeAll("zig_wrap_");
- const info_ty = Type.initPayload(&info_ty_pl.base);
+ const info_ty = try mod.intType(dest_info.signedness, bits);
if (wrap_cty) |cty|
try f.object.dg.renderCTypeForBuiltinFnName(writer, cty)
else
@@ -4622,8 +4618,9 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnreach(f: *Function) !CValue {
+ const mod = f.object.dg.module;
// Not even allowed to call unreachable in a naked function.
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
+ if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;
try f.object.writer().writeAll("zig_unreachable();\n");
return .none;
@@ -4657,6 +4654,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(") ");
try genBodyResolveState(f, inst, liveness_condbr.then_deaths, then_body, false);
+ try writer.writeByte('\n');
// We don't need to use `genBodyResolveState` for the else block, because this instruction is
// noreturn so must terminate a body, therefore we don't need to leave `value_map` or
@@ -4675,19 +4673,20 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const condition = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
- const condition_ty = f.air.typeOf(pl_op.operand);
+ const condition_ty = f.typeOf(pl_op.operand);
const switch_br = f.air.extraData(Air.SwitchBr, pl_op.payload);
const writer = f.object.writer();
try writer.writeAll("switch (");
- if (condition_ty.zigTypeTag() == .Bool) {
+ if (condition_ty.zigTypeTag(mod) == .Bool) {
try writer.writeByte('(');
try f.renderType(writer, Type.u1);
try writer.writeByte(')');
- } else if (condition_ty.isPtrAtRuntime()) {
+ } else if (condition_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
@@ -4714,12 +4713,12 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
for (items) |item| {
try f.object.indent_writer.insertNewline();
try writer.writeAll("case ");
- if (condition_ty.isPtrAtRuntime()) {
+ if (condition_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
}
- try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?, .Other);
+ try f.object.dg.renderValue(writer, condition_ty, (try f.air.value(item, mod)).?, .Other);
try writer.writeByte(':');
}
try writer.writeByte(' ');
@@ -4764,6 +4763,7 @@ fn asmInputNeedsLocal(constraint: []const u8, value: CValue) bool {
}
fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
@@ -4777,8 +4777,8 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const result = result: {
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
- const local = if (inst_ty.hasRuntimeBitsIgnoreComptime()) local: {
+ const inst_ty = f.typeOfIndex(inst);
+ const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: {
const local = try f.allocLocal(inst, inst_ty);
if (f.wantSafety()) {
try f.writeCValue(writer, local, .Other);
@@ -4807,7 +4807,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const is_reg = constraint[1] == '{';
if (is_reg) {
- const output_ty = if (output == .none) inst_ty else f.air.typeOf(output).childType();
+ const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod);
try writer.writeAll("register ");
const alignment = 0;
const local_value = try f.allocLocalValue(output_ty, alignment);
@@ -4840,7 +4840,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const is_reg = constraint[0] == '{';
const input_val = try f.resolveInst(input);
if (asmInputNeedsLocal(constraint, input_val)) {
- const input_ty = f.air.typeOf(input);
+ const input_ty = f.typeOf(input);
if (is_reg) try writer.writeAll("register ");
const alignment = 0;
const local_value = try f.allocLocalValue(input_ty, alignment);
@@ -5025,6 +5025,7 @@ fn airIsNull(
operator: []const u8,
is_ptr: bool,
) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
@@ -5040,23 +5041,22 @@ fn airIsNull(
try f.writeCValue(writer, operand, .Other);
}
- const operand_ty = f.air.typeOf(un_op);
- const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty;
- var payload_buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&payload_buf);
- var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const operand_ty = f.typeOf(un_op);
+ const optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
+ const payload_ty = optional_ty.optionalChild(mod);
- const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
TypedValue{ .ty = Type.bool, .val = Value.true }
- else if (optional_ty.isPtrLikeOptional())
+ else if (optional_ty.isPtrLikeOptional(mod))
// operand is a regular pointer, test `operand !=/== NULL`
- TypedValue{ .ty = optional_ty, .val = Value.null }
- else if (payload_ty.zigTypeTag() == .ErrorSet)
- TypedValue{ .ty = payload_ty, .val = Value.zero }
- else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload()) rhs: {
+ TypedValue{ .ty = optional_ty, .val = try mod.getCoerced(Value.null, optional_ty) }
+ else if (payload_ty.zigTypeTag(mod) == .ErrorSet)
+ TypedValue{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, 0) }
+ else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: {
try writer.writeAll(".ptr");
- const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf);
- break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null };
+ const slice_ptr_ty = payload_ty.slicePtrFieldType(mod);
+ const opt_slice_ptr_ty = try mod.optionalType(slice_ptr_ty.toIntern());
+ break :rhs TypedValue{ .ty = opt_slice_ptr_ty, .val = try mod.nullValue(opt_slice_ptr_ty) };
} else rhs: {
try writer.writeAll(".is_null");
break :rhs TypedValue{ .ty = Type.bool, .val = Value.true };
@@ -5070,24 +5070,24 @@ fn airIsNull(
}
fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const opt_ty = f.air.typeOf(ty_op.operand);
+ const opt_ty = f.typeOf(ty_op.operand);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
+ const payload_ty = opt_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return .none;
}
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, operand, .Other);
@@ -5104,23 +5104,24 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const ptr_ty = f.air.typeOf(ty_op.operand);
- const opt_ty = ptr_ty.childType();
- const inst_ty = f.air.typeOfIndex(inst);
+ const ptr_ty = f.typeOf(ty_op.operand);
+ const opt_ty = ptr_ty.childType(mod);
+ const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod)) {
return .{ .undef = inst_ty };
}
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
// the operand is just a regular pointer, no need to do anything special.
// *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C
try writer.writeAll(" = ");
@@ -5134,17 +5135,18 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const operand_ty = f.air.typeOf(ty_op.operand);
+ const operand_ty = f.typeOf(ty_op.operand);
- const opt_ty = operand_ty.elemType();
+ const opt_ty = operand_ty.childType(mod);
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
if (f.liveness.isUnused(inst)) {
return .none;
}
@@ -5179,48 +5181,49 @@ fn fieldLocation(
container_ty: Type,
field_ptr_ty: Type,
field_index: u32,
- target: std.Target,
+ mod: *Module,
) union(enum) {
begin: void,
field: CValue,
byte_offset: u32,
end: void,
} {
- return switch (container_ty.zigTypeTag()) {
- .Struct => switch (container_ty.containerLayout()) {
- .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| {
- if (container_ty.structFieldIsComptime(next_field_index)) continue;
- const field_ty = container_ty.structFieldType(next_field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
-
- break .{ .field = if (container_ty.isSimpleTuple())
+ const ip = &mod.intern_pool;
+ return switch (container_ty.zigTypeTag(mod)) {
+ .Struct => switch (container_ty.containerLayout(mod)) {
+ .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| {
+ if (container_ty.structFieldIsComptime(next_field_index, mod)) continue;
+ const field_ty = container_ty.structFieldType(next_field_index, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ break .{ .field = if (container_ty.isSimpleTuple(mod))
.{ .field = next_field_index }
else
- .{ .identifier = container_ty.structFieldName(next_field_index) } };
- } else if (container_ty.hasRuntimeBitsIgnoreComptime()) .end else .begin,
- .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0)
- .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, target) }
+ .{ .identifier = ip.stringToSlice(container_ty.structFieldName(next_field_index, mod)) } };
+ } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
+ .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0)
+ .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
else
.begin,
},
- .Union => switch (container_ty.containerLayout()) {
+ .Union => switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => {
- const field_ty = container_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime())
- return if (container_ty.unionTagTypeSafety() != null and
- !container_ty.unionHasAllZeroBitFieldTypes())
+ const field_ty = container_ty.structFieldType(field_index, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
+ return if (container_ty.unionTagTypeSafety(mod) != null and
+ !container_ty.unionHasAllZeroBitFieldTypes(mod))
.{ .field = .{ .identifier = "payload" } }
else
.begin;
- const field_name = container_ty.unionFields().keys()[field_index];
- return .{ .field = if (container_ty.unionTagTypeSafety()) |_|
- .{ .payload_identifier = field_name }
+ const field_name = container_ty.unionFields(mod).keys()[field_index];
+ return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_|
+ .{ .payload_identifier = ip.stringToSlice(field_name) }
else
- .{ .identifier = field_name } };
+ .{ .identifier = ip.stringToSlice(field_name) } };
},
.Packed => .begin,
},
- .Pointer => switch (container_ty.ptrSize()) {
+ .Pointer => switch (container_ty.ptrSize(mod)) {
.Slice => switch (field_index) {
0 => .{ .field = .{ .identifier = "ptr" } },
1 => .{ .field = .{ .identifier = "len" } },
@@ -5238,7 +5241,7 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const container_ptr_val = try f.resolveInst(extra.struct_operand);
try reap(f, inst, &.{extra.struct_operand});
- const container_ptr_ty = f.air.typeOf(extra.struct_operand);
+ const container_ptr_ty = f.typeOf(extra.struct_operand);
return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, extra.field_index);
}
@@ -5247,19 +5250,19 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue
const container_ptr_val = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const container_ptr_ty = f.air.typeOf(ty_op.operand);
+ const container_ptr_ty = f.typeOf(ty_op.operand);
return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, index);
}
fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
- const target = f.object.dg.module.getTarget();
- const container_ptr_ty = f.air.typeOfIndex(inst);
- const container_ty = container_ptr_ty.childType();
+ const container_ptr_ty = f.typeOfIndex(inst);
+ const container_ty = container_ptr_ty.childType(mod);
- const field_ptr_ty = f.air.typeOf(extra.field_ptr);
+ const field_ptr_ty = f.typeOf(extra.field_ptr);
const field_ptr_val = try f.resolveInst(extra.field_ptr);
try reap(f, inst, &.{extra.field_ptr});
@@ -5270,12 +5273,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, container_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, target)) {
+ switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) {
.begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
.field => |field| {
- var u8_ptr_pl = field_ptr_ty.ptrInfo();
- u8_ptr_pl.data.pointee_type = Type.u8;
- const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
+ const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
@@ -5288,15 +5289,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("))");
},
.byte_offset => |byte_offset| {
- var u8_ptr_pl = field_ptr_ty.ptrInfo();
- u8_ptr_pl.data.pointee_type = Type.u8;
- const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
+ const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8);
- var byte_offset_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = byte_offset,
- };
- const byte_offset_val = Value.initPayload(&byte_offset_pl.base);
+ const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
@@ -5306,7 +5301,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
},
.end => {
try f.writeCValue(writer, field_ptr_val, .Other);
- try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, Value.one)});
+ try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))});
},
}
@@ -5321,9 +5316,9 @@ fn fieldPtr(
container_ptr_val: CValue,
field_index: u32,
) !CValue {
- const target = f.object.dg.module.getTarget();
- const container_ty = container_ptr_ty.elemType();
- const field_ptr_ty = f.air.typeOfIndex(inst);
+ const mod = f.object.dg.module;
+ const container_ty = container_ptr_ty.childType(mod);
+ const field_ptr_ty = f.typeOfIndex(inst);
// Ensure complete type definition is visible before accessing fields.
_ = try f.typeToIndex(container_ty, .complete);
@@ -5335,22 +5330,16 @@ fn fieldPtr(
try f.renderType(writer, field_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ty, field_ptr_ty, field_index, target)) {
+ switch (fieldLocation(container_ty, field_ptr_ty, field_index, mod)) {
.begin => try f.writeCValue(writer, container_ptr_val, .Initializer),
.field => |field| {
try writer.writeByte('&');
try f.writeCValueDerefMember(writer, container_ptr_val, field);
},
.byte_offset => |byte_offset| {
- var u8_ptr_pl = field_ptr_ty.ptrInfo();
- u8_ptr_pl.data.pointee_type = Type.u8;
- const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
+ const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8);
- var byte_offset_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = byte_offset,
- };
- const byte_offset_val = Value.initPayload(&byte_offset_pl.base);
+ const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
try writer.writeAll("((");
try f.renderType(writer, u8_ptr_ty);
@@ -5361,7 +5350,7 @@ fn fieldPtr(
.end => {
try writer.writeByte('(');
try f.writeCValue(writer, container_ptr_val, .Other);
- try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, Value.one)});
+ try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))});
},
}
@@ -5370,58 +5359,45 @@ fn fieldPtr(
}
fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
+ const ip = &mod.intern_pool;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
- const inst_ty = f.air.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ const inst_ty = f.typeOfIndex(inst);
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{extra.struct_operand});
return .none;
}
- const target = f.object.dg.module.getTarget();
const struct_byval = try f.resolveInst(extra.struct_operand);
try reap(f, inst, &.{extra.struct_operand});
- const struct_ty = f.air.typeOf(extra.struct_operand);
+ const struct_ty = f.typeOf(extra.struct_operand);
const writer = f.object.writer();
// Ensure complete type definition is visible before accessing fields.
_ = try f.typeToIndex(struct_ty, .complete);
- const field_name: CValue = switch (struct_ty.tag()) {
- .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) {
- .Auto, .Extern => if (struct_ty.isSimpleTuple())
+ const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
+ .struct_type => switch (struct_ty.containerLayout(mod)) {
+ .Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
else
- .{ .identifier = struct_ty.structFieldName(extra.field_index) },
+ .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) },
.Packed => {
- const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const int_info = struct_ty.intInfo(target);
+ const struct_obj = mod.typeToStruct(struct_ty).?;
+ const int_info = struct_ty.intInfo(mod);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(int_info.bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- var bit_offset_val_pl: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = struct_obj.packedFieldBitOffset(target, extra.field_index),
- };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+ const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
+ const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
- const field_int_signedness = if (inst_ty.isAbiInt())
- inst_ty.intInfo(target).signedness
+ const field_int_signedness = if (inst_ty.isAbiInt(mod))
+ inst_ty.intInfo(mod).signedness
else
.unsigned;
- var field_int_pl = Type.Payload.Bits{
- .base = .{ .tag = switch (field_int_signedness) {
- .unsigned => .int_unsigned,
- .signed => .int_signed,
- } },
- .data = @intCast(u16, inst_ty.bitSize(target)),
- };
- const field_int_ty = Type.initPayload(&field_int_pl.base);
+ const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
@@ -5432,18 +5408,18 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
- if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
- if (bit_offset_val_pl.data > 0) {
+ if (bit_offset > 0) {
try writer.writeAll("zig_shr_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
}
try f.writeCValue(writer, struct_byval, .Other);
- if (bit_offset_val_pl.data > 0) {
+ if (bit_offset > 0) {
try writer.writeAll(", ");
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
@@ -5465,36 +5441,46 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
},
},
- .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) {
- const operand_lval = if (struct_byval == .constant) blk: {
- const operand_local = try f.allocLocal(inst, struct_ty);
- try f.writeCValue(writer, operand_local, .Other);
- try writer.writeAll(" = ");
- try f.writeCValue(writer, struct_byval, .Initializer);
- try writer.writeAll(";\n");
- break :blk operand_local;
- } else struct_byval;
- const local = try f.allocLocal(inst, inst_ty);
- try writer.writeAll("memcpy(&");
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(", &");
- try f.writeCValue(writer, operand_lval, .Other);
- try writer.writeAll(", sizeof(");
- try f.renderType(writer, inst_ty);
- try writer.writeAll("));\n");
+ .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
+ .{ .field = extra.field_index }
+ else
+ .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) },
+
+ .union_type => |union_type| field_name: {
+ const union_obj = mod.unionPtr(union_type.index);
+ if (union_obj.layout == .Packed) {
+ const operand_lval = if (struct_byval == .constant) blk: {
+ const operand_local = try f.allocLocal(inst, struct_ty);
+ try f.writeCValue(writer, operand_local, .Other);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, struct_byval, .Initializer);
+ try writer.writeAll(";\n");
+ break :blk operand_local;
+ } else struct_byval;
- if (struct_byval == .constant) {
- try freeLocal(f, inst, operand_lval.new_local, 0);
- }
+ const local = try f.allocLocal(inst, inst_ty);
+ try writer.writeAll("memcpy(&");
+ try f.writeCValue(writer, local, .Other);
+ try writer.writeAll(", &");
+ try f.writeCValue(writer, operand_lval, .Other);
+ try writer.writeAll(", sizeof(");
+ try f.renderType(writer, inst_ty);
+ try writer.writeAll("));\n");
- return local;
- } else field_name: {
- const name = struct_ty.unionFields().keys()[extra.field_index];
- break :field_name if (struct_ty.unionTagTypeSafety()) |_|
- .{ .payload_identifier = name }
- else
- .{ .identifier = name };
+ if (struct_byval == .constant) {
+ try freeLocal(f, inst, operand_lval.new_local, 0);
+ }
+
+ return local;
+ } else {
+ const name = union_obj.fields.keys()[extra.field_index];
+ break :field_name if (union_type.hasTag()) .{
+ .payload_identifier = ip.stringToSlice(name),
+ } else .{
+ .identifier = ip.stringToSlice(name),
+ };
+ }
},
else => unreachable,
};
@@ -5511,20 +5497,21 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
/// *(E!T) -> E
/// Note that the result is never a pointer.
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
- const operand_ty = f.air.typeOf(ty_op.operand);
+ const operand_ty = f.typeOf(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer;
- const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
- const error_ty = error_union_ty.errorUnionSet();
- const payload_ty = error_union_ty.errorUnionPayload();
+ const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer;
+ const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+ const error_ty = error_union_ty.errorUnionSet(mod);
+ const payload_ty = error_union_ty.errorUnionPayload(mod);
const local = try f.allocLocal(inst, inst_ty);
- if (!payload_ty.hasRuntimeBits() and operand == .local and operand.local == local.new_local) {
+ if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) {
// The store will be 'x = x'; elide it.
return local;
}
@@ -5533,32 +5520,33 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (!payload_ty.hasRuntimeBits()) {
+ if (!payload_ty.hasRuntimeBits(mod)) {
try f.writeCValue(writer, operand, .Other);
} else {
- if (!error_ty.errorSetIsEmpty())
+ if (!error_ty.errorSetIsEmpty(mod))
if (operand_is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
else
try f.writeCValueMember(writer, operand, .{ .identifier = "error" })
else
- try f.object.dg.renderValue(writer, error_ty, Value.zero, .Initializer);
+ try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Initializer);
}
try writer.writeAll(";\n");
return local;
}
fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const operand_ty = f.air.typeOf(ty_op.operand);
- const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
+ const operand_ty = f.typeOf(ty_op.operand);
+ const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
const writer = f.object.writer();
- if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) {
+ if (!error_union_ty.errorUnionPayload(mod).hasRuntimeBits(mod)) {
if (!is_ptr) return .none;
const local = try f.allocLocal(inst, inst_ty);
@@ -5584,11 +5572,12 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
}
fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
- const repr_is_payload = inst_ty.optionalReprIsPayload();
- const payload_ty = f.air.typeOf(ty_op.operand);
+ const inst_ty = f.typeOfIndex(inst);
+ const repr_is_payload = inst_ty.optionalReprIsPayload(mod);
+ const payload_ty = f.typeOf(ty_op.operand);
const payload = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5615,12 +5604,13 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
- const payload_ty = inst_ty.errorUnionPayload();
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime();
- const err_ty = inst_ty.errorUnionSet();
+ const inst_ty = f.typeOfIndex(inst);
+ const payload_ty = inst_ty.errorUnionPayload(mod);
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
+ const err_ty = inst_ty.errorUnionSet(mod);
const err = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5653,19 +5643,20 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const writer = f.object.writer();
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
- const error_union_ty = f.air.typeOf(ty_op.operand).childType();
+ const error_union_ty = f.typeOf(ty_op.operand).childType(mod);
- const error_ty = error_union_ty.errorUnionSet();
- const payload_ty = error_union_ty.errorUnionPayload();
+ const error_ty = error_union_ty.errorUnionSet(mod);
+ const payload_ty = error_union_ty.errorUnionPayload(mod);
// First, set the non-error value.
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try f.writeCValueDeref(writer, operand);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
+ try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other);
try writer.writeAll(";\n ");
return operand;
@@ -5673,13 +5664,13 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
try f.writeCValueDeref(writer, operand);
try writer.writeAll(".error = ");
- try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
+ try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other);
try writer.writeAll(";\n");
// Then return the payload pointer (only if it is used)
if (f.liveness.isUnused(inst)) return .none;
- const local = try f.allocLocal(inst, f.air.typeOfIndex(inst));
+ const local = try f.allocLocal(inst, f.typeOfIndex(inst));
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = &(");
try f.writeCValueDeref(writer, operand);
@@ -5703,13 +5694,14 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
- const payload_ty = inst_ty.errorUnionPayload();
+ const inst_ty = f.typeOfIndex(inst);
+ const payload_ty = inst_ty.errorUnionPayload(mod);
const payload = try f.resolveInst(ty_op.operand);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime();
- const err_ty = inst_ty.errorUnionSet();
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
+ const err_ty = inst_ty.errorUnionSet(mod);
try reap(f, inst, &.{ty_op.operand});
const writer = f.object.writer();
@@ -5728,29 +5720,30 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
else
try f.writeCValueMember(writer, local, .{ .identifier = "error" });
try a.assign(f, writer);
- try f.object.dg.renderValue(writer, err_ty, Value.zero, .Other);
+ try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other);
try a.end(f, writer);
}
return local;
}
fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
- const operand_ty = f.air.typeOf(un_op);
+ const operand_ty = f.typeOf(un_op);
const local = try f.allocLocal(inst, Type.bool);
- const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
- const payload_ty = err_union_ty.errorUnionPayload();
- const error_ty = err_union_ty.errorUnionSet();
+ const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const error_ty = err_union_ty.errorUnionSet(mod);
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (!error_ty.errorSetIsEmpty())
- if (payload_ty.hasRuntimeBits())
+ if (!error_ty.errorSetIsEmpty(mod))
+ if (payload_ty.hasRuntimeBits(mod))
if (is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
else
@@ -5758,42 +5751,40 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
else
try f.writeCValue(writer, operand, .Other)
else
- try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
+ try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other);
try writer.writeByte(' ');
try writer.writeAll(operator);
try writer.writeByte(' ');
- try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
+ try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other);
try writer.writeAll(";\n");
return local;
}
fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- const array_ty = f.air.typeOf(ty_op.operand).childType();
+ const array_ty = f.typeOf(ty_op.operand).childType(mod);
try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
try writer.writeAll(" = ");
// Unfortunately, C does not support any equivalent to
// &(*(void *)p)[0], although LLVM does via GetElementPtr
if (operand == .undef) {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer);
- } else if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+ try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(mod) }, .Initializer);
+ } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try writer.writeAll("&(");
try f.writeCValueDeref(writer, operand);
- try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
+ try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))});
} else try f.writeCValue(writer, operand, .Initializer);
try writer.writeAll("; ");
- const array_len = array_ty.arrayLen();
- var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len };
- const len_val = Value.initPayload(&len_pl.base);
+ const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));
try f.writeCValueMember(writer, local, .{ .identifier = "len" });
try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)});
@@ -5801,19 +5792,20 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const operand_ty = f.air.typeOf(ty_op.operand);
+ const operand_ty = f.typeOf(ty_op.operand);
const target = f.object.dg.module.getTarget();
const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat())
if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend"
- else if (inst_ty.isInt() and operand_ty.isRuntimeFloat())
- if (inst_ty.isSignedInt()) "fix" else "fixuns"
- else if (inst_ty.isRuntimeFloat() and operand_ty.isInt())
- if (operand_ty.isSignedInt()) "float" else "floatun"
+ else if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat())
+ if (inst_ty.isSignedInt(mod)) "fix" else "fixuns"
+ else if (inst_ty.isRuntimeFloat() and operand_ty.isInt(mod))
+ if (operand_ty.isSignedInt(mod)) "float" else "floatun"
else
unreachable;
@@ -5822,19 +5814,19 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+ if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
try writer.writeAll("zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
try writer.writeByte('(');
}
try writer.writeAll("zig_");
try writer.writeAll(operation);
- try writer.writeAll(compilerRtAbbrev(operand_ty, target));
- try writer.writeAll(compilerRtAbbrev(inst_ty, target));
+ try writer.writeAll(compilerRtAbbrev(operand_ty, mod));
+ try writer.writeAll(compilerRtAbbrev(inst_ty, mod));
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try writer.writeByte(')');
- if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+ if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
}
@@ -5843,12 +5835,13 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const operand = try f.resolveInst(un_op);
- const operand_ty = f.air.typeOf(un_op);
+ const operand_ty = f.typeOf(un_op);
try reap(f, inst, &.{un_op});
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
@@ -5856,7 +5849,7 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(" = (");
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
- if (operand_ty.isSlice()) {
+ if (operand_ty.isSlice(mod)) {
try f.writeCValueMember(writer, operand, .{ .identifier = "len" });
} else {
try f.writeCValue(writer, operand, .Other);
@@ -5871,14 +5864,15 @@ fn airUnBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const operand_ty = f.typeOf(ty_op.operand);
+ const scalar_ty = operand_ty.scalarType(mod);
const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5914,9 +5908,10 @@ fn airBinBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const operand_ty = f.air.typeOf(bin_op.lhs);
+ const operand_ty = f.typeOf(bin_op.lhs);
const operand_cty = try f.typeToCType(operand_ty, .complete);
const is_big = operand_cty.tag() == .array;
@@ -5924,9 +5919,9 @@ fn airBinBuiltinCall(
const rhs = try f.resolveInst(bin_op.rhs);
if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const scalar_ty = operand_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(mod);
const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5968,14 +5963,15 @@ fn airCmpBuiltinCall(
operation: enum { cmp, operator },
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const operand_ty = f.air.typeOf(data.lhs);
- const scalar_ty = operand_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const operand_ty = f.typeOf(data.lhs);
+ const scalar_ty = operand_ty.scalarType(mod);
const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
const ref_ret = inst_scalar_cty.tag() == .array;
@@ -6008,7 +6004,7 @@ fn airCmpBuiltinCall(
try writer.writeByte(')');
if (!ref_ret) try writer.print(" {s} {}", .{
compareOperatorC(operator),
- try f.fmtIntLiteral(Type.initTag(.i32), Value.zero),
+ try f.fmtIntLiteral(Type.i32, try mod.intValue(Type.i32, 0)),
});
try writer.writeAll(";\n");
try v.end(f, inst, writer);
@@ -6017,28 +6013,27 @@ fn airCmpBuiltinCall(
}
fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const ptr = try f.resolveInst(extra.ptr);
const expected_value = try f.resolveInst(extra.expected_value);
const new_value = try f.resolveInst(extra.new_value);
- const ptr_ty = f.air.typeOf(extra.ptr);
- const ty = ptr_ty.childType();
+ const ptr_ty = f.typeOf(extra.ptr);
+ const ty = ptr_ty.childType(mod);
const writer = f.object.writer();
const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value);
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
- const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+ const repr_ty = if (ty.isRuntimeFloat())
+ mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ else
+ ty;
const local = try f.allocLocal(inst, inst_ty);
- if (inst_ty.isPtrLikeOptional()) {
+ if (inst_ty.isPtrLikeOptional(mod)) {
{
const a = try Assignment.start(f, writer, ty);
try f.writeCValue(writer, local, .Other);
@@ -6051,7 +6046,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor});
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6093,7 +6088,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor});
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6123,11 +6118,12 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
}
fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
- const inst_ty = f.air.typeOfIndex(inst);
- const ptr_ty = f.air.typeOf(pl_op.operand);
- const ty = ptr_ty.childType();
+ const inst_ty = f.typeOfIndex(inst);
+ const ptr_ty = f.typeOf(pl_op.operand);
+ const ty = ptr_ty.childType(mod);
const ptr = try f.resolveInst(pl_op.operand);
const operand = try f.resolveInst(extra.operand);
@@ -6135,14 +6131,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
+ const repr_bits = @intCast(u16, ty.abiSize(mod) * 8);
const is_float = ty.isRuntimeFloat();
- const is_128 = repr_pl.data == 128;
- const repr_ty = if (is_float) Type.initPayload(&repr_pl.base) else ty;
+ const is_128 = repr_bits == 128;
+ const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
const local = try f.allocLocal(inst, inst_ty);
try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())});
@@ -6158,7 +6150,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
if (use_atomic) try writer.writeAll("zig_atomic(");
try f.renderType(writer, ty);
if (use_atomic) try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6181,20 +6173,19 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const atomic_load = f.air.instructions.items(.data)[inst].atomic_load;
const ptr = try f.resolveInst(atomic_load.ptr);
try reap(f, inst, &.{atomic_load.ptr});
- const ptr_ty = f.air.typeOf(atomic_load.ptr);
- const ty = ptr_ty.childType();
+ const ptr_ty = f.typeOf(atomic_load.ptr);
+ const ty = ptr_ty.childType(mod);
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
- const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+ const repr_ty = if (ty.isRuntimeFloat())
+ mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ else
+ ty;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6203,7 +6194,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", (zig_atomic(");
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6218,9 +6209,10 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = f.air.typeOf(bin_op.lhs);
- const ty = ptr_ty.childType();
+ const ptr_ty = f.typeOf(bin_op.lhs);
+ const ty = ptr_ty.childType(mod);
const ptr = try f.resolveInst(bin_op.lhs);
const element = try f.resolveInst(bin_op.rhs);
@@ -6228,17 +6220,15 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
const element_mat = try Materialize.start(f, inst, writer, ty, element);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
- const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+ const repr_ty = if (ty.isRuntimeFloat())
+ mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ else
+ ty;
try writer.writeAll("zig_atomic_store((zig_atomic(");
try f.renderType(writer, ty);
try writer.writeByte(')');
- if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+ if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
@@ -6254,7 +6244,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
}
fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void {
- if (ptr_ty.isSlice()) {
+ const mod = f.object.dg.module;
+ if (ptr_ty.isSlice(mod)) {
try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" });
} else {
try f.writeCValue(writer, ptr, .FunctionArgument);
@@ -6262,14 +6253,14 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo
}
fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const dest_ty = f.air.typeOf(bin_op.lhs);
+ const dest_ty = f.typeOf(bin_op.lhs);
const dest_slice = try f.resolveInst(bin_op.lhs);
const value = try f.resolveInst(bin_op.rhs);
- const elem_ty = f.air.typeOf(bin_op.rhs);
- const target = f.object.dg.module.getTarget();
- const elem_abi_size = elem_ty.abiSize(target);
- const val_is_undef = if (f.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+ const elem_ty = f.typeOf(bin_op.rhs);
+ const elem_abi_size = elem_ty.abiSize(mod);
+ const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
const writer = f.object.writer();
if (val_is_undef) {
@@ -6279,7 +6270,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
try writer.writeAll("memset(");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
try writer.writeAll(", 0xaa, ");
@@ -6291,8 +6282,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
},
.One => {
- const array_ty = dest_ty.childType();
- const len = array_ty.arrayLen() * elem_abi_size;
+ const array_ty = dest_ty.childType(mod);
+ const len = array_ty.arrayLen(mod) * elem_abi_size;
try f.writeCValue(writer, dest_slice, .FunctionArgument);
try writer.print(", 0xaa, {d});\n", .{len});
@@ -6303,32 +6294,33 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
return .none;
}
- if (elem_abi_size > 1 or dest_ty.isVolatilePtr()) {
+ if (elem_abi_size > 1 or dest_ty.isVolatilePtr(mod)) {
// For the assignment in this loop, the array pointer needs to get
// casted to a regular pointer, otherwise an error like this occurs:
// error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
- var elem_ptr_ty_pl: Type.Payload.ElemType = .{
- .base = .{ .tag = .c_mut_pointer },
- .data = elem_ty,
- };
- const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
+ const elem_ptr_ty = try mod.ptrType(.{
+ .child = elem_ty.ip_index,
+ .flags = .{
+ .size = .C,
+ },
+ });
const index = try f.allocLocal(inst, Type.usize);
try writer.writeAll("for (");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, Type.usize, Value.zero, .Initializer);
+ try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, 0), .Initializer);
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
},
.One => {
- const array_ty = dest_ty.childType();
- try writer.print("{d}", .{array_ty.arrayLen()});
+ const array_ty = dest_ty.childType(mod);
+ try writer.print("{d}", .{array_ty.arrayLen(mod)});
},
.Many, .C => unreachable,
}
@@ -6357,7 +6349,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const bitcasted = try bitcast(f, Type.u8, value, elem_ty);
try writer.writeAll("memset(");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
try writer.writeAll(", ");
@@ -6367,8 +6359,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll(");\n");
},
.One => {
- const array_ty = dest_ty.childType();
- const len = array_ty.arrayLen() * elem_abi_size;
+ const array_ty = dest_ty.childType(mod);
+ const len = array_ty.arrayLen(mod) * elem_abi_size;
try f.writeCValue(writer, dest_slice, .FunctionArgument);
try writer.writeAll(", ");
@@ -6383,12 +6375,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const dest_ptr = try f.resolveInst(bin_op.lhs);
const src_ptr = try f.resolveInst(bin_op.rhs);
- const dest_ty = f.air.typeOf(bin_op.lhs);
- const src_ty = f.air.typeOf(bin_op.rhs);
- const target = f.object.dg.module.getTarget();
+ const dest_ty = f.typeOf(bin_op.lhs);
+ const src_ty = f.typeOf(bin_op.rhs);
const writer = f.object.writer();
try writer.writeAll("memcpy(");
@@ -6396,10 +6388,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try writeSliceOrPtr(f, writer, src_ptr, src_ty);
try writer.writeAll(", ");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
- const elem_ty = dest_ty.childType();
- const elem_abi_size = elem_ty.abiSize(target);
+ const elem_ty = dest_ty.childType(mod);
+ const elem_abi_size = elem_ty.abiSize(mod);
try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
if (elem_abi_size > 1) {
try writer.print(" * {d});\n", .{elem_abi_size});
@@ -6408,10 +6400,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
}
},
.One => {
- const array_ty = dest_ty.childType();
- const elem_ty = array_ty.childType();
- const elem_abi_size = elem_ty.abiSize(target);
- const len = array_ty.arrayLen() * elem_abi_size;
+ const array_ty = dest_ty.childType(mod);
+ const elem_ty = array_ty.childType(mod);
+ const elem_abi_size = elem_ty.abiSize(mod);
+ const len = array_ty.arrayLen(mod) * elem_abi_size;
try writer.print("{d});\n", .{len});
},
.Many, .C => unreachable,
@@ -6422,16 +6414,16 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const union_ptr = try f.resolveInst(bin_op.lhs);
const new_tag = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const target = f.object.dg.module.getTarget();
- const union_ty = f.air.typeOf(bin_op.lhs).childType();
- const layout = union_ty.unionGetLayout(target);
+ const union_ty = f.typeOf(bin_op.lhs).childType(mod);
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return .none;
- const tag_ty = union_ty.unionTagTypeSafety().?;
+ const tag_ty = union_ty.unionTagTypeSafety(mod).?;
const writer = f.object.writer();
const a = try Assignment.start(f, writer, tag_ty);
@@ -6443,17 +6435,17 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const union_ty = f.air.typeOf(ty_op.operand);
- const target = f.object.dg.module.getTarget();
- const layout = union_ty.unionGetLayout(target);
+ const union_ty = f.typeOf(ty_op.operand);
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return .none;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
const a = try Assignment.start(f, writer, inst_ty);
@@ -6465,10 +6457,11 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
- const inst_ty = f.air.typeOfIndex(inst);
- const enum_ty = f.air.typeOf(un_op);
+ const inst_ty = f.typeOfIndex(inst);
+ const enum_ty = f.typeOf(un_op);
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
@@ -6476,7 +6469,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
try writer.print(" = {s}(", .{
- try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl() }, .{ .tag_name = enum_ty }),
+ try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(mod) }, .{ .tag_name = enum_ty }),
});
try f.writeCValue(writer, operand, .Other);
try writer.writeAll(");\n");
@@ -6488,7 +6481,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const local = try f.allocLocal(inst, inst_ty);
@@ -6501,13 +6494,14 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6532,7 +6526,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
const rhs = try f.resolveInst(extra.rhs);
try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6555,41 +6549,31 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
- const mask = f.air.values[extra.mask];
+ const mask = extra.mask.toValue();
const lhs = try f.resolveInst(extra.a);
const rhs = try f.resolveInst(extra.b);
- const module = f.object.dg.module;
- const target = module.getTarget();
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands
for (0..extra.mask_len) |index| {
- var dst_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = @intCast(u64, index),
- };
-
try f.writeCValue(writer, local, .Other);
try writer.writeByte('[');
- try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other);
+ try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, index), .Other);
try writer.writeAll("] = ");
- var buf: Value.ElemValueBuffer = undefined;
- const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target);
- var src_pl = Value.Payload.U64{
- .base = .{ .tag = .int_u64 },
- .data = @intCast(u64, mask_elem ^ mask_elem >> 63),
- };
+ const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
+ const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63));
try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
try writer.writeByte('[');
- try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other);
+ try f.object.dg.renderValue(writer, Type.usize, src_val, .Other);
try writer.writeAll("];\n");
}
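
[Note] The hunk above is representative of the wider change in this diff: integer constants are no longer built as stack-allocated Value payloads threaded through render calls, but requested from the module's intern pool via mod.intValue. A minimal standalone sketch of that interning pattern follows; MiniPool and its names are inventions of this note, not the compiler's API.

const std = @import("std");

// Stand-in for an intern pool: deduplicates u64 constants and hands out
// stable handles, so callers no longer keep payload memory alive themselves.
const MiniPool = struct {
    items: std.ArrayList(u64),

    fn intValue(pool: *MiniPool, x: u64) !usize {
        for (pool.items.items, 0..) |existing, i| {
            if (existing == x) return i; // already interned, reuse the handle
        }
        try pool.items.append(x);
        return pool.items.items.len - 1;
    }
};

test "interned constants are deduplicated" {
    var pool = MiniPool{ .items = std.ArrayList(u64).init(std.testing.allocator) };
    defer pool.items.deinit();
    try std.testing.expectEqual(try pool.intValue(42), try pool.intValue(42));
}
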
@@ -6597,16 +6581,16 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const reduce = f.air.instructions.items(.data)[inst].reduce;
- const target = f.object.dg.module.getTarget();
- const scalar_ty = f.air.typeOfIndex(inst);
+ const scalar_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(reduce.operand);
try reap(f, inst, &.{reduce.operand});
- const operand_ty = f.air.typeOf(reduce.operand);
+ const operand_ty = f.typeOf(reduce.operand);
const writer = f.object.writer();
- const use_operator = scalar_ty.bitSize(target) <= 64;
+ const use_operator = scalar_ty.bitSize(mod) <= 64;
const op: union(enum) {
const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
float_op: Func,
@@ -6617,28 +6601,28 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
.And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
.Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
.Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
- .Min => switch (scalar_ty.zigTypeTag()) {
+ .Min => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .ternary = " < " } else .{
.builtin = .{ .operation = "min" },
},
.Float => .{ .float_op = .{ .operation = "fmin" } },
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag()) {
+ .Max => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .ternary = " > " } else .{
.builtin = .{ .operation = "max" },
},
.Float => .{ .float_op = .{ .operation = "fmax" } },
else => unreachable,
},
- .Add => switch (scalar_ty.zigTypeTag()) {
+ .Add => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .infix = " += " } else .{
.builtin = .{ .operation = "addw", .info = .bits },
},
.Float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
- .Mul => switch (scalar_ty.zigTypeTag()) {
+ .Mul => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .infix = " *= " } else .{
.builtin = .{ .operation = "mulw", .info = .bits },
},
@@ -6663,43 +6647,42 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" = ");
- var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
- defer arena.deinit();
-
- const ExpectedContents = union {
- u: Value.Payload.U64,
- i: Value.Payload.I64,
- f16: Value.Payload.Float_16,
- f32: Value.Payload.Float_32,
- f64: Value.Payload.Float_64,
- f80: Value.Payload.Float_80,
- f128: Value.Payload.Float_128,
- };
- var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
- .Or, .Xor, .Add => Value.zero,
- .And => switch (scalar_ty.zigTypeTag()) {
- .Bool => Value.one,
- else => switch (scalar_ty.intInfo(target).signedness) {
- .unsigned => try scalar_ty.maxInt(stack.get(), target),
- .signed => Value.negative_one,
+ .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
+ .Bool => Value.false,
+ .Int => try mod.intValue(scalar_ty, 0),
+ else => unreachable,
+ },
+ .And => switch (scalar_ty.zigTypeTag(mod)) {
+ .Bool => Value.true,
+ .Int => switch (scalar_ty.intInfo(mod).signedness) {
+ .unsigned => try scalar_ty.maxIntScalar(mod, scalar_ty),
+ .signed => try mod.intValue(scalar_ty, -1),
},
+ else => unreachable,
+ },
+ .Add => switch (scalar_ty.zigTypeTag(mod)) {
+ .Int => try mod.intValue(scalar_ty, 0),
+ .Float => try mod.floatValue(scalar_ty, 0.0),
+ else => unreachable,
},
- .Min => switch (scalar_ty.zigTypeTag()) {
- .Bool => Value.one,
- .Int => try scalar_ty.maxInt(stack.get(), target),
- .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
+ .Mul => switch (scalar_ty.zigTypeTag(mod)) {
+ .Int => try mod.intValue(scalar_ty, 1),
+ .Float => try mod.floatValue(scalar_ty, 1.0),
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag()) {
- .Bool => Value.zero,
- .Int => try scalar_ty.minInt(stack.get(), target),
- .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
+ .Min => switch (scalar_ty.zigTypeTag(mod)) {
+ .Bool => Value.true,
+ .Int => try scalar_ty.maxIntScalar(mod, scalar_ty),
+ .Float => try mod.floatValue(scalar_ty, std.math.nan_f128),
+ else => unreachable,
+ },
+ .Max => switch (scalar_ty.zigTypeTag(mod)) {
+ .Bool => Value.false,
+ .Int => try scalar_ty.minIntScalar(mod, scalar_ty),
+ .Float => try mod.floatValue(scalar_ty, std.math.nan_f128),
else => unreachable,
},
- .Mul => Value.one,
}, .Initializer);
try writer.writeAll(";\n");
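
[Note] The accumulator seeded above is the identity element of each reduction, now constructed through mod.intValue/mod.floatValue instead of the removed stack-fallback payloads. A small sketch of the integer case; ReduceOp and intReduceSeed are illustrative names, and the real code also seeds bool and float accumulators (including NaN for float min/max).

const std = @import("std");

const ReduceOp = enum { Add, Mul, And, Or, Xor, Min, Max };

// Identity element for an integer reduction: folding it into the accumulator
// before the first element leaves the result unchanged.
fn intReduceSeed(comptime T: type, op: ReduceOp) T {
    return switch (op) {
        .Add, .Or, .Xor => 0,
        .Mul => 1,
        .And => ~@as(T, 0), // all ones: unsigned max, or -1 for signed types
        .Min => std.math.maxInt(T),
        .Max => std.math.minInt(T),
    };
}

test "integer reduce seeds" {
    try std.testing.expectEqual(@as(u8, 0xff), intReduceSeed(u8, .And));
    try std.testing.expectEqual(@as(i16, -1), intReduceSeed(i16, .And));
    try std.testing.expectEqual(@as(i16, std.math.minInt(i16)), intReduceSeed(i16, .Max));
}
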
@@ -6753,9 +6736,11 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
+ const ip = &mod.intern_pool;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
- const inst_ty = f.air.typeOfIndex(inst);
- const len = @intCast(usize, inst_ty.arrayLen());
+ const inst_ty = f.typeOfIndex(inst);
+ const len = @intCast(usize, inst_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
@@ -6770,13 +6755,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
}
- const target = f.object.dg.module.getTarget();
-
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- switch (inst_ty.zigTypeTag()) {
+ switch (inst_ty.zigTypeTag(mod)) {
.Array, .Vector => {
- const elem_ty = inst_ty.childType();
+ const elem_ty = inst_ty.childType(mod);
const a = try Assignment.init(f, elem_ty);
for (resolved_elements, 0..) |element, i| {
try a.restart(f, writer);
@@ -6786,7 +6769,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, element, .Other);
try a.end(f, writer);
}
- if (inst_ty.sentinel()) |sentinel| {
+ if (inst_ty.sentinel(mod)) |sentinel| {
try a.restart(f, writer);
try f.writeCValue(writer, local, .Other);
try writer.print("[{d}]", .{resolved_elements.len});
@@ -6795,17 +6778,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try a.end(f, writer);
}
},
- .Struct => switch (inst_ty.containerLayout()) {
+ .Struct => switch (inst_ty.containerLayout(mod)) {
.Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
- if (inst_ty.structFieldIsComptime(field_i)) continue;
- const field_ty = inst_ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = inst_ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const a = try Assignment.start(f, writer, field_ty);
- try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
+ try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod))
.{ .field = field_i }
else
- .{ .identifier = inst_ty.structFieldName(field_i) });
+ .{ .identifier = ip.stringToSlice(inst_ty.structFieldName(field_i, mod)) });
try a.assign(f, writer);
try f.writeCValue(writer, element, .Other);
try a.end(f, writer);
@@ -6813,22 +6796,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
.Packed => {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- const int_info = inst_ty.intInfo(target);
+ const int_info = inst_ty.intInfo(mod);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(int_info.bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 };
- const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+ var bit_offset: u64 = 0;
var empty = true;
for (0..elements.len) |field_i| {
- if (inst_ty.structFieldIsComptime(field_i)) continue;
- const field_ty = inst_ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = inst_ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) {
try writer.writeAll("zig_or_");
@@ -6839,9 +6817,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
empty = true;
for (resolved_elements, 0..) |element, field_i| {
- if (inst_ty.structFieldIsComptime(field_i)) continue;
- const field_ty = inst_ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = inst_ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(", ");
// TODO: Skip this entire shift if val is 0?
@@ -6849,13 +6827,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
try writer.writeByte('(');
- if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
+ if (inst_ty.isAbiInt(mod) and (field_ty.isAbiInt(mod) or field_ty.isPtrAtRuntime(mod))) {
try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
} else {
try writer.writeByte('(');
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
- if (field_ty.isPtrAtRuntime()) {
+ if (field_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, switch (int_info.signedness) {
.unsigned => Type.usize,
@@ -6867,12 +6845,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
try writer.writeAll(", ");
+ const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
- bit_offset_val_pl.data += field_ty.bitSize(target);
+ bit_offset += field_ty.bitSize(mod);
empty = false;
}
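
[Note] The packed-struct branch above keeps a plain u64 running bit offset and only materializes it as an interned value at the point of use. A simplified, self-contained model of the underlying packing arithmetic; PackedField and packFields are names invented for this sketch, and an LSB-first field layout is assumed.

const std = @import("std");

fn PackedField(comptime Backing: type) type {
    return struct { bits: u16, value: Backing };
}

// OR each field into the backing integer at its running bit offset.
fn packFields(comptime Backing: type, fields: []const PackedField(Backing)) Backing {
    var result: Backing = 0;
    var bit_offset: u16 = 0;
    for (fields) |field| {
        // std.math.shl tolerates any shift-amount type, so no cast is needed here
        result |= std.math.shl(Backing, field.value, bit_offset);
        bit_offset += field.bits;
    }
    return result;
}

test "packFields" {
    // two u4 fields: 0xA in the low bits, 0x5 in the next four bits
    const v = packFields(u8, &.{ .{ .bits = 4, .value = 0xA }, .{ .bits = 4, .value = 0x5 } });
    try std.testing.expectEqual(@as(u8, 0x5A), v);
}
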
@@ -6886,14 +6865,15 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
+ const ip = &mod.intern_pool;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
- const union_ty = f.air.typeOfIndex(inst);
- const target = f.object.dg.module.getTarget();
- const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+ const union_ty = f.typeOfIndex(inst);
+ const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.fields.keys()[extra.field_index];
- const payload_ty = f.air.typeOf(extra.init);
+ const payload_ty = f.typeOf(extra.init);
const payload = try f.resolveInst(extra.init);
try reap(f, inst, &.{extra.init});
@@ -6907,19 +6887,14 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
- const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: {
- const layout = union_ty.unionGetLayout(target);
+ const field: CValue = if (union_ty.unionTagTypeSafety(mod)) |tag_ty| field: {
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size != 0) {
- const field_index = tag_ty.enumFieldIndex(field_name).?;
+ const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
- var tag_pl: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
- const tag_val = Value.initPayload(&tag_pl.base);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
- var int_pl: Value.Payload.U64 = undefined;
- const int_val = tag_val.enumToInt(tag_ty, &int_pl);
+ const int_val = try tag_val.enumToInt(tag_ty, mod);
const a = try Assignment.start(f, writer, tag_ty);
try f.writeCValueMember(writer, local, .{ .identifier = "tag" });
@@ -6927,8 +6902,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.print("{}", .{try f.fmtIntLiteral(tag_ty, int_val)});
try a.end(f, writer);
}
- break :field .{ .payload_identifier = field_name };
- } else .{ .identifier = field_name };
+ break :field .{ .payload_identifier = ip.stringToSlice(field_name) };
+ } else .{ .identifier = ip.stringToSlice(field_name) };
const a = try Assignment.start(f, writer, payload_ty);
try f.writeCValueMember(writer, local, field);
@@ -6963,7 +6938,7 @@ fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
@@ -6977,7 +6952,7 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const writer = f.object.writer();
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const operand = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
const local = try f.allocLocal(inst, inst_ty);
@@ -6991,13 +6966,14 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
- const operand_ty = f.air.typeOf(un_op);
- const scalar_ty = operand_ty.scalarType();
+ const operand_ty = f.typeOf(un_op);
+ const scalar_ty = operand_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, operand_ty);
@@ -7016,13 +6992,14 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7043,14 +7020,15 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
}
fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7074,6 +7052,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
}
fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data;
@@ -7082,8 +7061,8 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
const addend = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
- const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_ty = f.typeOfIndex(inst);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7108,7 +7087,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete);
const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len;
@@ -7127,7 +7106,7 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const va_list = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -7158,7 +7137,7 @@ fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue {
fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = f.air.typeOfIndex(inst);
+ const inst_ty = f.typeOfIndex(inst);
const va_list = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -7279,8 +7258,9 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 {
};
}
-fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
- return if (ty.isInt()) switch (ty.intInfo(target).bits) {
+fn compilerRtAbbrev(ty: Type, mod: *Module) []const u8 {
+ const target = mod.getTarget();
+ return if (ty.isInt(mod)) switch (ty.intInfo(mod).bits) {
1...32 => "si",
33...64 => "di",
65...128 => "ti",
@@ -7407,7 +7387,7 @@ fn undefPattern(comptime IntType: type) IntType {
const FormatIntLiteralContext = struct {
dg: *DeclGen,
- int_info: std.builtin.Type.Int,
+ int_info: InternPool.Key.IntType,
kind: CType.Kind,
cty: CType,
val: Value,
@@ -7418,7 +7398,8 @@ fn formatIntLiteral(
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const target = data.dg.module.getTarget();
+ const mod = data.dg.module;
+ const target = mod.getTarget();
const ExpectedContents = struct {
const base = 10;
@@ -7438,7 +7419,7 @@ fn formatIntLiteral(
defer allocator.free(undef_limbs);
var int_buf: Value.BigIntSpace = undefined;
- const int = if (data.val.isUndefDeep()) blk: {
+ const int = if (data.val.isUndefDeep(mod)) blk: {
undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
@memset(undef_limbs, undefPattern(BigIntLimb));
@@ -7449,7 +7430,7 @@ fn formatIntLiteral(
};
undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
- } else data.val.toBigInt(&int_buf, target);
+ } else data.val.toBigInt(&int_buf, mod);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
@@ -7576,10 +7557,6 @@ fn formatIntLiteral(
c_limb_int_info.signedness = .unsigned;
c_limb_cty = c_limb_info.cty;
}
- var c_limb_val_pl = Value.Payload.BigInt{
- .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative },
- .data = c_limb_mut.limbs[0..c_limb_mut.len],
- };
if (limb_offset > 0) try writer.writeAll(", ");
try formatIntLiteral(.{
@@ -7587,7 +7564,7 @@ fn formatIntLiteral(
.int_info = c_limb_int_info,
.kind = data.kind,
.cty = c_limb_cty,
- .val = Value.initPayload(&c_limb_val_pl.base),
+ .val = try mod.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
}, fmt, options, writer);
}
}
@@ -7684,20 +7661,21 @@ const Vectorize = struct {
index: CValue = .none,
pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
- return if (ty.zigTypeTag() == .Vector) index: {
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
+ const mod = f.object.dg.module;
+ return if (ty.zigTypeTag(mod) == .Vector) index: {
+ const len_val = try mod.intValue(Type.usize, ty.vectorLen(mod));
const local = try f.allocLocal(inst, Type.usize);
try writer.writeAll("for (");
try f.writeCValue(writer, local, .Other);
- try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
+ try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))});
try f.writeCValue(writer, local, .Other);
try writer.print(" < {d}; ", .{
- try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)),
+ try f.fmtIntLiteral(Type.usize, len_val),
});
try f.writeCValue(writer, local, .Other);
- try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)});
+ try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))});
f.object.indent_writer.pushIndent();
break :index .{ .index = local };
@@ -7721,34 +7699,30 @@ const Vectorize = struct {
}
};
-const LowerFnRetTyBuffer = struct {
- names: [1][]const u8,
- types: [1]Type,
- values: [1]Value,
- payload: Type.Payload.AnonStruct,
-};
-fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type {
- if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn);
-
- if (lowersToArray(ret_ty, target)) {
- buffer.names = [1][]const u8{"array"};
- buffer.types = [1]Type{ret_ty};
- buffer.values = [1]Value{Value.initTag(.unreachable_value)};
- buffer.payload = .{ .data = .{
- .names = &buffer.names,
- .types = &buffer.types,
- .values = &buffer.values,
- } };
- return Type.initPayload(&buffer.payload.base);
+fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
+ if (ret_ty.ip_index == .noreturn_type) return Type.noreturn;
+
+ if (lowersToArray(ret_ty, mod)) {
+ const names = [1]InternPool.NullTerminatedString{
+ try mod.intern_pool.getOrPutString(mod.gpa, "array"),
+ };
+ const types = [1]InternPool.Index{ret_ty.ip_index};
+ const values = [1]InternPool.Index{.none};
+ const interned = try mod.intern(.{ .anon_struct_type = .{
+ .names = &names,
+ .types = &types,
+ .values = &values,
+ } });
+ return interned.toType();
}
- return if (ret_ty.hasRuntimeBitsIgnoreComptime()) ret_ty else Type.void;
+ return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
}
-fn lowersToArray(ty: Type, target: std.Target) bool {
- return switch (ty.zigTypeTag()) {
+fn lowersToArray(ty: Type, mod: *Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => return true,
- else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null,
+ else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
};
}
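
[Note] lowerFnRetTy works around the fact that C cannot return arrays by value: array-like return types (including over-wide ABI integers, per lowersToArray) are wrapped in a single-member anonymous struct named "array". A hedged sketch of that decision in plain Zig; returnsByArrayWrapper is an illustrative helper and deliberately ignores the wide-integer case.

const std = @import("std");

// Roughly: `fn f() [4]u8` cannot become `uint8_t f(void)[4]` in C, so the
// emitted prototype returns a one-member struct wrapping the array instead.
fn returnsByArrayWrapper(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        // arrays and vectors cannot be returned by value in C
        .Array, .Vector => true,
        else => false,
    };
}

test "array returns get wrapped" {
    try std.testing.expect(returnsByArrayWrapper([4]u8));
    try std.testing.expect(!returnsByArrayWrapper(u32));
}
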
@@ -7765,8 +7739,8 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi
fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void {
const ref_inst = Air.refToIndex(ref) orelse return;
+ assert(f.air.instructions.items(.tag)[ref_inst] != .interned);
const c_value = (f.value_map.fetchRemove(ref_inst) orelse return).value;
- if (f.air.instructions.items(.tag)[ref_inst] == .constant) return;
const local_index = switch (c_value) {
.local, .new_local => |l| l,
else => return,
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 892914ea3d..81ca1dd80d 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -292,19 +292,19 @@ pub const CType = extern union {
.abi = std.math.log2_int(u32, abi_alignment),
};
}
- pub fn abiAlign(ty: Type, target: Target) AlignAs {
- const abi_align = ty.abiAlignment(target);
+ pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
+ const abi_align = ty.abiAlignment(mod);
return init(abi_align, abi_align);
}
- pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
+ pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
return init(
- struct_ty.structFieldAlign(field_i, target),
- struct_ty.structFieldType(field_i).abiAlignment(target),
+ struct_ty.structFieldAlign(field_i, mod),
+ struct_ty.structFieldType(field_i, mod).abiAlignment(mod),
);
}
- pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
- const union_obj = union_ty.cast(Type.Payload.Union).?.data;
- const union_payload_align = union_obj.abiAlignment(target, false);
+ pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
+ const union_obj = mod.typeToUnion(union_ty).?;
+ const union_payload_align = union_obj.abiAlignment(mod, false);
return init(union_payload_align, union_payload_align);
}
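
[Note] AlignAs keeps both the requested and the ABI alignment as log2 values, which is what the field-alignment comparisons later in this file rely on. A minimal stand-in follows; AlignLog2 is an invented name, and treating a zero requested alignment as "use the ABI alignment" is an assumption of this sketch.

const std = @import("std");

const AlignLog2 = struct {
    @"align": u5,
    abi: u5,

    fn init(alignment: u32, abi_alignment: u32) AlignLog2 {
        return .{
            .@"align" = std.math.log2_int(u32, if (alignment != 0) alignment else abi_alignment),
            .abi = std.math.log2_int(u32, abi_alignment),
        };
    }

    fn isPacked(a: AlignLog2) bool {
        // a field counts as packed when its requested alignment is below the ABI one
        return a.@"align" < a.abi;
    }
};

test "AlignLog2" {
    const a = AlignLog2.init(1, 8); // an align(1) field of an 8-byte-aligned type
    try std.testing.expect(a.isPacked());
}
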
@@ -344,8 +344,8 @@ pub const CType = extern union {
return self.map.entries.items(.hash)[index - Tag.no_payload_count];
}
- pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index {
- const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } };
+ pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index {
+ const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } };
var convert: Convert = undefined;
convert.initType(ty, kind, lookup) catch unreachable;
@@ -405,7 +405,7 @@ pub const CType = extern union {
);
if (!gop.found_existing) {
errdefer _ = self.set.map.pop();
- gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert);
+ gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert);
}
if (std.debug.runtime_safety) {
const adapter = TypeAdapter64{
@@ -879,7 +879,7 @@ pub const CType = extern union {
.pointer_const,
.pointer_volatile,
.pointer_const_volatile,
- => @divExact(target.cpu.arch.ptrBitWidth(), 8),
+ => @divExact(target.ptrBitWidth(), 8),
.uint16_t, .int16_t, .zig_f16 => 2,
.uint32_t, .int32_t, .zig_f32 => 4,
.uint64_t, .int64_t, .zig_f64 => 8,
@@ -1236,10 +1236,10 @@ pub const CType = extern union {
}
pub const Lookup = union(enum) {
- fail: Target,
+ fail: *Module,
imm: struct {
set: *const Store.Set,
- target: Target,
+ mod: *Module,
},
mut: struct {
promoted: *Store.Promoted,
@@ -1254,10 +1254,14 @@ pub const CType = extern union {
}
pub fn getTarget(self: @This()) Target {
+ return self.getModule().getTarget();
+ }
+
+ pub fn getModule(self: @This()) *Module {
return switch (self) {
- .fail => |target| target,
- .imm => |imm| imm.target,
- .mut => |mut| mut.mod.getTarget(),
+ .fail => |mod| mod,
+ .imm => |imm| imm.mod,
+ .mut => |mut| mut.mod,
};
}
@@ -1272,7 +1276,7 @@ pub const CType = extern union {
pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index {
return switch (self) {
.fail => null,
- .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind),
+ .imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind),
.mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind),
};
}
@@ -1284,7 +1288,7 @@ pub const CType = extern union {
pub fn freeze(self: @This()) @This() {
return switch (self) {
.fail, .imm => self,
- .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } },
+ .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } },
};
}
};
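
[Note] With the target removed from Lookup, every variant now carries (or is) a *Module, and target queries are derived from it. A tiny self-contained model of that shape; Module here is a stand-in struct, not the compiler's type.

const std = @import("std");

const Module = struct { ptr_bits: u16 };

const Lookup = union(enum) {
    fail: *Module,
    imm: struct { mod: *Module },
    mut: struct { mod: *Module },

    fn getModule(self: Lookup) *Module {
        return switch (self) {
            .fail => |mod| mod,
            .imm => |imm| imm.mod,
            .mut => |mut| mut.mod,
        };
    }

    // target information now always goes through the module
    fn getTargetPtrBits(self: Lookup) u16 {
        return self.getModule().ptr_bits;
    }
};

test "Lookup resolves the module in every variant" {
    var mod = Module{ .ptr_bits = 64 };
    const lookup = Lookup{ .imm = .{ .mod = &mod } };
    try std.testing.expectEqual(@as(u16, 64), lookup.getTargetPtrBits());
}
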
@@ -1292,7 +1296,7 @@ pub const CType = extern union {
fn sortFields(self: *@This(), fields_len: usize) []Payload.Fields.Field {
const Field = Payload.Fields.Field;
const slice = self.storage.anon.fields[0..fields_len];
- std.sort.sort(Field, slice, {}, struct {
+ mem.sort(Field, slice, {}, struct {
fn before(_: void, lhs: Field, rhs: Field) bool {
return lhs.alignas.@"align" > rhs.alignas.@"align";
}
@@ -1338,7 +1342,7 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "array",
.type = array_idx,
- .alignas = AlignAs.abiAlign(ty, lookup.getTarget()),
+ .alignas = AlignAs.abiAlign(ty, lookup.getModule()),
};
self.initAnon(kind, fwd_idx, 1);
} else self.init(switch (kind) {
@@ -1350,30 +1354,30 @@ pub const CType = extern union {
}
pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
- const target = lookup.getTarget();
+ const mod = lookup.getModule();
self.* = undefined;
- if (!ty.isFnOrHasRuntimeBitsIgnoreComptime())
+ if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
self.init(.void)
- else if (ty.isAbiInt()) switch (ty.tag()) {
- .usize => self.init(.uintptr_t),
- .isize => self.init(.intptr_t),
- .c_char => self.init(.char),
- .c_short => self.init(.short),
- .c_ushort => self.init(.@"unsigned short"),
- .c_int => self.init(.int),
- .c_uint => self.init(.@"unsigned int"),
- .c_long => self.init(.long),
- .c_ulong => self.init(.@"unsigned long"),
- .c_longlong => self.init(.@"long long"),
- .c_ulonglong => self.init(.@"unsigned long long"),
- else => switch (tagFromIntInfo(ty.intInfo(target))) {
+ else if (ty.isAbiInt(mod)) switch (ty.ip_index) {
+ .usize_type => self.init(.uintptr_t),
+ .isize_type => self.init(.intptr_t),
+ .c_char_type => self.init(.char),
+ .c_short_type => self.init(.short),
+ .c_ushort_type => self.init(.@"unsigned short"),
+ .c_int_type => self.init(.int),
+ .c_uint_type => self.init(.@"unsigned int"),
+ .c_long_type => self.init(.long),
+ .c_ulong_type => self.init(.@"unsigned long"),
+ .c_longlong_type => self.init(.@"long long"),
+ .c_ulonglong_type => self.init(.@"unsigned long long"),
+ else => switch (tagFromIntInfo(ty.intInfo(mod))) {
.void => unreachable,
else => |t| self.init(t),
.array => switch (kind) {
.forward, .complete, .global => {
- const abi_size = ty.abiSize(target);
- const abi_align = ty.abiAlignment(target);
+ const abi_size = ty.abiSize(mod);
+ const abi_align = ty.abiAlignment(mod);
self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
.len = @divExact(abi_size, abi_align),
.elem_type = tagFromIntInfo(.{
@@ -1389,7 +1393,7 @@ pub const CType = extern union {
.payload => unreachable,
},
},
- } else switch (ty.zigTypeTag()) {
+ } else switch (ty.zigTypeTag(mod)) {
.Frame => unreachable,
.AnyFrame => unreachable,
@@ -1408,18 +1412,18 @@ pub const CType = extern union {
.Bool => self.init(.bool),
- .Float => self.init(switch (ty.tag()) {
- .f16 => .zig_f16,
- .f32 => .zig_f32,
- .f64 => .zig_f64,
- .f80 => .zig_f80,
- .f128 => .zig_f128,
- .c_longdouble => .zig_c_longdouble,
+ .Float => self.init(switch (ty.ip_index) {
+ .f16_type => .zig_f16,
+ .f32_type => .zig_f32,
+ .f64_type => .zig_f64,
+ .f80_type => .zig_f80,
+ .f128_type => .zig_f128,
+ .c_longdouble_type => .zig_c_longdouble,
else => unreachable,
}),
.Pointer => {
- const info = ty.ptrInfo().data;
+ const info = ty.ptrInfo(mod);
switch (info.size) {
.Slice => {
if (switch (kind) {
@@ -1427,19 +1431,18 @@ pub const CType = extern union {
.complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
.payload => unreachable,
}) |fwd_idx| {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = ty.slicePtrFieldType(&buf);
+ const ptr_ty = ty.slicePtrFieldType(mod);
if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| {
self.storage = .{ .anon = undefined };
self.storage.anon.fields[0] = .{
.name = "ptr",
.type = ptr_idx,
- .alignas = AlignAs.abiAlign(ptr_ty, target),
+ .alignas = AlignAs.abiAlign(ptr_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "len",
.type = Tag.uintptr_t.toIndex(),
- .alignas = AlignAs.abiAlign(Type.usize, target),
+ .alignas = AlignAs.abiAlign(Type.usize, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1462,16 +1465,12 @@ pub const CType = extern union {
},
};
- var host_int_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = info.host_size * 8,
- };
const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
- Type.initPayload(&host_int_pl.base)
+ try mod.intType(.unsigned, info.host_size * 8)
else
info.pointee_type;
- if (if (info.size == .C and pointee_ty.tag() == .u8)
+ if (if (info.size == .C and pointee_ty.ip_index == .u8_type)
Tag.char.toIndex()
else
try lookup.typeToIndex(pointee_ty, .forward)) |child_idx|
@@ -1486,26 +1485,24 @@ pub const CType = extern union {
}
},
- .Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) {
- if (ty.castTag(.@"struct")) |struct_obj| {
- try self.initType(struct_obj.data.backing_int_ty, kind, lookup);
+ .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
+ if (mod.typeToStruct(ty)) |struct_obj| {
+ try self.initType(struct_obj.backing_int_ty, kind, lookup);
} else {
- var buf: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.bitSize(target)),
- };
- try self.initType(Type.initPayload(&buf.base), kind, lookup);
+ const bits = @intCast(u16, ty.bitSize(mod));
+ const int_ty = try mod.intType(.unsigned, bits);
+ try self.initType(int_ty, kind, lookup);
}
- } else if (ty.isTupleOrAnonStruct()) {
+ } else if (ty.isTupleOrAnonStruct(mod)) {
if (lookup.isMutable()) {
for (0..switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(),
- .Union => ty.unionFields().count(),
+ .Struct => ty.structFieldCount(mod),
+ .Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
- const field_ty = ty.structFieldType(field_i);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(field_ty, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
@@ -1533,14 +1530,14 @@ pub const CType = extern union {
.payload => unreachable,
});
} else {
- const tag_ty = ty.unionTagTypeSafety();
+ const tag_ty = ty.unionTagTypeSafety(mod);
const is_tagged_union_wrapper = kind != .payload and tag_ty != null;
const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper;
switch (kind) {
.forward, .forward_parameter => {
self.storage = .{ .fwd = .{
.base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union },
- .data = ty.getOwnerDecl(),
+ .data = ty.getOwnerDecl(mod),
} };
self.value = .{ .cty = initPayload(&self.storage.fwd) };
},
@@ -1555,7 +1552,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "payload",
.type = payload_idx.?,
- .alignas = AlignAs.unionPayloadAlign(ty, target),
+ .alignas = AlignAs.unionPayloadAlign(ty, mod),
};
field_count += 1;
}
@@ -1563,7 +1560,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "tag",
.type = tag_idx.?,
- .alignas = AlignAs.abiAlign(tag_ty.?, target),
+ .alignas = AlignAs.abiAlign(tag_ty.?, mod),
};
field_count += 1;
}
@@ -1576,19 +1573,19 @@ pub const CType = extern union {
} };
self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) };
} else self.init(.@"struct");
- } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) {
+ } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) {
self.init(.void);
} else {
var is_packed = false;
for (0..switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(),
- .Union => ty.unionFields().count(),
+ .Struct => ty.structFieldCount(mod),
+ .Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
- const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_align = AlignAs.fieldAlign(ty, field_i, target);
+ const field_align = AlignAs.fieldAlign(ty, field_i, mod);
if (field_align.@"align" < field_align.abi) {
is_packed = true;
if (!lookup.isMutable()) break;
@@ -1627,9 +1624,9 @@ pub const CType = extern union {
.Vector => .vector,
else => unreachable,
};
- if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| {
+ if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| {
self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{
- .len = ty.arrayLenIncludingSentinel(),
+ .len = ty.arrayLenIncludingSentinel(mod),
.elem_type = child_idx,
} } };
self.value = .{ .cty = initPayload(&self.storage.seq) };
@@ -1641,10 +1638,9 @@ pub const CType = extern union {
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
- if (ty.optionalReprIsPayload()) {
+ const payload_ty = ty.optionalChild(mod);
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (ty.optionalReprIsPayload(mod)) {
try self.initType(payload_ty, kind, lookup);
} else if (switch (kind) {
.forward, .forward_parameter => @as(Index, undefined),
@@ -1661,12 +1657,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
- .alignas = AlignAs.abiAlign(payload_ty, target),
+ .alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "is_null",
.type = Tag.bool.toIndex(),
- .alignas = AlignAs.abiAlign(Type.bool, target),
+ .alignas = AlignAs.abiAlign(Type.bool, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1684,14 +1680,14 @@ pub const CType = extern union {
.complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
.payload => unreachable,
}) |fwd_idx| {
- const payload_ty = ty.errorUnionPayload();
+ const payload_ty = ty.errorUnionPayload(mod);
if (try lookup.typeToIndex(payload_ty, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
.global => .global,
.payload => unreachable,
})) |payload_idx| {
- const error_ty = ty.errorUnionSet();
+ const error_ty = ty.errorUnionSet(mod);
if (payload_idx == Tag.void.toIndex()) {
try self.initType(error_ty, kind, lookup);
} else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| {
@@ -1699,12 +1695,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
- .alignas = AlignAs.abiAlign(payload_ty, target),
+ .alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "error",
.type = error_idx,
- .alignas = AlignAs.abiAlign(error_ty, target),
+ .alignas = AlignAs.abiAlign(error_ty, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1723,7 +1719,7 @@ pub const CType = extern union {
.Opaque => self.init(.void),
.Fn => {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
if (!info.is_generic) {
if (lookup.isMutable()) {
const param_kind: Kind = switch (kind) {
@@ -1731,10 +1727,10 @@ pub const CType = extern union {
.complete, .parameter, .global => .parameter,
.payload => unreachable,
};
- _ = try lookup.typeToIndex(info.return_type, param_kind);
+ _ = try lookup.typeToIndex(info.return_type.toType(), param_kind);
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
- _ = try lookup.typeToIndex(param_type, param_kind);
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ _ = try lookup.typeToIndex(param_type.toType(), param_kind);
}
}
self.init(if (info.is_var_args) .varargs_function else .function);
@@ -1900,16 +1896,16 @@ pub const CType = extern union {
}
}
- fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType {
+ fn createFromType(store: *Store.Promoted, ty: Type, mod: *Module, kind: Kind) !CType {
var convert: Convert = undefined;
- try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } });
- return createFromConvert(store, ty, target, kind, &convert);
+ try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
+ return createFromConvert(store, ty, mod, kind, &convert);
}
fn createFromConvert(
store: *Store.Promoted,
ty: Type,
- target: Target,
+ mod: *Module,
kind: Kind,
convert: Convert,
) !CType {
@@ -1930,44 +1926,44 @@ pub const CType = extern union {
.packed_struct,
.packed_union,
=> {
- const zig_ty_tag = ty.zigTypeTag();
+ const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(),
- .Union => ty.unionFields().count(),
+ .Struct => ty.structFieldCount(mod),
+ .Union => ty.unionFields(mod).count(),
else => unreachable,
};
var c_fields_len: usize = 0;
for (0..fields_len) |field_i| {
- const field_ty = ty.structFieldType(field_i);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
c_fields_len += 1;
}
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
var c_field_i: usize = 0;
for (0..fields_len) |field_i| {
- const field_ty = ty.structFieldType(field_i);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
fields_pl[c_field_i] = .{
- .name = try if (ty.isSimpleTuple())
+ .name = try if (ty.isSimpleTuple(mod))
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else
- arena.dupeZ(u8, switch (zig_ty_tag) {
- .Struct => ty.structFieldName(field_i),
- .Union => ty.unionFields().keys()[field_i],
+ arena.dupeZ(u8, mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
+ .Struct => ty.structFieldName(field_i, mod),
+ .Union => ty.unionFields(mod).keys()[field_i],
else => unreachable,
- }),
- .type = store.set.typeToIndex(field_ty, target, switch (kind) {
+ })),
+ .type = store.set.typeToIndex(field_ty, mod, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter, .payload => .complete,
.global => .global,
}).?,
- .alignas = AlignAs.fieldAlign(ty, field_i, target),
+ .alignas = AlignAs.fieldAlign(ty, field_i, mod),
};
}
@@ -1988,8 +1984,8 @@ pub const CType = extern union {
const unnamed_pl = try arena.create(Payload.Unnamed);
unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
- .owner_decl = ty.getOwnerDecl(),
- .id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable,
+ .owner_decl = ty.getOwnerDecl(mod),
+ .id = if (ty.unionTagTypeSafety(mod)) |_| 0 else unreachable,
} };
return initPayload(unnamed_pl);
},
@@ -2004,7 +2000,7 @@ pub const CType = extern union {
const struct_pl = try arena.create(Payload.Aggregate);
struct_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
- .fwd_decl = store.set.typeToIndex(ty, target, .forward).?,
+ .fwd_decl = store.set.typeToIndex(ty, mod, .forward).?,
} };
return initPayload(struct_pl);
},
@@ -2016,7 +2012,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const param_kind: Kind = switch (kind) {
.forward, .forward_parameter => .forward_parameter,
@@ -2026,21 +2022,21 @@ pub const CType = extern union {
var c_params_len: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
c_params_len += 1;
}
const params_pl = try arena.alloc(Index, c_params_len);
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
- params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?;
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?;
c_param_i += 1;
}
const fn_pl = try arena.create(Payload.Function);
fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
- .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?,
+ .return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?,
.param_types = params_pl,
} };
return initPayload(fn_pl);
@@ -2067,33 +2063,33 @@ pub const CType = extern union {
}
pub fn eql(self: @This(), ty: Type, cty: CType) bool {
+ const mod = self.lookup.getModule();
switch (self.convert.value) {
.cty => |c| return c.eql(cty),
.tag => |t| {
if (t != cty.tag()) return false;
- const target = self.lookup.getTarget();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
=> {
- if (!ty.isTupleOrAnonStruct()) return false;
+ if (!ty.isTupleOrAnonStruct(mod)) return false;
var name_buf: [
std.fmt.count("f{}", .{std.math.maxInt(usize)})
]u8 = undefined;
const c_fields = cty.cast(Payload.Fields).?.data;
- const zig_ty_tag = ty.zigTypeTag();
+ const zig_ty_tag = ty.zigTypeTag(mod);
var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) {
- .Struct => ty.structFieldCount(),
- .Union => ty.unionFields().count(),
+ .Struct => ty.structFieldCount(mod),
+ .Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
- const field_ty = ty.structFieldType(field_i);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
const c_field = &c_fields[c_field_i];
@@ -2105,15 +2101,16 @@ pub const CType = extern union {
.payload => unreachable,
}) or !mem.eql(
u8,
- if (ty.isSimpleTuple())
- std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
- else switch (zig_ty_tag) {
- .Struct => ty.structFieldName(field_i),
- .Union => ty.unionFields().keys()[field_i],
- else => unreachable,
- },
+ if (ty.isSimpleTuple(mod))
+ std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable
+ else
+ mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
+ .Struct => ty.structFieldName(field_i, mod),
+ .Union => ty.unionFields(mod).keys()[field_i],
+ else => unreachable,
+ }),
mem.span(c_field.name),
- ) or AlignAs.fieldAlign(ty, field_i, target).@"align" !=
+ ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" !=
c_field.alignas.@"align") return false;
}
return true;
@@ -2125,9 +2122,9 @@ pub const CType = extern union {
.packed_unnamed_union,
=> switch (self.kind) {
.forward, .forward_parameter, .complete, .parameter, .global => unreachable,
- .payload => if (ty.unionTagTypeSafety()) |_| {
+ .payload => if (ty.unionTagTypeSafety(mod)) |_| {
const data = cty.cast(Payload.Unnamed).?.data;
- return ty.getOwnerDecl() == data.owner_decl and data.id == 0;
+ return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0;
} else unreachable,
},
@@ -2146,9 +2143,9 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
- if (ty.zigTypeTag() != .Fn) return false;
+ if (ty.zigTypeTag(mod) != .Fn) return false;
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const data = cty.cast(Payload.Function).?.data;
const param_kind: Kind = switch (self.kind) {
@@ -2157,18 +2154,18 @@ pub const CType = extern union {
.payload => unreachable,
};
- if (!self.eqlRecurse(info.return_type, data.return_type, param_kind))
+ if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind))
return false;
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (c_param_i >= data.param_types.len) return false;
const param_cty = data.param_types[c_param_i];
c_param_i += 1;
- if (!self.eqlRecurse(param_type, param_cty, param_kind))
+ if (!self.eqlRecurse(param_type.toType(), param_cty, param_kind))
return false;
}
return c_param_i == data.param_types.len;
@@ -2202,7 +2199,7 @@ pub const CType = extern union {
.tag => |t| {
autoHash(hasher, t);
- const target = self.lookup.getTarget();
+ const mod = self.lookup.getModule();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
@@ -2211,15 +2208,15 @@ pub const CType = extern union {
std.fmt.count("f{}", .{std.math.maxInt(usize)})
]u8 = undefined;
- const zig_ty_tag = ty.zigTypeTag();
- for (0..switch (ty.zigTypeTag()) {
- .Struct => ty.structFieldCount(),
- .Union => ty.unionFields().count(),
+ const zig_ty_tag = ty.zigTypeTag(mod);
+ for (0..switch (ty.zigTypeTag(mod)) {
+ .Struct => ty.structFieldCount(mod),
+ .Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
- const field_ty = ty.structFieldType(field_i);
- if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
.forward, .forward_parameter => .forward,
@@ -2227,14 +2224,15 @@ pub const CType = extern union {
.global => .global,
.payload => unreachable,
});
- hasher.update(if (ty.isSimpleTuple())
+ hasher.update(if (ty.isSimpleTuple(mod))
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
- else switch (zig_ty_tag) {
- .Struct => ty.structFieldName(field_i),
- .Union => ty.unionFields().keys()[field_i],
- else => unreachable,
- });
- autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align");
+ else
+ mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
+ .Struct => ty.structFieldName(field_i, mod),
+ .Union => ty.unionFields(mod).keys()[field_i],
+ else => unreachable,
+ }));
+ autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
}
},
@@ -2244,8 +2242,8 @@ pub const CType = extern union {
.packed_unnamed_union,
=> switch (self.kind) {
.forward, .forward_parameter, .complete, .parameter, .global => unreachable,
- .payload => if (ty.unionTagTypeSafety()) |_| {
- autoHash(hasher, ty.getOwnerDecl());
+ .payload => if (ty.unionTagTypeSafety(mod)) |_| {
+ autoHash(hasher, ty.getOwnerDecl(mod));
autoHash(hasher, @as(u32, 0));
} else unreachable,
},
@@ -2261,7 +2259,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
- const info = ty.fnInfo();
+ const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const param_kind: Kind = switch (self.kind) {
.forward, .forward_parameter => .forward_parameter,
@@ -2269,10 +2267,10 @@ pub const CType = extern union {
.payload => unreachable,
};
- self.updateHasherRecurse(hasher, info.return_type, param_kind);
+ self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind);
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
- self.updateHasherRecurse(hasher, param_type, param_kind);
+ if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ self.updateHasherRecurse(hasher, param_type.toType(), param_kind);
}
},
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index b46aae7718..11cd752000 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -12,6 +12,7 @@ const link = @import("../link.zig");
const Compilation = @import("../Compilation.zig");
const build_options = @import("build_options");
const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
const Package = @import("../Package.zig");
const TypedValue = @import("../TypedValue.zig");
const Air = @import("../Air.zig");
@@ -361,15 +362,11 @@ pub const Object = struct {
decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
/// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction.
named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
- /// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of
- /// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
- /// TODO we need to remove entries from this map in response to incremental compilation
- /// but I think the frontend won't tell us about types that get deleted because
- /// hasRuntimeBits() is false for types.
+ /// Maps Zig types to LLVM types. The table memory is backed by the GPA of
+ /// the compiler.
+ /// TODO when InternPool garbage collection is implemented, this map needs
+ /// to be garbage collected as well.
type_map: TypeMap,
- /// The backing memory for `type_map`. Periodically garbage collected after flush().
- /// The code for doing the periodical GC is not yet implemented.
- type_map_arena: std.heap.ArenaAllocator,
di_type_map: DITypeMap,
/// The LLVM global table which holds the names corresponding to Zig errors.
/// Note that the values are not added until flushModule, when all errors in
@@ -380,21 +377,11 @@ pub const Object = struct {
/// name collision.
extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void),
- pub const TypeMap = std.HashMapUnmanaged(
- Type,
- *llvm.Type,
- Type.HashContext64,
- std.hash_map.default_max_load_percentage,
- );
+ pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type);
/// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we
/// want to iterate over it while adding entries to it.
- pub const DITypeMap = std.ArrayHashMapUnmanaged(
- Type,
- AnnotatedDITypePtr,
- Type.HashContext32,
- true,
- );
+ pub const DITypeMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, AnnotatedDITypePtr);
pub fn create(gpa: Allocator, options: link.Options) !*Object {
const obj = try gpa.create(Object);
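
[Note] Keying the maps by InternPool.Index is what allows dropping the custom Type hash contexts and the backing arena: the key becomes a small integer with value identity, so a plain auto-hashed map suffices. A sketch with stand-in types; Index and TypeRef here are not the real compiler types.

const std = @import("std");

const Index = u32; // stand-in for InternPool.Index
const TypeRef = usize; // stand-in for *llvm.Type

const TypeMap = std.AutoHashMapUnmanaged(Index, TypeRef);

test "interned type indices are auto-hashable keys" {
    var map: TypeMap = .{};
    defer map.deinit(std.testing.allocator);
    try map.put(std.testing.allocator, 42, 0xdead);
    try std.testing.expect(map.contains(42));
    try std.testing.expectEqual(@as(?TypeRef, 0xdead), map.get(42));
}
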
@@ -542,7 +529,6 @@ pub const Object = struct {
.decl_map = .{},
.named_enum_map = .{},
.type_map = .{},
- .type_map_arena = std.heap.ArenaAllocator.init(gpa),
.di_type_map = .{},
.error_name_table = null,
.extern_collisions = .{},
@@ -562,7 +548,6 @@ pub const Object = struct {
self.decl_map.deinit(gpa);
self.named_enum_map.deinit(gpa);
self.type_map.deinit(gpa);
- self.type_map_arena.deinit();
self.extern_collisions.deinit(gpa);
self.* = undefined;
}
@@ -591,22 +576,22 @@ pub const Object = struct {
const target = mod.getTarget();
const llvm_ptr_ty = self.context.pointerType(0); // TODO: Address space
- const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+ const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
const type_fields = [_]*llvm.Type{
llvm_ptr_ty,
llvm_usize_ty,
};
const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False);
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const slice_alignment = slice_ty.abiAlignment(target);
+ const slice_ty = Type.slice_const_u8_sentinel_0;
+ const slice_alignment = slice_ty.abiAlignment(mod);
- const error_name_list = mod.error_name_list.items;
+ const error_name_list = mod.global_error_set.keys();
const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len);
defer mod.gpa.free(llvm_errors);
llvm_errors[0] = llvm_slice_ty.getUndef();
- for (llvm_errors[1..], 0..) |*llvm_error, i| {
- const name = error_name_list[1..][i];
+ for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| {
+ const name = mod.intern_pool.stringToSlice(name_nts);
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
str_global.setInitializer(str_init);
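
(Editorial sketch, not part of this patch.) The hunk above replaces an index-based loop with the multi-sequence `for` form, pairing each LLVM slot with its interned error name. A small stand-alone example of that form, with a pointer capture on the sequence being written:

const std = @import("std");

test "lock-step for over two sequences" {
    const names = [_][]const u8{ "OutOfMemory", "FileNotFound" };
    var lens = [_]usize{ 0, 0 };
    for (&lens, names) |*len, name| len.* = name.len;
    try std.testing.expectEqual(@as(usize, 11), lens[0]);
    try std.testing.expectEqual(@as(usize, 12), lens[1]);
}
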
@@ -686,7 +671,7 @@ pub const Object = struct {
const llvm_global = entry.value_ptr.*;
// Same logic as below but for externs instead of exports.
const decl = mod.declPtr(decl_index);
- const other_global = object.getLlvmGlobal(decl.name) orelse continue;
+ const other_global = object.getLlvmGlobal(mod.intern_pool.stringToSlice(decl.name)) orelse continue;
if (other_global == llvm_global) continue;
llvm_global.replaceAllUsesWith(other_global);
@@ -702,12 +687,9 @@ pub const Object = struct {
for (export_list.items) |exp| {
// Detect if the LLVM global has already been created as an extern. In such
// case, we need to replace all uses of it with this exported global.
- // TODO update std.builtin.ExportOptions to have the name be a
- // null-terminated slice.
- const exp_name_z = try mod.gpa.dupeZ(u8, exp.options.name);
- defer mod.gpa.free(exp_name_z);
+ const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
- const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue;
+ const other_global = object.getLlvmGlobal(exp_name.ptr) orelse continue;
if (other_global == llvm_global) continue;
other_global.replaceAllUsesWith(llvm_global);
@@ -880,28 +862,29 @@ pub const Object = struct {
pub fn updateFunc(
o: *Object,
- module: *Module,
- func: *Module.Fn,
+ mod: *Module,
+ func_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
) !void {
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
- const decl = module.declPtr(decl_index);
- const target = module.getTarget();
+ const decl = mod.declPtr(decl_index);
+ const target = mod.getTarget();
var dg: DeclGen = .{
.context = o.context,
.object = o,
- .module = module,
+ .module = mod,
.decl_index = decl_index,
.decl = decl,
.err_msg = null,
- .gpa = module.gpa,
+ .gpa = mod.gpa,
};
const llvm_func = try dg.resolveLlvmFunction(decl_index);
- if (module.align_stack_fns.get(func)) |align_info| {
+ if (mod.align_stack_fns.get(func_index)) |align_info| {
dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
dg.addFnAttr(llvm_func, "noinline");
} else {
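
(Editorial sketch, not part of this patch.) The signature change above is part of the pointer-to-index migration: callers now pass a stable `Module.Fn.Index` and resolve it with `funcPtr` only at the point of use, so the handle stays valid even if the backing storage moves. A toy version of that pattern; `Fn`, `FnIndex`, and `Mod` here are hypothetical stand-ins, not the compiler's real types:

const std = @import("std");

const Fn = struct { owner_decl: u32 };
const FnIndex = enum(u32) { _ };

const Mod = struct {
    funcs: std.ArrayListUnmanaged(Fn) = .{},

    // Resolve the stable index to a pointer only when it is needed.
    fn funcPtr(mod: *Mod, index: FnIndex) *Fn {
        return &mod.funcs.items[@enumToInt(index)];
    }
};
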
@@ -922,7 +905,7 @@ pub const Object = struct {
}
// TODO: disable this if safety is off for the function scope
- const ssp_buf_size = module.comp.bin_file.options.stack_protector;
+ const ssp_buf_size = mod.comp.bin_file.options.stack_protector;
if (ssp_buf_size != 0) {
var buf: [12]u8 = undefined;
const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable;
@@ -931,15 +914,14 @@ pub const Object = struct {
}
// TODO: disable this if safety is off for the function scope
- if (module.comp.bin_file.options.stack_check) {
+ if (mod.comp.bin_file.options.stack_check) {
dg.addFnAttrString(llvm_func, "probe-stack", "__zig_probe_stack");
} else if (target.os.tag == .uefi) {
dg.addFnAttrString(llvm_func, "no-stack-arg-probe", "");
}
- if (decl.@"linksection") |section| {
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section|
llvm_func.setSection(section);
- }
// Remove all the basic blocks of a function in order to start over, generating
// LLVM IR from an empty function body.
@@ -953,18 +935,18 @@ pub const Object = struct {
builder.positionBuilderAtEnd(entry_block);
// This gets the LLVM values from the function and stores them in `dg.args`.
- const fn_info = decl.ty.fnInfo();
- const sret = firstParamSRet(fn_info, target);
+ const fn_info = mod.typeToFunc(decl.ty).?;
+ const sret = firstParamSRet(fn_info, mod);
const ret_ptr = if (sret) llvm_func.getParam(0) else null;
const gpa = dg.gpa;
- if (ccAbiPromoteInt(fn_info.cc, target, fn_info.return_type)) |s| switch (s) {
+ if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) {
.signed => dg.addAttr(llvm_func, 0, "signext"),
.unsigned => dg.addAttr(llvm_func, 0, "zeroext"),
};
- const err_return_tracing = fn_info.return_type.isError() and
- module.comp.bin_file.options.error_return_tracing;
+ const err_return_tracing = fn_info.return_type.toType().isError(mod) and
+ mod.comp.bin_file.options.error_return_tracing;
const err_ret_trace = if (err_return_tracing)
llvm_func.getParam(@boolToInt(ret_ptr != null))
@@ -985,12 +967,12 @@ pub const Object = struct {
.byval => {
assert(!it.byval_attr);
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
+ const param_ty = fn_info.param_types[param_index].toType();
const param = llvm_func.getParam(llvm_arg_i);
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
- const alignment = param_ty.abiAlignment(target);
+ if (isByRef(param_ty, mod)) {
+ const alignment = param_ty.abiAlignment(mod);
const param_llvm_ty = param.typeOf();
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
const store_inst = builder.buildStore(param, arg_ptr);
@@ -1004,17 +986,17 @@ pub const Object = struct {
llvm_arg_i += 1;
},
.byref => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
dg.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
llvm_arg_i += 1;
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
args.appendAssumeCapacity(param);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1023,17 +1005,17 @@ pub const Object = struct {
}
},
.byref_mut => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
dg.addArgAttr(llvm_func, llvm_arg_i, "noundef");
llvm_arg_i += 1;
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
args.appendAssumeCapacity(param);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1043,15 +1025,15 @@ pub const Object = struct {
},
.abi_sized_int => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
const param_llvm_ty = try dg.lowerType(param_ty);
- const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+ const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
const int_llvm_ty = dg.context.intType(abi_size * 8);
const alignment = @max(
- param_ty.abiAlignment(target),
+ param_ty.abiAlignment(mod),
dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
@@ -1060,7 +1042,7 @@ pub const Object = struct {
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
args.appendAssumeCapacity(arg_ptr);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1070,15 +1052,15 @@ pub const Object = struct {
},
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1];
- const ptr_info = param_ty.ptrInfo().data;
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, it.zig_index - 1)) |i| {
if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
dg.addArgAttr(llvm_func, llvm_arg_i, "noalias");
}
}
- if (param_ty.zigTypeTag() != .Optional) {
+ if (param_ty.zigTypeTag(mod) != .Optional) {
dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
@@ -1087,7 +1069,7 @@ pub const Object = struct {
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
}
const ptr_param = llvm_func.getParam(llvm_arg_i);
@@ -1103,9 +1085,9 @@ pub const Object = struct {
.multiple_llvm_types => {
assert(!it.byval_attr);
const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
- const param_alignment = param_ty.abiAlignment(target);
+ const param_alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
for (field_types, 0..) |_, field_i_usize| {
@@ -1114,10 +1096,10 @@ pub const Object = struct {
llvm_arg_i += 1;
const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, "");
const store_inst = builder.buildStore(param, field_ptr);
- store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+ store_inst.setAlignment(target.ptrBitWidth() / 8);
}
- const is_by_ref = isByRef(param_ty);
+ const is_by_ref = isByRef(param_ty, mod);
const loaded = if (is_by_ref) arg_ptr else l: {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
load_inst.setAlignment(param_alignment);
@@ -1134,16 +1116,16 @@ pub const Object = struct {
args.appendAssumeCapacity(casted);
},
.float_array => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
_ = builder.buildStore(param, arg_ptr);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
try args.append(arg_ptr);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1152,16 +1134,16 @@ pub const Object = struct {
}
},
.i32_array, .i64_array => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
_ = builder.buildStore(param, arg_ptr);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
try args.append(arg_ptr);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1176,27 +1158,28 @@ pub const Object = struct {
var di_scope: ?*llvm.DIScope = null;
if (dg.object.di_builder) |dib| {
- di_file = try dg.object.getDIFile(gpa, decl.src_namespace.file_scope);
+ di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope);
const line_number = decl.src_line + 1;
- const is_internal_linkage = decl.val.tag() != .extern_fn and
- !module.decl_exports.contains(decl_index);
- const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
+ const is_internal_linkage = decl.val.getExternFunc(mod) == null and
+ !mod.decl_exports.contains(decl_index);
+ const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type)
llvm.DIFlags.NoReturn
else
0;
+ const decl_di_ty = try o.lowerDebugType(decl.ty, .full);
const subprogram = dib.createFunction(
di_file.?.toScope(),
- decl.name,
+ mod.intern_pool.stringToSlice(decl.name),
llvm_func.getValueName(),
di_file.?,
line_number,
- try o.lowerDebugType(decl.ty, .full),
+ decl_di_ty,
is_internal_linkage,
true, // is definition
line_number + func.lbrace_line, // scope line
llvm.DIFlags.StaticMember | noret_bit,
- module.comp.bin_file.options.optimize_mode != .Debug,
+ mod.comp.bin_file.options.optimize_mode != .Debug,
null, // decl_subprogram
);
try dg.object.di_map.put(gpa, decl, subprogram.toNode());
@@ -1219,7 +1202,7 @@ pub const Object = struct {
.func_inst_table = .{},
.llvm_func = llvm_func,
.blocks = .{},
- .single_threaded = module.comp.bin_file.options.single_threaded,
+ .single_threaded = mod.comp.bin_file.options.single_threaded,
.di_scope = di_scope,
.di_file = di_file,
.base_line = dg.decl.src_line,
@@ -1232,14 +1215,14 @@ pub const Object = struct {
fg.genBody(air.getMainBody()) catch |err| switch (err) {
error.CodegenFail => {
decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?);
+ try mod.failed_decls.put(mod.gpa, decl_index, dg.err_msg.?);
dg.err_msg = null;
return;
},
else => |e| return e,
};
- try o.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
+ try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -1275,63 +1258,72 @@ pub const Object = struct {
pub fn updateDeclExports(
self: *Object,
- module: *Module,
+ mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
+ const gpa = mod.gpa;
// If the module does not already have the function, we ignore this function call
// because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
const llvm_global = self.decl_map.get(decl_index) orelse return;
- const decl = module.declPtr(decl_index);
- if (decl.isExtern()) {
- const is_wasm_fn = module.getTarget().isWasm() and try decl.isFunction();
- const mangle_name = is_wasm_fn and
- decl.getExternFn().?.lib_name != null and
- !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c");
- const decl_name = if (mangle_name) name: {
- const tmp = try std.fmt.allocPrintZ(module.gpa, "{s}|{s}", .{ decl.name, decl.getExternFn().?.lib_name.? });
- break :name tmp.ptr;
- } else decl.name;
- defer if (mangle_name) module.gpa.free(std.mem.sliceTo(decl_name, 0));
+ const decl = mod.declPtr(decl_index);
+ if (decl.isExtern(mod)) {
+ var free_decl_name = false;
+ const decl_name = decl_name: {
+ const decl_name = mod.intern_pool.stringToSlice(decl.name);
+
+ if (mod.getTarget().isWasm() and try decl.isFunction(mod)) {
+ if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
+ if (!std.mem.eql(u8, lib_name, "c")) {
+ free_decl_name = true;
+ break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{
+ decl_name, lib_name,
+ });
+ }
+ }
+ }
+
+ break :decl_name decl_name;
+ };
+ defer if (free_decl_name) gpa.free(decl_name);
llvm_global.setValueName(decl_name);
if (self.getLlvmGlobal(decl_name)) |other_global| {
if (other_global != llvm_global) {
- log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name});
- try self.extern_collisions.put(module.gpa, decl_index, {});
+ try self.extern_collisions.put(gpa, decl_index, {});
}
}
llvm_global.setUnnamedAddr(.False);
llvm_global.setLinkage(.External);
- if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+ if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
if (self.di_map.get(decl)) |di_node| {
- if (try decl.isFunction()) {
+ if (try decl.isFunction(mod)) {
const di_func = @ptrCast(*llvm.DISubprogram, di_node);
- const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name));
+ const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
di_func.replaceLinkageName(linkage_name);
} else {
const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node);
- const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name));
+ const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
di_global.replaceLinkageName(linkage_name);
}
}
- if (decl.val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal) {
+ if (decl.val.getVariable(mod)) |variable| {
+ if (variable.is_threadlocal) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
llvm_global.setThreadLocalMode(.NotThreadLocal);
}
- if (variable.data.is_weak_linkage) {
+ if (variable.is_weak_linkage) {
llvm_global.setLinkage(.ExternalWeak);
}
}
} else if (exports.len != 0) {
- const exp_name = exports[0].options.name;
+ const exp_name = mod.intern_pool.stringToSlice(exports[0].opts.name);
llvm_global.setValueName2(exp_name.ptr, exp_name.len);
llvm_global.setUnnamedAddr(.False);
- if (module.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
+ if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
if (self.di_map.get(decl)) |di_node| {
- if (try decl.isFunction()) {
+ if (try decl.isFunction(mod)) {
const di_func = @ptrCast(*llvm.DISubprogram, di_node);
const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
di_func.replaceLinkageName(linkage_name);
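
(Editorial sketch, not part of this patch.) The new extern-name handling above uses a labeled block that either borrows the interned name or allocates a mangled one, with a flag telling the deferred cleanup whether a free is needed. A self-contained version of that pattern; the function and parameter names are made up for illustration:

const std = @import("std");

fn emitMangledName(gpa: std.mem.Allocator, base: []const u8, lib_name: ?[]const u8) !void {
    var owned = false;
    const name = name: {
        if (lib_name) |lib| {
            if (!std.mem.eql(u8, lib, "c")) {
                owned = true;
                break :name try std.fmt.allocPrint(gpa, "{s}|{s}", .{ base, lib });
            }
        }
        // Fall back to the borrowed name; nothing to free later.
        break :name base;
    };
    defer if (owned) gpa.free(name);
    std.debug.print("symbol name: {s}\n", .{name});
}
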
@@ -1341,37 +1333,34 @@ pub const Object = struct {
di_global.replaceLinkageName(linkage_name);
}
}
- switch (exports[0].options.linkage) {
+ switch (exports[0].opts.linkage) {
.Internal => unreachable,
.Strong => llvm_global.setLinkage(.External),
.Weak => llvm_global.setLinkage(.WeakODR),
.LinkOnce => llvm_global.setLinkage(.LinkOnceODR),
}
- switch (exports[0].options.visibility) {
+ switch (exports[0].opts.visibility) {
.default => llvm_global.setVisibility(.Default),
.hidden => llvm_global.setVisibility(.Hidden),
.protected => llvm_global.setVisibility(.Protected),
}
- if (exports[0].options.section) |section| {
- const section_z = try module.gpa.dupeZ(u8, section);
- defer module.gpa.free(section_z);
- llvm_global.setSection(section_z);
+ if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| {
+ llvm_global.setSection(section);
}
- if (decl.val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal) {
+ if (decl.val.getVariable(mod)) |variable| {
+ if (variable.is_threadlocal) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
}
}
// If a Decl is exported more than one time (which is rare),
// we add aliases for all but the first export.
- // TODO LLVM C API does not support deleting aliases. We need to
- // patch it to support this or figure out how to wrap the C++ API ourselves.
+ // TODO LLVM C API does not support deleting aliases.
+ // The planned solution to this is https://github.com/ziglang/zig/issues/13265
// Until then we iterate over existing aliases and make them point
// to the correct decl, or otherwise add a new alias. Old aliases are leaked.
for (exports[1..]) |exp| {
- const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
- defer module.gpa.free(exp_name_z);
+ const exp_name_z = mod.intern_pool.stringToSlice(exp.opts.name);
if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
alias.setAliasee(llvm_global);
@@ -1385,15 +1374,14 @@ pub const Object = struct {
}
}
} else {
- const fqn = try decl.getFullyQualifiedName(module);
- defer module.gpa.free(fqn);
+ const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
llvm_global.setValueName2(fqn.ptr, fqn.len);
llvm_global.setLinkage(.Internal);
- if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+ if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
llvm_global.setUnnamedAddr(.True);
- if (decl.val.castTag(.variable)) |variable| {
- const single_threaded = module.comp.bin_file.options.single_threaded;
- if (variable.data.is_threadlocal and !single_threaded) {
+ if (decl.val.getVariable(mod)) |variable| {
+ const single_threaded = mod.comp.bin_file.options.single_threaded;
+ if (variable.is_threadlocal and !single_threaded) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
llvm_global.setThreadLocalMode(.NotThreadLocal);
@@ -1444,7 +1432,7 @@ pub const Object = struct {
const gpa = o.gpa;
// Be careful not to reference this `gop` variable after any recursive calls
// to `lowerDebugType`.
- const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .mod = o.module });
+ const gop = try o.di_type_map.getOrPut(gpa, ty.toIntern());
if (gop.found_existing) {
const annotated = gop.value_ptr.*;
const di_type = annotated.toDIType();
@@ -1457,10 +1445,7 @@ pub const Object = struct {
};
return o.lowerDebugTypeImpl(entry, resolve, di_type);
}
- errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module }));
- // The Type memory is ephemeral; since we want to store a longer-lived
- // reference, we need to copy it here.
- gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator());
+ errdefer assert(o.di_type_map.orderedRemove(ty.toIntern()));
const entry: Object.DITypeMap.Entry = .{
.key_ptr = gop.key_ptr,
.value_ptr = gop.value_ptr,
@@ -1475,18 +1460,19 @@ pub const Object = struct {
resolve: DebugResolveStatus,
opt_fwd_decl: ?*llvm.DIType,
) Allocator.Error!*llvm.DIType {
- const ty = gop.key_ptr.*;
+ const ty = gop.key_ptr.toType();
const gpa = o.gpa;
const target = o.target;
const dib = o.di_builder.?;
- switch (ty.zigTypeTag()) {
+ const mod = o.module;
+ switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => {
const di_type = dib.createBasicType("void", 0, DW.ATE.signed);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
},
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
@@ -1494,49 +1480,41 @@ pub const Object = struct {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
};
- const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+ const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
const di_type = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
},
.Enum => {
- const owner_decl_index = ty.getOwnerDecl();
+ const owner_decl_index = ty.getOwnerDecl(mod);
const owner_decl = o.module.declPtr(owner_decl_index);
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
}
- const field_names = ty.enumFields().keys();
+ const ip = &mod.intern_pool;
+ const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
- const enumerators = try gpa.alloc(*llvm.DIEnumerator, field_names.len);
+ const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len);
defer gpa.free(enumerators);
- var buf_field_index: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = undefined,
- };
- const field_index_val = Value.initPayload(&buf_field_index.base);
-
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = ty.intTagType(&buffer);
- const int_info = ty.intInfo(target);
+ const int_ty = enum_type.tag_ty.toType();
+ const int_info = ty.intInfo(mod);
assert(int_info.bits != 0);
- for (field_names, 0..) |field_name, i| {
- const field_name_z = try gpa.dupeZ(u8, field_name);
- defer gpa.free(field_name_z);
-
- buf_field_index.data = @intCast(u32, i);
- var buf_u64: Value.Payload.U64 = undefined;
- const field_int_val = field_index_val.enumToInt(ty, &buf_u64);
+ for (enum_type.names, 0..) |field_name_ip, i| {
+ const field_name_z = ip.stringToSlice(field_name_ip);
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = field_int_val.toBigInt(&bigint_space, target);
+ const bigint = if (enum_type.values.len != 0)
+ enum_type.values[i].toValue().toBigInt(&bigint_space, mod)
+ else
+ std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
if (bigint.limbs.len == 1) {
enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned);
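
(Editorial sketch, not part of this patch.) When the enum has no explicit tag values, the code above synthesizes the field index as a big integer via `Mutable.init` over a caller-provided limb buffer. A minimal demonstration of that std.math.big API:

const std = @import("std");

test "small integer to big.int.Const" {
    var limbs: [4]std.math.big.Limb = undefined;
    const i: usize = 7;
    const bigint = std.math.big.int.Mutable.init(&limbs, i).toConst();
    try std.testing.expect(bigint.positive);
    try std.testing.expectEqual(@as(usize, 1), bigint.limbs.len);
    try std.testing.expectEqual(@as(std.math.big.Limb, 7), bigint.limbs[0]);
}
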
@@ -1555,7 +1533,7 @@ pub const Object = struct {
@panic("TODO implement bigint debug enumerators to llvm int for 32-bit compiler builds");
}
- const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope);
+ const di_file = try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope);
const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace);
const name = try ty.nameAlloc(gpa, o.module);
@@ -1566,15 +1544,15 @@ pub const Object = struct {
name,
di_file,
owner_decl.src_node + 1,
- ty.abiSize(target) * 8,
- ty.abiAlignment(target) * 8,
+ ty.abiSize(mod) * 8,
+ ty.abiAlignment(mod) * 8,
enumerators.ptr,
@intCast(c_int, enumerators.len),
try o.lowerDebugType(int_ty, .full),
"",
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
return enum_di_ty;
},
.Float => {
@@ -1593,49 +1571,40 @@ pub const Object = struct {
},
.Pointer => {
// Normalize everything that the debug info does not represent.
- const ptr_info = ty.ptrInfo().data;
-
- if (ptr_info.sentinel != null or
- ptr_info.@"addrspace" != .generic or
- ptr_info.bit_offset != 0 or
- ptr_info.host_size != 0 or
- ptr_info.vector_index != .none or
- ptr_info.@"allowzero" or
- !ptr_info.mutable or
- ptr_info.@"volatile" or
- ptr_info.size == .Many or ptr_info.size == .C or
- !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime())
+ const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern());
+
+ if (ptr_info.sentinel != .none or
+ ptr_info.flags.address_space != .generic or
+ ptr_info.packed_offset.bit_offset != 0 or
+ ptr_info.packed_offset.host_size != 0 or
+ ptr_info.flags.vector_index != .none or
+ ptr_info.flags.is_allowzero or
+ ptr_info.flags.is_const or
+ ptr_info.flags.is_volatile or
+ ptr_info.flags.size == .Many or ptr_info.flags.size == .C or
+ !ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
{
- var payload: Type.Payload.Pointer = .{
- .data = .{
- .pointee_type = ptr_info.pointee_type,
- .sentinel = null,
- .@"align" = ptr_info.@"align",
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = true,
- .@"volatile" = false,
- .size = switch (ptr_info.size) {
+ const bland_ptr_ty = try mod.ptrType(.{
+ .child = if (!ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
+ .anyopaque_type
+ else
+ ptr_info.child,
+ .flags = .{
+ .alignment = ptr_info.flags.alignment,
+ .size = switch (ptr_info.flags.size) {
.Many, .C, .One => .One,
.Slice => .Slice,
},
},
- };
- if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
- payload.data.pointee_type = Type.anyopaque;
- }
- const bland_ptr_ty = Type.initPayload(&payload.base);
+ });
const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
}
- if (ty.isSlice()) {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = ty.slicePtrFieldType(&buf);
+ if (ty.isSlice(mod)) {
+ const ptr_ty = ty.slicePtrFieldType(mod);
const len_ty = Type.usize;
const name = try ty.nameAlloc(gpa, o.module);
@@ -1657,14 +1626,14 @@ pub const Object = struct {
break :blk fwd_decl;
};
- const ptr_size = ptr_ty.abiSize(target);
- const ptr_align = ptr_ty.abiAlignment(target);
- const len_size = len_ty.abiSize(target);
- const len_align = len_ty.abiAlignment(target);
+ const ptr_size = ptr_ty.abiSize(mod);
+ const ptr_align = ptr_ty.abiAlignment(mod);
+ const len_size = len_ty.abiSize(mod);
+ const len_align = len_ty.abiAlignment(mod);
var offset: u64 = 0;
offset += ptr_size;
- offset = std.mem.alignForwardGeneric(u64, offset, len_align);
+ offset = std.mem.alignForward(u64, offset, len_align);
const len_offset = offset;
const fields: [2]*llvm.DIType = .{
@@ -1697,8 +1666,8 @@ pub const Object = struct {
name.ptr,
di_file,
line,
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -1709,65 +1678,65 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
}
- const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd);
+ const elem_di_ty = try o.lowerDebugType(ptr_info.child.toType(), .fwd);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const ptr_di_ty = dib.createPointerType(
elem_di_ty,
- target.cpu.arch.ptrBitWidth(),
- ty.ptrAlignment(target) * 8,
+ target.ptrBitWidth(),
+ ty.ptrAlignment(mod) * 8,
name,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(ptr_di_ty));
return ptr_di_ty;
},
.Opaque => {
- if (ty.tag() == .anyopaque) {
+ if (ty.toIntern() == .anyopaque_type) {
const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
- const owner_decl_index = ty.getOwnerDecl();
+ const owner_decl_index = ty.getOwnerDecl(mod);
const owner_decl = o.module.declPtr(owner_decl_index);
const opaque_di_ty = dib.createForwardDeclType(
DW.TAG.structure_type,
name,
try o.namespaceToDebugScope(owner_decl.src_namespace),
- try o.getDIFile(gpa, owner_decl.src_namespace.file_scope),
+ try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope),
owner_decl.src_node + 1,
);
                // The recursive call to `lowerDebugType` via `namespaceToDebugScope`
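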
// means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(opaque_di_ty));
return opaque_di_ty;
},
.Array => {
const array_di_ty = dib.createArrayType(
- ty.abiSize(target) * 8,
- ty.abiAlignment(target) * 8,
- try o.lowerDebugType(ty.childType(), .full),
- @intCast(c_int, ty.arrayLen()),
+ ty.abiSize(mod) * 8,
+ ty.abiAlignment(mod) * 8,
+ try o.lowerDebugType(ty.childType(mod), .full),
+ @intCast(c_int, ty.arrayLen(mod)),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
return array_di_ty;
},
.Vector => {
- const elem_ty = ty.elemType2();
+ const elem_ty = ty.elemType2(mod);
// Vector elements cannot be padded since that would make
                // @bitSizeOf(elem) * len > @bitSizeOf(vec).
// Neither gdb nor lldb seem to be able to display non-byte sized
// vectors properly.
- const elem_di_type = switch (elem_ty.zigTypeTag()) {
+ const elem_di_type = switch (elem_ty.zigTypeTag(mod)) {
.Int => blk: {
- const info = elem_ty.intInfo(target);
+ const info = elem_ty.intInfo(mod);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
@@ -1778,34 +1747,33 @@ pub const Object = struct {
break :blk dib.createBasicType(name, info.bits, dwarf_encoding);
},
.Bool => dib.createBasicType("bool", 1, DW.ATE.boolean),
- else => try o.lowerDebugType(ty.childType(), .full),
+ else => try o.lowerDebugType(ty.childType(mod), .full),
};
const vector_di_ty = dib.createVectorType(
- ty.abiSize(target) * 8,
- ty.abiAlignment(target) * 8,
+ ty.abiSize(mod) * 8,
+ ty.abiAlignment(mod) * 8,
elem_di_type,
- ty.vectorLen(),
+ ty.vectorLen(mod),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(vector_di_ty));
return vector_di_ty;
},
.Optional => {
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = ty.optionalChild(&buf);
- if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
+ const child_ty = ty.optionalChild(mod);
+ if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const di_bits = 8; // lldb cannot handle non-byte sized types
const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
return ptr_di_ty;
}
@@ -1826,14 +1794,14 @@ pub const Object = struct {
};
const non_null_ty = Type.u8;
- const payload_size = child_ty.abiSize(target);
- const payload_align = child_ty.abiAlignment(target);
- const non_null_size = non_null_ty.abiSize(target);
- const non_null_align = non_null_ty.abiAlignment(target);
+ const payload_size = child_ty.abiSize(mod);
+ const payload_align = child_ty.abiAlignment(mod);
+ const non_null_size = non_null_ty.abiSize(mod);
+ const non_null_align = non_null_ty.abiAlignment(mod);
var offset: u64 = 0;
offset += payload_size;
- offset = std.mem.alignForwardGeneric(u64, offset, non_null_align);
+ offset = std.mem.alignForward(u64, offset, non_null_align);
const non_null_offset = offset;
const fields: [2]*llvm.DIType = .{
@@ -1866,8 +1834,8 @@ pub const Object = struct {
name.ptr,
di_file,
line,
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -1878,15 +1846,15 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = ty.errorUnionPayload(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(err_set_di_ty));
return err_set_di_ty;
}
const name = try ty.nameAlloc(gpa, o.module);
@@ -1907,10 +1875,10 @@ pub const Object = struct {
break :blk fwd_decl;
};
- const error_size = Type.anyerror.abiSize(target);
- const error_align = Type.anyerror.abiAlignment(target);
- const payload_size = payload_ty.abiSize(target);
- const payload_align = payload_ty.abiAlignment(target);
+ const error_size = Type.anyerror.abiSize(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ const payload_size = payload_ty.abiSize(mod);
+ const payload_align = payload_ty.abiAlignment(mod);
var error_index: u32 = undefined;
var payload_index: u32 = undefined;
@@ -1920,12 +1888,12 @@ pub const Object = struct {
error_index = 0;
payload_index = 1;
error_offset = 0;
- payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align);
+ payload_offset = std.mem.alignForward(u64, error_size, payload_align);
} else {
payload_index = 0;
error_index = 1;
payload_offset = 0;
- error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align);
+ error_offset = std.mem.alignForward(u64, payload_size, error_align);
}
var fields: [2]*llvm.DIType = undefined;
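
(Editorial sketch, not part of this patch.) The offsets above follow the usual rule: the member with the stricter alignment goes first and the other starts at the next aligned offset. A small worked example with std.mem.alignForward, assuming a hypothetical target where anyerror is 2 bytes and the payload is a u64:

const std = @import("std");

test "error union member offsets" {
    const error_size: u64 = 2;
    const error_align: u64 = 2;
    const payload_size: u64 = 8;
    const payload_align: u64 = 8;

    var error_offset: u64 = undefined;
    var payload_offset: u64 = undefined;
    if (error_align > payload_align) {
        error_offset = 0;
        payload_offset = std.mem.alignForward(u64, error_size, payload_align);
    } else {
        payload_offset = 0;
        error_offset = std.mem.alignForward(u64, payload_size, error_align);
    }
    try std.testing.expectEqual(@as(u64, 0), payload_offset);
    try std.testing.expectEqual(@as(u64, 8), error_offset);
}
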
@@ -1957,8 +1925,8 @@ pub const Object = struct {
name.ptr,
di_file,
line,
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -1969,7 +1937,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.ErrorSet => {
@@ -1984,16 +1952,15 @@ pub const Object = struct {
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
- if (ty.castTag(.@"struct")) |payload| {
- const struct_obj = payload.data;
+ if (mod.typeToStruct(ty)) |struct_obj| {
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
assert(struct_obj.haveLayout());
- const info = struct_obj.backing_int_ty.intInfo(target);
+ const info = struct_obj.backing_int_ty.intInfo(mod);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
};
- const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+ const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
@@ -2013,98 +1980,98 @@ pub const Object = struct {
break :blk fwd_decl;
};
- if (ty.isSimpleTupleOrAnonStruct()) {
- const tuple = ty.tupleFields();
-
- var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
- defer di_fields.deinit(gpa);
-
- try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
-
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
-
- for (tuple.types, 0..) |field_ty, i| {
- const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
-
- const field_size = field_ty.abiSize(target);
- const field_align = field_ty.abiAlignment(target);
- const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
- offset = field_offset + field_size;
-
- const field_name = if (ty.castTag(.anon_struct)) |payload|
- try gpa.dupeZ(u8, payload.data.names[i])
- else
- try std.fmt.allocPrintZ(gpa, "{d}", .{i});
- defer gpa.free(field_name);
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .anon_struct_type => |tuple| {
+ var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
+ defer di_fields.deinit(gpa);
+
+ try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+
+ const field_size = field_ty.toType().abiSize(mod);
+ const field_align = field_ty.toType().abiAlignment(mod);
+ const field_offset = std.mem.alignForward(u64, offset, field_align);
+ offset = field_offset + field_size;
+
+ const field_name = if (tuple.names.len != 0)
+ mod.intern_pool.stringToSlice(tuple.names[i])
+ else
+ try std.fmt.allocPrintZ(gpa, "{d}", .{i});
+ defer if (tuple.names.len == 0) gpa.free(field_name);
+
+ try di_fields.append(gpa, dib.createMemberType(
+ fwd_decl.toScope(),
+ field_name,
+ null, // file
+ 0, // line
+ field_size * 8, // size in bits
+ field_align * 8, // align in bits
+ field_offset * 8, // offset in bits
+ 0, // flags
+ try o.lowerDebugType(field_ty.toType(), .full),
+ ));
+ }
- try di_fields.append(gpa, dib.createMemberType(
- fwd_decl.toScope(),
- field_name,
+ const full_di_ty = dib.createStructType(
+ compile_unit_scope,
+ name.ptr,
null, // file
0, // line
- field_size * 8, // size in bits
- field_align * 8, // align in bits
- field_offset * 8, // offset in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
- try o.lowerDebugType(field_ty, .full),
- ));
- }
-
- const full_di_ty = dib.createStructType(
- compile_unit_scope,
- name.ptr,
- null, // file
- 0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
- 0, // flags
- null, // derived from
- di_fields.items.ptr,
- @intCast(c_int, di_fields.items.len),
- 0, // run time lang
- null, // vtable holder
- "", // unique id
- );
- dib.replaceTemporary(fwd_decl, full_di_ty);
- // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
- return full_di_ty;
- }
-
- if (ty.castTag(.@"struct")) |payload| {
- const struct_obj = payload.data;
- if (!struct_obj.haveFieldTypes()) {
- // This can happen if a struct type makes it all the way to
- // flush() without ever being instantiated or referenced (even
- // via pointer). The only reason we are hearing about it now is
- // that it is being used as a namespace to put other debug types
- // into. Therefore we can satisfy this by making an empty namespace,
- // rather than changing the frontend to unnecessarily resolve the
- // struct field types.
- const owner_decl_index = ty.getOwnerDecl();
- const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
- dib.replaceTemporary(fwd_decl, struct_di_ty);
- // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
- // means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
- return struct_di_ty;
- }
+ null, // derived from
+ di_fields.items.ptr,
+ @intCast(c_int, di_fields.items.len),
+ 0, // run time lang
+ null, // vtable holder
+ "", // unique id
+ );
+ dib.replaceTemporary(fwd_decl, full_di_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
+ return full_di_ty;
+ },
+ .struct_type => |struct_type| s: {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
+
+ if (!struct_obj.haveFieldTypes()) {
+ // This can happen if a struct type makes it all the way to
+ // flush() without ever being instantiated or referenced (even
+ // via pointer). The only reason we are hearing about it now is
+ // that it is being used as a namespace to put other debug types
+ // into. Therefore we can satisfy this by making an empty namespace,
+ // rather than changing the frontend to unnecessarily resolve the
+ // struct field types.
+ const owner_decl_index = ty.getOwnerDecl(mod);
+ const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
+ dib.replaceTemporary(fwd_decl, struct_di_ty);
+ // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
+ // means we can't use `gop` anymore.
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
+ return struct_di_ty;
+ }
+ },
+ else => {},
}
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
- const owner_decl_index = ty.getOwnerDecl();
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const owner_decl_index = ty.getOwnerDecl(mod);
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty));
return struct_di_ty;
}
- const fields = ty.structFields();
- const layout = ty.containerLayout();
+ const fields = ty.structFields(mod);
+ const layout = ty.containerLayout(mod);
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
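
(Editorial sketch, not part of this patch.) The `.struct_type => |struct_type| s: { ... orelse break :s; ... }` prong above uses a labeled block so the prong can bail out and fall through to the shared code after the switch. A tiny stand-alone example of that shape, with made-up names:

fn describe(len: usize) []const u8 {
    switch (len) {
        0 => return "empty",
        else => s: {
            if (len > 1024) break :s; // give up, use the shared fallback below
            return "small";
        },
    }
    return "fallback";
}
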
@@ -2114,16 +2081,15 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
- var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+ var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
- const field_size = field.ty.abiSize(target);
- const field_align = field.alignment(target, layout);
- const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ const field_size = field.ty.abiSize(mod);
+ const field_align = field.alignment(mod, layout);
+ const field_offset = std.mem.alignForward(u64, offset, field_align);
offset = field_offset + field_size;
- const field_name = try gpa.dupeZ(u8, fields.keys()[field_and_index.index]);
- defer gpa.free(field_name);
+ const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]);
try di_fields.append(gpa, dib.createMemberType(
fwd_decl.toScope(),
@@ -2143,8 +2109,8 @@ pub const Object = struct {
name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@@ -2155,12 +2121,12 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.Union => {
const compile_unit_scope = o.di_compile_unit.?.toScope();
- const owner_decl_index = ty.getOwnerDecl();
+ const owner_decl_index = ty.getOwnerDecl(mod);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
@@ -2178,17 +2144,17 @@ pub const Object = struct {
break :blk fwd_decl;
};
- const union_obj = ty.cast(Type.Payload.Union).?.data;
- if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime()) {
+ const union_obj = mod.typeToUnion(ty).?;
+ if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
}
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
const tag_di_ty = try o.lowerDebugType(union_obj.tag_ty, .full);
@@ -2198,8 +2164,8 @@ pub const Object = struct {
name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&di_fields,
@@ -2211,7 +2177,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
}
@@ -2225,24 +2191,22 @@ pub const Object = struct {
const field_name = kv.key_ptr.*;
const field = kv.value_ptr.*;
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
-
- const field_size = field.ty.abiSize(target);
- const field_align = field.normalAlignment(target);
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_name_copy = try gpa.dupeZ(u8, field_name);
- defer gpa.free(field_name_copy);
+ const field_size = field.ty.abiSize(mod);
+ const field_align = field.normalAlignment(mod);
+ const field_di_ty = try o.lowerDebugType(field.ty, .full);
di_fields.appendAssumeCapacity(dib.createMemberType(
fwd_decl.toScope(),
- field_name_copy,
+ mod.intern_pool.stringToSlice(field_name),
null, // file
0, // line
field_size * 8, // size in bits
field_align * 8, // align in bits
0, // offset in bits
0, // flags
- try o.lowerDebugType(field.ty, .full),
+ field_di_ty,
));
}
@@ -2258,8 +2222,8 @@ pub const Object = struct {
union_name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
di_fields.items.ptr,
@intCast(c_int, di_fields.items.len),
@@ -2270,7 +2234,7 @@ pub const Object = struct {
if (layout.tag_size == 0) {
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
return union_di_ty;
}
@@ -2278,10 +2242,10 @@ pub const Object = struct {
var payload_offset: u64 = undefined;
if (layout.tag_align >= layout.payload_align) {
tag_offset = 0;
- payload_offset = std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
+ payload_offset = std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
} else {
payload_offset = 0;
- tag_offset = std.mem.alignForwardGeneric(u64, layout.payload_size, layout.tag_align);
+ tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align);
}
const tag_di = dib.createMemberType(
@@ -2319,8 +2283,8 @@ pub const Object = struct {
name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&full_di_fields,
@@ -2331,53 +2295,42 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.Fn => {
- const fn_info = ty.fnInfo();
+ const fn_info = mod.typeToFunc(ty).?;
var param_di_types = std.ArrayList(*llvm.DIType).init(gpa);
defer param_di_types.deinit();
// Return type goes first.
- if (fn_info.return_type.hasRuntimeBitsIgnoreComptime()) {
- const sret = firstParamSRet(fn_info, target);
- const di_ret_ty = if (sret) Type.void else fn_info.return_type;
+ if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
+ const sret = firstParamSRet(fn_info, mod);
+ const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType();
try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));
if (sret) {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = fn_info.return_type,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType());
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
} else {
try param_di_types.append(try o.lowerDebugType(Type.void, .full));
}
- if (fn_info.return_type.isError() and
+ if (fn_info.return_type.toType().isError(mod) and
o.module.comp.bin_file.options.error_return_tracing)
{
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = o.getStackTraceType(),
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType());
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
- for (fn_info.param_types) |param_ty| {
- if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ for (0..mod.typeToFunc(ty).?.param_types.len) |i| {
+ const param_ty = mod.typeToFunc(ty).?.param_types[i].toType();
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- if (isByRef(param_ty)) {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = param_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ if (isByRef(param_ty, mod)) {
+ const ptr_ty = try mod.singleMutPtrType(param_ty);
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
} else {
try param_di_types.append(try o.lowerDebugType(param_ty, .full));
@@ -2390,7 +2343,7 @@ pub const Object = struct {
0,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(fn_di_ty));
return fn_di_ty;
},
.ComptimeInt => unreachable,
@@ -2405,8 +2358,10 @@ pub const Object = struct {
}
}
- fn namespaceToDebugScope(o: *Object, namespace: *const Module.Namespace) !*llvm.DIScope {
- if (namespace.parent == null) {
+ fn namespaceToDebugScope(o: *Object, namespace_index: Module.Namespace.Index) !*llvm.DIScope {
+ const mod = o.module;
+ const namespace = mod.namespacePtr(namespace_index);
+ if (namespace.parent == .none) {
const di_file = try o.getDIFile(o.gpa, namespace.file_scope);
return di_file.toScope();
}
@@ -2418,12 +2373,14 @@ pub const Object = struct {
/// Assertion `!isa<DIType>(Scope) && "shouldn't make a namespace scope for a type"'
/// when targeting CodeView (Windows).
fn makeEmptyNamespaceDIType(o: *Object, decl_index: Module.Decl.Index) !*llvm.DIType {
- const decl = o.module.declPtr(decl_index);
+ const mod = o.module;
+ const decl = mod.declPtr(decl_index);
const fields: [0]*llvm.DIType = .{};
+ const di_scope = try o.namespaceToDebugScope(decl.src_namespace);
return o.di_builder.?.createStructType(
- try o.namespaceToDebugScope(decl.src_namespace),
- decl.name, // TODO use fully qualified name
- try o.getDIFile(o.gpa, decl.src_namespace.file_scope),
+ di_scope,
+ mod.intern_pool.stringToSlice(decl.name), // TODO use fully qualified name
+ try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope),
decl.src_line + 1,
0, // size in bits
0, // align in bits
@@ -2437,28 +2394,28 @@ pub const Object = struct {
);
}
- fn getStackTraceType(o: *Object) Type {
+ fn getStackTraceType(o: *Object) Allocator.Error!Type {
const mod = o.module;
const std_pkg = mod.main_pkg.table.get("std").?;
const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
- const builtin_str: []const u8 = "builtin";
- const std_namespace = mod.declPtr(std_file.root_decl.unwrap().?).src_namespace;
+ const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin");
+ const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace);
const builtin_decl = std_namespace.decls
.getKeyAdapted(builtin_str, Module.DeclAdapter{ .mod = mod }).?;
- const stack_trace_str: []const u8 = "StackTrace";
+ const stack_trace_str = try mod.intern_pool.getOrPutString(mod.gpa, "StackTrace");
// buffer is only used for int_type, `builtin` is a struct.
- const builtin_ty = mod.declPtr(builtin_decl).val.toType(undefined);
- const builtin_namespace = builtin_ty.getNamespace().?;
+ const builtin_ty = mod.declPtr(builtin_decl).val.toType();
+ const builtin_namespace = builtin_ty.getNamespace(mod).?;
const stack_trace_decl_index = builtin_namespace.decls
.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?;
const stack_trace_decl = mod.declPtr(stack_trace_decl_index);
// Sema should have ensured that StackTrace was analyzed.
assert(stack_trace_decl.has_tv);
- return stack_trace_decl.val.toType(undefined);
+ return stack_trace_decl.val.toType();
}
};
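For orientation, a minimal sketch of how the stack-trace pointer type is obtained after this change; both calls can fail with error.OutOfMemory because getStackTraceType now interns the "builtin" and "StackTrace" name strings. This is an illustrative helper, not part of the patch, and it assumes lowerDebugType yields a *llvm.DIType:

    fn stackTracePtrDiType(o: *Object) !*llvm.DIType {
        const mod = o.module;
        // May allocate: looks up "builtin"/"StackTrace" through the intern pool.
        const stack_trace_ty = try o.getStackTraceType();
        // The Module hands back an interned `*StackTrace`; no stack-allocated Type payload needed.
        const ptr_ty = try mod.singleMutPtrType(stack_trace_ty);
        return try o.lowerDebugType(ptr_ty, .full);
    }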
@@ -2474,7 +2431,8 @@ pub const DeclGen = struct {
fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
assert(self.err_msg == null);
- const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl);
+ const mod = self.module;
+ const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl, mod);
self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args);
return error.CodegenFail;
}
@@ -2484,31 +2442,27 @@ pub const DeclGen = struct {
}
fn genDecl(dg: *DeclGen) !void {
+ const mod = dg.module;
const decl = dg.decl;
const decl_index = dg.decl_index;
assert(decl.has_tv);
- log.debug("gen: {s} type: {}, value: {}", .{
- decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(),
- });
- assert(decl.val.tag() != .function);
- if (decl.val.castTag(.extern_fn)) |extern_fn| {
- _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl);
+ if (decl.val.getExternFunc(mod)) |extern_func| {
+ _ = try dg.resolveLlvmFunction(extern_func.decl);
} else {
- const target = dg.module.getTarget();
+ const target = mod.getTarget();
var global = try dg.resolveGlobalDecl(decl_index);
- global.setAlignment(decl.getAlignment(target));
- if (decl.@"linksection") |section| global.setSection(section);
+ global.setAlignment(decl.getAlignment(mod));
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| global.setSection(s);
assert(decl.has_tv);
- const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
- const variable = payload.data;
+ const init_val = if (decl.val.getVariable(mod)) |variable| init_val: {
break :init_val variable.init;
} else init_val: {
global.setGlobalConstant(.True);
- break :init_val decl.val;
+ break :init_val decl.val.toIntern();
};
- if (init_val.tag() != .unreachable_value) {
- const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val });
+ if (init_val != .none) {
+ const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() });
if (global.globalGetValueType() == llvm_init.typeOf()) {
global.setInitializer(llvm_init);
} else {
@@ -2533,7 +2487,8 @@ pub const DeclGen = struct {
new_global.setLinkage(global.getLinkage());
new_global.setUnnamedAddr(global.getUnnamedAddress());
new_global.setAlignment(global.getAlignment());
- if (decl.@"linksection") |section| new_global.setSection(section);
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
+ new_global.setSection(s);
new_global.setInitializer(llvm_init);
                // TODO: How should this work when the address space of a global is changed?
global.replaceAllUsesWith(new_global);
@@ -2545,13 +2500,13 @@ pub const DeclGen = struct {
}
if (dg.object.di_builder) |dib| {
- const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope);
+ const di_file = try dg.object.getDIFile(dg.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
const line_number = decl.src_line + 1;
const is_internal_linkage = !dg.module.decl_exports.contains(decl_index);
const di_global = dib.createGlobalVariableExpression(
di_file.toScope(),
- decl.name,
+ mod.intern_pool.stringToSlice(decl.name),
global.getValueName(),
di_file,
line_number,
@@ -2560,7 +2515,7 @@ pub const DeclGen = struct {
);
try dg.object.di_map.put(dg.gpa, dg.decl, di_global.getVariable().toNode());
- if (!is_internal_linkage) global.attachMetaData(di_global);
+ if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global);
}
}
}
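As a side note, a small hypothetical helper sketching the interned-string access pattern genDecl now relies on (assumes decl.name is always present while decl.@"linksection" is optional; this helper itself is not in the patch):

    fn applyNameAndSection(mod: *Module, decl: *Module.Decl, global: *llvm.Value) void {
        // Interned name -> slice accepted by the LLVM bindings.
        global.setValueName(mod.intern_pool.stringToSlice(decl.name));
        // `linksection` is optional; only set a section when one was requested.
        if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| {
            global.setSection(section);
        }
    }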
@@ -2569,36 +2524,35 @@ pub const DeclGen = struct {
/// Note that this can be called before the function's semantic analysis has
/// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*llvm.Value {
- const decl = dg.module.declPtr(decl_index);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
const zig_fn_type = decl.ty;
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index);
if (gop.found_existing) return gop.value_ptr.*;
assert(decl.has_tv);
- const fn_info = zig_fn_type.fnInfo();
- const target = dg.module.getTarget();
- const sret = firstParamSRet(fn_info, target);
+ const fn_info = mod.typeToFunc(zig_fn_type).?;
+ const target = mod.getTarget();
+ const sret = firstParamSRet(fn_info, mod);
const fn_type = try dg.lowerType(zig_fn_type);
- const fqn = try decl.getFullyQualifiedName(dg.module);
- defer dg.gpa.free(fqn);
+ const fqn = try decl.getFullyQualifiedName(mod);
const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
- const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace);
+ const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(mod.intern_pool.stringToSlice(fqn), fn_type, llvm_addrspace);
gop.value_ptr.* = llvm_fn;
- const is_extern = decl.isExtern();
+ const is_extern = decl.isExtern(mod);
if (!is_extern) {
llvm_fn.setLinkage(.Internal);
llvm_fn.setUnnamedAddr(.True);
} else {
- if (dg.module.getTarget().isWasm()) {
- dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0));
- if (decl.getExternFn().?.lib_name) |lib_name| {
- const module_name = std.mem.sliceTo(lib_name, 0);
- if (!std.mem.eql(u8, module_name, "c")) {
- dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name);
+ if (target.isWasm()) {
+ dg.addFnAttrString(llvm_fn, "wasm-import-name", mod.intern_pool.stringToSlice(decl.name));
+ if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
+ if (!std.mem.eql(u8, lib_name, "c")) {
+ dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name);
}
}
}
@@ -2608,12 +2562,12 @@ pub const DeclGen = struct {
dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
dg.addArgAttr(llvm_fn, 0, "noalias");
- const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type);
+ const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type.toType());
llvm_fn.addSretAttr(raw_llvm_ret_ty);
}
- const err_return_tracing = fn_info.return_type.isError() and
- dg.module.comp.bin_file.options.error_return_tracing;
+ const err_return_tracing = fn_info.return_type.toType().isError(mod) and
+ mod.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
dg.addArgAttr(llvm_fn, @boolToInt(sret), "nonnull");
@@ -2635,14 +2589,14 @@ pub const DeclGen = struct {
},
}
- if (fn_info.alignment != 0) {
- llvm_fn.setAlignment(fn_info.alignment);
+ if (fn_info.alignment.toByteUnitsOptional()) |a| {
+ llvm_fn.setAlignment(@intCast(c_uint, a));
}
// Function attributes that are independent of analysis results of the function body.
dg.addCommonFnAttributes(llvm_fn);
- if (fn_info.return_type.isNoReturn()) {
+ if (fn_info.return_type == .noreturn_type) {
dg.addFnAttr(llvm_fn, "noreturn");
}
@@ -2655,15 +2609,15 @@ pub const DeclGen = struct {
while (it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
- if (!isByRef(param_ty)) {
+ const param_ty = fn_info.param_types[param_index].toType();
+ if (!isByRef(param_ty, mod)) {
dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_ty = fn_info.param_types[it.zig_index - 1];
- const param_llvm_ty = try dg.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(target);
+ const param_llvm_ty = try dg.lowerType(param_ty.toType());
+ const alignment = param_ty.toType().abiAlignment(mod);
dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => {
@@ -2735,35 +2689,35 @@ pub const DeclGen = struct {
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(dg.object.decl_map.remove(decl_index));
- const decl = dg.module.declPtr(decl_index);
- const fqn = try decl.getFullyQualifiedName(dg.module);
- defer dg.gpa.free(fqn);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
+ const fqn = try decl.getFullyQualifiedName(mod);
- const target = dg.module.getTarget();
+ const target = mod.getTarget();
const llvm_type = try dg.lowerType(decl.ty);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(
llvm_type,
- fqn,
+ mod.intern_pool.stringToSlice(fqn),
llvm_actual_addrspace,
);
gop.value_ptr.* = llvm_global;
// This is needed for declarations created by `@extern`.
- if (decl.isExtern()) {
- llvm_global.setValueName(decl.name);
+ if (decl.isExtern(mod)) {
+ llvm_global.setValueName(mod.intern_pool.stringToSlice(decl.name));
llvm_global.setUnnamedAddr(.False);
llvm_global.setLinkage(.External);
- if (decl.val.castTag(.variable)) |variable| {
- const single_threaded = dg.module.comp.bin_file.options.single_threaded;
- if (variable.data.is_threadlocal and !single_threaded) {
+ if (decl.val.getVariable(mod)) |variable| {
+ const single_threaded = mod.comp.bin_file.options.single_threaded;
+ if (variable.is_threadlocal and !single_threaded) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
llvm_global.setThreadLocalMode(.NotThreadLocal);
}
- if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
+ if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
}
} else {
llvm_global.setLinkage(.Internal);
@@ -2784,12 +2738,13 @@ pub const DeclGen = struct {
fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type {
const llvm_ty = try lowerTypeInner(dg, t);
+ const mod = dg.module;
if (std.debug.runtime_safety and false) check: {
- if (t.zigTypeTag() == .Opaque) break :check;
- if (!t.hasRuntimeBits()) break :check;
+ if (t.zigTypeTag(mod) == .Opaque) break :check;
+ if (!t.hasRuntimeBits(mod)) break :check;
if (!llvm_ty.isSized().toBool()) break :check;
- const zig_size = t.abiSize(dg.module.getTarget());
+ const zig_size = t.abiSize(mod);
const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty);
if (llvm_size != zig_size) {
log.err("when lowering {}, Zig ABI size = {d} but LLVM ABI size = {d}", .{
@@ -2802,18 +2757,18 @@ pub const DeclGen = struct {
fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type {
const gpa = dg.gpa;
- const target = dg.module.getTarget();
- switch (t.zigTypeTag()) {
+ const mod = dg.module;
+ const target = mod.getTarget();
+ switch (t.zigTypeTag(mod)) {
.Void, .NoReturn => return dg.context.voidType(),
.Int => {
- const info = t.intInfo(target);
+ const info = t.intInfo(mod);
assert(info.bits != 0);
return dg.context.intType(info.bits);
},
.Enum => {
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = t.intTagType(&buffer);
- const bit_count = int_ty.intInfo(target).bits;
+ const int_ty = t.intTagType(mod);
+ const bit_count = int_ty.intInfo(mod).bits;
assert(bit_count != 0);
return dg.context.intType(bit_count);
},
@@ -2827,9 +2782,8 @@ pub const DeclGen = struct {
},
.Bool => return dg.context.intType(1),
.Pointer => {
- if (t.isSlice()) {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = t.slicePtrFieldType(&buf);
+ if (t.isSlice(mod)) {
+ const ptr_type = t.slicePtrFieldType(mod);
const fields: [2]*llvm.Type = .{
try dg.lowerType(ptr_type),
@@ -2837,49 +2791,41 @@ pub const DeclGen = struct {
};
return dg.context.structType(&fields, fields.len, .False);
}
- const ptr_info = t.ptrInfo().data;
+ const ptr_info = t.ptrInfo(mod);
const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target);
return dg.context.pointerType(llvm_addrspace);
},
- .Opaque => switch (t.tag()) {
- .@"opaque" => {
- const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module });
- if (gop.found_existing) return gop.value_ptr.*;
+ .Opaque => {
+ if (t.toIntern() == .anyopaque_type) return dg.context.intType(8);
- // The Type memory is ephemeral; since we want to store a longer-lived
- // reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
+ const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern());
+ if (gop.found_existing) return gop.value_ptr.*;
- const opaque_obj = t.castTag(.@"opaque").?.data;
- const name = try opaque_obj.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
+ const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type;
+ const name = mod.intern_pool.stringToSlice(try mod.opaqueFullyQualifiedName(opaque_type));
- const llvm_struct_ty = dg.context.structCreateNamed(name);
- gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
- return llvm_struct_ty;
- },
- .anyopaque => return dg.context.intType(8),
- else => unreachable,
+ const llvm_struct_ty = dg.context.structCreateNamed(name);
+ gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
+ return llvm_struct_ty;
},
.Array => {
- const elem_ty = t.childType();
- assert(elem_ty.onePossibleValue() == null);
+ const elem_ty = t.childType(mod);
+ if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null);
const elem_llvm_ty = try dg.lowerType(elem_ty);
- const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null);
+ const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null);
return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
},
.Vector => {
- const elem_type = try dg.lowerType(t.childType());
- return elem_type.vectorType(t.vectorLen());
+ const elem_type = try dg.lowerType(t.childType(mod));
+ return elem_type.vectorType(t.vectorLen(mod));
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = t.optionalChild(&buf);
- if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
+ const child_ty = t.optionalChild(mod);
+ if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.context.intType(8);
}
const payload_llvm_ty = try dg.lowerType(child_ty);
- if (t.optionalReprIsPayload()) {
+ if (t.optionalReprIsPayload(mod)) {
return payload_llvm_ty;
}
@@ -2887,8 +2833,8 @@ pub const DeclGen = struct {
var fields_buf: [3]*llvm.Type = .{
payload_llvm_ty, dg.context.intType(8), undefined,
};
- const offset = child_ty.abiSize(target) + 1;
- const abi_size = t.abiSize(target);
+ const offset = child_ty.abiSize(mod) + 1;
+ const abi_size = t.abiSize(mod);
const padding = @intCast(c_uint, abi_size - offset);
if (padding == 0) {
return dg.context.structType(&fields_buf, 2, .False);
@@ -2897,27 +2843,27 @@ pub const DeclGen = struct {
return dg.context.structType(&fields_buf, 3, .False);
},
.ErrorUnion => {
- const payload_ty = t.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = t.errorUnionPayload(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return try dg.lowerType(Type.anyerror);
}
const llvm_error_type = try dg.lowerType(Type.anyerror);
const llvm_payload_type = try dg.lowerType(payload_ty);
- const payload_align = payload_ty.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
+ const payload_align = payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
- const payload_size = payload_ty.abiSize(target);
- const error_size = Type.anyerror.abiSize(target);
+ const payload_size = payload_ty.abiSize(mod);
+ const error_size = Type.anyerror.abiSize(mod);
var fields_buf: [3]*llvm.Type = undefined;
if (error_align > payload_align) {
fields_buf[0] = llvm_error_type;
fields_buf[1] = llvm_payload_type;
const payload_end =
- std.mem.alignForwardGeneric(u64, error_size, payload_align) +
+ std.mem.alignForward(u64, error_size, payload_align) +
payload_size;
- const abi_size = std.mem.alignForwardGeneric(u64, payload_end, error_align);
+ const abi_size = std.mem.alignForward(u64, payload_end, error_align);
const padding = @intCast(c_uint, abi_size - payload_end);
if (padding == 0) {
return dg.context.structType(&fields_buf, 2, .False);
@@ -2928,9 +2874,9 @@ pub const DeclGen = struct {
fields_buf[0] = llvm_payload_type;
fields_buf[1] = llvm_error_type;
const error_end =
- std.mem.alignForwardGeneric(u64, payload_size, error_align) +
+ std.mem.alignForward(u64, payload_size, error_align) +
error_size;
- const abi_size = std.mem.alignForwardGeneric(u64, error_end, payload_align);
+ const abi_size = std.mem.alignForward(u64, error_end, payload_align);
const padding = @intCast(c_uint, abi_size - error_end);
if (padding == 0) {
return dg.context.structType(&fields_buf, 2, .False);
@@ -2941,66 +2887,64 @@ pub const DeclGen = struct {
},
.ErrorSet => return dg.context.intType(16),
.Struct => {
- const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module });
+ const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
- // The Type memory is ephemeral; since we want to store a longer-lived
- // reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
+ const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) {
+ .anon_struct_type => |tuple| {
+ const llvm_struct_ty = dg.context.structCreateNamed("");
+ gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
- if (t.isSimpleTupleOrAnonStruct()) {
- const tuple = t.tupleFields();
- const llvm_struct_ty = dg.context.structCreateNamed("");
- gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
+ var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
+ defer llvm_field_types.deinit(gpa);
- var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
- defer llvm_field_types.deinit(gpa);
+ try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
- try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 0;
+ for (tuple.types, tuple.values) |field_ty, field_val| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
- for (tuple.types, 0..) |field_ty, i| {
- const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, field_align);
- const field_align = field_ty.abiAlignment(target);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ try llvm_field_types.append(gpa, llvm_array_ty);
+ }
+ const field_llvm_ty = try dg.lowerType(field_ty.toType());
+ try llvm_field_types.append(gpa, field_llvm_ty);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- try llvm_field_types.append(gpa, llvm_array_ty);
+ offset += field_ty.toType().abiSize(mod);
}
- const field_llvm_ty = try dg.lowerType(field_ty);
- try llvm_field_types.append(gpa, field_llvm_ty);
-
- offset += field_ty.abiSize(target);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- try llvm_field_types.append(gpa, llvm_array_ty);
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ try llvm_field_types.append(gpa, llvm_array_ty);
+ }
}
- }
- llvm_struct_ty.structSetBody(
- llvm_field_types.items.ptr,
- @intCast(c_uint, llvm_field_types.items.len),
- .False,
- );
+ llvm_struct_ty.structSetBody(
+ llvm_field_types.items.ptr,
+ @intCast(c_uint, llvm_field_types.items.len),
+ .False,
+ );
- return llvm_struct_ty;
- }
+ return llvm_struct_ty;
+ },
+ .struct_type => |struct_type| struct_type,
+ else => unreachable,
+ };
- const struct_obj = t.castTag(.@"struct").?.data;
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
@@ -3009,8 +2953,7 @@ pub const DeclGen = struct {
return int_llvm_ty;
}
- const name = try struct_obj.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
+ const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(mod));
const llvm_struct_ty = dg.context.structCreateNamed(name);
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
@@ -3027,16 +2970,16 @@ pub const DeclGen = struct {
var big_align: u32 = 1;
var any_underaligned_fields = false;
- var it = struct_obj.runtimeFieldIterator();
+ var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
- const field_align = field.alignment(target, struct_obj.layout);
- const field_ty_align = field.ty.abiAlignment(target);
+ const field_align = field.alignment(mod, struct_obj.layout);
+ const field_ty_align = field.ty.abiAlignment(mod);
any_underaligned_fields = any_underaligned_fields or
field_align < field_ty_align;
big_align = @max(big_align, field_align);
const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
@@ -3046,11 +2989,11 @@ pub const DeclGen = struct {
const field_llvm_ty = try dg.lowerType(field.ty);
try llvm_field_types.append(gpa, field_llvm_ty);
- offset += field.ty.abiSize(target);
+ offset += field.ty.abiSize(mod);
}
{
const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
@@ -3067,18 +3010,14 @@ pub const DeclGen = struct {
return llvm_struct_ty;
},
.Union => {
- const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module });
+ const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
- // The Type memory is ephemeral; since we want to store a longer-lived
- // reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
-
- const layout = t.unionGetLayout(target);
- const union_obj = t.cast(Type.Payload.Union).?.data;
+ const layout = t.unionGetLayout(mod);
+ const union_obj = mod.typeToUnion(t).?;
if (union_obj.layout == .Packed) {
- const bitsize = @intCast(c_uint, t.bitSize(target));
+ const bitsize = @intCast(c_uint, t.bitSize(mod));
const int_llvm_ty = dg.context.intType(bitsize);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
@@ -3090,8 +3029,7 @@ pub const DeclGen = struct {
return enum_tag_llvm_ty;
}
- const name = try union_obj.getFullyQualifiedName(dg.module);
- defer gpa.free(name);
+ const name = mod.intern_pool.stringToSlice(try union_obj.getFullyQualifiedName(mod));
const llvm_union_ty = dg.context.structCreateNamed(name);
gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls
@@ -3155,25 +3093,21 @@ pub const DeclGen = struct {
}
fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type {
- const target = dg.module.getTarget();
- const fn_info = fn_ty.fnInfo();
+ const mod = dg.module;
+ const fn_info = mod.typeToFunc(fn_ty).?;
const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);
var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa);
defer llvm_params.deinit();
- if (firstParamSRet(fn_info, target)) {
+ if (firstParamSRet(fn_info, mod)) {
try llvm_params.append(dg.context.pointerType(0));
}
- if (fn_info.return_type.isError() and
- dg.module.comp.bin_file.options.error_return_tracing)
+ if (fn_info.return_type.toType().isError(mod) and
+ mod.comp.bin_file.options.error_return_tracing)
{
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = dg.object.getStackTraceType(),
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(try dg.object.getStackTraceType());
try llvm_params.append(try dg.lowerType(ptr_ty));
}
@@ -3181,25 +3115,23 @@ pub const DeclGen = struct {
while (it.next()) |lowering| switch (lowering) {
.no_bits => continue,
.byval => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
try llvm_params.append(try dg.lowerType(param_ty));
},
.byref, .byref_mut => {
try llvm_params.append(dg.context.pointerType(0));
},
.abi_sized_int => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
- const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
try llvm_params.append(dg.context.intType(abi_size * 8));
},
.slice => {
- const param_ty = fn_info.param_types[it.zig_index - 1];
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- var opt_buf: Type.Payload.ElemType = undefined;
- const ptr_ty = if (param_ty.zigTypeTag() == .Optional)
- param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf)
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
+ param_ty.optionalChild(mod).slicePtrFieldType(mod)
else
- param_ty.slicePtrFieldType(&buf);
+ param_ty.slicePtrFieldType(mod);
const ptr_llvm_ty = try dg.lowerType(ptr_ty);
const len_llvm_ty = try dg.lowerType(Type.usize);
@@ -3214,8 +3146,8 @@ pub const DeclGen = struct {
try llvm_params.append(dg.context.intType(16));
},
.float_array => |count| {
- const param_ty = fn_info.param_types[it.zig_index - 1];
- const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty).?);
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
const field_count = @intCast(c_uint, count);
const arr_ty = float_ty.arrayType(field_count);
try llvm_params.append(arr_ty);
@@ -3239,11 +3171,12 @@ pub const DeclGen = struct {
    /// being a zero-bit type, but it should still be lowered as an i8 in such a case.
/// There are other similar cases handled here as well.
fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*llvm.Type {
- const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
+ const mod = dg.module;
+ const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
.Opaque => true,
- .Fn => !elem_ty.fnInfo().is_generic,
- .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(),
- else => elem_ty.hasRuntimeBitsIgnoreComptime(),
+ .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
+ .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
+ else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
};
const llvm_elem_ty = if (lower_elem_ty)
try dg.lowerType(elem_ty)
@@ -3254,59 +3187,132 @@ pub const DeclGen = struct {
}
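Before the body of lowerValue below: constants are now dispatched on their InternPool key rather than on the type tag plus legacy value tags. A compressed, hypothetical classifier sketching that shape (the case names match keys handled in the hunk; the helper and its strings are illustrative only):

    fn classifyConstant(mod: *Module, val: Value) []const u8 {
        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
            .int => "integer (lowered via toBigInt)",
            .float => "float (bit-cast from its IEEE representation)",
            .err, .error_union => "error value or error union",
            .enum_tag => "enum tag (lowered as its integer value)",
            .ptr => "pointer (decl ref, int-to-ptr, or parent pointer)",
            .opt => "optional",
            .aggregate => "array, vector, tuple, or struct",
            .un => "union",
            else => "type or non-runtime value",
        };
    }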
fn lowerValue(dg: *DeclGen, arg_tv: TypedValue) Error!*llvm.Value {
+ const mod = dg.module;
+ const target = mod.getTarget();
var tv = arg_tv;
- if (tv.val.castTag(.runtime_value)) |rt| {
- tv.val = rt.data;
+ switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
+ .runtime_value => |rt| tv.val = rt.val.toValue(),
+ else => {},
}
- if (tv.val.isUndef()) {
+ if (tv.val.isUndefDeep(mod)) {
const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.getUndef();
}
- const target = dg.module.getTarget();
- switch (tv.ty.zigTypeTag()) {
- .Bool => {
- const llvm_type = try dg.lowerType(tv.ty);
- return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
- },
- // TODO this duplicates code with Pointer but they should share the handling
- // of the tv.val.tag() and then Int should do extra constPtrToInt on top
- .Int => switch (tv.val.tag()) {
- .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index),
- .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
- else => {
- var bigint_space: Value.BigIntSpace = undefined;
- const bigint = tv.val.toBigInt(&bigint_space, target);
- const int_info = tv.ty.intInfo(target);
- assert(int_info.bits != 0);
- const llvm_type = dg.context.intType(int_info.bits);
-
- const unsigned_val = v: {
- if (bigint.limbs.len == 1) {
- break :v llvm_type.constInt(bigint.limbs[0], .False);
- }
- if (@sizeOf(usize) == @sizeOf(u64)) {
- break :v llvm_type.constIntOfArbitraryPrecision(
- @intCast(c_uint, bigint.limbs.len),
- bigint.limbs.ptr,
- );
- }
- @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
- };
- if (!bigint.positive) {
- return llvm.constNeg(unsigned_val);
- }
- return unsigned_val;
+ const val_key = mod.intern_pool.indexToKey(tv.val.toIntern());
+ switch (val_key) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => {
+ const llvm_type = try dg.lowerType(tv.ty);
+ return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
},
},
- .Enum => {
- var int_buffer: Value.Payload.U64 = undefined;
- const int_val = tv.enumToInt(&int_buffer);
+ .variable,
+ .enum_literal,
+ .empty_enum_value,
+ => unreachable, // non-runtime values
+ .extern_func, .func => {
+ const fn_decl_index = switch (val_key) {
+ .extern_func => |extern_func| extern_func.decl,
+ .func => |func| mod.funcPtr(func.index).owner_decl,
+ else => unreachable,
+ };
+ const fn_decl = dg.module.declPtr(fn_decl_index);
+ try dg.module.markDeclAlive(fn_decl);
+ return dg.resolveLlvmFunction(fn_decl_index);
+ },
+ .int => {
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = tv.val.toBigInt(&bigint_space, mod);
+ return lowerBigInt(dg, tv.ty, bigint);
+ },
+ .err => |err| {
+ const llvm_ty = try dg.lowerType(Type.anyerror);
+ const int = try mod.getErrorValue(err.name);
+ return llvm_ty.constInt(int, .False);
+ },
+ .error_union => |error_union| {
+ const err_tv: TypedValue = switch (error_union.val) {
+ .err_name => |err_name| .{
+ .ty = tv.ty.errorUnionSet(mod),
+ .val = (try mod.intern(.{ .err = .{
+ .ty = tv.ty.errorUnionSet(mod).toIntern(),
+ .name = err_name,
+ } })).toValue(),
+ },
+ .payload => .{
+ .ty = Type.err_int,
+ .val = try mod.intValue(Type.err_int, 0),
+ },
+ };
+ const payload_type = tv.ty.errorUnionPayload(mod);
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ // We use the error type directly as the type.
+ return dg.lowerValue(err_tv);
+ }
+
+ const payload_align = payload_type.abiAlignment(mod);
+ const error_align = err_tv.ty.abiAlignment(mod);
+ const llvm_error_value = try dg.lowerValue(err_tv);
+ const llvm_payload_value = try dg.lowerValue(.{
+ .ty = payload_type,
+ .val = switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }),
+ .payload => |payload| payload,
+ }.toValue(),
+ });
+ var fields_buf: [3]*llvm.Value = undefined;
+
+ const llvm_ty = try dg.lowerType(tv.ty);
+ const llvm_field_count = llvm_ty.countStructElementTypes();
+ if (llvm_field_count > 2) {
+ assert(llvm_field_count == 3);
+ fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef();
+ }
+
+ if (error_align > payload_align) {
+ fields_buf[0] = llvm_error_value;
+ fields_buf[1] = llvm_payload_value;
+ return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
+ } else {
+ fields_buf[0] = llvm_payload_value;
+ fields_buf[1] = llvm_error_value;
+ return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
+ }
+ },
+ .enum_tag => {
+ const int_val = try tv.enumToInt(mod);
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_space, target);
+ const bigint = int_val.toBigInt(&bigint_space, mod);
- const int_info = tv.ty.intInfo(target);
+ const int_info = tv.ty.intInfo(mod);
const llvm_type = dg.context.intType(int_info.bits);
const unsigned_val = v: {
@@ -3326,29 +3332,29 @@ pub const DeclGen = struct {
}
return unsigned_val;
},
- .Float => {
+ .float => {
const llvm_ty = try dg.lowerType(tv.ty);
switch (tv.ty.floatBits(target)) {
16 => {
- const repr = @bitCast(u16, tv.val.toFloat(f16));
+ const repr = @bitCast(u16, tv.val.toFloat(f16, mod));
const llvm_i16 = dg.context.intType(16);
const int = llvm_i16.constInt(repr, .False);
return int.constBitCast(llvm_ty);
},
32 => {
- const repr = @bitCast(u32, tv.val.toFloat(f32));
+ const repr = @bitCast(u32, tv.val.toFloat(f32, mod));
const llvm_i32 = dg.context.intType(32);
const int = llvm_i32.constInt(repr, .False);
return int.constBitCast(llvm_ty);
},
64 => {
- const repr = @bitCast(u64, tv.val.toFloat(f64));
+ const repr = @bitCast(u64, tv.val.toFloat(f64, mod));
const llvm_i64 = dg.context.intType(64);
const int = llvm_i64.constInt(repr, .False);
return int.constBitCast(llvm_ty);
},
80 => {
- const float = tv.val.toFloat(f80);
+ const float = tv.val.toFloat(f80, mod);
const repr = std.math.break_f80(float);
const llvm_i80 = dg.context.intType(80);
var x = llvm_i80.constInt(repr.exp, .False);
@@ -3361,7 +3367,7 @@ pub const DeclGen = struct {
}
},
128 => {
- var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128));
+ var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod));
// LLVM seems to require that the lower half of the f128 be placed first
// in the buffer.
if (native_endian == .Big) {
@@ -3373,204 +3379,60 @@ pub const DeclGen = struct {
else => unreachable,
}
},
- .Pointer => switch (tv.val.tag()) {
- .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index),
- .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
- .variable => {
- const decl_index = tv.val.castTag(.variable).?.data.owner_decl;
- const decl = dg.module.declPtr(decl_index);
- dg.module.markDeclAlive(decl);
-
- const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
- const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
-
- const val = try dg.resolveGlobalDecl(decl_index);
- const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
- val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace))
- else
- val;
- return addrspace_casted_ptr;
- },
- .slice => {
- const slice = tv.val.castTag(.slice).?.data;
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const fields: [2]*llvm.Value = .{
- try dg.lowerValue(.{
- .ty = tv.ty.slicePtrFieldType(&buf),
- .val = slice.ptr,
- }),
- try dg.lowerValue(.{
- .ty = Type.usize,
- .val = slice.len,
- }),
- };
- return dg.context.constStruct(&fields, fields.len, .False);
- },
- .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
- const llvm_usize = try dg.lowerType(Type.usize);
- const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False);
- return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
- },
- .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
- return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo().data.bit_offset % 8 == 0);
- },
- .null_value, .zero => {
- const llvm_type = try dg.lowerType(tv.ty);
- return llvm_type.constNull();
- },
- .opt_payload => {
- const payload = tv.val.castTag(.opt_payload).?.data;
- return dg.lowerParentPtr(payload, tv.ty.ptrInfo().data.bit_offset % 8 == 0);
- },
- else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{
- tv.ty.fmtDebug(), tag,
- }),
- },
- .Array => switch (tv.val.tag()) {
- .bytes => {
- const bytes = tv.val.castTag(.bytes).?.data;
- return dg.context.constString(
- bytes.ptr,
- @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()),
- .True, // Don't null terminate. Bytes has the sentinel, if any.
- );
- },
- .str_lit => {
- const str_lit = tv.val.castTag(.str_lit).?.data;
- const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- if (tv.ty.sentinel()) |sent_val| {
- const byte = @intCast(u8, sent_val.toUnsignedInt(target));
- if (byte == 0 and bytes.len > 0) {
- return dg.context.constString(
- bytes.ptr,
- @intCast(c_uint, bytes.len),
- .False, // Yes, null terminate.
- );
- }
- var array = std.ArrayList(u8).init(dg.gpa);
- defer array.deinit();
- try array.ensureUnusedCapacity(bytes.len + 1);
- array.appendSliceAssumeCapacity(bytes);
- array.appendAssumeCapacity(byte);
- return dg.context.constString(
- array.items.ptr,
- @intCast(c_uint, array.items.len),
- .True, // Don't null terminate.
- );
- } else {
- return dg.context.constString(
- bytes.ptr,
- @intCast(c_uint, bytes.len),
- .True, // Don't null terminate. `bytes` has the sentinel, if any.
- );
- }
- },
- .aggregate => {
- const elem_vals = tv.val.castTag(.aggregate).?.data;
- const elem_ty = tv.ty.elemType();
- const gpa = dg.gpa;
- const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel());
- const llvm_elems = try gpa.alloc(*llvm.Value, len);
- defer gpa.free(llvm_elems);
- var need_unnamed = false;
- for (elem_vals[0..len], 0..) |elem_val, i| {
- llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
- }
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- .True,
- );
- } else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- }
- },
- .repeated => {
- const val = tv.val.castTag(.repeated).?.data;
- const elem_ty = tv.ty.elemType();
- const sentinel = tv.ty.sentinel();
- const len = @intCast(usize, tv.ty.arrayLen());
- const len_including_sent = len + @boolToInt(sentinel != null);
- const gpa = dg.gpa;
- const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
- defer gpa.free(llvm_elems);
-
- var need_unnamed = false;
- if (len != 0) {
- for (llvm_elems[0..len]) |*elem| {
- elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
- }
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
- }
-
- if (sentinel) |sent| {
- llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
- }
-
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- .True,
- );
- } else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- }
- },
- .empty_array_sentinel => {
- const elem_ty = tv.ty.elemType();
- const sent_val = tv.ty.sentinel().?;
- const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val });
- const llvm_elems: [1]*llvm.Value = .{sentinel};
- const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
- if (need_unnamed) {
- return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True);
- } else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
- }
- },
- else => unreachable,
+ .ptr => |ptr| {
+ const ptr_tv: TypedValue = switch (ptr.len) {
+ .none => tv,
+ else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) },
+ };
+ const llvm_ptr_val = switch (ptr.addr) {
+ .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl),
+ .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl),
+ .int => |int| try dg.lowerIntAsPtr(int.toValue()),
+ .eu_payload,
+ .opt_payload,
+ .elem,
+ .field,
+ => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0),
+ .comptime_field => unreachable,
+ };
+ switch (ptr.len) {
+ .none => return llvm_ptr_val,
+ else => {
+ const fields: [2]*llvm.Value = .{
+ llvm_ptr_val,
+ try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }),
+ };
+ return dg.context.constStruct(&fields, fields.len, .False);
+ },
+ }
},
- .Optional => {
+ .opt => |opt| {
comptime assert(optional_layout_version == 3);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = tv.ty.optionalChild(&buf);
+ const payload_ty = tv.ty.optionalChild(mod);
const llvm_i8 = dg.context.intType(8);
- const is_pl = !tv.val.isNull();
- const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const non_null_bit = switch (opt.val) {
+ .none => llvm_i8.constNull(),
+ else => llvm_i8.constInt(1, .False),
+ };
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return non_null_bit;
}
const llvm_ty = try dg.lowerType(tv.ty);
- if (tv.ty.optionalReprIsPayload()) {
- if (tv.val.castTag(.opt_payload)) |payload| {
- return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data });
- } else if (is_pl) {
- return dg.lowerValue(.{ .ty = payload_ty, .val = tv.val });
- } else {
- return llvm_ty.constNull();
- }
- }
- assert(payload_ty.zigTypeTag() != .Fn);
+ if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) {
+ .none => llvm_ty.constNull(),
+ else => |payload| dg.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }),
+ };
+ assert(payload_ty.zigTypeTag(mod) != .Fn);
const llvm_field_count = llvm_ty.countStructElementTypes();
var fields_buf: [3]*llvm.Value = undefined;
fields_buf[0] = try dg.lowerValue(.{
.ty = payload_ty,
- .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef),
+ .val = switch (opt.val) {
+ .none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+ else => |payload| payload,
+ }.toValue(),
});
fields_buf[1] = non_null_bit;
if (llvm_field_count > 2) {
@@ -3579,76 +3441,100 @@ pub const DeclGen = struct {
}
return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
},
- .Fn => {
- const fn_decl_index = switch (tv.val.tag()) {
- .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl,
- .function => tv.val.castTag(.function).?.data.owner_decl,
- else => unreachable,
- };
- const fn_decl = dg.module.declPtr(fn_decl_index);
- dg.module.markDeclAlive(fn_decl);
- return dg.resolveLlvmFunction(fn_decl_index);
- },
- .ErrorSet => {
- const llvm_ty = try dg.lowerType(Type.anyerror);
- switch (tv.val.tag()) {
- .@"error" => {
- const err_name = tv.val.castTag(.@"error").?.data.name;
- const kv = try dg.module.getErrorValue(err_name);
- return llvm_ty.constInt(kv.value, .False);
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return llvm_ty.constNull();
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) {
+ .array_type => switch (aggregate.storage) {
+ .bytes => |bytes| return dg.context.constString(
+ bytes.ptr,
+ @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
+ .True, // Don't null terminate. Bytes has the sentinel, if any.
+ ),
+ .elems => |elem_vals| {
+ const elem_ty = tv.ty.childType(mod);
+ const gpa = dg.gpa;
+ const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len);
+ defer gpa.free(llvm_elems);
+ var need_unnamed = false;
+ for (elem_vals, 0..) |elem_val, i| {
+ llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() });
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
+ }
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ .True,
+ );
+ } else {
+ const llvm_elem_ty = try dg.lowerType(elem_ty);
+ return llvm_elem_ty.constArray(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ }
},
- }
- },
- .ErrorUnion => {
- const payload_type = tv.ty.errorUnionPayload();
- const is_pl = tv.val.errorUnionIsPayload();
-
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- // We use the error type directly as the type.
- const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
- return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val });
- }
-
- const payload_align = payload_type.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
- const llvm_error_value = try dg.lowerValue(.{
- .ty = Type.anyerror,
- .val = if (is_pl) Value.initTag(.zero) else tv.val,
- });
- const llvm_payload_value = try dg.lowerValue(.{
- .ty = payload_type,
- .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef),
- });
- var fields_buf: [3]*llvm.Value = undefined;
+ .repeated_elem => |val| {
+ const elem_ty = tv.ty.childType(mod);
+ const sentinel = tv.ty.sentinel(mod);
+ const len = @intCast(usize, tv.ty.arrayLen(mod));
+ const len_including_sent = len + @boolToInt(sentinel != null);
+ const gpa = dg.gpa;
+ const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
+ defer gpa.free(llvm_elems);
+
+ var need_unnamed = false;
+ if (len != 0) {
+ for (llvm_elems[0..len]) |*elem| {
+ elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() });
+ }
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
+ }
- const llvm_ty = try dg.lowerType(tv.ty);
- const llvm_field_count = llvm_ty.countStructElementTypes();
- if (llvm_field_count > 2) {
- assert(llvm_field_count == 3);
- fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef();
- }
+ if (sentinel) |sent| {
+ llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
+ }
- if (error_align > payload_align) {
- fields_buf[0] = llvm_error_value;
- fields_buf[1] = llvm_payload_value;
- return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
- } else {
- fields_buf[0] = llvm_payload_value;
- fields_buf[1] = llvm_error_value;
- return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
- }
- },
- .Struct => {
- const llvm_struct_ty = try dg.lowerType(tv.ty);
- const field_vals = tv.val.castTag(.aggregate).?.data;
- const gpa = dg.gpa;
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ .True,
+ );
+ } else {
+ const llvm_elem_ty = try dg.lowerType(elem_ty);
+ return llvm_elem_ty.constArray(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ }
+ },
+ },
+ .vector_type => |vector_type| {
+ const elem_ty = vector_type.child.toType();
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len);
+ defer dg.gpa.free(llvm_elems);
+ const llvm_i8 = dg.context.intType(8);
+ for (llvm_elems, 0..) |*llvm_elem, i| {
+ llvm_elem.* = switch (aggregate.storage) {
+ .bytes => |bytes| llvm_i8.constInt(bytes[i], .False),
+ .elems => |elems| try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = elems[i].toValue(),
+ }),
+ .repeated_elem => |elem| try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = elem.toValue(),
+ }),
+ };
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .anon_struct_type => |tuple| {
+ const gpa = dg.gpa;
- if (tv.ty.isSimpleTupleOrAnonStruct()) {
- const tuple = tv.ty.tupleFields();
var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
defer llvm_fields.deinit(gpa);
@@ -3659,14 +3545,14 @@ pub const DeclGen = struct {
var big_align: u32 = 0;
var need_unnamed = false;
- for (tuple.types, 0..) |field_ty, i| {
- if (tuple.values[i].tag() != .unreachable_value) continue;
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none) continue;
+ if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_align = field_ty.abiAlignment(target);
+ const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
@@ -3677,19 +3563,19 @@ pub const DeclGen = struct {
}
const field_llvm_val = try dg.lowerValue(.{
- .ty = field_ty,
- .val = field_vals[i],
+ .ty = field_ty.toType(),
+ .val = try tv.val.fieldValue(mod, i),
});
- need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val);
+ need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
llvm_fields.appendAssumeCapacity(field_llvm_val);
- offset += field_ty.abiSize(target);
+ offset += field_ty.toType().abiSize(mod);
}
{
const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ offset = std.mem.alignForward(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
@@ -3704,132 +3590,142 @@ pub const DeclGen = struct {
.False,
);
} else {
+ const llvm_struct_ty = try dg.lowerType(tv.ty);
return llvm_struct_ty.constNamedStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
);
}
- }
-
- const struct_obj = tv.ty.castTag(.@"struct").?.data;
-
- if (struct_obj.layout == .Packed) {
- assert(struct_obj.haveLayout());
- const big_bits = struct_obj.backing_int_ty.bitSize(target);
- const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
- const fields = struct_obj.fields.values();
- comptime assert(Type.packed_struct_layout_version == 2);
- var running_int: *llvm.Value = int_llvm_ty.constNull();
- var running_bits: u16 = 0;
- for (field_vals, 0..) |field_val, i| {
- const field = fields[i];
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ },
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ const llvm_struct_ty = try dg.lowerType(tv.ty);
+ const gpa = dg.gpa;
- const non_int_val = try dg.lowerValue(.{
- .ty = field.ty,
- .val = field_val,
- });
- const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
- const small_int_ty = dg.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime())
- non_int_val.constPtrToInt(small_int_ty)
- else
- non_int_val.constBitCast(small_int_ty);
- const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
- // If the field is as large as the entire packed struct, this
- // zext would go from, e.g. i16 to i16. This is legal with
- // constZExtOrBitCast but not legal with constZExt.
- const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty);
- const shifted = extended_int_val.constShl(shift_rhs);
- running_int = running_int.constOr(shifted);
- running_bits += ty_bit_size;
+ if (struct_obj.layout == .Packed) {
+ assert(struct_obj.haveLayout());
+ const big_bits = struct_obj.backing_int_ty.bitSize(mod);
+ const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
+ const fields = struct_obj.fields.values();
+ comptime assert(Type.packed_struct_layout_version == 2);
+ var running_int: *llvm.Value = int_llvm_ty.constNull();
+ var running_bits: u16 = 0;
+ for (fields, 0..) |field, i| {
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ const non_int_val = try dg.lowerValue(.{
+ .ty = field.ty,
+ .val = try tv.val.fieldValue(mod, i),
+ });
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const small_int_ty = dg.context.intType(ty_bit_size);
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
+ non_int_val.constPtrToInt(small_int_ty)
+ else
+ non_int_val.constBitCast(small_int_ty);
+ const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
+ // If the field is as large as the entire packed struct, this
+ // zext would go from, e.g. i16 to i16. This is legal with
+ // constZExtOrBitCast but not legal with constZExt.
+ const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty);
+ const shifted = extended_int_val.constShl(shift_rhs);
+ running_int = running_int.constOr(shifted);
+ running_bits += ty_bit_size;
+ }
+ return running_int;
}
- return running_int;
- }
- const llvm_field_count = llvm_struct_ty.countStructElementTypes();
- var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
- defer llvm_fields.deinit(gpa);
+ const llvm_field_count = llvm_struct_ty.countStructElementTypes();
+ var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
+ defer llvm_fields.deinit(gpa);
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 0;
- var need_unnamed = false;
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ var need_unnamed = false;
- var it = struct_obj.runtimeFieldIterator();
- while (it.next()) |field_and_index| {
- const field = field_and_index.field;
- const field_align = field.alignment(target, struct_obj.layout);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ var it = struct_obj.runtimeFieldIterator(mod);
+ while (it.next()) |field_and_index| {
+ const field = field_and_index.field;
+ const field_align = field.alignment(mod, struct_obj.layout);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, field_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- // TODO make this and all other padding elsewhere in debug
- // builds be 0xaa not undef.
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
- }
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // TODO make this and all other padding elsewhere in debug
+ // builds be 0xaa not undef.
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
- const field_llvm_val = try dg.lowerValue(.{
- .ty = field.ty,
- .val = field_vals[field_and_index.index],
- });
+ const field_llvm_val = try dg.lowerValue(.{
+ .ty = field.ty,
+ .val = try tv.val.fieldValue(mod, field_and_index.index),
+ });
- need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
+ need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
- llvm_fields.appendAssumeCapacity(field_llvm_val);
+ llvm_fields.appendAssumeCapacity(field_llvm_val);
- offset += field.ty.abiSize(target);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ offset += field.ty.abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
}
- }
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- .False,
- );
- } else {
- return llvm_struct_ty.constNamedStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- );
- }
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ .False,
+ );
+ } else {
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ );
+ }
+ },
+ else => unreachable,
},
- .Union => {
+ .un => {
const llvm_union_ty = try dg.lowerType(tv.ty);
- const tag_and_val = tv.val.castTag(.@"union").?.data;
+ const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) {
+ .none => tv.val.castTag(.@"union").?.data,
+ else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
+ .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() },
+ else => unreachable,
+ },
+ };
- const layout = tv.ty.unionGetLayout(target);
+ const layout = tv.ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
return lowerValue(dg, .{
- .ty = tv.ty.unionTagTypeSafety().?,
+ .ty = tv.ty.unionTagTypeSafety(mod).?,
.val = tag_and_val.tag,
});
}
- const union_obj = tv.ty.cast(Type.Payload.Union).?.data;
+ const union_obj = mod.typeToUnion(tv.ty).?;
const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?;
assert(union_obj.haveFieldTypes());
const field_ty = union_obj.fields.values()[field_index].ty;
if (union_obj.layout == .Packed) {
- if (!field_ty.hasRuntimeBits())
+ if (!field_ty.hasRuntimeBits(mod))
return llvm_union_ty.constNull();
const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
- const ty_bit_size = @intCast(u16, field_ty.bitSize(target));
+ const ty_bit_size = @intCast(u16, field_ty.bitSize(mod));
const small_int_ty = dg.context.intType(ty_bit_size);
- const small_int_val = if (field_ty.isPtrAtRuntime())
+ const small_int_val = if (field_ty.isPtrAtRuntime(mod))
non_int_val.constPtrToInt(small_int_ty)
else
non_int_val.constBitCast(small_int_ty);
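
A minimal stand-alone sketch of the packing idea used in the packed-struct and packed-union branches above: each field value is extended to the backing integer, shifted to its running bit offset, and OR'd into an accumulator. The example types below are hypothetical, and the old-style builtin call syntax is used only to match the era of the surrounding code.

    const std = @import("std");

    const Example = packed struct(u16) {
        low: u4, // bits 0..3
        high: u12, // bits 4..15
    };

    test "packed struct fields are shifted and OR'd into one integer" {
        const value: Example = .{ .low = 0xA, .high = 0x123 };
        // Mirrors the constShl/constOr loop: extend each field, shift it to
        // its bit offset, then OR it into the running integer.
        const manual: u16 = @as(u16, 0xA) | (@as(u16, 0x123) << 4);
        try std.testing.expectEqual(manual, @bitCast(u16, value));
    }
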
@@ -3842,13 +3738,13 @@ pub const DeclGen = struct {
// must pointer cast to the expected type before accessing the union.
var need_unnamed: bool = layout.most_aligned_field != field_index;
const payload = p: {
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const padding_len = @intCast(c_uint, layout.payload_size);
break :p dg.context.intType(8).arrayType(padding_len).getUndef();
}
const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field);
- const field_size = field_ty.abiSize(target);
+ const field_size = field_ty.abiSize(mod);
if (field_size == layout.payload_size) {
break :p field;
}
@@ -3868,7 +3764,7 @@ pub const DeclGen = struct {
}
}
const llvm_tag_value = try lowerValue(dg, .{
- .ty = tv.ty.unionTagTypeSafety().?,
+ .ty = tv.ty.unionTagTypeSafety(mod).?,
.val = tag_and_val.tag,
});
var fields: [3]*llvm.Value = undefined;
@@ -3888,107 +3784,45 @@ pub const DeclGen = struct {
return llvm_union_ty.constNamedStruct(&fields, fields_len);
}
},
- .Vector => switch (tv.val.tag()) {
- .bytes => {
- // Note, sentinel is not stored even if the type has a sentinel.
- const bytes = tv.val.castTag(.bytes).?.data;
- const vector_len = @intCast(usize, tv.ty.arrayLen());
- assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
-
- const elem_ty = tv.ty.elemType();
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems, 0..) |*elem, i| {
- var byte_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = bytes[i],
- };
-
- elem.* = try dg.lowerValue(.{
- .ty = elem_ty,
- .val = Value.initPayload(&byte_payload.base),
- });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- .aggregate => {
- // Note, sentinel is not stored even if the type has a sentinel.
- // The value includes the sentinel in those cases.
- const elem_vals = tv.val.castTag(.aggregate).?.data;
- const vector_len = @intCast(usize, tv.ty.arrayLen());
- assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
- const elem_ty = tv.ty.elemType();
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems, 0..) |*elem, i| {
- elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- .repeated => {
- // Note, sentinel is not stored even if the type has a sentinel.
- const val = tv.val.castTag(.repeated).?.data;
- const elem_ty = tv.ty.elemType();
- const len = @intCast(usize, tv.ty.arrayLen());
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem| {
- elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- .str_lit => {
- // Note, sentinel is not stored
- const str_lit = tv.val.castTag(.str_lit).?.data;
- const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- const vector_len = @intCast(usize, tv.ty.arrayLen());
- assert(vector_len == bytes.len);
-
- const elem_ty = tv.ty.elemType();
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems, 0..) |*elem, i| {
- var byte_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = bytes[i],
- };
+ .memoized_call => unreachable,
+ }
+ }
- elem.* = try dg.lowerValue(.{
- .ty = elem_ty,
- .val = Value.initPayload(&byte_payload.base),
- });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- else => unreachable,
+ fn lowerIntAsPtr(dg: *DeclGen, val: Value) Error!*llvm.Value {
+ switch (dg.module.intern_pool.indexToKey(val.toIntern())) {
+ .undef => return dg.context.pointerType(0).getUndef(),
+ .int => {
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = val.toBigInt(&bigint_space, dg.module);
+ const llvm_int = lowerBigInt(dg, Type.usize, bigint);
+ return llvm_int.constIntToPtr(dg.context.pointerType(0));
},
+ else => unreachable,
+ }
+ }
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
+ fn lowerBigInt(dg: *DeclGen, ty: Type, bigint: std.math.big.int.Const) *llvm.Value {
+ const mod = dg.module;
+ const int_info = ty.intInfo(mod);
+ assert(int_info.bits != 0);
+ const llvm_type = dg.context.intType(int_info.bits);
- .Frame,
- .AnyFrame,
- => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}),
+ const unsigned_val = v: {
+ if (bigint.limbs.len == 1) {
+ break :v llvm_type.constInt(bigint.limbs[0], .False);
+ }
+ if (@sizeOf(usize) == @sizeOf(u64)) {
+ break :v llvm_type.constIntOfArbitraryPrecision(
+ @intCast(c_uint, bigint.limbs.len),
+ bigint.limbs.ptr,
+ );
+ }
+ @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
+ };
+ if (!bigint.positive) {
+ return llvm.constNeg(unsigned_val);
}
+ return unsigned_val;
}
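
lowerBigInt above keys off the limb count and sign of the incoming std.math.big.int.Const: a single limb goes straight to constInt, more limbs take the arbitrary-precision path (64-bit hosts only for now), and a negative value is lowered as the negated unsigned magnitude. A small sketch of the representation this relies on, using only the standard library; the exact limb count depends on the host's limb width.

    const std = @import("std");

    test "a big integer is a sign flag plus an array of limbs" {
        var a = try std.math.big.int.Managed.init(std.testing.allocator);
        defer a.deinit();
        try a.set(@as(u128, 1) << 100);
        const c = a.toConst();
        // 2^100 needs more than one limb, so lowering it would take the
        // constIntOfArbitraryPrecision path rather than the constInt one.
        try std.testing.expect(c.limbs.len > 1);
        try std.testing.expect(c.positive);
    }
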
const ParentPtr = struct {
@@ -4001,57 +3835,86 @@ pub const DeclGen = struct {
ptr_val: Value,
decl_index: Module.Decl.Index,
) Error!*llvm.Value {
- const decl = dg.module.declPtr(decl_index);
- dg.module.markDeclAlive(decl);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = decl.ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
+ try mod.markDeclAlive(decl);
+ const ptr_ty = try mod.singleMutPtrType(decl.ty);
return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
}
fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
- const target = dg.module.getTarget();
- switch (ptr_val.tag()) {
- .decl_ref_mut => {
- const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
- return dg.lowerParentPtrDecl(ptr_val, decl);
- },
- .decl_ref => {
- const decl = ptr_val.castTag(.decl_ref).?.data;
- return dg.lowerParentPtrDecl(ptr_val, decl);
- },
- .variable => {
- const decl = ptr_val.castTag(.variable).?.data.owner_decl;
- return dg.lowerParentPtrDecl(ptr_val, decl);
+ const mod = dg.module;
+ const target = mod.getTarget();
+ return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
+ .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl),
+ .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl),
+ .int => |int| dg.lowerIntAsPtr(int.toValue()),
+ .eu_payload => |eu_ptr| {
+ const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true);
+
+ const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod);
+ const payload_ty = eu_ty.errorUnionPayload(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ // In this case, we represent pointer to error union the same as pointer
+ // to the payload.
+ return parent_llvm_ptr;
+ }
+
+ const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
+ const llvm_u32 = dg.context.intType(32);
+ const indices: [2]*llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(payload_offset, .False),
+ };
+ const eu_llvm_ty = try dg.lowerType(eu_ty);
+ return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
- .int_i64 => {
- const int = ptr_val.castTag(.int_i64).?.data;
- const llvm_usize = try dg.lowerType(Type.usize);
- const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False);
- return llvm_int.constIntToPtr(dg.context.pointerType(0));
+ .opt_payload => |opt_ptr| {
+ const parent_llvm_ptr = try dg.lowerParentPtr(opt_ptr.toValue(), true);
+
+ const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod);
+ const payload_ty = opt_ty.optionalChild(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+ payload_ty.optionalReprIsPayload(mod))
+ {
+ // In this case, we represent pointer to optional the same as pointer
+ // to the payload.
+ return parent_llvm_ptr;
+ }
+
+ const llvm_u32 = dg.context.intType(32);
+ const indices: [2]*llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(0, .False),
+ };
+ const opt_llvm_ty = try dg.lowerType(opt_ty);
+ return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
- .int_u64 => {
- const int = ptr_val.castTag(.int_u64).?.data;
+ .comptime_field => unreachable,
+ .elem => |elem_ptr| {
+ const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.base.toValue(), true);
+
const llvm_usize = try dg.lowerType(Type.usize);
- const llvm_int = llvm_usize.constInt(int, .False);
- return llvm_int.constIntToPtr(dg.context.pointerType(0));
+ const indices: [1]*llvm.Value = .{
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
+ const elem_llvm_ty = try dg.lowerType(elem_ty);
+ return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
- .field_ptr => {
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned);
- const parent_ty = field_ptr.container_ty;
+ .field => |field_ptr| {
+ const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
+ const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
- const field_index = @intCast(u32, field_ptr.field_index);
+ const field_index = @intCast(u32, field_ptr.index);
const llvm_u32 = dg.context.intType(32);
- switch (parent_ty.zigTypeTag()) {
+ switch (parent_ty.zigTypeTag(mod)) {
.Union => {
- if (parent_ty.containerLayout() == .Packed) {
+ if (parent_ty.containerLayout(mod) == .Packed) {
return parent_llvm_ptr;
}
- const layout = parent_ty.unionGetLayout(target);
+ const layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
// In this case a pointer to the union and a pointer to any
// (void) payload is the same.
@@ -4069,16 +3932,16 @@ pub const DeclGen = struct {
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
.Struct => {
- if (parent_ty.containerLayout() == .Packed) {
+ if (parent_ty.containerLayout(mod) == .Packed) {
if (!byte_aligned) return parent_llvm_ptr;
- const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth());
+ const llvm_usize = dg.context.intType(target.ptrBitWidth());
const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
// count bits of fields before this one
const prev_bits = b: {
var b: usize = 0;
- for (parent_ty.structFields().values()[0..field_index]) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
- b += @intCast(usize, field.ty.bitSize(target));
+ for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
+ if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ b += @intCast(usize, field.ty.bitSize(mod));
}
break :b b;
};
@@ -4088,23 +3951,21 @@ pub const DeclGen = struct {
return field_addr.constIntToPtr(final_llvm_ty);
}
- var ty_buf: Type.Payload.Pointer = undefined;
-
const parent_llvm_ty = try dg.lowerType(parent_ty);
- if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+ if (llvmField(parent_ty, field_index, mod)) |llvm_field| {
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
- llvm_u32.constInt(llvm_field_index, .False),
+ llvm_u32.constInt(llvm_field.index, .False),
};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
} else {
- const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False);
+ const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
const indices: [1]*llvm.Value = .{llvm_index};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
}
},
.Pointer => {
- assert(parent_ty.isSlice());
+ assert(parent_ty.isSlice(mod));
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(field_index, .False),
@@ -4115,61 +3976,7 @@ pub const DeclGen = struct {
else => unreachable,
}
},
- .elem_ptr => {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, true);
-
- const llvm_usize = try dg.lowerType(Type.usize);
- const indices: [1]*llvm.Value = .{
- llvm_usize.constInt(elem_ptr.index, .False),
- };
- const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty);
- return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- .opt_payload_ptr => {
- const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true);
- var buf: Type.Payload.ElemType = undefined;
-
- const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
- payload_ty.optionalReprIsPayload())
- {
- // In this case, we represent pointer to optional the same as pointer
- // to the payload.
- return parent_llvm_ptr;
- }
-
- const llvm_u32 = dg.context.intType(32);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(0, .False),
- };
- const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty);
- return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- .eu_payload_ptr => {
- const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true);
-
- const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- // In this case, we represent pointer to error union the same as pointer
- // to the payload.
- return parent_llvm_ptr;
- }
-
- const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1;
- const llvm_u32 = dg.context.intType(32);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(payload_offset, .False),
- };
- const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty);
- return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- else => unreachable,
- }
+ };
}
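
The opt_payload branch above skips the GEP whenever optionalReprIsPayload holds, i.e. when the optional and its payload share one representation (pointer-like optionals being the common case), so a pointer to the optional already points at the payload. That property is observable from ordinary Zig:

    const std = @import("std");

    test "optional pointers reuse the pointer representation" {
        // The null state is encoded in the pointer value itself, so no extra
        // field (and no GEP) is needed to reach the payload.
        try std.testing.expectEqual(@sizeOf(*u32), @sizeOf(?*u32));
    }
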
fn lowerDeclRefValue(
@@ -4177,57 +3984,39 @@ pub const DeclGen = struct {
tv: TypedValue,
decl_index: Module.Decl.Index,
) Error!*llvm.Value {
- if (tv.ty.isSlice()) {
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = tv.ty.slicePtrFieldType(&buf);
- var slice_len: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = tv.val.sliceLen(self.module),
- };
- const fields: [2]*llvm.Value = .{
- try self.lowerValue(.{
- .ty = ptr_ty,
- .val = tv.val,
- }),
- try self.lowerValue(.{
- .ty = Type.usize,
- .val = Value.initPayload(&slice_len.base),
- }),
- };
- return self.context.constStruct(&fields, fields.len, .False);
- }
+ const mod = self.module;
// In the case of something like:
// fn foo() void {}
// const bar = foo;
// ... &bar;
// `bar` is just an alias and we actually want to lower a reference to `foo`.
- const decl = self.module.declPtr(decl_index);
- if (decl.val.castTag(.function)) |func| {
- if (func.data.owner_decl != decl_index) {
- return self.lowerDeclRefValue(tv, func.data.owner_decl);
+ const decl = mod.declPtr(decl_index);
+ if (decl.val.getFunction(mod)) |func| {
+ if (func.owner_decl != decl_index) {
+ return self.lowerDeclRefValue(tv, func.owner_decl);
}
- } else if (decl.val.castTag(.extern_fn)) |func| {
- if (func.data.owner_decl != decl_index) {
- return self.lowerDeclRefValue(tv, func.data.owner_decl);
+ } else if (decl.val.getExternFunc(mod)) |func| {
+ if (func.decl != decl_index) {
+ return self.lowerDeclRefValue(tv, func.decl);
}
}
- const is_fn_body = decl.ty.zigTypeTag() == .Fn;
- if ((!is_fn_body and !decl.ty.hasRuntimeBits()) or
- (is_fn_body and decl.ty.fnInfo().is_generic))
+ const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
+ if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
+ (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic))
{
return self.lowerPtrToVoid(tv.ty);
}
- self.module.markDeclAlive(decl);
+ try mod.markDeclAlive(decl);
const llvm_decl_val = if (is_fn_body)
try self.resolveLlvmFunction(decl_index)
else
try self.resolveGlobalDecl(decl_index);
- const target = self.module.getTarget();
+ const target = mod.getTarget();
const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: {
@@ -4236,7 +4025,7 @@ pub const DeclGen = struct {
} else llvm_decl_val;
const llvm_type = try self.lowerType(tv.ty);
- if (tv.ty.zigTypeTag() == .Int) {
+ if (tv.ty.zigTypeTag(mod) == .Int) {
return llvm_val.constPtrToInt(llvm_type);
} else {
return llvm_val.constBitCast(llvm_type);
@@ -4244,7 +4033,8 @@ pub const DeclGen = struct {
}
fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value {
- const alignment = ptr_ty.ptrInfo().data.@"align";
+ const mod = dg.module;
+ const alignment = ptr_ty.ptrInfo(mod).@"align";
// Even though we are pointing at something which has zero bits (e.g. `void`),
// Pointers are defined to have bits. So we must return something here.
// The value cannot be undefined, because we use the `nonnull` annotation
@@ -4261,7 +4051,7 @@ pub const DeclGen = struct {
// instruction is followed by a `wrap_optional`, it will return this value
// verbatim, and the result should test as non-null.
const target = dg.module.getTarget();
- const int = switch (target.cpu.arch.ptrBitWidth()) {
+ const int = switch (target.ptrBitWidth()) {
16 => llvm_usize.constInt(0xaaaa, .False),
32 => llvm_usize.constInt(0xaaaaaaaa, .False),
64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False),
@@ -4338,21 +4128,20 @@ pub const DeclGen = struct {
/// RMW exchange of floating-point values is bitcasted to same-sized integer
/// types to work around a LLVM deficiency when targeting ARM/AArch64.
fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
- const target = dg.module.getTarget();
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = switch (ty.zigTypeTag()) {
+ const mod = dg.module;
+ const int_ty = switch (ty.zigTypeTag(mod)) {
.Int => ty,
- .Enum => ty.intTagType(&buffer),
+ .Enum => ty.intTagType(mod),
.Float => {
if (!is_rmw_xchg) return null;
- return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
+ return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
},
.Bool => return dg.context.intType(8),
else => return null,
};
- const bit_count = int_ty.intInfo(target).bits;
+ const bit_count = int_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
- return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8));
+ return dg.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8));
} else {
return null;
}
@@ -4363,18 +4152,18 @@ pub const DeclGen = struct {
llvm_fn: *llvm.Value,
param_ty: Type,
param_index: u32,
- fn_info: Type.Payload.Function.Data,
+ fn_info: InternPool.Key.FuncType,
llvm_arg_i: u32,
) void {
- const target = dg.module.getTarget();
- if (param_ty.isPtrAtRuntime()) {
- const ptr_info = param_ty.ptrInfo().data;
+ const mod = dg.module;
+ if (param_ty.isPtrAtRuntime(mod)) {
+ const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, param_index)) |i| {
if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
}
}
- if (!param_ty.isPtrLikeOptional() and !ptr_info.@"allowzero") {
+ if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.@"allowzero") {
dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
@@ -4383,13 +4172,10 @@ pub const DeclGen = struct {
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @max(
- ptr_info.pointee_type.abiAlignment(target),
- 1,
- );
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align);
}
- } else if (ccAbiPromoteInt(fn_info.cc, target, param_ty)) |s| switch (s) {
+ } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
.signed => dg.addArgAttr(llvm_fn, llvm_arg_i, "signext"),
.unsigned => dg.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"),
};
@@ -4490,21 +4276,23 @@ pub const FuncGen = struct {
const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
if (gop.found_existing) return gop.value_ptr.*;
+ const mod = self.dg.module;
const llvm_val = try self.resolveValue(.{
- .ty = self.air.typeOf(inst),
- .val = self.air.value(inst).?,
+ .ty = self.typeOf(inst),
+ .val = (try self.air.value(inst, mod)).?,
});
gop.value_ptr.* = llvm_val;
return llvm_val;
}
fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value {
+ const mod = self.dg.module;
const llvm_val = try self.dg.lowerValue(tv);
- if (!isByRef(tv.ty)) return llvm_val;
+ if (!isByRef(tv.ty, mod)) return llvm_val;
// We have an LLVM value but we need to create a global constant and
// set the value as its initializer, and then return a pointer to the global.
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
const global = self.dg.object.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace);
@@ -4512,7 +4300,7 @@ pub const FuncGen = struct {
global.setLinkage(.Private);
global.setGlobalConstant(.True);
global.setUnnamedAddr(.True);
- global.setAlignment(tv.ty.abiAlignment(target));
+ global.setAlignment(tv.ty.abiAlignment(mod));
const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
else
@@ -4521,11 +4309,12 @@ pub const FuncGen = struct {
}
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
+ const mod = self.dg.module;
+ const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
for (body, 0..) |inst, i| {
- if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
continue;
- }
const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
@@ -4742,8 +4531,8 @@ pub const FuncGen = struct {
.vector_store_elem => try self.airVectorStoreElem(inst),
- .constant => unreachable,
- .const_ty => unreachable,
+ .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
+
.unreach => self.airUnreach(inst),
.dbg_stmt => self.airDbgStmt(inst),
.dbg_inline_begin => try self.airDbgInlineBegin(inst),
@@ -4774,29 +4563,30 @@ pub const FuncGen = struct {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
- const callee_ty = self.air.typeOf(pl_op.operand);
- const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+ const mod = self.dg.module;
+ const callee_ty = self.typeOf(pl_op.operand);
+ const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
- .Pointer => callee_ty.childType(),
+ .Pointer => callee_ty.childType(mod),
else => unreachable,
};
- const fn_info = zig_fn_ty.fnInfo();
- const return_type = fn_info.return_type;
+ const fn_info = mod.typeToFunc(zig_fn_ty).?;
+ const return_type = fn_info.return_type.toType();
const llvm_fn = try self.resolveInst(pl_op.operand);
- const target = self.dg.module.getTarget();
- const sret = firstParamSRet(fn_info, target);
+ const target = mod.getTarget();
+ const sret = firstParamSRet(fn_info, mod);
var llvm_args = std.ArrayList(*llvm.Value).init(self.gpa);
defer llvm_args.deinit();
const ret_ptr = if (!sret) null else blk: {
const llvm_ret_ty = try self.dg.lowerType(return_type);
- const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(target));
+ const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod));
try llvm_args.append(ret_ptr);
break :blk ret_ptr;
};
- const err_return_tracing = fn_info.return_type.isError() and
+ const err_return_tracing = return_type.isError(mod) and
self.dg.module.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
try llvm_args.append(self.err_ret_trace.?);
@@ -4807,11 +4597,11 @@ pub const FuncGen = struct {
.no_bits => continue,
.byval => {
const arg = args[it.zig_index - 1];
- const param_ty = self.air.typeOf(arg);
+ const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
const llvm_param_ty = try self.dg.lowerType(param_ty);
- if (isByRef(param_ty)) {
- const alignment = param_ty.abiAlignment(target);
+ if (isByRef(param_ty, mod)) {
+ const alignment = param_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4821,12 +4611,12 @@ pub const FuncGen = struct {
},
.byref => {
const arg = args[it.zig_index - 1];
- const param_ty = self.air.typeOf(arg);
+ const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
try llvm_args.append(llvm_arg);
} else {
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const param_llvm_ty = llvm_arg.typeOf();
const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
@@ -4836,13 +4626,13 @@ pub const FuncGen = struct {
},
.byref_mut => {
const arg = args[it.zig_index - 1];
- const param_ty = self.air.typeOf(arg);
+ const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const param_llvm_ty = try self.dg.lowerType(param_ty);
const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
@@ -4857,13 +4647,13 @@ pub const FuncGen = struct {
},
.abi_sized_int => {
const arg = args[it.zig_index - 1];
- const param_ty = self.air.typeOf(arg);
+ const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+ const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
const int_llvm_ty = self.context.intType(abi_size * 8);
- if (isByRef(param_ty)) {
- const alignment = param_ty.abiAlignment(target);
+ if (isByRef(param_ty, mod)) {
+ const alignment = param_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4871,7 +4661,7 @@ pub const FuncGen = struct {
// LLVM does not allow bitcasting structs so we must allocate
// a local, store as one type, and then load as another type.
const alignment = @max(
- param_ty.abiAlignment(target),
+ param_ty.abiAlignment(mod),
self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
const int_ptr = self.buildAlloca(int_llvm_ty, alignment);
@@ -4893,14 +4683,14 @@ pub const FuncGen = struct {
},
.multiple_llvm_types => {
const arg = args[it.zig_index - 1];
- const param_ty = self.air.typeOf(arg);
+ const param_ty = self.typeOf(arg);
const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
const llvm_arg = try self.resolveInst(arg);
- const is_by_ref = isByRef(param_ty);
+ const is_by_ref = isByRef(param_ty, mod);
const arg_ptr = if (is_by_ref) llvm_arg else p: {
const p = self.buildAlloca(llvm_arg.typeOf(), null);
const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(param_ty.abiAlignment(target));
+ store_inst.setAlignment(param_ty.abiAlignment(mod));
break :p p;
};
@@ -4910,7 +4700,7 @@ pub const FuncGen = struct {
const i = @intCast(c_uint, i_usize);
const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
- load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+ load_inst.setAlignment(target.ptrBitWidth() / 8);
llvm_args.appendAssumeCapacity(load_inst);
}
},
@@ -4922,19 +4712,19 @@ pub const FuncGen = struct {
},
.float_array => |count| {
const arg = args[it.zig_index - 1];
- const arg_ty = self.air.typeOf(arg);
+ const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- if (!isByRef(arg_ty)) {
+ if (!isByRef(arg_ty, mod)) {
const p = self.buildAlloca(llvm_arg.typeOf(), null);
const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(arg_ty.abiAlignment(target));
+ store_inst.setAlignment(arg_ty.abiAlignment(mod));
llvm_arg = store_inst;
}
- const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?);
+ const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
const array_llvm_ty = float_ty.arrayType(count);
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4942,17 +4732,17 @@ pub const FuncGen = struct {
.i32_array, .i64_array => |arr_len| {
const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
const arg = args[it.zig_index - 1];
- const arg_ty = self.air.typeOf(arg);
+ const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- if (!isByRef(arg_ty)) {
+ if (!isByRef(arg_ty, mod)) {
const p = self.buildAlloca(llvm_arg.typeOf(), null);
const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(arg_ty.abiAlignment(target));
+ store_inst.setAlignment(arg_ty.abiAlignment(mod));
llvm_arg = store_inst;
}
const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len);
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4969,7 +4759,7 @@ pub const FuncGen = struct {
"",
);
- if (callee_ty.zigTypeTag() == .Pointer) {
+ if (callee_ty.zigTypeTag(mod) == .Pointer) {
// Add argument attributes for function pointer calls.
it = iterateParamTypes(self.dg, fn_info);
it.llvm_index += @boolToInt(sret);
@@ -4977,16 +4767,16 @@ pub const FuncGen = struct {
while (it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
- if (!isByRef(param_ty)) {
+ const param_ty = fn_info.param_types[param_index].toType();
+ if (!isByRef(param_ty, mod)) {
self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types[param_index];
+ const param_ty = fn_info.param_types[param_index].toType();
const param_llvm_ty = try self.dg.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => {
@@ -5004,8 +4794,8 @@ pub const FuncGen = struct {
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types[it.zig_index - 1];
- const ptr_info = param_ty.ptrInfo().data;
+ const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+ const ptr_info = param_ty.ptrInfo(mod);
const llvm_arg_i = it.llvm_index - 2;
if (math.cast(u5, it.zig_index - 1)) |i| {
@@ -5013,7 +4803,7 @@ pub const FuncGen = struct {
self.dg.addArgAttr(call, llvm_arg_i, "noalias");
}
}
- if (param_ty.zigTypeTag() != .Optional) {
+ if (param_ty.zigTypeTag(mod) != .Optional) {
self.dg.addArgAttr(call, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
@@ -5022,19 +4812,18 @@ pub const FuncGen = struct {
if (ptr_info.@"align" != 0) {
self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
}
},
};
}
- if (return_type.isNoReturn() and attr != .AlwaysTail) {
- _ = self.builder.buildUnreachable();
+ if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) {
return null;
}
- if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+ if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
@@ -5042,12 +4831,12 @@ pub const FuncGen = struct {
if (ret_ptr) |rp| {
call.setCallSret(llvm_ret_ty);
- if (isByRef(return_type)) {
+ if (isByRef(return_type, mod)) {
return rp;
} else {
// our by-ref status disagrees with sret so we must load.
const loaded = self.builder.buildLoad(llvm_ret_ty, rp, "");
- loaded.setAlignment(return_type.abiAlignment(target));
+ loaded.setAlignment(return_type.abiAlignment(mod));
return loaded;
}
}
@@ -5062,7 +4851,7 @@ pub const FuncGen = struct {
const rp = self.buildAlloca(llvm_ret_ty, alignment);
const store_inst = self.builder.buildStore(call, rp);
store_inst.setAlignment(alignment);
- if (isByRef(return_type)) {
+ if (isByRef(return_type, mod)) {
return rp;
} else {
const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, "");
@@ -5071,10 +4860,10 @@ pub const FuncGen = struct {
}
}
- if (isByRef(return_type)) {
+ if (isByRef(return_type, mod)) {
// our by-ref status disagrees with sret so we must allocate, store,
// and return the allocation pointer.
- const alignment = return_type.abiAlignment(target);
+ const alignment = return_type.abiAlignment(mod);
const rp = self.buildAlloca(llvm_ret_ty, alignment);
const store_inst = self.builder.buildStore(call, rp);
store_inst.setAlignment(alignment);
@@ -5085,22 +4874,19 @@ pub const FuncGen = struct {
}
fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const ret_ty = self.air.typeOf(un_op);
+ const ret_ty = self.typeOf(un_op);
if (self.ret_ptr) |ret_ptr| {
const operand = try self.resolveInst(un_op);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
try self.store(ret_ptr, ptr_ty, operand, .NotAtomic);
_ = self.builder.buildRetVoid();
return null;
}
- const fn_info = self.dg.decl.ty.fnInfo();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
- if (fn_info.return_type.isError()) {
+ const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (fn_info.return_type.toType().isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -5114,10 +4900,9 @@ pub const FuncGen = struct {
const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
const operand = try self.resolveInst(un_op);
- const target = self.dg.module.getTarget();
- const alignment = ret_ty.abiAlignment(target);
+ const alignment = ret_ty.abiAlignment(mod);
- if (isByRef(ret_ty)) {
+ if (isByRef(ret_ty, mod)) {
// operand is a pointer however self.ret_ptr is null so that means
// we need to return a value.
const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
@@ -5142,12 +4927,13 @@ pub const FuncGen = struct {
}
fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const ptr_ty = self.air.typeOf(un_op);
- const ret_ty = ptr_ty.childType();
- const fn_info = self.dg.decl.ty.fnInfo();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
- if (fn_info.return_type.isError()) {
+ const ptr_ty = self.typeOf(un_op);
+ const ret_ty = ptr_ty.childType(mod);
+ const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (fn_info.return_type.toType().isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -5163,10 +4949,9 @@ pub const FuncGen = struct {
return null;
}
const ptr = try self.resolveInst(un_op);
- const target = self.dg.module.getTarget();
const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
const loaded = self.builder.buildLoad(abi_ret_ty, ptr, "");
- loaded.setAlignment(ret_ty.abiAlignment(target));
+ loaded.setAlignment(ret_ty.abiAlignment(mod));
_ = self.builder.buildRet(loaded);
return null;
}
@@ -5185,9 +4970,9 @@ pub const FuncGen = struct {
const src_list = try self.resolveInst(ty_op.operand);
const va_list_ty = self.air.getRefType(ty_op.ty);
const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+ const mod = self.dg.module;
- const target = self.dg.module.getTarget();
- const result_alignment = va_list_ty.abiAlignment(target);
+ const result_alignment = va_list_ty.abiAlignment(mod);
const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);
const llvm_fn_name = "llvm.va_copy";
@@ -5203,7 +4988,7 @@ pub const FuncGen = struct {
const args: [2]*llvm.Value = .{ dest_list, src_list };
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
- if (isByRef(va_list_ty)) {
+ if (isByRef(va_list_ty, mod)) {
return dest_list;
} else {
const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
@@ -5228,11 +5013,11 @@ pub const FuncGen = struct {
}
fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const va_list_ty = self.air.typeOfIndex(inst);
+ const mod = self.dg.module;
+ const va_list_ty = self.typeOfIndex(inst);
const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
- const target = self.dg.module.getTarget();
- const result_alignment = va_list_ty.abiAlignment(target);
+ const result_alignment = va_list_ty.abiAlignment(mod);
const list = self.buildAlloca(llvm_va_list_ty, result_alignment);
const llvm_fn_name = "llvm.va_start";
@@ -5244,7 +5029,7 @@ pub const FuncGen = struct {
const args: [1]*llvm.Value = .{list};
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
- if (isByRef(va_list_ty)) {
+ if (isByRef(va_list_ty, mod)) {
return list;
} else {
const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
@@ -5259,7 +5044,7 @@ pub const FuncGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const operand_ty = self.air.typeOf(bin_op.lhs);
+ const operand_ty = self.typeOf(bin_op.lhs);
return self.cmp(lhs, rhs, operand_ty, op);
}
@@ -5272,7 +5057,7 @@ pub const FuncGen = struct {
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
- const vec_ty = self.air.typeOf(extra.lhs);
+ const vec_ty = self.typeOf(extra.lhs);
const cmp_op = extra.compareOperator();
return self.cmp(lhs, rhs, vec_ty, cmp_op);
@@ -5293,23 +5078,21 @@ pub const FuncGen = struct {
operand_ty: Type,
op: math.CompareOperator,
) Allocator.Error!*llvm.Value {
- var int_buffer: Type.Payload.Bits = undefined;
- var opt_buffer: Type.Payload.ElemType = undefined;
-
- const scalar_ty = operand_ty.scalarType();
- const int_ty = switch (scalar_ty.zigTypeTag()) {
- .Enum => scalar_ty.intTagType(&int_buffer),
+ const mod = self.dg.module;
+ const scalar_ty = operand_ty.scalarType(mod);
+ const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
+ .Enum => scalar_ty.intTagType(mod),
.Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
.Optional => blk: {
- const payload_ty = operand_ty.optionalChild(&opt_buffer);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
- operand_ty.optionalReprIsPayload())
+ const payload_ty = operand_ty.optionalChild(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+ operand_ty.optionalReprIsPayload(mod))
{
break :blk operand_ty;
}
// We need to emit instructions to check for equality/inequality
// of optionals that are not pointers.
- const is_by_ref = isByRef(scalar_ty);
+ const is_by_ref = isByRef(scalar_ty, mod);
const opt_llvm_ty = try self.dg.lowerType(scalar_ty);
const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref);
const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref);
@@ -5376,7 +5159,7 @@ pub const FuncGen = struct {
.Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }),
else => unreachable,
};
- const is_signed = int_ty.isSignedInt();
+ const is_signed = int_ty.isSignedInt(mod);
const operation: llvm.IntPredicate = switch (op) {
.eq => .EQ,
.neq => .NE,
@@ -5389,13 +5172,14 @@ pub const FuncGen = struct {
}
fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
- const inst_ty = self.air.typeOfIndex(inst);
+ const inst_ty = self.typeOfIndex(inst);
const parent_bb = self.context.createBasicBlock("Block");
- if (inst_ty.isNoReturn()) {
+ if (inst_ty.isNoReturn(mod)) {
try self.genBody(body);
return null;
}
@@ -5415,8 +5199,8 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(parent_bb);
// Create a phi node only if the block returns a value.
- const is_body = inst_ty.zigTypeTag() == .Fn;
- if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ const is_body = inst_ty.zigTypeTag(mod) == .Fn;
+ if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
const raw_llvm_ty = try self.dg.lowerType(inst_ty);
@@ -5425,7 +5209,7 @@ pub const FuncGen = struct {
// a pointer to it. LLVM IR allows the call instruction to use function bodies instead
// of function pointers, however the phi makes it a runtime value and therefore
// the LLVM type has to be wrapped in a pointer.
- if (is_body or isByRef(inst_ty)) {
+ if (is_body or isByRef(inst_ty, mod)) {
break :ty self.context.pointerType(0);
}
break :ty raw_llvm_ty;
@@ -5445,8 +5229,9 @@ pub const FuncGen = struct {
const block = self.blocks.get(branch.block_inst).?;
// Add the values to the lists only if the break provides a value.
- const operand_ty = self.air.typeOf(branch.operand);
- if (operand_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.zigTypeTag() == .Fn) {
+ const operand_ty = self.typeOf(branch.operand);
+ const mod = self.dg.module;
+ if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
@@ -5482,24 +5267,26 @@ pub const FuncGen = struct {
}
fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = self.air.typeOf(pl_op.operand);
- const payload_ty = self.air.typeOfIndex(inst);
- const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+ const err_union_ty = self.typeOf(pl_op.operand);
+ const payload_ty = self.typeOfIndex(inst);
+ const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try self.resolveInst(extra.data.ptr);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
+ const err_union_ty = self.typeOf(extra.data.ptr).childType(mod);
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused);
}
@@ -5513,12 +5300,12 @@ pub const FuncGen = struct {
can_elide_load: bool,
is_unused: bool,
) !?*llvm.Value {
- const payload_ty = err_union_ty.errorUnionPayload();
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
- const target = fg.dg.module.getTarget();
+ const mod = fg.dg.module;
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty);
- if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
const is_err = err: {
const err_set_ty = try fg.dg.lowerType(Type.anyerror);
const zero = err_set_ty.constNull();
@@ -5530,8 +5317,8 @@ pub const FuncGen = struct {
err_union;
break :err fg.builder.buildICmp(.NE, loaded, zero, "");
}
- const err_field_index = errUnionErrorOffset(payload_ty, target);
- if (operand_is_ptr or isByRef(err_union_ty)) {
+ const err_field_index = errUnionErrorOffset(payload_ty, mod);
+ if (operand_is_ptr or isByRef(err_union_ty, mod)) {
const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, "");
// TODO add alignment to this load
const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -5556,31 +5343,32 @@ pub const FuncGen = struct {
if (!payload_has_bits) {
return if (operand_is_ptr) err_union else null;
}
- const offset = errUnionPayloadOffset(payload_ty, target);
+ const offset = errUnionPayloadOffset(payload_ty, mod);
if (operand_is_ptr) {
return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
- } else if (isByRef(err_union_ty)) {
+ } else if (isByRef(err_union_ty, mod)) {
const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
- if (isByRef(payload_ty)) {
+ if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
- return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+ return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
}
const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
- load_inst.setAlignment(payload_ty.abiAlignment(target));
+ load_inst.setAlignment(payload_ty.abiAlignment(mod));
return load_inst;
}
return fg.builder.buildExtractValue(err_union, offset, "");
}
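
lowerTry above decides the error branch by comparing the error field against constNull of the error-set type, which relies on real error values never lowering to code zero. A one-line check of that assumption, written with the pre-rename @errorToInt builtin to match the era of the surrounding code:

    const std = @import("std");

    test "error values never carry the zero code" {
        try std.testing.expect(@errorToInt(error.Oops) != 0);
    }
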
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
- const target = self.dg.module.getTarget();
- const llvm_usize = self.context.intType(target.cpu.arch.ptrBitWidth());
+ const target = mod.getTarget();
+ const llvm_usize = self.context.intType(target.ptrBitWidth());
const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
self.builder.buildPtrToInt(cond, llvm_usize, "")
else
@@ -5624,6 +5412,7 @@ pub const FuncGen = struct {
}
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
@@ -5639,21 +5428,22 @@ pub const FuncGen = struct {
// would have been emitted already. Also the main loop in genBody can
// be while(true) instead of for(body), which will eliminate 1 branch on
// a hot path.
- if (body.len == 0 or !self.air.typeOfIndex(body[body.len - 1]).isNoReturn()) {
+ if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) {
_ = self.builder.buildBr(loop_block);
}
return null;
}
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
- const array_ty = operand_ty.childType();
+ const operand_ty = self.typeOf(ty_op.operand);
+ const array_ty = operand_ty.childType(mod);
const llvm_usize = try self.dg.lowerType(Type.usize);
- const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
- const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+ const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False);
+ const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
const operand = try self.resolveInst(ty_op.operand);
- if (!array_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
return self.builder.buildInsertValue(partial, len, 1, "");
}
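
airArrayToSlice builds the slice as a two-field aggregate: the element pointer goes in at index 0 and the comptime-known array length at index 1. The same shape is visible from user code when an array pointer coerces to a slice; a minimal check:

    const std = @import("std");

    test "coercing an array pointer to a slice fills in ptr and len" {
        var array = [_]u8{ 1, 2, 3, 4 };
        const slice: []u8 = &array;
        try std.testing.expectEqual(@as(usize, 4), slice.len);
        try std.testing.expectEqual(@as([*]u8, &array), slice.ptr);
    }
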
@@ -5667,30 +5457,31 @@ pub const FuncGen = struct {
}
fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const operand_scalar_ty = operand_ty.scalarType();
+ const operand_ty = self.typeOf(ty_op.operand);
+ const operand_scalar_ty = operand_ty.scalarType(mod);
- const dest_ty = self.air.typeOfIndex(inst);
- const dest_scalar_ty = dest_ty.scalarType();
+ const dest_ty = self.typeOfIndex(inst);
+ const dest_scalar_ty = dest_ty.scalarType(mod);
const dest_llvm_ty = try self.dg.lowerType(dest_ty);
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
if (intrinsicsAllowed(dest_scalar_ty, target)) {
- if (operand_scalar_ty.isSignedInt()) {
+ if (operand_scalar_ty.isSignedInt(mod)) {
return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
} else {
return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
}
}
- const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
+ const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod));
const rt_int_bits = compilerRtIntBits(operand_bits);
const rt_int_ty = self.context.intType(rt_int_bits);
var extended = e: {
- if (operand_scalar_ty.isSignedInt()) {
+ if (operand_scalar_ty.isSignedInt(mod)) {
break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, "");
} else {
break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, "");
@@ -5699,7 +5490,7 @@ pub const FuncGen = struct {
const dest_bits = dest_scalar_ty.floatBits(target);
const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits);
const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits);
- const sign_prefix = if (operand_scalar_ty.isSignedInt()) "" else "un";
+ const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un";
var fn_name_buf: [64]u8 = undefined;
const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{
sign_prefix,
@@ -5725,27 +5516,28 @@ pub const FuncGen = struct {
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const operand_scalar_ty = operand_ty.scalarType();
+ const operand_ty = self.typeOf(ty_op.operand);
+ const operand_scalar_ty = operand_ty.scalarType(mod);
- const dest_ty = self.air.typeOfIndex(inst);
- const dest_scalar_ty = dest_ty.scalarType();
+ const dest_ty = self.typeOfIndex(inst);
+ const dest_scalar_ty = dest_ty.scalarType(mod);
const dest_llvm_ty = try self.dg.lowerType(dest_ty);
if (intrinsicsAllowed(operand_scalar_ty, target)) {
// TODO set fast math flag
- if (dest_scalar_ty.isSignedInt()) {
+ if (dest_scalar_ty.isSignedInt(mod)) {
return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
} else {
return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
}
}
- const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target)));
+ const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod)));
const ret_ty = self.context.intType(rt_int_bits);
const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -5757,7 +5549,7 @@ pub const FuncGen = struct {
const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
- const sign_prefix = if (dest_scalar_ty.isSignedInt()) "" else "uns";
+ const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";
var fn_name_buf: [64]u8 = undefined;
const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{
@@ -5779,7 +5571,8 @@ pub const FuncGen = struct {
}
fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
- if (ty.isSlice()) {
+ const mod = fg.dg.module;
+ if (ty.isSlice(mod)) {
return fg.builder.buildExtractValue(ptr, 0, "");
} else {
return ptr;
@@ -5787,22 +5580,23 @@ pub const FuncGen = struct {
}
fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
- const target = fg.dg.module.getTarget();
- const llvm_usize_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
- switch (ty.ptrSize()) {
+ const mod = fg.dg.module;
+ const target = mod.getTarget();
+ const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
+ switch (ty.ptrSize(mod)) {
.Slice => {
const len = fg.builder.buildExtractValue(ptr, 1, "");
- const elem_ty = ty.childType();
- const abi_size = elem_ty.abiSize(target);
+ const elem_ty = ty.childType(mod);
+ const abi_size = elem_ty.abiSize(mod);
if (abi_size == 1) return len;
const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
return fg.builder.buildMul(len, abi_size_llvm_val, "");
},
.One => {
- const array_ty = ty.childType();
- const elem_ty = array_ty.childType();
- const abi_size = elem_ty.abiSize(target);
- return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
+ const array_ty = ty.childType(mod);
+ const elem_ty = array_ty.childType(mod);
+ const abi_size = elem_ty.abiSize(mod);
+ return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False);
},
.Many, .C => unreachable,
}
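// --- Illustrative sketch, not part of the patch -----------------------------
// sliceOrArrayLenInBytes above distinguishes two pointer shapes: a slice, whose
// byte length is the runtime len times the element ABI size, and a *[N]T, whose
// byte length is fully known from the type. A surface-level restatement:
const std = @import("std");

test "byte lengths for slices and array pointers" {
    var storage = [_]u32{ 1, 2, 3, 4 };
    const slice: []u32 = storage[0..3];
    try std.testing.expectEqual(@as(usize, 12), slice.len * @sizeOf(u32));

    const array_ptr: *[4]u32 = &storage;
    try std.testing.expectEqual(@as(usize, 16), @sizeOf(@TypeOf(array_ptr.*)));
}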
@@ -5815,67 +5609,69 @@ pub const FuncGen = struct {
}
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const slice_ptr = try self.resolveInst(ty_op.operand);
- const slice_ptr_ty = self.air.typeOf(ty_op.operand);
- const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType());
+ const slice_ptr_ty = self.typeOf(ty_op.operand);
+ const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType(mod));
return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, "");
}
fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = self.air.typeOf(bin_op.lhs);
+ const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const elem_ty = slice_ty.childType();
+ const elem_ty = slice_ty.childType(mod);
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
const base_ptr = self.builder.buildExtractValue(slice, 0, "");
const indices: [1]*llvm.Value = .{index};
const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
if (self.canElideLoad(body_tail))
return ptr;
- const target = self.dg.module.getTarget();
- return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+ return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
}
return self.load(ptr, slice_ty);
}
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const slice_ty = self.air.typeOf(bin_op.lhs);
+ const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType());
+ const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType(mod));
const base_ptr = self.builder.buildExtractValue(slice, 0, "");
const indices: [1]*llvm.Value = .{index};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
}
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const array_ty = self.air.typeOf(bin_op.lhs);
+ const array_ty = self.typeOf(bin_op.lhs);
const array_llvm_val = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const array_llvm_ty = try self.dg.lowerType(array_ty);
- const elem_ty = array_ty.childType();
- if (isByRef(array_ty)) {
+ const elem_ty = array_ty.childType(mod);
+ if (isByRef(array_ty, mod)) {
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
if (canElideLoad(self, body_tail))
return elem_ptr;
- const target = self.dg.module.getTarget();
- return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(target), false);
+ return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false);
} else {
const lhs_index = Air.refToIndex(bin_op.lhs).?;
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -5902,15 +5698,16 @@ pub const FuncGen = struct {
}
fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType();
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const elem_ty = ptr_ty.childType(mod);
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
// TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch
- const ptr = if (ptr_ty.isSinglePointer()) ptr: {
+ const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: {
// If this is a single-item pointer to an array, we need another index in the GEP.
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
@@ -5918,32 +5715,32 @@ pub const FuncGen = struct {
const indices: [1]*llvm.Value = .{rhs};
break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
};
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
if (self.canElideLoad(body_tail))
return ptr;
- const target = self.dg.module.getTarget();
- return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+ return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
}
return self.load(ptr, ptr_ty);
}
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType();
- if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const elem_ty = ptr_ty.childType(mod);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const elem_ptr = self.air.getRefType(ty_pl.ty);
- if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr;
+ if (elem_ptr.ptrInfo(mod).vector_index != .none) return base_ptr;
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
- if (ptr_ty.isSinglePointer()) {
+ if (ptr_ty.isSinglePointer(mod)) {
// If this is a single-item pointer to an array, we need another index in the GEP.
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
@@ -5957,7 +5754,7 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ptr = try self.resolveInst(struct_field.struct_operand);
- const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand);
+ const struct_ptr_ty = self.typeOf(struct_field.struct_operand);
return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, struct_field.field_index);
}
@@ -5968,41 +5765,41 @@ pub const FuncGen = struct {
) !?*llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try self.resolveInst(ty_op.operand);
- const struct_ptr_ty = self.air.typeOf(ty_op.operand);
+ const struct_ptr_ty = self.typeOf(ty_op.operand);
return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
}
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
- const struct_ty = self.air.typeOf(struct_field.struct_operand);
+ const struct_ty = self.typeOf(struct_field.struct_operand);
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
- const field_ty = struct_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+ const field_ty = struct_ty.structFieldType(field_index, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
- const target = self.dg.module.getTarget();
- if (!isByRef(struct_ty)) {
- assert(!isByRef(field_ty));
- switch (struct_ty.zigTypeTag()) {
- .Struct => switch (struct_ty.containerLayout()) {
+ if (!isByRef(struct_ty, mod)) {
+ assert(!isByRef(field_ty, mod));
+ switch (struct_ty.zigTypeTag(mod)) {
+ .Struct => switch (struct_ty.containerLayout(mod)) {
.Packed => {
- const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const bit_offset = struct_obj.packedFieldBitOffset(target, field_index);
+ const struct_obj = mod.typeToStruct(struct_ty).?;
+ const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
const containing_int = struct_llvm_val;
const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try self.dg.lowerType(field_ty);
- if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
- } else if (field_ty.isPtrAtRuntime()) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6010,22 +5807,21 @@ pub const FuncGen = struct {
return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
},
else => {
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+ const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index;
return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
},
},
.Union => {
- assert(struct_ty.containerLayout() == .Packed);
+ assert(struct_ty.containerLayout(mod) == .Packed);
const containing_int = struct_llvm_val;
const elem_llvm_ty = try self.dg.lowerType(field_ty);
- if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
- } else if (field_ty.isPtrAtRuntime()) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6036,30 +5832,35 @@ pub const FuncGen = struct {
}
}
- switch (struct_ty.zigTypeTag()) {
+ switch (struct_ty.zigTypeTag(mod)) {
.Struct => {
- assert(struct_ty.containerLayout() != .Packed);
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+ assert(struct_ty.containerLayout(mod) != .Packed);
+ const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
- const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
- const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
- if (isByRef(field_ty)) {
+ const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, "");
+ const field_ptr_ty = try mod.ptrType(.{
+ .child = llvm_field.ty.toIntern(),
+ .flags = .{
+ .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
+ },
+ });
+ if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
return field_ptr;
- return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(target), false);
+ assert(llvm_field.alignment != 0);
+ return self.loadByRef(field_ptr, field_ty, llvm_field.alignment, false);
} else {
return self.load(field_ptr, field_ptr_ty);
}
},
.Union => {
const union_llvm_ty = try self.dg.lowerType(struct_ty);
- const layout = struct_ty.unionGetLayout(target);
+ const layout = struct_ty.unionGetLayout(mod);
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
const llvm_field_ty = try self.dg.lowerType(field_ty);
- if (isByRef(field_ty)) {
+ if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
return field_ptr;
@@ -6073,20 +5874,21 @@ pub const FuncGen = struct {
}
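// --- Illustrative sketch, not part of the patch -----------------------------
// For packed containers, airStructFieldVal above recovers a field by shifting
// the backing integer right by the field's bit offset and truncating to the
// field width (with an extra bitcast/inttoptr for float, vector, and pointer
// fields). A standalone restatement of the shift-and-truncate step, using a
// made-up layout:
const std = @import("std");

fn extractField(comptime FieldInt: type, container: u32, bit_offset: u5) FieldInt {
    return @truncate(FieldInt, container >> bit_offset);
}

test "packed field extraction by shift and truncate" {
    // Assumed layout, low bits first: a: u4, b: u12, c: u16.
    const container: u32 = 0xABCD_1234;
    try std.testing.expectEqual(@as(u4, 0x4), extractField(u4, container, 0));
    try std.testing.expectEqual(@as(u12, 0x123), extractField(u12, container, 4));
    try std.testing.expectEqual(@as(u16, 0xABCD), extractField(u16, container, 16));
}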
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try self.resolveInst(extra.field_ptr);
const target = self.dg.module.getTarget();
- const parent_ty = self.air.getRefType(ty_pl.ty).childType();
- const field_offset = parent_ty.structFieldOffset(extra.field_index, target);
+ const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
if (field_offset == 0) {
return field_ptr;
}
- const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
+ const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, "");
const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), "");
@@ -6121,12 +5923,13 @@ pub const FuncGen = struct {
fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const dib = self.dg.object.di_builder orelse return null;
- const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
- const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ const mod = self.dg.module;
+ const func = mod.funcPtr(ty_fn.func);
const decl_index = func.owner_decl;
- const decl = self.dg.module.declPtr(decl_index);
- const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
+ const decl = mod.declPtr(decl_index);
+ const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
self.di_file = di_file;
const line_number = decl.src_line + 1;
const cur_debug_location = self.builder.getCurrentDebugLocation2();
@@ -6137,22 +5940,37 @@ pub const FuncGen = struct {
.base_line = self.base_line,
});
- const fqn = try decl.getFullyQualifiedName(self.dg.module);
- defer self.gpa.free(fqn);
-
- const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index);
+ const fqn = try decl.getFullyQualifiedName(mod);
+
+ const is_internal_linkage = !mod.decl_exports.contains(decl_index);
+ const fn_ty = try mod.funcType(.{
+ .param_types = &.{},
+ .return_type = .void_type,
+ .alignment = .none,
+ .noalias_bits = 0,
+ .comptime_bits = 0,
+ .cc = .Unspecified,
+ .is_var_args = false,
+ .is_generic = false,
+ .is_noinline = false,
+ .align_is_generic = false,
+ .cc_is_generic = false,
+ .section_is_generic = false,
+ .addrspace_is_generic = false,
+ });
+ const fn_di_ty = try self.dg.object.lowerDebugType(fn_ty, .full);
const subprogram = dib.createFunction(
di_file.toScope(),
- decl.name,
- fqn,
+ mod.intern_pool.stringToSlice(decl.name),
+ mod.intern_pool.stringToSlice(fqn),
di_file,
line_number,
- try self.dg.object.lowerDebugType(Type.initTag(.fn_void_no_args), .full),
+ fn_di_ty,
is_internal_linkage,
true, // is definition
line_number + func.lbrace_line, // scope line
llvm.DIFlags.StaticMember,
- self.dg.module.comp.bin_file.options.optimize_mode != .Debug,
+ mod.comp.bin_file.options.optimize_mode != .Debug,
null, // decl_subprogram
);
@@ -6164,12 +5982,12 @@ pub const FuncGen = struct {
fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.dg.object.di_builder == null) return null;
- const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
- const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
const mod = self.dg.module;
+ const func = mod.funcPtr(ty_fn.func);
const decl = mod.declPtr(func.owner_decl);
- const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
+ const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
self.di_file = di_file;
const old = self.dbg_inlined.pop();
self.di_scope = old.scope;
@@ -6193,18 +6011,19 @@ pub const FuncGen = struct {
}
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const dib = self.dg.object.di_builder orelse return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const operand = try self.resolveInst(pl_op.operand);
const name = self.air.nullTerminatedString(pl_op.payload);
- const ptr_ty = self.air.typeOf(pl_op.operand);
+ const ptr_ty = self.typeOf(pl_op.operand);
const di_local_var = dib.createAutoVariable(
self.di_scope.?,
name.ptr,
self.di_file.?,
self.prev_dbg_line,
- try self.dg.object.lowerDebugType(ptr_ty.childType(), .full),
+ try self.dg.object.lowerDebugType(ptr_ty.childType(mod), .full),
true, // always preserve
0, // flags
);
@@ -6222,7 +6041,7 @@ pub const FuncGen = struct {
const dib = self.dg.object.di_builder orelse return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const operand = try self.resolveInst(pl_op.operand);
- const operand_ty = self.air.typeOf(pl_op.operand);
+ const operand_ty = self.typeOf(pl_op.operand);
const name = self.air.nullTerminatedString(pl_op.payload);
if (needDbgVarWorkaround(self.dg)) {
@@ -6244,10 +6063,11 @@ pub const FuncGen = struct {
null;
const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
const insert_block = self.builder.getInsertBlock();
- if (isByRef(operand_ty)) {
+ const mod = self.dg.module;
+ if (isByRef(operand_ty, mod)) {
_ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block);
} else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
- const alignment = operand_ty.abiAlignment(self.dg.module.getTarget());
+ const alignment = operand_ty.abiAlignment(mod);
const alloca = self.buildAlloca(operand.typeOf(), alignment);
const store_inst = self.builder.buildStore(operand, alloca);
store_inst.setAlignment(alignment);
@@ -6295,7 +6115,8 @@ pub const FuncGen = struct {
// This stores whether we need to add an elementtype attribute and
// if so, the element type itself.
const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count);
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
var llvm_ret_i: usize = 0;
var llvm_param_i: usize = 0;
@@ -6322,9 +6143,9 @@ pub const FuncGen = struct {
llvm_ret_indirect[i] = (output != .none) and constraintAllowsMemory(constraint);
if (output != .none) {
const output_inst = try self.resolveInst(output);
- const output_ty = self.air.typeOf(output);
- assert(output_ty.zigTypeTag() == .Pointer);
- const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType());
+ const output_ty = self.typeOf(output);
+ assert(output_ty.zigTypeTag(mod) == .Pointer);
+ const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType(mod));
if (llvm_ret_indirect[i]) {
// Pass the result by reference as an indirect output (e.g. "=*m")
@@ -6340,7 +6161,7 @@ pub const FuncGen = struct {
llvm_ret_i += 1;
}
} else {
- const ret_ty = self.air.typeOfIndex(inst);
+ const ret_ty = self.typeOfIndex(inst);
llvm_ret_types[llvm_ret_i] = try self.dg.lowerType(ret_ty);
llvm_ret_i += 1;
}
@@ -6375,15 +6196,15 @@ pub const FuncGen = struct {
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
const arg_llvm_value = try self.resolveInst(input);
- const arg_ty = self.air.typeOf(input);
+ const arg_ty = self.typeOf(input);
var llvm_elem_ty: ?*llvm.Type = null;
- if (isByRef(arg_ty)) {
+ if (isByRef(arg_ty, mod)) {
llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty);
if (constraintAllowsMemory(constraint)) {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
} else {
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const arg_llvm_ty = try self.dg.lowerType(arg_ty);
const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, "");
load_inst.setAlignment(alignment);
@@ -6395,7 +6216,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
} else {
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment);
const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr);
store_inst.setAlignment(alignment);
@@ -6425,7 +6246,7 @@ pub const FuncGen = struct {
// an elementtype(<ty>) attribute.
if (constraint[0] == '*') {
llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse
- try self.dg.lowerPtrElemTy(arg_ty.childType());
+ try self.dg.lowerPtrElemTy(arg_ty.childType(mod));
} else {
llvm_param_attrs[llvm_param_i] = null;
}
@@ -6597,10 +6418,10 @@ pub const FuncGen = struct {
if (output != .none) {
const output_ptr = try self.resolveInst(output);
- const output_ptr_ty = self.air.typeOf(output);
+ const output_ptr_ty = self.typeOf(output);
const store_inst = self.builder.buildStore(output_value, output_ptr);
- store_inst.setAlignment(output_ptr_ty.ptrAlignment(target));
+ store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod));
} else {
ret_val = output_value;
}
@@ -6616,22 +6437,21 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
pred: llvm.IntPredicate,
) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const operand_ty = self.air.typeOf(un_op);
- const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+ const operand_ty = self.typeOf(un_op);
+ const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
- if (optional_ty.optionalReprIsPayload()) {
+ const payload_ty = optional_ty.optionalChild(mod);
+ if (optional_ty.optionalReprIsPayload(mod)) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(optional_llvm_ty, operand, "")
else
operand;
- if (payload_ty.isSlice()) {
+ if (payload_ty.isSlice(mod)) {
const slice_ptr = self.builder.buildExtractValue(loaded, 0, "");
- var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf));
+ const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(mod));
return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), "");
}
return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), "");
@@ -6639,7 +6459,7 @@ pub const FuncGen = struct {
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(optional_llvm_ty, operand, "")
else
@@ -6648,7 +6468,7 @@ pub const FuncGen = struct {
return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), "");
}
- const is_by_ref = operand_is_ptr or isByRef(optional_ty);
+ const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod);
const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref);
if (pred == .EQ) {
return self.builder.buildNot(non_null_bit, "");
@@ -6663,15 +6483,16 @@ pub const FuncGen = struct {
op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const operand_ty = self.air.typeOf(un_op);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
- const payload_ty = err_union_ty.errorUnionPayload();
+ const operand_ty = self.typeOf(un_op);
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
const err_set_ty = try self.dg.lowerType(Type.anyerror);
const zero = err_set_ty.constNull();
- if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
const llvm_i1 = self.context.intType(1);
switch (op) {
.EQ => return llvm_i1.constInt(1, .False), // 0 == 0
@@ -6680,7 +6501,7 @@ pub const FuncGen = struct {
}
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(try self.dg.lowerType(err_union_ty), operand, "")
else
@@ -6688,10 +6509,9 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- const target = self.dg.module.getTarget();
- const err_field_index = errUnionErrorOffset(payload_ty, target);
+ const err_field_index = errUnionErrorOffset(payload_ty, mod);
- if (operand_is_ptr or isByRef(err_union_ty)) {
+ if (operand_is_ptr or isByRef(err_union_ty, mod)) {
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, "");
const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -6703,17 +6523,17 @@ pub const FuncGen = struct {
}
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.air.typeOf(ty_op.operand).childType();
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const optional_ty = self.typeOf(ty_op.operand).childType(mod);
+ const payload_ty = optional_ty.optionalChild(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
return operand;
}
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// The payload and the optional are the same value.
return operand;
}
@@ -6724,18 +6544,18 @@ pub const FuncGen = struct {
fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
comptime assert(optional_layout_version == 3);
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.air.typeOf(ty_op.operand).childType();
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
+ const optional_ty = self.typeOf(ty_op.operand).childType(mod);
+ const payload_ty = optional_ty.optionalChild(mod);
const non_null_bit = self.context.intType(8).constInt(1, .False);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
_ = self.builder.buildStore(non_null_bit, operand);
return operand;
}
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// The payload and the optional are the same value.
// Setting to non-null will be done when the payload is set.
return operand;
@@ -6755,20 +6575,21 @@ pub const FuncGen = struct {
}
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.air.typeOf(ty_op.operand);
- const payload_ty = self.air.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ const optional_ty = self.typeOf(ty_op.operand);
+ const payload_ty = self.typeOfIndex(inst);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// Payload value is the same as the optional value.
return operand;
}
const opt_llvm_ty = try self.dg.lowerType(optional_ty);
- const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+ const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
}
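// --- Illustrative sketch, not part of the patch -----------------------------
// optionalReprIsPayload above covers optionals whose null state lives in the
// payload itself (e.g. ?*T uses the null pointer), so no separate flag byte is
// stored and the payload value is the optional value:
const std = @import("std");

test "pointer-like optionals carry no extra flag byte" {
    try std.testing.expectEqual(@sizeOf(*u32), @sizeOf(?*u32));
    // A plain optional integer does need the non-null byte (plus padding).
    try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
}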
@@ -6777,32 +6598,32 @@ pub const FuncGen = struct {
body_tail: []const Air.Inst.Index,
operand_is_ptr: bool,
) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
- const result_ty = self.air.typeOfIndex(inst);
- const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;
- const target = self.dg.module.getTarget();
+ const operand_ty = self.typeOf(ty_op.operand);
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+ const result_ty = self.typeOfIndex(inst);
+ const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty;
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return if (operand_is_ptr) operand else null;
}
- const offset = errUnionPayloadOffset(payload_ty, target);
+ const offset = errUnionPayloadOffset(payload_ty, mod);
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
if (operand_is_ptr) {
return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
- } else if (isByRef(err_union_ty)) {
+ } else if (isByRef(err_union_ty, mod)) {
const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
- if (isByRef(payload_ty)) {
+ if (isByRef(payload_ty, mod)) {
if (self.canElideLoad(body_tail))
return payload_ptr;
- return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+ return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
}
const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
- load_inst.setAlignment(payload_ty.abiAlignment(target));
+ load_inst.setAlignment(payload_ty.abiAlignment(mod));
return load_inst;
}
return self.builder.buildExtractValue(operand, offset, "");
@@ -6813,11 +6634,12 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
- if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ const operand_ty = self.typeOf(ty_op.operand);
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
+ if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
const err_llvm_ty = try self.dg.lowerType(Type.anyerror);
if (operand_is_ptr) {
return operand;
@@ -6828,16 +6650,15 @@ pub const FuncGen = struct {
const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror);
- const payload_ty = err_union_ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!operand_is_ptr) return operand;
return self.builder.buildLoad(err_set_llvm_ty, operand, "");
}
- const target = self.dg.module.getTarget();
- const offset = errUnionErrorOffset(payload_ty, target);
+ const offset = errUnionErrorOffset(payload_ty, mod);
- if (operand_is_ptr or isByRef(err_union_ty)) {
+ if (operand_is_ptr or isByRef(err_union_ty, mod)) {
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, "");
@@ -6847,30 +6668,30 @@ pub const FuncGen = struct {
}
fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const err_union_ty = self.air.typeOf(ty_op.operand).childType();
+ const err_union_ty = self.typeOf(ty_op.operand).childType(mod);
- const payload_ty = err_union_ty.errorUnionPayload();
- const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero });
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
+ const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) });
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
_ = self.builder.buildStore(non_error_val, operand);
return operand;
}
- const target = self.dg.module.getTarget();
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
{
- const error_offset = errUnionErrorOffset(payload_ty, target);
+ const error_offset = errUnionErrorOffset(payload_ty, mod);
// First set the non-error value.
const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, "");
const store_inst = self.builder.buildStore(non_error_val, non_null_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+ store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
}
// Then return the payload pointer (only if it is used).
if (self.liveness.isUnused(inst))
return null;
- const payload_offset = errUnionPayloadOffset(payload_ty, target);
+ const payload_offset = errUnionPayloadOffset(payload_ty, mod);
return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, "");
}
@@ -6886,42 +6707,41 @@ pub const FuncGen = struct {
}
fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const target = self.dg.module.getTarget();
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
//const struct_ty = try self.resolveInst(ty_pl.ty);
const struct_ty = self.air.getRefType(ty_pl.ty);
const field_index = ty_pl.payload;
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+ const mod = self.dg.module;
+ const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
- const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, "");
- const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
+ const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, "");
+ const field_ptr_ty = try mod.ptrType(.{
+ .child = llvm_field.ty.toIntern(),
+ .flags = .{
+ .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
+ },
+ });
return self.load(field_ptr, field_ptr_ty);
}
fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const payload_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(8).constInt(1, .False);
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.air.typeOfIndex(inst);
- if (optional_ty.optionalReprIsPayload()) {
+ const optional_ty = self.typeOfIndex(inst);
+ if (optional_ty.optionalReprIsPayload(mod)) {
return operand;
}
const llvm_optional_ty = try self.dg.lowerType(optional_ty);
- if (isByRef(optional_ty)) {
- const target = self.dg.module.getTarget();
- const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(target));
+ if (isByRef(optional_ty, mod)) {
+ const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, "");
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = payload_ty,
- };
- const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, "");
_ = self.builder.buildStore(non_null_bit, non_null_ptr);
@@ -6932,30 +6752,26 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const err_un_ty = self.air.typeOfIndex(inst);
+ const err_un_ty = self.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
- const payload_ty = self.air.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = self.typeOf(ty_op.operand);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return operand;
}
const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull();
const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);
- const target = self.dg.module.getTarget();
- const payload_offset = errUnionPayloadOffset(payload_ty, target);
- const error_offset = errUnionErrorOffset(payload_ty, target);
- if (isByRef(err_un_ty)) {
- const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target));
+ const payload_offset = errUnionPayloadOffset(payload_ty, mod);
+ const error_offset = errUnionErrorOffset(payload_ty, mod);
+ if (isByRef(err_un_ty, mod)) {
+ const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
const store_inst = self.builder.buildStore(ok_err_code, err_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+ store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = payload_ty,
- };
- const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
return result_ptr;
}
@@ -6965,29 +6781,25 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const err_un_ty = self.air.typeOfIndex(inst);
- const payload_ty = err_un_ty.errorUnionPayload();
+ const err_un_ty = self.typeOfIndex(inst);
+ const payload_ty = err_un_ty.errorUnionPayload(mod);
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return operand;
}
const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);
- const target = self.dg.module.getTarget();
- const payload_offset = errUnionPayloadOffset(payload_ty, target);
- const error_offset = errUnionErrorOffset(payload_ty, target);
- if (isByRef(err_un_ty)) {
- const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target));
+ const payload_offset = errUnionPayloadOffset(payload_ty, mod);
+ const error_offset = errUnionErrorOffset(payload_ty, mod);
+ if (isByRef(err_un_ty, mod)) {
+ const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
const store_inst = self.builder.buildStore(operand, err_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+ store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = payload_ty,
- };
- const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
// TODO store undef to payload_ptr
_ = payload_ptr;
_ = payload_ptr_ty;
@@ -7022,20 +6834,20 @@ pub const FuncGen = struct {
}
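// --- Illustrative sketch, not part of the patch -----------------------------
// The two wrap instructions above are what coercions into an error union lower
// to at the language level: returning a payload stores it next to a zero error
// code, while returning an error stores the code and leaves the payload slot
// undefined (see the TODO above).
const std = @import("std");

fn givePayload(x: u32) anyerror!u32 {
    return x; // payload coerced into the error union
}

fn giveError() anyerror!u32 {
    return error.OutOfMemory; // error coerced into the error union
}

test "error union wrapping" {
    try std.testing.expectEqual(@as(u32, 7), try givePayload(7));
    try std.testing.expectError(error.OutOfMemory, giveError());
}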
fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const data = self.air.instructions.items(.data)[inst].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
const vector_ptr = try self.resolveInst(data.vector_ptr);
- const vector_ptr_ty = self.air.typeOf(data.vector_ptr);
+ const vector_ptr_ty = self.typeOf(data.vector_ptr);
const index = try self.resolveInst(extra.lhs);
const operand = try self.resolveInst(extra.rhs);
const loaded_vector = blk: {
- const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.elemType2());
+ const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod));
const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, "");
- const target = self.dg.module.getTarget();
- load_inst.setAlignment(vector_ptr_ty.ptrAlignment(target));
- load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr()));
+ load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod));
+ load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod)));
break :blk load_inst;
};
const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, "");
@@ -7044,24 +6856,26 @@ pub const FuncGen = struct {
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const scalar_ty = self.air.typeOfIndex(inst).scalarType();
+ const scalar_ty = self.typeOfIndex(inst).scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, "");
return self.builder.buildUMin(lhs, rhs, "");
}
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const scalar_ty = self.air.typeOfIndex(inst).scalarType();
+ const scalar_ty = self.typeOfIndex(inst).scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, "");
return self.builder.buildUMax(lhs, rhs, "");
}
@@ -7070,7 +6884,7 @@ pub const FuncGen = struct {
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const len = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
+ const inst_ty = self.typeOfIndex(inst);
const llvm_slice_ty = try self.dg.lowerType(inst_ty);
// In case of slicing a global, the result type looks something like `{ i8*, i64 }`
@@ -7082,14 +6896,15 @@ pub const FuncGen = struct {
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, "");
return self.builder.buildNUWAdd(lhs, rhs, "");
}
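// --- Illustrative sketch, not part of the patch -----------------------------
// airAdd above maps Zig's plain `+` to nsw/nuw adds: plain integer addition
// asserts no overflow for signed and unsigned operands alike, so the backend
// may tag the LLVM add accordingly. Wrapping arithmetic is a separate operator
// (`+%`) and is not given those flags.
const std = @import("std");

test "plain add asserts no overflow; +% wraps" {
    var a: u8 = 250;
    const b: u8 = 10;
    try std.testing.expectEqual(@as(u8, 4), a +% b); // wraps: 260 mod 256
    a = 100;
    try std.testing.expectEqual(@as(u8, 110), a + b); // in range, no wrap needed
}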
@@ -7104,14 +6919,15 @@ pub const FuncGen = struct {
}
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
- if (scalar_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, "");
return self.builder.buildUAddSat(lhs, rhs, "");
}
@@ -7119,14 +6935,15 @@ pub const FuncGen = struct {
fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, "");
return self.builder.buildNUWSub(lhs, rhs, "");
}
@@ -7141,28 +6958,30 @@ pub const FuncGen = struct {
}
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
- if (scalar_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, "");
return self.builder.buildUSubSat(lhs, rhs, "");
}
fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, "");
return self.builder.buildNUWMul(lhs, rhs, "");
}
@@ -7177,14 +6996,15 @@ pub const FuncGen = struct {
}
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
- if (scalar_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, "");
return self.builder.buildUMulFixSat(lhs, rhs, "");
}
@@ -7194,7 +7014,7 @@ pub const FuncGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
+ const inst_ty = self.typeOfIndex(inst);
return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
}
@@ -7202,39 +7022,40 @@ pub const FuncGen = struct {
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) {
const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
return self.buildFloatOp(.trunc, inst_ty, 1, .{result});
}
- if (scalar_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, "");
return self.builder.buildUDiv(lhs, rhs, "");
}
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) {
const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
return self.buildFloatOp(.floor, inst_ty, 1, .{result});
}
- if (scalar_ty.isSignedInt()) {
- const target = self.dg.module.getTarget();
+ if (scalar_ty.isSignedInt(mod)) {
const inst_llvm_ty = try self.dg.lowerType(inst_ty);
- const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
- const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
- const vec_len = inst_ty.vectorLen();
+ const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+ const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
+ const vec_len = inst_ty.vectorLen(mod);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -7259,40 +7080,43 @@ pub const FuncGen = struct {
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, "");
return self.builder.buildExactUDiv(lhs, rhs, "");
}
fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const inst_ty = self.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, "");
return self.builder.buildURem(lhs, rhs, "");
}
fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const inst_ty = self.air.typeOfIndex(inst);
+ const inst_ty = self.typeOfIndex(inst);
const inst_llvm_ty = try self.dg.lowerType(inst_ty);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) {
const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
@@ -7302,11 +7126,10 @@ pub const FuncGen = struct {
const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero });
return self.builder.buildSelect(ltz, c, a, "");
}
- if (scalar_ty.isSignedInt()) {
- const target = self.dg.module.getTarget();
- const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
- const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
- const vec_len = inst_ty.vectorLen();
+ if (scalar_ty.isSignedInt(mod)) {
+ const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+ const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
+ const vec_len = inst_ty.vectorLen(mod);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -7329,13 +7152,14 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
- switch (ptr_ty.ptrSize()) {
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod));
+ switch (ptr_ty.ptrSize(mod)) {
.One => {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset };
@@ -7354,14 +7178,15 @@ pub const FuncGen = struct {
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = self.builder.buildNeg(offset, "");
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
- switch (ptr_ty.ptrSize()) {
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod));
+ switch (ptr_ty.ptrSize(mod)) {
.One => {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
const indices: [2]*llvm.Value = .{
@@ -7387,36 +7212,33 @@ pub const FuncGen = struct {
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
- const lhs_ty = self.air.typeOf(extra.lhs);
- const scalar_ty = lhs_ty.scalarType();
- const dest_ty = self.air.typeOfIndex(inst);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const scalar_ty = lhs_ty.scalarType(mod);
+ const dest_ty = self.typeOfIndex(inst);
- const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic;
+ const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
const llvm_lhs_ty = try self.dg.lowerType(lhs_ty);
const llvm_dest_ty = try self.dg.lowerType(dest_ty);
- const tg = self.dg.module.getTarget();
-
const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
const result = self.builder.buildExtractValue(result_struct, 0, "");
const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
- var ty_buf: Type.Payload.Pointer = undefined;
- const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
- const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+ const result_index = llvmField(dest_ty, 0, mod).?.index;
+ const overflow_index = llvmField(dest_ty, 1, mod).?.index;
- if (isByRef(dest_ty)) {
- const target = self.dg.module.getTarget();
- const result_alignment = dest_ty.abiAlignment(target);
+ if (isByRef(dest_ty, mod)) {
+ const result_alignment = dest_ty.abiAlignment(mod);
const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
{
const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7487,8 +7309,9 @@ pub const FuncGen = struct {
ty: Type,
params: [2]*llvm.Value,
) !*llvm.Value {
+ const mod = self.dg.module;
const target = self.dg.module.getTarget();
- const scalar_ty = ty.scalarType();
+ const scalar_ty = ty.scalarType(mod);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
if (intrinsicsAllowed(scalar_ty, target)) {
@@ -7532,8 +7355,8 @@ pub const FuncGen = struct {
.gte => .SGE,
};
- if (ty.zigTypeTag() == .Vector) {
- const vec_len = ty.vectorLen();
+ if (ty.zigTypeTag(mod) == .Vector) {
+ const vec_len = ty.vectorLen(mod);
const vector_result_ty = llvm_i32.vectorType(vec_len);
var result = vector_result_ty.getUndef();
@@ -7588,8 +7411,9 @@ pub const FuncGen = struct {
comptime params_len: usize,
params: [params_len]*llvm.Value,
) !*llvm.Value {
- const target = self.dg.module.getTarget();
- const scalar_ty = ty.scalarType();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
+ const scalar_ty = ty.scalarType(mod);
const llvm_ty = try self.dg.lowerType(ty);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -7616,9 +7440,9 @@ pub const FuncGen = struct {
const one = int_llvm_ty.constInt(1, .False);
const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
const sign_mask = one.constShl(shift_amt);
- const result = if (ty.zigTypeTag() == .Vector) blk: {
- const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, "");
- const cast_ty = int_llvm_ty.vectorType(ty.vectorLen());
+ const result = if (ty.zigTypeTag(mod) == .Vector) blk: {
+ const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, "");
+ const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod));
const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, "");
} else blk: {
@@ -7663,9 +7487,9 @@ pub const FuncGen = struct {
.libc => |fn_name| b: {
const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty };
const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result = llvm_ty.getUndef();
- return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen());
+ return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod));
}
break :b libc_fn;
@@ -7682,47 +7506,44 @@ pub const FuncGen = struct {
const mulend2 = try self.resolveInst(extra.rhs);
const addend = try self.resolveInst(pl_op.operand);
- const ty = self.air.typeOfIndex(inst);
+ const ty = self.typeOfIndex(inst);
return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend });
}
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
- const lhs_ty = self.air.typeOf(extra.lhs);
- const rhs_ty = self.air.typeOf(extra.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
- const dest_ty = self.air.typeOfIndex(inst);
+ const dest_ty = self.typeOfIndex(inst);
const llvm_dest_ty = try self.dg.lowerType(dest_ty);
- const tg = self.dg.module.getTarget();
-
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
const result = self.builder.buildShl(lhs, casted_rhs, "");
- const reconstructed = if (lhs_scalar_ty.isSignedInt())
+ const reconstructed = if (lhs_scalar_ty.isSignedInt(mod))
self.builder.buildAShr(result, casted_rhs, "")
else
self.builder.buildLShr(result, casted_rhs, "");
const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
- var ty_buf: Type.Payload.Pointer = undefined;
- const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
- const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+ const result_index = llvmField(dest_ty, 0, mod).?.index;
+ const overflow_index = llvmField(dest_ty, 1, mod).?.index;
- if (isByRef(dest_ty)) {
- const target = self.dg.module.getTarget();
- const result_alignment = dest_ty.abiAlignment(target);
+ if (isByRef(dest_ty, mod)) {
+ const result_alignment = dest_ty.abiAlignment(mod);
const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
{
const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7764,40 +7585,38 @@ pub const FuncGen = struct {
}
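The overflow check in airShlWithOverflow above is: shift left, shift back with the matching right shift (arithmetic for signed, logical for unsigned), and compare against the original operand. A standalone sketch of the same idea on plain integers; the helper and test are illustrative only:

const std = @import("std");

fn shlOverflows(comptime T: type, lhs: T, rhs: std.math.Log2Int(T)) bool {
    const shifted = lhs << rhs;
    // `>>` is an arithmetic shift for signed T and a logical shift otherwise,
    // mirroring the AShr/LShr choice above.
    const reconstructed = shifted >> rhs;
    return reconstructed != lhs;
}

test "shl overflow detection" {
    try std.testing.expect(!shlOverflows(u8, 3, 2)); // 3 << 2 == 12 still fits
    try std.testing.expect(shlOverflows(u8, 0xF0, 2)); // high bits are lost
}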
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
-
- const tg = self.dg.module.getTarget();
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
- if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, "");
+ if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, "");
return self.builder.buildNUWShl(lhs, casted_rhs, "");
}
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_type = self.air.typeOf(bin_op.lhs);
- const rhs_type = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_type.scalarType();
- const rhs_scalar_ty = rhs_type.scalarType();
+ const lhs_type = self.typeOf(bin_op.lhs);
+ const rhs_type = self.typeOf(bin_op.rhs);
+ const lhs_scalar_ty = lhs_type.scalarType(mod);
+ const rhs_scalar_ty = rhs_type.scalarType(mod);
- const tg = self.dg.module.getTarget();
-
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "")
else
rhs;
@@ -7805,24 +7624,24 @@ pub const FuncGen = struct {
}
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
- const tg = self.dg.module.getTarget();
- const lhs_bits = lhs_scalar_ty.bitSize(tg);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
+ const lhs_bits = lhs_scalar_ty.bitSize(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_bits)
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits)
self.builder.buildZExt(rhs, lhs.typeOf(), "")
else
rhs;
- const result = if (lhs_scalar_ty.isSignedInt())
+ const result = if (lhs_scalar_ty.isSignedInt(mod))
self.builder.buildSShlSat(lhs, casted_rhs, "")
else
self.builder.buildUShlSat(lhs, casted_rhs, "");
@@ -7835,8 +7654,8 @@ pub const FuncGen = struct {
const lhs_scalar_llvm_ty = try self.dg.lowerType(lhs_scalar_ty);
const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False);
const lhs_max = lhs_scalar_llvm_ty.constAllOnes();
- if (rhs_ty.zigTypeTag() == .Vector) {
- const vec_len = rhs_ty.vectorLen();
+ if (rhs_ty.zigTypeTag(mod) == .Vector) {
+ const vec_len = rhs_ty.vectorLen(mod);
const bits_vec = self.builder.buildVectorSplat(vec_len, bits, "");
const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, "");
const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, "");
@@ -7848,23 +7667,22 @@ pub const FuncGen = struct {
}
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
-
- const tg = self.dg.module.getTarget();
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
- const is_signed_int = lhs_scalar_ty.isSignedInt();
+ const is_signed_int = lhs_scalar_ty.isSignedInt(mod);
if (is_exact) {
if (is_signed_int) {
@@ -7882,14 +7700,14 @@ pub const FuncGen = struct {
}
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const dest_ty = self.air.typeOfIndex(inst);
- const dest_info = dest_ty.intInfo(target);
+ const dest_ty = self.typeOfIndex(inst);
+ const dest_info = dest_ty.intInfo(mod);
const dest_llvm_ty = try self.dg.lowerType(dest_ty);
const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const operand_info = operand_ty.intInfo(target);
+ const operand_ty = self.typeOf(ty_op.operand);
+ const operand_info = operand_ty.intInfo(mod);
if (operand_info.bits < dest_info.bits) {
switch (operand_info.signedness) {
@@ -7906,16 +7724,17 @@ pub const FuncGen = struct {
fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+ const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
return self.builder.buildTrunc(operand, dest_llvm_ty, "");
}
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const dest_ty = self.air.typeOfIndex(inst);
- const target = self.dg.module.getTarget();
+ const operand_ty = self.typeOf(ty_op.operand);
+ const dest_ty = self.typeOfIndex(inst);
+ const target = mod.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
@@ -7940,11 +7759,12 @@ pub const FuncGen = struct {
}
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const dest_ty = self.air.typeOfIndex(inst);
- const target = self.dg.module.getTarget();
+ const operand_ty = self.typeOf(ty_op.operand);
+ const dest_ty = self.typeOfIndex(inst);
+ const target = mod.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
@@ -7971,25 +7791,25 @@ pub const FuncGen = struct {
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const ptr_ty = self.air.typeOf(un_op);
+ const ptr_ty = self.typeOf(un_op);
const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty);
- const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+ const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, "");
}
fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
- const inst_ty = self.air.typeOfIndex(inst);
+ const operand_ty = self.typeOf(ty_op.operand);
+ const inst_ty = self.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
return self.bitCast(operand, operand_ty, inst_ty);
}
fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value {
- const operand_is_ref = isByRef(operand_ty);
- const result_is_ref = isByRef(inst_ty);
+ const mod = self.dg.module;
+ const operand_is_ref = isByRef(operand_ty, mod);
+ const result_is_ref = isByRef(inst_ty, mod);
const llvm_dest_ty = try self.dg.lowerType(inst_ty);
- const target = self.dg.module.getTarget();
if (operand_is_ref and result_is_ref) {
// They are both pointers, so just return the same opaque pointer :)
@@ -8002,27 +7822,27 @@ pub const FuncGen = struct {
return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) {
+ if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
- const elem_ty = operand_ty.childType();
+ if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
+ const elem_ty = operand_ty.childType(mod);
if (!result_is_ref) {
return self.dg.todo("implement bitcast vector to non-ref array", .{});
}
const array_ptr = self.buildAlloca(llvm_dest_ty, null);
- const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+ const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
if (bitcast_ok) {
const llvm_store = self.builder.buildStore(operand, array_ptr);
- llvm_store.setAlignment(inst_ty.abiAlignment(target));
+ llvm_store.setAlignment(inst_ty.abiAlignment(mod));
} else {
// If the element type's bit size does not match its ABI size in bits,
// a simple bitcast will not work, and we fall back to extractelement.
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_u32 = self.context.intType(32);
const zero = llvm_usize.constNull();
- const vector_len = operand_ty.arrayLen();
+ const vector_len = operand_ty.arrayLen(mod);
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
const index_usize = llvm_usize.constInt(i, .False);
@@ -8034,19 +7854,19 @@ pub const FuncGen = struct {
}
}
return array_ptr;
- } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
- const elem_ty = operand_ty.childType();
+ } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
+ const elem_ty = operand_ty.childType(mod);
const llvm_vector_ty = try self.dg.lowerType(inst_ty);
if (!operand_is_ref) {
return self.dg.todo("implement bitcast non-ref array to vector", .{});
}
- const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+ const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
if (bitcast_ok) {
const vector = self.builder.buildLoad(llvm_vector_ty, operand, "");
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
- vector.setAlignment(elem_ty.abiAlignment(target));
+ vector.setAlignment(elem_ty.abiAlignment(mod));
return vector;
} else {
// If the element type's bit size does not match its ABI size in bits,
@@ -8056,7 +7876,7 @@ pub const FuncGen = struct {
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_u32 = self.context.intType(32);
const zero = llvm_usize.constNull();
- const vector_len = operand_ty.arrayLen();
+ const vector_len = operand_ty.arrayLen(mod);
var vector = llvm_vector_ty.getUndef();
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
@@ -8074,12 +7894,12 @@ pub const FuncGen = struct {
if (operand_is_ref) {
const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, "");
- load_inst.setAlignment(operand_ty.abiAlignment(target));
+ load_inst.setAlignment(operand_ty.abiAlignment(mod));
return load_inst;
}
if (result_is_ref) {
- const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+ const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
const store_inst = self.builder.buildStore(operand, result_ptr);
store_inst.setAlignment(alignment);
@@ -8090,7 +7910,7 @@ pub const FuncGen = struct {
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values.
// Therefore, we store operand to alloca, then load for result.
- const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+ const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
const store_inst = self.builder.buildStore(operand, result_ptr);
store_inst.setAlignment(alignment);
@@ -8109,22 +7929,23 @@ pub const FuncGen = struct {
}
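The vector<->array fast path in bitCast hinges on the element's bit size exactly filling its ABI size; otherwise elements are moved one at a time with extractelement/insertelement. A quick check of the two cases, as an illustrative test only:

const std = @import("std");

test "element sizes deciding the bitcast fast path" {
    // u32 occupies exactly its ABI size, so a whole-vector store/load works.
    try std.testing.expect(@bitSizeOf(u32) == @sizeOf(u32) * 8);
    // u12 is padded to 2 bytes in memory, so the element-wise path is taken.
    try std.testing.expect(@bitSizeOf(u12) != @sizeOf(u12) * 8);
}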
fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
- const inst_ty = self.air.typeOfIndex(inst);
+ const inst_ty = self.typeOfIndex(inst);
if (self.dg.object.di_builder) |dib| {
if (needDbgVarWorkaround(self.dg)) {
return arg_val;
}
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
- const func = self.dg.decl.getFunction().?;
- const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
+ const func = self.dg.decl.getOwnedFunction(mod).?;
+ const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const di_local_var = dib.createParameterVariable(
self.di_scope.?,
- func.getParamName(self.dg.module, src_index).ptr, // TODO test 0 bit args
+ func.getParamName(mod, src_index).ptr, // TODO test 0 bit args
self.di_file.?,
lbrace_line,
try self.dg.object.lowerDebugType(inst_ty, .full),
@@ -8135,10 +7956,10 @@ pub const FuncGen = struct {
const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null);
const insert_block = self.builder.getInsertBlock();
- if (isByRef(inst_ty)) {
+ if (isByRef(inst_ty, mod)) {
_ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block);
} else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
- const alignment = inst_ty.abiAlignment(self.dg.module.getTarget());
+ const alignment = inst_ty.abiAlignment(mod);
const alloca = self.buildAlloca(arg_val.typeOf(), alignment);
const store_inst = self.builder.buildStore(arg_val, alloca);
store_inst.setAlignment(alignment);
@@ -8152,24 +7973,24 @@ pub const FuncGen = struct {
}
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const ptr_ty = self.air.typeOfIndex(inst);
- const pointee_type = ptr_ty.childType();
- if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+ const mod = self.dg.module;
+ const ptr_ty = self.typeOfIndex(inst);
+ const pointee_type = ptr_ty.childType(mod);
+ if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
- const target = self.dg.module.getTarget();
- const alignment = ptr_ty.ptrAlignment(target);
+ const alignment = ptr_ty.ptrAlignment(mod);
return self.buildAlloca(pointee_llvm_ty, alignment);
}
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const ptr_ty = self.air.typeOfIndex(inst);
- const ret_ty = ptr_ty.childType();
- if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+ const mod = self.dg.module;
+ const ptr_ty = self.typeOfIndex(inst);
+ const ret_ty = ptr_ty.childType(mod);
+ if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
if (self.ret_ptr) |ret_ptr| return ret_ptr;
const ret_llvm_ty = try self.dg.lowerType(ret_ty);
- const target = self.dg.module.getTarget();
- return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(target));
+ return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod));
}
/// Use this instead of builder.buildAlloca, because this function makes sure to
@@ -8179,12 +8000,13 @@ pub const FuncGen = struct {
}
fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_ptr = try self.resolveInst(bin_op.lhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const operand_ty = ptr_ty.childType();
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const operand_ty = ptr_ty.childType(mod);
- const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+ const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
@@ -8194,13 +8016,12 @@ pub const FuncGen = struct {
u8_llvm_ty.constInt(0xaa, .False)
else
u8_llvm_ty.getUndef();
- const target = self.dg.module.getTarget();
- const operand_size = operand_ty.abiSize(target);
+ const operand_size = operand_ty.abiSize(mod);
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const len = usize_llvm_ty.constInt(operand_size, .False);
- const dest_ptr_align = ptr_ty.ptrAlignment(target);
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
- if (safety and self.dg.module.comp.bin_file.options.valgrind) {
+ const dest_ptr_align = ptr_ty.ptrAlignment(mod);
+ _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod));
+ if (safety and mod.comp.bin_file.options.valgrind) {
self.valgrindMarkUndef(dest_ptr, len);
}
return null;
@@ -8218,8 +8039,10 @@ pub const FuncGen = struct {
///
/// The first instruction of `body_tail` is the one whose copy we want to elide.
fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
+ const mod = fg.dg.module;
+ const ip = &mod.intern_pool;
for (body_tail[1..]) |body_inst| {
- switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0])) {
+ switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
.none => continue,
.write, .noret, .complex => return false,
.tomb => return true,
@@ -8231,14 +8054,15 @@ pub const FuncGen = struct {
}
fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = fg.dg.module;
const inst = body_tail[0];
const ty_op = fg.air.instructions.items(.data)[inst].ty_op;
- const ptr_ty = fg.air.typeOf(ty_op.operand);
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_ty = fg.typeOf(ty_op.operand);
+ const ptr_info = ptr_ty.ptrInfo(mod);
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
- if (!isByRef(ptr_info.pointee_type)) break :elide;
+ if (!isByRef(ptr_info.pointee_type, mod)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
}
@@ -8262,8 +8086,9 @@ pub const FuncGen = struct {
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
_ = inst;
+ const mod = self.dg.module;
const llvm_usize = try self.dg.lowerType(Type.usize);
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
if (!target_util.supportsReturnAddress(target)) {
// https://github.com/ziglang/zig/issues/11946
return llvm_usize.constNull();
@@ -8302,16 +8127,17 @@ pub const FuncGen = struct {
}
fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr = try self.resolveInst(extra.ptr);
var expected_value = try self.resolveInst(extra.expected_value);
var new_value = try self.resolveInst(extra.new_value);
- const operand_ty = self.air.typeOf(extra.ptr).elemType();
+ const operand_ty = self.typeOf(extra.ptr).childType(mod);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening and truncating
- if (operand_ty.isSignedInt()) {
+ if (operand_ty.isSignedInt(mod)) {
expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
new_value = self.builder.buildSExt(new_value, abi_ty, "");
} else {
@@ -8329,7 +8155,7 @@ pub const FuncGen = struct {
);
result.setWeak(llvm.Bool.fromBool(is_weak));
- const optional_ty = self.air.typeOfIndex(inst);
+ const optional_ty = self.typeOfIndex(inst);
var payload = self.builder.buildExtractValue(result, 0, "");
if (opt_abi_ty != null) {
@@ -8337,7 +8163,7 @@ pub const FuncGen = struct {
}
const success_bit = self.builder.buildExtractValue(result, 1, "");
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
}
@@ -8348,13 +8174,14 @@ pub const FuncGen = struct {
}
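What airCmpxchg lowers is the result shape of the cmpxchg builtins: an optional that is null when the swap succeeded and otherwise carries the value observed in memory, built from the success bit extracted above (or as a null payload via the select when the optional's representation is the payload itself). At the language level:

const std = @import("std");

test "cmpxchg result shape" {
    var x: u32 = 5;
    // Expected value matches: the swap happens and the result is null.
    try std.testing.expect(@cmpxchgStrong(u32, &x, 5, 7, .SeqCst, .SeqCst) == null);
    // Expected value no longer matches: no swap, the current value comes back.
    try std.testing.expectEqual(@as(?u32, 7), @cmpxchgStrong(u32, &x, 5, 9, .SeqCst, .SeqCst));
}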
fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const ptr = try self.resolveInst(pl_op.operand);
- const ptr_ty = self.air.typeOf(pl_op.operand);
- const operand_ty = ptr_ty.elemType();
+ const ptr_ty = self.typeOf(pl_op.operand);
+ const operand_ty = ptr_ty.childType(mod);
const operand = try self.resolveInst(extra.operand);
- const is_signed_int = operand_ty.isSignedInt();
+ const is_signed_int = operand_ty.isSignedInt(mod);
const is_float = operand_ty.isRuntimeFloat();
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
const ordering = toLlvmAtomicOrdering(extra.ordering());
@@ -8403,17 +8230,17 @@ pub const FuncGen = struct {
}
fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
- const ptr_ty = self.air.typeOf(atomic_load.ptr);
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_ty = self.typeOf(atomic_load.ptr);
+ const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = ptr_info.pointee_type;
- if (!elem_ty.hasRuntimeBitsIgnoreComptime())
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
return null;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const opt_abi_llvm_ty = self.dg.getAtomicAbiType(elem_ty, false);
- const target = self.dg.module.getTarget();
- const ptr_alignment = ptr_info.alignment(target);
+ const ptr_alignment = ptr_info.alignment(mod);
const ptr_volatile = llvm.Bool.fromBool(ptr_info.@"volatile");
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -8437,17 +8264,18 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
ordering: llvm.AtomicOrdering,
) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const operand_ty = ptr_ty.childType();
- if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const operand_ty = ptr_ty.childType(mod);
+ if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening
- if (operand_ty.isSignedInt()) {
+ if (operand_ty.isSignedInt(mod)) {
element = self.builder.buildSExt(element, abi_ty, "");
} else {
element = self.builder.buildZExt(element, abi_ty, "");
@@ -8458,19 +8286,19 @@ pub const FuncGen = struct {
}
fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const elem_ty = self.air.typeOf(bin_op.rhs);
- const module = self.dg.module;
- const target = module.getTarget();
- const dest_ptr_align = ptr_ty.ptrAlignment(target);
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const elem_ty = self.typeOf(bin_op.rhs);
+ const target = mod.getTarget();
+ const dest_ptr_align = ptr_ty.ptrAlignment(mod);
const u8_llvm_ty = self.context.intType(8);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
- const is_volatile = ptr_ty.isVolatilePtr();
+ const is_volatile = ptr_ty.isVolatilePtr(mod);
- if (self.air.value(bin_op.rhs)) |elem_val| {
- if (elem_val.isUndefDeep()) {
+ if (try self.air.value(bin_op.rhs, mod)) |elem_val| {
+ if (elem_val.isUndefDeep(mod)) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
@@ -8481,7 +8309,7 @@ pub const FuncGen = struct {
const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
_ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
- if (safety and module.comp.bin_file.options.valgrind) {
+ if (safety and mod.comp.bin_file.options.valgrind) {
self.valgrindMarkUndef(dest_ptr, len);
}
return null;
@@ -8491,8 +8319,7 @@ pub const FuncGen = struct {
// repeating byte pattern, for example, `@as(u64, 0)` has a
// repeating byte pattern of 0 bytes. In such a case, the memset
// intrinsic can be used.
- var value_buffer: Value.Payload.U64 = undefined;
- if (try elem_val.hasRepeatedByteRepr(elem_ty, module, &value_buffer)) |byte_val| {
+ if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| {
const fill_byte = try self.resolveValue(.{
.ty = Type.u8,
.val = byte_val,
@@ -8504,7 +8331,7 @@ pub const FuncGen = struct {
}
const value = try self.resolveInst(bin_op.rhs);
- const elem_abi_size = elem_ty.abiSize(target);
+ const elem_abi_size = elem_ty.abiSize(mod);
if (elem_abi_size == 1) {
// In this case we can take advantage of LLVM's intrinsic.
@@ -8535,10 +8362,10 @@ pub const FuncGen = struct {
const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody");
const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd");
- const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
- const len = switch (ptr_ty.ptrSize()) {
+ const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
+ const len = switch (ptr_ty.ptrSize(mod)) {
.Slice => self.builder.buildExtractValue(dest_slice, 1, ""),
- .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False),
+ .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False),
.Many, .C => unreachable,
};
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -8552,9 +8379,9 @@ pub const FuncGen = struct {
_ = self.builder.buildCondBr(end, body_block, end_block);
self.builder.positionBuilderAtEnd(body_block);
- const elem_abi_alignment = elem_ty.abiAlignment(target);
+ const elem_abi_alignment = elem_ty.abiAlignment(mod);
const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align);
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
_ = self.builder.buildMemCpy(
it_ptr,
it_ptr_alignment,
@@ -8584,19 +8411,19 @@ pub const FuncGen = struct {
fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
- const dest_ptr_ty = self.air.typeOf(bin_op.lhs);
+ const dest_ptr_ty = self.typeOf(bin_op.lhs);
const src_slice = try self.resolveInst(bin_op.rhs);
- const src_ptr_ty = self.air.typeOf(bin_op.rhs);
+ const src_ptr_ty = self.typeOf(bin_op.rhs);
const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
- const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod);
_ = self.builder.buildMemCpy(
dest_ptr,
- dest_ptr_ty.ptrAlignment(target),
+ dest_ptr_ty.ptrAlignment(mod),
src_ptr,
- src_ptr_ty.ptrAlignment(target),
+ src_ptr_ty.ptrAlignment(mod),
len,
is_volatile,
);
@@ -8604,10 +8431,10 @@ pub const FuncGen = struct {
}
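The airMemset hunks above only fall back to the byte-level memset intrinsic when the element value has a repeating byte representation (the hasRepeatedByteRepr check). A plain-Zig sketch of that property; the helper name and tests are made up for illustration:

const std = @import("std");

fn repeatedByte(comptime T: type, value: T) ?u8 {
    const bytes = std.mem.asBytes(&value);
    const first = bytes[0];
    for (bytes[1..]) |b| {
        if (b != first) return null;
    }
    return first;
}

test "repeating byte pattern" {
    try std.testing.expectEqual(@as(?u8, 0), repeatedByte(u64, 0));
    try std.testing.expectEqual(@as(?u8, 0xAA), repeatedByte(u32, 0xAAAAAAAA));
    try std.testing.expectEqual(@as(?u8, null), repeatedByte(u32, 0x01020304));
}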
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const un_ty = self.air.typeOf(bin_op.lhs).childType();
- const target = self.dg.module.getTarget();
- const layout = un_ty.unionGetLayout(target);
+ const un_ty = self.typeOf(bin_op.lhs).childType(mod);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return null;
const union_ptr = try self.resolveInst(bin_op.lhs);
const new_tag = try self.resolveInst(bin_op.rhs);
@@ -8625,13 +8452,13 @@ pub const FuncGen = struct {
}
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const un_ty = self.air.typeOf(ty_op.operand);
- const target = self.dg.module.getTarget();
- const layout = un_ty.unionGetLayout(target);
+ const un_ty = self.typeOf(ty_op.operand);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return null;
const union_handle = try self.resolveInst(ty_op.operand);
- if (isByRef(un_ty)) {
+ if (isByRef(un_ty, mod)) {
const llvm_un_ty = try self.dg.lowerType(un_ty);
if (layout.payload_size == 0) {
return self.builder.buildLoad(llvm_un_ty, union_handle, "");
@@ -8651,7 +8478,7 @@ pub const FuncGen = struct {
fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const operand_ty = self.air.typeOf(un_op);
+ const operand_ty = self.typeOf(un_op);
return self.buildFloatOp(op, operand_ty, 1, .{operand});
}
@@ -8661,14 +8488,15 @@ pub const FuncGen = struct {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const operand_ty = self.air.typeOf(un_op);
+ const operand_ty = self.typeOf(un_op);
return self.buildFloatOp(.neg, operand_ty, 1, .{operand});
}
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
const llvm_i1 = self.context.intType(1);
@@ -8677,12 +8505,11 @@ pub const FuncGen = struct {
const params = [_]*llvm.Value{ operand, llvm_i1.constNull() };
const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
- const result_ty = self.air.typeOfIndex(inst);
+ const result_ty = self.typeOfIndex(inst);
const result_llvm_ty = try self.dg.lowerType(result_ty);
- const target = self.dg.module.getTarget();
- const bits = operand_ty.intInfo(target).bits;
- const result_bits = result_ty.intInfo(target).bits;
+ const bits = operand_ty.intInfo(mod).bits;
+ const result_bits = result_ty.intInfo(mod).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
} else if (bits < result_bits) {
@@ -8693,8 +8520,9 @@ pub const FuncGen = struct {
}
fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
const params = [_]*llvm.Value{operand};
@@ -8702,12 +8530,11 @@ pub const FuncGen = struct {
const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
- const result_ty = self.air.typeOfIndex(inst);
+ const result_ty = self.typeOfIndex(inst);
const result_llvm_ty = try self.dg.lowerType(result_ty);
- const target = self.dg.module.getTarget();
- const bits = operand_ty.intInfo(target).bits;
- const result_bits = result_ty.intInfo(target).bits;
+ const bits = operand_ty.intInfo(mod).bits;
+ const result_bits = result_ty.intInfo(mod).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
} else if (bits < result_bits) {
@@ -8718,10 +8545,10 @@ pub const FuncGen = struct {
}
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
- var bits = operand_ty.intInfo(target).bits;
+ const operand_ty = self.typeOf(ty_op.operand);
+ var bits = operand_ty.intInfo(mod).bits;
assert(bits % 8 == 0);
var operand = try self.resolveInst(ty_op.operand);
@@ -8731,8 +8558,8 @@ pub const FuncGen = struct {
// If the operand is not an even number of bytes wide, we need a zero-extend + shift-left by 1 byte;
// the truncated result at the end will then be the correct bswap.
const scalar_llvm_ty = self.context.intType(bits + 8);
- if (operand_ty.zigTypeTag() == .Vector) {
- const vec_len = operand_ty.vectorLen();
+ if (operand_ty.zigTypeTag(mod) == .Vector) {
+ const vec_len = operand_ty.vectorLen(mod);
operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len);
const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -8758,9 +8585,9 @@ pub const FuncGen = struct {
const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
- const result_ty = self.air.typeOfIndex(inst);
+ const result_ty = self.typeOfIndex(inst);
const result_llvm_ty = try self.dg.lowerType(result_ty);
- const result_bits = result_ty.intInfo(target).bits;
+ const result_bits = result_ty.intInfo(mod).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
} else if (bits < result_bits) {
@@ -8771,28 +8598,23 @@ pub const FuncGen = struct {
}
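The widening trick in airByteSwap above handles operands whose byte count is odd (the LLVM bswap intrinsic only accepts an even number of bytes): widen by one byte, shift the payload into the high bytes, swap, then truncate. The same dance expressed with plain Zig integers, using u24 as an example:

const std = @import("std");

fn bswapOddBytes(v: u24) u24 {
    const widened = @as(u32, v) << 8; // bytes: b2 b1 b0 00
    const swapped = @byteSwap(widened); //      00 b0 b1 b2
    return @truncate(u24, swapped);
}

test "u24 byte swap via u32" {
    try std.testing.expectEqual(@as(u24, 0xCCBBAA), bswapOddBytes(0xAABBCC));
    try std.testing.expectEqual(@byteSwap(@as(u24, 0x123456)), bswapOddBytes(0x123456));
}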
fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const error_set_ty = self.air.getRefType(ty_op.ty);
- const names = error_set_ty.errorSetNames();
+ const names = error_set_ty.errorSetNames(mod);
const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid");
const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid");
const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
for (names) |name| {
- const err_int = self.dg.module.global_error_set.get(name).?;
- const this_tag_int_value = int: {
- var tag_val_payload: Value.Payload.U64 = .{
- .base = .{ .tag = .int_u64 },
- .data = err_int,
- };
- break :int try self.dg.lowerValue(.{
- .ty = Type.err_int,
- .val = Value.initPayload(&tag_val_payload.base),
- });
- };
+ const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
+ const this_tag_int_value = try self.dg.lowerValue(.{
+ .ty = Type.err_int,
+ .val = try mod.intValue(Type.err_int, err_int),
+ });
switch_instr.addCase(this_tag_int_value, valid_block);
}
self.builder.positionBuilderAtEnd(valid_block);
@@ -8818,7 +8640,7 @@ pub const FuncGen = struct {
fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const enum_ty = self.air.typeOf(un_op);
+ const enum_ty = self.typeOf(un_op);
const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
const params = [_]*llvm.Value{operand};
@@ -8826,25 +8648,22 @@ pub const FuncGen = struct {
}
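getIsNamedEnumValueFunction (below) emits one predicate per enum: the incoming tag integer is accepted only if it matches a named field, which matters for non-exhaustive enums where other integer values are representable. A source-level sketch with a made-up enum:

const std = @import("std");

const Color = enum(u8) { red = 1, green = 2, _ };

fn isNamedColor(int: u8) bool {
    return switch (int) {
        1, 2 => true,
        else => false,
    };
}

test "named enum value check" {
    try std.testing.expect(isNamedColor(@enumToInt(Color.green)));
    try std.testing.expect(!isNamedColor(7));
}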
fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
- const enum_decl = enum_ty.getOwnerDecl();
+ const mod = self.dg.module;
+ const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
// TODO: detect when the type changes and re-emit this function.
- const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl);
+ const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl);
if (gop.found_existing) return gop.value_ptr.*;
- errdefer assert(self.dg.object.named_enum_map.remove(enum_decl));
+ errdefer assert(self.dg.object.named_enum_map.remove(enum_type.decl));
var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const mod = self.dg.module;
- const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
- defer self.gpa.free(fqn);
- const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});
+ const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
+ const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)});
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
- const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+ const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
const llvm_ret_ty = try self.dg.lowerType(Type.bool);
const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -8867,21 +8686,17 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(entry_block);
self.builder.clearCurrentDebugLocation();
- const fields = enum_ty.enumFields();
const named_block = self.context.appendBasicBlock(fn_val, "Named");
const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
+ const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len));
- for (fields.keys(), 0..) |_, field_index| {
+ for (enum_type.names, 0..) |_, field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
const this_tag_int_value = int: {
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
break :int try self.dg.lowerValue(.{
.ty = enum_ty,
- .val = Value.initPayload(&tag_val_payload.base),
+ .val = try mod.enumValueFieldIndex(enum_ty, field_index),
});
};
switch_instr.addCase(this_tag_int_value, named_block);
@@ -8897,7 +8712,7 @@ pub const FuncGen = struct {
fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const enum_ty = self.air.typeOf(un_op);
+ const enum_ty = self.typeOf(un_op);
const llvm_fn = try self.getEnumTagNameFunction(enum_ty);
const params = [_]*llvm.Value{operand};
@@ -8905,31 +8720,27 @@ pub const FuncGen = struct {
}
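airTagName calls into a per-enum helper (getEnumTagNameFunction, below) that switches on the tag integer and returns a pointer/length pair into interned, null-terminated name strings, trapping unknown values in the BadValue block. In Zig source terms this is what @tagName does; the enum and helper here are made up:

const std = @import("std");

const Mode = enum { read, write, append };

fn modeName(mode: Mode) [:0]const u8 {
    return switch (mode) {
        .read => "read",
        .write => "write",
        .append => "append",
    };
}

test "tag name lookup" {
    try std.testing.expectEqualStrings("write", modeName(.write));
    try std.testing.expectEqualStrings(@tagName(Mode.append), modeName(.append));
}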
fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
- const enum_decl = enum_ty.getOwnerDecl();
+ const mod = self.dg.module;
+ const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
// TODO: detect when the type changes and re-emit this function.
- const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_decl);
+ const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl);
if (gop.found_existing) return gop.value_ptr.*;
- errdefer assert(self.dg.object.decl_map.remove(enum_decl));
+ errdefer assert(self.dg.object.decl_map.remove(enum_type.decl));
var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const mod = self.dg.module;
- const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
- defer self.gpa.free(fqn);
- const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
+ const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
+ const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)});
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_ty = Type.slice_const_u8_sentinel_0;
const llvm_ret_ty = try self.dg.lowerType(slice_ty);
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
- const target = self.dg.module.getTarget();
- const slice_alignment = slice_ty.abiAlignment(target);
+ const slice_alignment = slice_ty.abiAlignment(mod);
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
- const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+ const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
@@ -8951,16 +8762,17 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(entry_block);
self.builder.clearCurrentDebugLocation();
- const fields = enum_ty.enumFields();
const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count()));
+ const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len));
const array_ptr_indices = [_]*llvm.Value{
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
- for (fields.keys(), 0..) |name, field_index| {
+ for (enum_type.names, 0..) |name_ip, field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
+ const name = mod.intern_pool.stringToSlice(name_ip);
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_init_llvm_ty = str_init.typeOf();
const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -8983,16 +8795,10 @@ pub const FuncGen = struct {
slice_global.setAlignment(slice_alignment);
const return_block = self.context.appendBasicBlock(fn_val, "Name");
- const this_tag_int_value = int: {
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
- break :int try self.dg.lowerValue(.{
- .ty = enum_ty,
- .val = Value.initPayload(&tag_val_payload.base),
- });
- };
+ const this_tag_int_value = try self.dg.lowerValue(.{
+ .ty = enum_ty,
+ .val = try mod.enumValueFieldIndex(enum_ty, field_index),
+ });
switch_instr.addCase(this_tag_int_value, return_block);
self.builder.positionBuilderAtEnd(return_block);
@@ -9028,7 +8834,7 @@ pub const FuncGen = struct {
fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const slice_ty = self.air.typeOfIndex(inst);
+ const slice_ty = self.typeOfIndex(inst);
const slice_llvm_ty = try self.dg.lowerType(slice_ty);
const error_name_table_ptr = try self.getErrorNameTable();
@@ -9040,10 +8846,11 @@ pub const FuncGen = struct {
}
fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const scalar = try self.resolveInst(ty_op.operand);
- const vector_ty = self.air.typeOfIndex(inst);
- const len = vector_ty.vectorLen();
+ const vector_ty = self.typeOfIndex(inst);
+ const len = vector_ty.vectorLen(mod);
return self.builder.buildVectorSplat(len, scalar, "");
}
@@ -9058,13 +8865,14 @@ pub const FuncGen = struct {
}
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
const b = try self.resolveInst(extra.b);
- const mask = self.air.values[extra.mask];
+ const mask = extra.mask.toValue();
const mask_len = extra.mask_len;
- const a_len = self.air.typeOf(extra.a).vectorLen();
+ const a_len = self.typeOf(extra.a).vectorLen(mod);
// LLVM uses integers larger than the length of the first array to
// index into the second array. This was deemed unnecessarily fragile
@@ -9077,12 +8885,11 @@ pub const FuncGen = struct {
const llvm_i32 = self.context.intType(32);
for (values, 0..) |*val, i| {
- var buf: Value.ElemValueBuffer = undefined;
- const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
- if (elem.isUndef()) {
+ const elem = try mask.elemValue(mod, i);
+ if (elem.isUndef(mod)) {
val.* = llvm_i32.getUndef();
} else {
- const int = elem.toSignedInt(self.dg.module.getTarget());
+ const int = elem.toSignedInt(mod);
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
val.* = llvm_i32.constInt(unsigned, .False);
}
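The loop above rewrites @shuffle's mask encoding, where a negative element m selects b[~m], into LLVM's single index space by offsetting b's lanes past the length of a. The mapping in isolation; the function name is illustrative:

const std = @import("std");

fn llvmShuffleIndex(m: i32, a_len: u32) u32 {
    return if (m >= 0) @intCast(u32, m) else @intCast(u32, ~m) + a_len;
}

test "shuffle mask mapping" {
    try std.testing.expectEqual(@as(u32, 2), llvmShuffleIndex(2, 4)); // a[2]
    try std.testing.expectEqual(@as(u32, 4), llvmShuffleIndex(-1, 4)); // b[0]
    try std.testing.expectEqual(@as(u32, 6), llvmShuffleIndex(-3, 4)); // b[2]
}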
@@ -9158,32 +8965,33 @@ pub const FuncGen = struct {
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
const reduce = self.air.instructions.items(.data)[inst].reduce;
const operand = try self.resolveInst(reduce.operand);
- const operand_ty = self.air.typeOf(reduce.operand);
- const scalar_ty = self.air.typeOfIndex(inst);
+ const operand_ty = self.typeOf(reduce.operand);
+ const scalar_ty = self.typeOfIndex(inst);
switch (reduce.operation) {
.And => return self.builder.buildAndReduce(operand),
.Or => return self.builder.buildOrReduce(operand),
.Xor => return self.builder.buildXorReduce(operand),
- .Min => switch (scalar_ty.zigTypeTag()) {
- .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt()),
+ .Min => switch (scalar_ty.zigTypeTag(mod)) {
+ .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
return self.builder.buildFPMinReduce(operand);
},
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag()) {
- .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt()),
+ .Max => switch (scalar_ty.zigTypeTag(mod)) {
+ .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
return self.builder.buildFPMaxReduce(operand);
},
else => unreachable,
},
- .Add => switch (scalar_ty.zigTypeTag()) {
+ .Add => switch (scalar_ty.zigTypeTag(mod)) {
.Int => return self.builder.buildAddReduce(operand),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -9192,7 +9000,7 @@ pub const FuncGen = struct {
},
else => unreachable,
},
- .Mul => switch (scalar_ty.zigTypeTag()) {
+ .Mul => switch (scalar_ty.zigTypeTag(mod)) {
.Int => return self.builder.buildMulReduce(operand),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -9222,35 +9030,32 @@ pub const FuncGen = struct {
}) catch unreachable,
else => unreachable,
};
- var init_value_payload = Value.Payload.Float_32{
- .data = switch (reduce.operation) {
- .Min => std.math.nan(f32),
- .Max => std.math.nan(f32),
- .Add => -0.0,
- .Mul => 1.0,
- else => unreachable,
- },
- };
const param_llvm_ty = try self.dg.lowerType(scalar_ty);
const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty };
const libc_fn = self.getLibcFunction(fn_name, &param_types, param_llvm_ty);
const init_value = try self.dg.lowerValue(.{
.ty = scalar_ty,
- .val = Value.initPayload(&init_value_payload.base),
+ .val = try mod.floatValue(scalar_ty, switch (reduce.operation) {
+ .Min => std.math.nan(f32),
+ .Max => std.math.nan(f32),
+ .Add => -0.0,
+ .Mul => 1.0,
+ else => unreachable,
+ }),
});
- return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(), init_value);
+ return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value);
}
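airReduce covers both the reductions LLVM has intrinsics for and a libc fallback that folds the vector element by element, seeded with a start value as lowered in the hunk above (NaN for Min/Max, -0.0 for Add, 1.0 for Mul). The language-level behavior being implemented:

const std = @import("std");

test "vector reduce" {
    const v = @Vector(4, i32){ 1, 2, 3, 4 };
    try std.testing.expectEqual(@as(i32, 10), @reduce(.Add, v));
    try std.testing.expectEqual(@as(i32, 1), @reduce(.Min, v));
    try std.testing.expectEqual(@as(i32, 4), @reduce(.Max, v));
}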
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const result_ty = self.air.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen());
+ const result_ty = self.typeOfIndex(inst);
+ const len = @intCast(usize, result_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const llvm_result_ty = try self.dg.lowerType(result_ty);
- const target = self.dg.module.getTarget();
- switch (result_ty.zigTypeTag()) {
+ switch (result_ty.zigTypeTag(mod)) {
.Vector => {
const llvm_u32 = self.context.intType(32);
@@ -9263,10 +9068,10 @@ pub const FuncGen = struct {
return vector;
},
.Struct => {
- if (result_ty.containerLayout() == .Packed) {
- const struct_obj = result_ty.castTag(.@"struct").?.data;
+ if (result_ty.containerLayout(mod) == .Packed) {
+ const struct_obj = mod.typeToStruct(result_ty).?;
assert(struct_obj.haveLayout());
- const big_bits = struct_obj.backing_int_ty.bitSize(target);
+ const big_bits = struct_obj.backing_int_ty.bitSize(mod);
const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
@@ -9274,12 +9079,12 @@ pub const FuncGen = struct {
var running_bits: u16 = 0;
for (elements, 0..) |elem, i| {
const field = fields[i];
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try self.resolveInst(elem);
- const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
const small_int_ty = self.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime())
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
else
self.builder.buildBitCast(non_int_val, small_int_ty, "");
@@ -9295,30 +9100,28 @@ pub const FuncGen = struct {
return running_int;
}
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
-
- if (isByRef(result_ty)) {
+ if (isByRef(result_ty, mod)) {
const llvm_u32 = self.context.intType(32);
// TODO in debug builds init to undef so that the padding will be 0xaa
// even if we fully populate the fields.
- const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
+ const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));
var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
for (elements, 0..) |elem, i| {
- if (result_ty.structFieldValueComptime(i) != null) continue;
+ if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
- const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?;
+ const llvm_i = llvmField(result_ty, i, mod).?.index;
indices[1] = llvm_u32.constInt(llvm_i, .False);
const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
- var field_ptr_payload: Type.Payload.Pointer = .{
- .data = .{
- .pointee_type = self.air.typeOf(elem),
- .@"align" = result_ty.structFieldAlign(i, target),
- .@"addrspace" = .generic,
+ const field_ptr_ty = try mod.ptrType(.{
+ .child = self.typeOf(elem).toIntern(),
+ .flags = .{
+ .alignment = InternPool.Alignment.fromNonzeroByteUnits(
+ result_ty.structFieldAlign(i, mod),
+ ),
},
- };
- const field_ptr_ty = Type.initPayload(&field_ptr_payload.base);
+ });
try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic);
}
@@ -9326,29 +9129,25 @@ pub const FuncGen = struct {
} else {
var result = llvm_result_ty.getUndef();
for (elements, 0..) |elem, i| {
- if (result_ty.structFieldValueComptime(i) != null) continue;
+ if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
- const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?;
+ const llvm_i = llvmField(result_ty, i, mod).?.index;
result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
}
return result;
}
},
.Array => {
- assert(isByRef(result_ty));
+ assert(isByRef(result_ty, mod));
const llvm_usize = try self.dg.lowerType(Type.usize);
- const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
+ const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));
- const array_info = result_ty.arrayInfo();
- var elem_ptr_payload: Type.Payload.Pointer = .{
- .data = .{
- .pointee_type = array_info.elem_type,
- .@"addrspace" = .generic,
- },
- };
- const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base);
+ const array_info = result_ty.arrayInfo(mod);
+ const elem_ptr_ty = try mod.ptrType(.{
+ .child = array_info.elem_type.toIntern(),
+ });
for (elements, 0..) |elem, i| {
const indices: [2]*llvm.Value = .{
@@ -9380,22 +9179,22 @@ pub const FuncGen = struct {
}
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
- const union_ty = self.air.typeOfIndex(inst);
+ const union_ty = self.typeOfIndex(inst);
const union_llvm_ty = try self.dg.lowerType(union_ty);
- const target = self.dg.module.getTarget();
- const layout = union_ty.unionGetLayout(target);
- const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+ const layout = union_ty.unionGetLayout(mod);
+ const union_obj = mod.typeToUnion(union_ty).?;
if (union_obj.layout == .Packed) {
- const big_bits = union_ty.bitSize(target);
+ const big_bits = union_ty.bitSize(mod);
const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
const field = union_obj.fields.values()[extra.field_index];
const non_int_val = try self.resolveInst(extra.init);
- const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
const small_int_ty = self.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime())
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
else
self.builder.buildBitCast(non_int_val, small_int_ty, "");
@@ -9403,26 +9202,21 @@ pub const FuncGen = struct {
}
const tag_int = blk: {
- const tag_ty = union_ty.unionTagTypeHypothetical();
+ const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const union_field_name = union_obj.fields.keys()[extra.field_index];
- const enum_field_index = tag_ty.enumFieldIndex(union_field_name).?;
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, enum_field_index),
- };
- const tag_val = Value.initPayload(&tag_val_payload.base);
- var int_payload: Value.Payload.U64 = undefined;
- const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload);
- break :blk tag_int_val.toUnsignedInt(target);
+ const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
+ const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
+ break :blk tag_int_val.toUnsignedInt(mod);
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
return null;
}
- assert(!isByRef(union_ty));
+ assert(!isByRef(union_ty, mod));
return union_llvm_ty.constInt(tag_int, .False);
}
- assert(isByRef(union_ty));
+ assert(isByRef(union_ty, mod));
// The llvm type of the alloca will be the named LLVM union type, and will not
// necessarily match the format that we need, depending on which tag is active.
// We must construct the correct unnamed struct type here, in order to then set
@@ -9432,12 +9226,12 @@ pub const FuncGen = struct {
assert(union_obj.haveFieldTypes());
const field = union_obj.fields.values()[extra.field_index];
const field_llvm_ty = try self.dg.lowerType(field.ty);
- const field_size = field.ty.abiSize(target);
- const field_align = field.normalAlignment(target);
+ const field_size = field.ty.abiSize(mod);
+ const field_align = field.normalAlignment(mod);
const llvm_union_ty = t: {
const payload = p: {
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) {
const padding_len = @intCast(c_uint, layout.payload_size);
break :p self.context.intType(8).arrayType(padding_len);
}
@@ -9473,14 +9267,12 @@ pub const FuncGen = struct {
// tag and the payload.
const index_type = self.context.intType(32);
- var field_ptr_payload: Type.Payload.Pointer = .{
- .data = .{
- .pointee_type = field.ty,
- .@"align" = field_align,
- .@"addrspace" = .generic,
+ const field_ptr_ty = try mod.ptrType(.{
+ .child = field.ty.toIntern(),
+ .flags = .{
+ .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align),
},
- };
- const field_ptr_ty = Type.initPayload(&field_ptr_payload.base);
+ });
if (layout.tag_size == 0) {
const indices: [3]*llvm.Value = .{
index_type.constNull(),
@@ -9512,7 +9304,7 @@ pub const FuncGen = struct {
const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty);
const llvm_tag = tag_llvm_ty.constInt(tag_int, .False);
const store_inst = self.builder.buildStore(llvm_tag, field_ptr);
- store_inst.setAlignment(union_obj.tag_ty.abiAlignment(target));
+ store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod));
}
return result_ptr;
@@ -9536,7 +9328,8 @@ pub const FuncGen = struct {
// by the target.
// To work around this, don't emit llvm.prefetch in this case.
// See https://bugs.llvm.org/show_bug.cgi?id=21037
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
switch (prefetch.cache) {
.instruction => switch (target.cpu.arch) {
.x86_64,
@@ -9585,7 +9378,7 @@ pub const FuncGen = struct {
fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const inst_ty = self.air.typeOfIndex(inst);
+ const inst_ty = self.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
const llvm_dest_ty = try self.dg.lowerType(inst_ty);
@@ -9659,8 +9452,9 @@ pub const FuncGen = struct {
return table;
}
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget());
+ const mod = self.dg.module;
+ const slice_ty = Type.slice_const_u8_sentinel_0;
+ const slice_alignment = slice_ty.abiAlignment(mod);
const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space
const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
@@ -9702,16 +9496,15 @@ pub const FuncGen = struct {
opt_ty: Type,
can_elide_load: bool,
) !*llvm.Value {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
+ const mod = fg.dg.module;
+ const payload_ty = opt_ty.optionalChild(mod);
- if (isByRef(opt_ty)) {
+ if (isByRef(opt_ty, mod)) {
// We have a pointer and we need to return a pointer to the first field.
const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, "");
- const target = fg.dg.module.getTarget();
- const payload_alignment = payload_ty.abiAlignment(target);
- if (isByRef(payload_ty)) {
+ const payload_alignment = payload_ty.abiAlignment(mod);
+ if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
@@ -9723,7 +9516,7 @@ pub const FuncGen = struct {
return load_inst;
}
- assert(!isByRef(payload_ty));
+ assert(!isByRef(payload_ty, mod));
return fg.builder.buildExtractValue(opt_handle, 0, "");
}
@@ -9735,10 +9528,10 @@ pub const FuncGen = struct {
) !?*llvm.Value {
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), "");
+ const mod = self.dg.module;
- if (isByRef(optional_ty)) {
- const target = self.dg.module.getTarget();
- const payload_alignment = optional_ty.abiAlignment(target);
+ if (isByRef(optional_ty, mod)) {
+ const payload_alignment = optional_ty.abiAlignment(mod);
const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment);
{
@@ -9766,13 +9559,13 @@ pub const FuncGen = struct {
struct_ptr_ty: Type,
field_index: u32,
) !?*llvm.Value {
- const target = self.dg.object.target;
- const struct_ty = struct_ptr_ty.childType();
- switch (struct_ty.zigTypeTag()) {
- .Struct => switch (struct_ty.containerLayout()) {
+ const mod = self.dg.module;
+ const struct_ty = struct_ptr_ty.childType(mod);
+ switch (struct_ty.zigTypeTag(mod)) {
+ .Struct => switch (struct_ty.containerLayout(mod)) {
.Packed => {
- const result_ty = self.air.typeOfIndex(inst);
- const result_ty_info = result_ty.ptrInfo().data;
+ const result_ty = self.typeOfIndex(inst);
+ const result_ty_info = result_ty.ptrInfo(mod);
if (result_ty_info.host_size != 0) {
// From LLVM's perspective, a pointer to a packed struct and a pointer
@@ -9784,7 +9577,7 @@ pub const FuncGen = struct {
// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
- const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target);
+ const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod);
if (byte_offset == 0) return struct_ptr;
const byte_llvm_ty = self.context.intType(8);
const llvm_usize = try self.dg.lowerType(Type.usize);
@@ -9795,24 +9588,23 @@ pub const FuncGen = struct {
else => {
const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty);
- var ty_buf: Type.Payload.Pointer = undefined;
- if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
- return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, "");
+ if (llvmField(struct_ty, field_index, mod)) |llvm_field| {
+ return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, "");
} else {
// If we found no index then this means this is a zero sized field at the
// end of the struct. Treat our struct pointer as an array of two and get
// the index to the element at index `1` to get a pointer to the end of
// the struct.
const llvm_u32 = self.context.intType(32);
- const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False);
+ const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
const indices: [1]*llvm.Value = .{llvm_index};
return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, "");
}
},
},
.Union => {
- const layout = struct_ty.unionGetLayout(target);
- if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr;
+ const layout = struct_ty.unionGetLayout(mod);
+ if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
const union_llvm_ty = try self.dg.lowerType(struct_ty);
const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, "");
@@ -9836,12 +9628,12 @@ pub const FuncGen = struct {
ptr_alignment: u32,
is_volatile: bool,
) !*llvm.Value {
+ const mod = fg.dg.module;
const pointee_llvm_ty = try fg.dg.lowerType(pointee_type);
- const target = fg.dg.module.getTarget();
- const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target));
+ const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod));
const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align);
- const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits);
- const size_bytes = pointee_type.abiSize(target);
+ const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits);
+ const size_bytes = pointee_type.abiSize(mod);
_ = fg.builder.buildMemCpy(
result_ptr,
result_align,
@@ -9857,12 +9649,12 @@ pub const FuncGen = struct {
/// alloca and copies the value into it, then returns the alloca instruction.
/// For isByRef=false types, it creates a load instruction and returns it.
fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value {
- const info = ptr_ty.ptrInfo().data;
- if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null;
+ const mod = self.dg.module;
+ const info = ptr_ty.ptrInfo(mod);
+ if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null;
- const target = self.dg.module.getTarget();
- const ptr_alignment = info.alignment(target);
- const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr());
+ const ptr_alignment = info.alignment(mod);
+ const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr(mod));
assert(info.vector_index != .runtime);
if (info.vector_index != .none) {
@@ -9878,7 +9670,7 @@ pub const FuncGen = struct {
}
if (info.host_size == 0) {
- if (isByRef(info.pointee_type)) {
+ if (isByRef(info.pointee_type, mod)) {
return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile");
}
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
@@ -9893,13 +9685,13 @@ pub const FuncGen = struct {
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+ const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
- if (isByRef(info.pointee_type)) {
- const result_align = info.pointee_type.abiAlignment(target);
+ if (isByRef(info.pointee_type, mod)) {
+ const result_align = info.pointee_type.abiAlignment(mod);
const result_ptr = self.buildAlloca(elem_llvm_ty, result_align);
const same_size_int = self.context.intType(elem_bits);
@@ -9909,13 +9701,13 @@ pub const FuncGen = struct {
return result_ptr;
}
- if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) {
+ if (info.pointee_type.zigTypeTag(mod) == .Float or info.pointee_type.zigTypeTag(mod) == .Vector) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
}
- if (info.pointee_type.isPtrAtRuntime()) {
+ if (info.pointee_type.isPtrAtRuntime(mod)) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -9931,13 +9723,13 @@ pub const FuncGen = struct {
elem: *llvm.Value,
ordering: llvm.AtomicOrdering,
) !void {
- const info = ptr_ty.ptrInfo().data;
+ const mod = self.dg.module;
+ const info = ptr_ty.ptrInfo(mod);
const elem_ty = info.pointee_type;
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
return;
}
- const target = self.dg.module.getTarget();
- const ptr_alignment = ptr_ty.ptrAlignment(target);
+ const ptr_alignment = ptr_ty.ptrAlignment(mod);
const ptr_volatile = llvm.Bool.fromBool(info.@"volatile");
assert(info.vector_index != .runtime);
@@ -9965,13 +9757,13 @@ pub const FuncGen = struct {
assert(ordering == .NotAtomic);
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+ const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
const containing_int_ty = containing_int.typeOf();
const shift_amt = containing_int_ty.constInt(info.bit_offset, .False);
// Convert to equally-sized integer type in order to perform the bit
// operations on the value to store
const value_bits_type = self.context.intType(elem_bits);
- const value_bits = if (elem_ty.isPtrAtRuntime())
+ const value_bits = if (elem_ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(elem, value_bits_type, "")
else
self.builder.buildBitCast(elem, value_bits_type, "");
@@ -9992,7 +9784,7 @@ pub const FuncGen = struct {
store_inst.setVolatile(ptr_volatile);
return;
}
- if (!isByRef(elem_ty)) {
+ if (!isByRef(elem_ty, mod)) {
const store_inst = self.builder.buildStore(elem, ptr);
store_inst.setOrdering(ordering);
store_inst.setAlignment(ptr_alignment);
@@ -10000,13 +9792,13 @@ pub const FuncGen = struct {
return;
}
assert(ordering == .NotAtomic);
- const size_bytes = elem_ty.abiSize(target);
+ const size_bytes = elem_ty.abiSize(mod);
_ = self.builder.buildMemCpy(
ptr,
ptr_alignment,
elem,
- elem_ty.abiAlignment(target),
- self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False),
+ elem_ty.abiAlignment(mod),
+ self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False),
info.@"volatile",
);
}
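
The host_size != 0 branch above stores a small value into a wider "containing int" at info.bit_offset; the masking arithmetic itself is elided from this hunk. As a rough standalone sketch of the usual read-modify-write pattern (an assumption for illustration, not the compiler's elided code; storeBits is a made-up helper):

const std = @import("std");

// Rough sketch only: store a `bits`-wide value into a 32-bit containing
// integer at `bit_offset` by load / clear / shift / or.
fn storeBits(container: *u32, bit_offset: u5, bits: u5, value: u32) void {
    const mask: u32 = ((@as(u32, 1) << bits) - 1) << bit_offset;
    container.* = (container.* & ~mask) | ((value << bit_offset) & mask);
}

test "read-modify-write packed store" {
    var word: u32 = 0xFFFF_FFFF;
    storeBits(&word, 8, 4, 0b0101);
    try std.testing.expectEqual(@as(u32, 0xFFFF_F5FF), word);
}

The real store additionally has to honor pointer alignment, volatility, and atomic ordering, which this sketch ignores.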
@@ -10014,7 +9806,7 @@ pub const FuncGen = struct {
fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void {
const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545;
const target = fg.dg.module.getTarget();
- const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+ const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
const zero = usize_llvm_ty.constInt(0, .False);
const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False);
const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, "");
@@ -10031,11 +9823,12 @@ pub const FuncGen = struct {
a4: *llvm.Value,
a5: *llvm.Value,
) *llvm.Value {
- const target = fg.dg.module.getTarget();
+ const mod = fg.dg.module;
+ const target = mod.getTarget();
if (!target_util.hasValgrindSupport(target)) return default_value;
- const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
- const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target));
+ const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
+ const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod));
const array_llvm_ty = usize_llvm_ty.arrayType(6);
const array_ptr = fg.valgrind_client_request_array orelse a: {
@@ -10112,6 +9905,16 @@ pub const FuncGen = struct {
);
return call;
}
+
+ fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
+ const mod = fg.dg.module;
+ return fg.air.typeOf(inst, &mod.intern_pool);
+ }
+
+ fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
+ const mod = fg.dg.module;
+ return fg.air.typeOfIndex(inst, &mod.intern_pool);
+ }
};
fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
@@ -10445,65 +10248,67 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
};
}
+const LlvmField = struct {
+ index: c_uint,
+ ty: Type,
+ alignment: u32,
+};
+
/// Take into account 0 bit fields and padding. Returns null if an llvm
/// field could not be found.
/// This only happens if you want the field index of a zero sized field at
/// the end of the struct.
-fn llvmFieldIndex(
- ty: Type,
- field_index: usize,
- target: std.Target,
- ptr_pl_buf: *Type.Payload.Pointer,
-) ?c_uint {
+fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
// Detects where we inserted extra padding fields so that we can skip
// over them in this function.
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
- if (ty.isSimpleTupleOrAnonStruct()) {
- const tuple = ty.tupleFields();
- var llvm_field_index: c_uint = 0;
- for (tuple.types, 0..) |field_ty, i| {
- if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+ const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .anon_struct_type => |tuple| {
+ var llvm_field_index: c_uint = 0;
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
- const field_align = field_ty.abiAlignment(target);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForward(u64, offset, field_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- llvm_field_index += 1;
- }
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ llvm_field_index += 1;
+ }
- if (field_index <= i) {
- ptr_pl_buf.* = .{
- .data = .{
- .pointee_type = field_ty,
- .@"align" = field_align,
- .@"addrspace" = .generic,
- },
- };
- return llvm_field_index;
- }
+ if (field_index <= i) {
+ return .{
+ .index = llvm_field_index,
+ .ty = field_ty.toType(),
+ .alignment = field_align,
+ };
+ }
- llvm_field_index += 1;
- offset += field_ty.abiSize(target);
- }
- return null;
- }
- const layout = ty.containerLayout();
+ llvm_field_index += 1;
+ offset += field_ty.toType().abiSize(mod);
+ }
+ return null;
+ },
+ .struct_type => |s| s,
+ else => unreachable,
+ };
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ const layout = struct_obj.layout;
assert(layout != .Packed);
var llvm_field_index: c_uint = 0;
- var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+ var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
- const field_align = field.alignment(target, layout);
+ const field_align = field.alignment(mod, layout);
big_align = @max(big_align, field_align);
const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ offset = std.mem.alignForward(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
@@ -10511,54 +10316,52 @@ fn llvmFieldIndex(
}
if (field_index == field_and_index.index) {
- ptr_pl_buf.* = .{
- .data = .{
- .pointee_type = field.ty,
- .@"align" = field_align,
- .@"addrspace" = .generic,
- },
+ return .{
+ .index = llvm_field_index,
+ .ty = field.ty,
+ .alignment = field_align,
};
- return llvm_field_index;
}
llvm_field_index += 1;
- offset += field.ty.abiSize(target);
+ offset += field.ty.abiSize(mod);
} else {
// We did not find an llvm field that corresponds to this zig field.
return null;
}
}
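
As a standalone illustration of the mapping llvmField performs: zero-bit fields produce no LLVM field at all, and an explicit padding field is counted whenever alignment pushes the running offset forward. The helper and the field sizes below are made up for the example; the real code queries ABI sizes and alignments through the Module.

const std = @import("std");

const Field = struct { size: u64, alignment: u64 };

// Hypothetical helper mirroring the mapping above: zero-bit fields are
// skipped, and a padding field is counted whenever the offset is bumped
// forward for alignment.
fn llvmIndexOf(fields: []const Field, zig_index: usize) ?usize {
    var offset: u64 = 0;
    var llvm_index: usize = 0;
    for (fields, 0..) |field, i| {
        if (field.size == 0) continue;
        const aligned = std.mem.alignForward(u64, offset, field.alignment);
        if (aligned != offset) llvm_index += 1;
        offset = aligned;
        if (i == zig_index) return llvm_index;
        llvm_index += 1;
        offset += field.size;
    }
    return null; // e.g. a zero-sized field at the end of the struct
}

test "padding and zero-bit fields shift llvm indices" {
    const fields = [_]Field{
        .{ .size = 1, .alignment = 1 }, // -> LLVM field 0
        .{ .size = 8, .alignment = 8 }, // -> LLVM field 2 (field 1 is padding)
        .{ .size = 0, .alignment = 1 }, // -> no LLVM field
    };
    try std.testing.expectEqual(@as(?usize, 0), llvmIndexOf(&fields, 0));
    try std.testing.expectEqual(@as(?usize, 2), llvmIndexOf(&fields, 1));
    try std.testing.expectEqual(@as(?usize, null), llvmIndexOf(&fields, 2));
}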
-fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool {
- if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) return false;
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
+ if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false;
+ const target = mod.getTarget();
switch (fn_info.cc) {
- .Unspecified, .Inline => return isByRef(fn_info.return_type),
+ .Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod),
.C => switch (target.cpu.arch) {
.mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) {
- .windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
- else => return firstParamSRetSystemV(fn_info.return_type, target),
+ .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+ else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
},
- .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect,
- .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory,
- .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+ .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect,
+ .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
+ .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) {
.memory, .i64_array => return true,
.i32_array => |size| return size != 1,
.byval => return false,
},
- .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory,
+ .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures
},
- .SysV => return firstParamSRetSystemV(fn_info.return_type, target),
- .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
- .Stdcall => return !isScalar(fn_info.return_type),
+ .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
+ .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+ .Stdcall => return !isScalar(mod, fn_info.return_type.toType()),
else => return false,
}
}
-fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
- const class = x86_64_abi.classifySystemV(ty, target, .ret);
+fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
+ const class = x86_64_abi.classifySystemV(ty, mod, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
@@ -10567,75 +10370,77 @@ fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
/// In order to support the C calling convention, some return types need to be lowered
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
- if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) {
+fn lowerFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+ const mod = dg.module;
+ const return_type = fn_info.return_type.toType();
+ if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
- if (fn_info.return_type.isError()) {
+ if (return_type.isError(mod)) {
return dg.lowerType(Type.anyerror);
} else {
return dg.context.voidType();
}
}
- const target = dg.module.getTarget();
+ const target = mod.getTarget();
switch (fn_info.cc) {
.Unspecified, .Inline => {
- if (isByRef(fn_info.return_type)) {
+ if (isByRef(return_type, mod)) {
return dg.context.voidType();
} else {
- return dg.lowerType(fn_info.return_type);
+ return dg.lowerType(return_type);
}
},
.C => {
switch (target.cpu.arch) {
- .mips, .mipsel => return dg.lowerType(fn_info.return_type),
+ .mips, .mipsel => return dg.lowerType(return_type),
.x86_64 => switch (target.os.tag) {
.windows => return lowerWin64FnRetTy(dg, fn_info),
else => return lowerSystemVFnRetTy(dg, fn_info),
},
.wasm32 => {
- if (isScalar(fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
}
- const classes = wasm_c_abi.classifyType(fn_info.return_type, target);
+ const classes = wasm_c_abi.classifyType(return_type, mod);
if (classes[0] == .indirect or classes[0] == .none) {
return dg.context.voidType();
}
assert(classes[0] == .direct and classes[1] == .none);
- const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, target);
- const abi_size = scalar_type.abiSize(target);
+ const scalar_type = wasm_c_abi.scalarType(return_type, mod);
+ const abi_size = scalar_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
},
.aarch64, .aarch64_be => {
- switch (aarch64_c_abi.classifyType(fn_info.return_type, target)) {
+ switch (aarch64_c_abi.classifyType(return_type, mod)) {
.memory => return dg.context.voidType(),
- .float_array => return dg.lowerType(fn_info.return_type),
- .byval => return dg.lowerType(fn_info.return_type),
+ .float_array => return dg.lowerType(return_type),
+ .byval => return dg.lowerType(return_type),
.integer => {
- const bit_size = fn_info.return_type.bitSize(target);
+ const bit_size = return_type.bitSize(mod);
return dg.context.intType(@intCast(c_uint, bit_size));
},
.double_integer => return dg.context.intType(64).arrayType(2),
}
},
.arm, .armeb => {
- switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+ switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
.memory, .i64_array => return dg.context.voidType(),
.i32_array => |len| if (len == 1) {
return dg.context.intType(32);
} else {
return dg.context.voidType();
},
- .byval => return dg.lowerType(fn_info.return_type),
+ .byval => return dg.lowerType(return_type),
}
},
.riscv32, .riscv64 => {
- switch (riscv_c_abi.classifyType(fn_info.return_type, target)) {
+ switch (riscv_c_abi.classifyType(return_type, mod)) {
.memory => return dg.context.voidType(),
.integer => {
- const bit_size = fn_info.return_type.bitSize(target);
+ const bit_size = return_type.bitSize(mod);
return dg.context.intType(@intCast(c_uint, bit_size));
},
.double_integer => {
@@ -10645,50 +10450,52 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
};
return dg.context.structType(&llvm_types_buffer, 2, .False);
},
- .byval => return dg.lowerType(fn_info.return_type),
+ .byval => return dg.lowerType(return_type),
}
},
// TODO investigate C ABI for other architectures
- else => return dg.lowerType(fn_info.return_type),
+ else => return dg.lowerType(return_type),
}
},
.Win64 => return lowerWin64FnRetTy(dg, fn_info),
.SysV => return lowerSystemVFnRetTy(dg, fn_info),
.Stdcall => {
- if (isScalar(fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
} else {
return dg.context.voidType();
}
},
- else => return dg.lowerType(fn_info.return_type),
+ else => return dg.lowerType(return_type),
}
}
-fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
- const target = dg.module.getTarget();
- switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
+fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+ const mod = dg.module;
+ const return_type = fn_info.return_type.toType();
+ switch (x86_64_abi.classifyWindows(return_type, mod)) {
.integer => {
- if (isScalar(fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
} else {
- const abi_size = fn_info.return_type.abiSize(target);
+ const abi_size = return_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
},
.win_i128 => return dg.context.intType(64).vectorType(2),
.memory => return dg.context.voidType(),
- .sse => return dg.lowerType(fn_info.return_type),
+ .sse => return dg.lowerType(return_type),
else => unreachable,
}
}
-fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
- if (isScalar(fn_info.return_type)) {
- return dg.lowerType(fn_info.return_type);
+fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+ const mod = dg.module;
+ const return_type = fn_info.return_type.toType();
+ if (isScalar(mod, return_type)) {
+ return dg.lowerType(return_type);
}
- const target = dg.module.getTarget();
- const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
+ const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
if (classes[0] == .memory) {
return dg.context.voidType();
}
@@ -10729,7 +10536,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
}
}
if (classes[0] == .integer and classes[1] == .none) {
- const abi_size = fn_info.return_type.abiSize(target);
+ const abi_size = return_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
@@ -10737,10 +10544,9 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
const ParamTypeIterator = struct {
dg: *DeclGen,
- fn_info: Type.Payload.Function.Data,
+ fn_info: InternPool.Key.FuncType,
zig_index: u32,
llvm_index: u32,
- target: std.Target,
llvm_types_len: u32,
llvm_types_buffer: [8]*llvm.Type,
byval_attr: bool,
@@ -10763,7 +10569,7 @@ const ParamTypeIterator = struct {
if (it.zig_index >= it.fn_info.param_types.len) return null;
const ty = it.fn_info.param_types[it.zig_index];
it.byval_attr = false;
- return nextInner(it, ty);
+ return nextInner(it, ty.toType());
}
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
@@ -10772,15 +10578,18 @@ const ParamTypeIterator = struct {
if (it.zig_index >= args.len) {
return null;
} else {
- return nextInner(it, fg.air.typeOf(args[it.zig_index]));
+ return nextInner(it, fg.typeOf(args[it.zig_index]));
}
} else {
- return nextInner(it, it.fn_info.param_types[it.zig_index]);
+ return nextInner(it, it.fn_info.param_types[it.zig_index].toType());
}
}
fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering {
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ const mod = it.dg.module;
+ const target = mod.getTarget();
+
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
it.zig_index += 1;
return .no_bits;
}
@@ -10788,11 +10597,10 @@ const ParamTypeIterator = struct {
.Unspecified, .Inline => {
it.zig_index += 1;
it.llvm_index += 1;
- var buf: Type.Payload.ElemType = undefined;
- if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) {
+ if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod))) {
it.llvm_index += 1;
return .slice;
- } else if (isByRef(ty)) {
+ } else if (isByRef(ty, mod)) {
return .byref;
} else {
return .byval;
@@ -10802,23 +10610,23 @@ const ParamTypeIterator = struct {
@panic("TODO implement async function lowering in the LLVM backend");
},
.C => {
- switch (it.target.cpu.arch) {
+ switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
- .x86_64 => switch (it.target.os.tag) {
+ .x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
return .byval;
}
- const classes = wasm_c_abi.classifyType(ty, it.target);
+ const classes = wasm_c_abi.classifyType(ty, mod);
if (classes[0] == .indirect) {
return .byref;
}
@@ -10827,7 +10635,7 @@ const ParamTypeIterator = struct {
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (aarch64_c_abi.classifyType(ty, it.target)) {
+ switch (aarch64_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
@@ -10842,7 +10650,7 @@ const ParamTypeIterator = struct {
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (arm_c_abi.classifyType(ty, it.target, .arg)) {
+ switch (arm_c_abi.classifyType(ty, mod, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
@@ -10855,10 +10663,10 @@ const ParamTypeIterator = struct {
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
- if (ty.tag() == .f16) {
+ if (ty.toIntern() == .f16_type) {
return .as_u16;
}
- switch (riscv_c_abi.classifyType(ty, it.target)) {
+ switch (riscv_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
@@ -10879,7 +10687,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
return .byval;
} else {
it.byval_attr = true;
@@ -10895,9 +10703,10 @@ const ParamTypeIterator = struct {
}
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
- switch (x86_64_abi.classifyWindows(ty, it.target)) {
+ const mod = it.dg.module;
+ switch (x86_64_abi.classifyWindows(ty, mod)) {
.integer => {
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -10927,14 +10736,15 @@ const ParamTypeIterator = struct {
}
fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering {
- const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
+ const mod = it.dg.module;
+ const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -10987,13 +10797,12 @@ const ParamTypeIterator = struct {
}
};
-fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator {
+fn iterateParamTypes(dg: *DeclGen, fn_info: InternPool.Key.FuncType) ParamTypeIterator {
return .{
.dg = dg,
.fn_info = fn_info,
.zig_index = 0,
.llvm_index = 0,
- .target = dg.module.getTarget(),
.llvm_types_buffer = undefined,
.llvm_types_len = 0,
.byval_attr = false,
@@ -11002,16 +10811,17 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
fn ccAbiPromoteInt(
cc: std.builtin.CallingConvention,
- target: std.Target,
+ mod: *Module,
ty: Type,
) ?std.builtin.Signedness {
+ const target = mod.getTarget();
switch (cc) {
.Unspecified, .Inline, .Async => return null,
else => {},
}
- const int_info = switch (ty.zigTypeTag()) {
- .Bool => Type.u1.intInfo(target),
- .Int, .Enum, .ErrorSet => ty.intInfo(target),
+ const int_info = switch (ty.zigTypeTag(mod)) {
+ .Bool => Type.u1.intInfo(mod),
+ .Int, .Enum, .ErrorSet => ty.intInfo(mod),
else => return null,
};
if (int_info.bits <= 16) return int_info.signedness;
@@ -11040,12 +10850,12 @@ fn ccAbiPromoteInt(
/// This is the one source of truth for whether a type is passed around as an LLVM pointer,
/// or as an LLVM value.
-fn isByRef(ty: Type) bool {
+fn isByRef(ty: Type, mod: *Module) bool {
// For tuples and structs, if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
const max_fields_byval = 0;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -11068,51 +10878,53 @@ fn isByRef(ty: Type) bool {
.AnyFrame,
=> return false,
- .Array, .Frame => return ty.hasRuntimeBits(),
+ .Array, .Frame => return ty.hasRuntimeBits(mod),
.Struct => {
// Packed structs are represented to LLVM as integers.
- if (ty.containerLayout() == .Packed) return false;
- if (ty.isSimpleTupleOrAnonStruct()) {
- const tuple = ty.tupleFields();
- var count: usize = 0;
- for (tuple.values, 0..) |field_val, i| {
- if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue;
-
- count += 1;
- if (count > max_fields_byval) return true;
- if (isByRef(tuple.types[i])) return true;
- }
- return false;
- }
+ if (ty.containerLayout(mod) == .Packed) return false;
+ const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .anon_struct_type => |tuple| {
+ var count: usize = 0;
+ for (tuple.types, tuple.values) |field_ty, field_val| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+
+ count += 1;
+ if (count > max_fields_byval) return true;
+ if (isByRef(field_ty.toType(), mod)) return true;
+ }
+ return false;
+ },
+ .struct_type => |s| s,
+ else => unreachable,
+ };
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
var count: usize = 0;
- const fields = ty.structFields();
- for (fields.values()) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
+ for (struct_obj.fields.values()) |field| {
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
count += 1;
if (count > max_fields_byval) return true;
- if (isByRef(field.ty)) return true;
+ if (isByRef(field.ty, mod)) return true;
}
return false;
},
- .Union => switch (ty.containerLayout()) {
+ .Union => switch (ty.containerLayout(mod)) {
.Packed => return false,
- else => return ty.hasRuntimeBits(),
+ else => return ty.hasRuntimeBits(mod),
},
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = ty.errorUnionPayload(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
return true;
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = ty.optionalChild(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
return false;
}
return true;
@@ -11120,8 +10932,8 @@ fn isByRef(ty: Type) bool {
}
}
-fn isScalar(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+fn isScalar(mod: *Module, ty: Type) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Void,
.Bool,
.NoReturn,
@@ -11135,8 +10947,8 @@ fn isScalar(ty: Type) bool {
.Vector,
=> true,
- .Struct => ty.containerLayout() == .Packed,
- .Union => ty.containerLayout() == .Packed,
+ .Struct => ty.containerLayout(mod) == .Packed,
+ .Union => ty.containerLayout(mod) == .Packed,
else => false,
};
}
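
isScalar returns true for packed structs and unions because they are represented as their backing integer rather than as an aggregate. A small language-level illustration of that backing-integer view (the type below is invented for the example):

const std = @import("std");

// Illustrative only: a packed struct has a single integer backing
// representation (here u8), which is why the backend can pass it by value
// as a scalar.
const Flags = packed struct(u8) {
    a: bool,
    b: bool,
    _pad: u6 = 0,
};

test "packed struct is integer-backed" {
    const f = Flags{ .a = true, .b = false };
    try std.testing.expectEqual(@as(u8, 1), @bitCast(u8, f));
}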
@@ -11166,6 +10978,7 @@ fn backendSupportsF16(target: std.Target) bool {
.mips64,
.mips64el,
=> false,
+ .aarch64 => std.Target.aarch64.featureSetHas(target.cpu.features, .fp_armv8),
else => true,
};
}
@@ -11176,6 +10989,7 @@ fn backendSupportsF16(target: std.Target) bool {
fn backendSupportsF128(target: std.Target) bool {
return switch (target.cpu.arch) {
.amdgcn => false,
+ .aarch64 => std.Target.aarch64.featureSetHas(target.cpu.features, .fp_armv8),
else => true,
};
}
@@ -11183,10 +10997,10 @@ fn backendSupportsF128(target: std.Target) bool {
/// LLVM does not support all relevant intrinsics for all targets, so we
/// may need to manually generate a libc call
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
- return switch (scalar_ty.tag()) {
- .f16 => backendSupportsF16(target),
- .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
- .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
+ return switch (scalar_ty.toIntern()) {
+ .f16_type => backendSupportsF16(target),
+ .f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
+ .f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
else => true,
};
}
@@ -11303,12 +11117,12 @@ fn buildAllocaInner(
return alloca;
}
-fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 {
- return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target));
+fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
+ return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
}
-fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 {
- return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target));
+fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
+ return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
}
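
These two helpers encode the error-union field order: the payload is placed first unless the error set's ABI alignment is strictly greater, so one field lands at LLVM index 0 and the other at index 1. A minimal restatement of the rule over plain alignment numbers (the values 2, 1, and 8 are assumptions for illustration; the real code queries abiAlignment through the Module):

const std = @import("std");

// Illustrative restatement of errUnionPayloadOffset / errUnionErrorOffset
// over plain alignment values.
fn payloadOffset(error_align: u32, payload_align: u32) u1 {
    return @boolToInt(error_align > payload_align);
}

fn errorOffset(error_align: u32, payload_align: u32) u1 {
    return @boolToInt(error_align <= payload_align);
}

test "error union field ordering" {
    // Assuming an error-set alignment of 2 for the example.
    try std.testing.expectEqual(@as(u1, 1), payloadOffset(2, 1)); // small payload goes second
    try std.testing.expectEqual(@as(u1, 0), payloadOffset(2, 8)); // big payload goes first
    try std.testing.expectEqual(@as(u1, 1), errorOffset(2, 8));
}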
/// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index dfb51cecef..dc1f23dad4 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -22,8 +22,10 @@ const IdResultType = spec.IdResultType;
const StorageClass = spec.StorageClass;
const SpvModule = @import("spirv/Module.zig");
+const CacheRef = SpvModule.CacheRef;
+const CacheString = SpvModule.CacheString;
+
const SpvSection = @import("spirv/Section.zig");
-const SpvType = @import("spirv/type.zig").Type;
const SpvAssembler = @import("spirv/Assembler.zig");
const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
@@ -216,8 +218,9 @@ pub const DeclGen = struct {
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
+ const mod = self.module;
const src = LazySrcLoc.nodeOffset(0);
- const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
+ const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index), mod);
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
return error.CodegenFail;
@@ -229,12 +232,13 @@ pub const DeclGen = struct {
/// Fetch the result-id for a previously generated instruction or constant.
fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
- if (self.air.value(inst)) |val| {
- const ty = self.air.typeOf(inst);
- if (ty.zigTypeTag() == .Fn) {
- const fn_decl_index = switch (val.tag()) {
- .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
- .function => val.castTag(.function).?.data.owner_decl,
+ const mod = self.module;
+ if (try self.air.value(inst, mod)) |val| {
+ const ty = self.typeOf(inst);
+ if (ty.zigTypeTag(mod) == .Fn) {
+ const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .extern_func => |extern_func| extern_func.decl,
+ .func => |func| mod.funcPtr(func.index).owner_decl,
else => unreachable,
};
const spv_decl_index = try self.resolveDecl(fn_decl_index);
@@ -242,7 +246,7 @@ pub const DeclGen = struct {
return self.spv.declPtr(spv_decl_index).result_id;
}
- return try self.constant(ty, val);
+ return try self.constant(ty, val, .direct);
}
const index = Air.refToIndex(inst).?;
return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
@@ -252,12 +256,12 @@ pub const DeclGen = struct {
/// Note: Function does not actually generate the decl.
fn resolveDecl(self: *DeclGen, decl_index: Module.Decl.Index) !SpvModule.Decl.Index {
const decl = self.module.declPtr(decl_index);
- self.module.markDeclAlive(decl);
+ try self.module.markDeclAlive(decl);
const entry = try self.decl_link.getOrPut(decl_index);
if (!entry.found_existing) {
// TODO: Extern fn?
- const kind: SpvModule.DeclKind = if (decl.val.tag() == .function)
+ const kind: SpvModule.DeclKind = if (decl.val.getFunctionIndex(self.module) != .none)
.func
else
.global;
@@ -338,8 +342,9 @@ pub const DeclGen = struct {
}
fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo {
+ const mod = self.module;
const target = self.getTarget();
- return switch (ty.zigTypeTag()) {
+ return switch (ty.zigTypeTag(mod)) {
.Bool => ArithmeticTypeInfo{
.bits = 1, // Doesn't matter for this class.
.is_vector = false,
@@ -353,7 +358,7 @@ pub const DeclGen = struct {
.class = .float,
},
.Int => blk: {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
// TODO: Maybe it's useful to also return this value.
const maybe_backing_bits = self.backingIntBits(int_info.bits);
break :blk ArithmeticTypeInfo{
@@ -372,85 +377,66 @@ pub const DeclGen = struct {
// As of yet, there is no vector support in the self-hosted compiler.
.Vector => self.todo("implement arithmeticTypeInfo for Vector", .{}),
// TODO: For which types is this the case?
- else => self.todo("implement arithmeticTypeInfo for {}", .{ty.fmt(self.module)}),
- };
- }
-
- fn genConstInt(self: *DeclGen, ty_ref: SpvType.Ref, result_id: IdRef, value: anytype) !void {
- const ty = self.spv.typeRefType(ty_ref);
- const ty_id = self.typeId(ty_ref);
-
- const Lit = spec.LiteralContextDependentNumber;
- const literal = switch (ty.intSignedness()) {
- .signed => switch (ty.intFloatBits()) {
- 1...32 => Lit{ .int32 = @intCast(i32, value) },
- 33...64 => Lit{ .int64 = @intCast(i64, value) },
- else => unreachable, // TODO: composite integer literals
- },
- .unsigned => switch (ty.intFloatBits()) {
- 1...32 => Lit{ .uint32 = @intCast(u32, value) },
- 33...64 => Lit{ .uint64 = @intCast(u64, value) },
- else => unreachable,
- },
+ // else => self.todo("implement arithmeticTypeInfo for {}", .{ty.fmt(self.module)}),
+ else => unreachable,
};
-
- try self.spv.emitConstant(ty_id, result_id, literal);
- }
-
- fn constInt(self: *DeclGen, ty_ref: SpvType.Ref, value: anytype) !IdRef {
- const result_id = self.spv.allocId();
- try self.genConstInt(ty_ref, result_id, value);
- return result_id;
- }
-
- fn constUndef(self: *DeclGen, ty_ref: SpvType.Ref) !IdRef {
- const result_id = self.spv.allocId();
- try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpUndef, .{
- .id_result_type = self.typeId(ty_ref),
- .id_result = result_id,
- });
- return result_id;
- }
-
- fn constNull(self: *DeclGen, ty_ref: SpvType.Ref) !IdRef {
- const result_id = self.spv.allocId();
- try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpConstantNull, .{
- .id_result_type = self.typeId(ty_ref),
- .id_result = result_id,
- });
- return result_id;
}
+ /// Emits a bool constant in a particular representation.
fn constBool(self: *DeclGen, value: bool, repr: Repr) !IdRef {
switch (repr) {
.indirect => {
const int_ty_ref = try self.intType(.unsigned, 1);
- return self.constInt(int_ty_ref, @boolToInt(value));
+ return self.spv.constInt(int_ty_ref, @boolToInt(value));
},
.direct => {
const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- const result_id = self.spv.allocId();
- const operands = .{ .id_result_type = self.typeId(bool_ty_ref), .id_result = result_id };
- if (value) {
- try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpConstantTrue, operands);
- } else {
- try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpConstantFalse, operands);
- }
- return result_id;
+ return self.spv.constBool(bool_ty_ref, value);
},
}
}
+ /// Construct a struct at runtime.
+ /// result_ty_ref must be a struct type.
+ fn constructStruct(self: *DeclGen, result_ty_ref: CacheRef, constituents: []const IdRef) !IdRef {
+ // The Khronos LLVM-SPIRV translator crashes because it cannot construct structs whose
+ // operands are not constant.
+ // See https://github.com/KhronosGroup/SPIRV-LLVM-Translator/issues/1349
+ // For now, just initialize the struct by setting the fields manually...
+ // TODO: Make this OpCompositeConstruct when we can
+ const ptr_composite_id = try self.alloc(result_ty_ref, null);
+ // Note: using 32-bit ints here because usize crashes the translator as well
+ const index_ty_ref = try self.intType(.unsigned, 32);
+
+ const spv_composite_ty = self.spv.cache.lookup(result_ty_ref).struct_type;
+ const member_types = spv_composite_ty.member_types;
+
+ for (constituents, member_types, 0..) |constituent_id, member_ty_ref, index| {
+ const index_id = try self.spv.constInt(index_ty_ref, index);
+ const ptr_member_ty_ref = try self.spv.ptrType(member_ty_ref, .Generic);
+ const ptr_id = try self.accessChain(ptr_member_ty_ref, ptr_composite_id, &.{index_id});
+ try self.func.body.emit(self.spv.gpa, .OpStore, .{
+ .pointer = ptr_id,
+ .object = constituent_id,
+ });
+ }
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpLoad, .{
+ .id_result_type = self.typeId(result_ty_ref),
+ .id_result = result_id,
+ .pointer = ptr_composite_id,
+ });
+ return result_id;
+ }
+
const IndirectConstantLowering = struct {
const undef = 0xAA;
dg: *DeclGen,
/// Cached reference of the u32 type.
- u32_ty_ref: SpvType.Ref,
- /// Cached type id of the u32 type.
- u32_ty_id: IdRef,
+ u32_ty_ref: CacheRef,
/// The members of the resulting structure type
- members: std.ArrayList(SpvType.Payload.Struct.Member),
+ members: std.ArrayList(CacheRef),
/// The initializers of each of the members.
initializers: std.ArrayList(IdRef),
/// The current size of the structure. Includes
@@ -481,19 +467,17 @@ pub const DeclGen = struct {
}
const word = @bitCast(Word, self.partial_word.buffer);
- const result_id = self.dg.spv.allocId();
- // TODO: Integrate with caching mechanism
- try self.dg.spv.emitConstant(self.u32_ty_id, result_id, .{ .uint32 = word });
- try self.members.append(.{ .ty = self.u32_ty_ref });
+ const result_id = try self.dg.spv.constInt(self.u32_ty_ref, word);
+ try self.members.append(self.u32_ty_ref);
try self.initializers.append(result_id);
self.partial_word.len = 0;
- self.size = std.mem.alignForwardGeneric(u32, self.size, @sizeOf(Word));
+ self.size = std.mem.alignForward(u32, self.size, @sizeOf(Word));
}
/// Fill the buffer with undefined values until the size is aligned to `align`.
fn fillToAlign(self: *@This(), alignment: u32) !void {
- const target_size = std.mem.alignForwardGeneric(u32, self.size, alignment);
+ const target_size = std.mem.alignForward(u32, self.size, alignment);
try self.addUndef(target_size - self.size);
}
@@ -520,25 +504,21 @@ pub const DeclGen = struct {
}
}
- fn addPtr(self: *@This(), ptr_ty_ref: SpvType.Ref, ptr_id: IdRef) !void {
+ fn addPtr(self: *@This(), ptr_ty_ref: CacheRef, ptr_id: IdRef) !void {
// TODO: Double check pointer sizes here.
// shared pointers might be u32...
const target = self.dg.getTarget();
- const width = @divExact(target.cpu.arch.ptrBitWidth(), 8);
+ const width = @divExact(target.ptrBitWidth(), 8);
if (self.size % width != 0) {
return self.dg.todo("misaligned pointer constants", .{});
}
- try self.members.append(.{ .ty = ptr_ty_ref });
+ try self.members.append(ptr_ty_ref);
try self.initializers.append(ptr_id);
self.size += width;
}
- fn addNullPtr(self: *@This(), ptr_ty_ref: SpvType.Ref) !void {
- const result_id = self.dg.spv.allocId();
- try self.dg.spv.sections.types_globals_constants.emit(self.dg.spv.gpa, .OpConstantNull, .{
- .id_result_type = self.dg.typeId(ptr_ty_ref),
- .id_result = result_id,
- });
+ fn addNullPtr(self: *@This(), ptr_ty_ref: CacheRef) !void {
+ const result_id = try self.dg.spv.constNull(ptr_ty_ref);
try self.addPtr(ptr_ty_ref, result_id);
}
@@ -556,20 +536,44 @@ pub const DeclGen = struct {
}
fn addInt(self: *@This(), ty: Type, val: Value) !void {
- const target = self.dg.getTarget();
- const int_info = ty.intInfo(target);
+ const mod = self.dg.module;
+ const int_info = ty.intInfo(mod);
const int_bits = switch (int_info.signedness) {
- .signed => @bitCast(u64, val.toSignedInt(target)),
- .unsigned => val.toUnsignedInt(target),
+ .signed => @bitCast(u64, val.toSignedInt(mod)),
+ .unsigned => val.toUnsignedInt(mod),
};
// TODO: Swap endianness if the compiler is big endian.
- const len = ty.abiSize(target);
+ const len = ty.abiSize(mod);
try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]);
}
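+ /// Lower a float constant by appending its IEEE-754 bit pattern, byte for byte.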
+ fn addFloat(self: *@This(), ty: Type, val: Value) !void {
+ const mod = self.dg.module;
+ const target = self.dg.getTarget();
+ const len = ty.abiSize(mod);
+
+ // TODO: Swap endianness if the compiler is big endian.
+ switch (ty.floatBits(target)) {
+ 16 => {
+ const float_bits = val.toFloat(f16, mod);
+ try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+ },
+ 32 => {
+ const float_bits = val.toFloat(f32, mod);
+ try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+ },
+ 64 => {
+ const float_bits = val.toFloat(f64, mod);
+ try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+ },
+ else => unreachable,
+ }
+ }
+
fn addDeclRef(self: *@This(), ty: Type, decl_index: Decl.Index) !void {
const dg = self.dg;
+ const mod = dg.module;
const ty_ref = try self.dg.resolveType(ty, .indirect);
const ty_id = dg.typeId(ty_ref);
@@ -577,19 +581,18 @@ pub const DeclGen = struct {
const decl = dg.module.declPtr(decl_index);
const spv_decl_index = try dg.resolveDecl(decl_index);
- switch (decl.val.tag()) {
- .function => {
+ switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+ .func => {
// TODO: Properly lower function pointers. For now we are going to hack around it and
// just generate an empty pointer. Function pointers are represented by usize for now,
// though.
- try self.addInt(Type.usize, Value.initTag(.zero));
+ try self.addInt(Type.usize, Value.zero_usize);
// TODO: Add dependency
return;
},
- .extern_fn => unreachable, // TODO
+ .extern_func => unreachable, // TODO
else => {
const result_id = dg.spv.allocId();
- log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name });
try self.decl_deps.put(spv_decl_index, {});
@@ -607,112 +610,122 @@ pub const DeclGen = struct {
}
}
- fn lower(self: *@This(), ty: Type, val: Value) !void {
- const target = self.dg.getTarget();
+ fn lower(self: *@This(), ty: Type, arg_val: Value) !void {
const dg = self.dg;
+ const mod = dg.module;
- if (val.isUndef()) {
- const size = ty.abiSize(target);
+ var val = arg_val;
+ switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ .runtime_value => |rt| val = rt.val.toValue(),
+ else => {},
+ }
+
+ if (val.isUndefDeep(mod)) {
+ const size = ty.abiSize(mod);
return try self.addUndef(size);
}
- switch (ty.zigTypeTag()) {
- .Int => try self.addInt(ty, val),
- .Bool => try self.addConstBool(val.toBool()),
- .Array => switch (val.tag()) {
- .aggregate => {
- const elem_vals = val.castTag(.aggregate).?.data;
- const elem_ty = ty.elemType();
- const len = @intCast(u32, ty.arrayLenIncludingSentinel()); // TODO: limit spir-v to 32 bit arrays in a more elegant way.
- for (elem_vals[0..len]) |elem_val| {
- try self.lower(elem_ty, elem_val);
- }
- },
- .repeated => {
- const elem_val = val.castTag(.repeated).?.data;
- const elem_ty = ty.elemType();
- const len = @intCast(u32, ty.arrayLen());
- for (0..len) |_| {
- try self.lower(elem_ty, elem_val);
- }
- if (ty.sentinel()) |sentinel| {
- try self.lower(elem_ty, sentinel);
- }
- },
- .str_lit => {
- const str_lit = val.castTag(.str_lit).?.data;
- const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- try self.addBytes(bytes);
- if (ty.sentinel()) |sentinel| {
- try self.addByte(@intCast(u8, sentinel.toUnsignedInt(target)));
- }
- },
- .bytes => {
- const bytes = val.castTag(.bytes).?.data;
- try self.addBytes(bytes);
- },
- else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}),
+ switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => try self.addConstBool(val.toBool()),
},
- .Pointer => switch (val.tag()) {
- .decl_ref_mut => {
- const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
- try self.addDeclRef(ty, decl_index);
- },
- .decl_ref => {
- const decl_index = val.castTag(.decl_ref).?.data;
- try self.addDeclRef(ty, decl_index);
- },
- .slice => {
- const slice = val.castTag(.slice).?.data;
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ .empty_enum_value,
+ => unreachable, // non-runtime values
+ .int => try self.addInt(ty, val),
+ .err => |err| {
+ const int = try mod.getErrorValue(err.name);
+ try self.addConstInt(u16, @intCast(u16, int));
+ },
+ .error_union => |error_union| {
+ const payload_ty = ty.errorUnionPayload(mod);
+ const is_pl = val.errorUnionIsPayload(mod);
+ const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0);
- var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = ty.slicePtrFieldType(&buf);
+ const eu_layout = dg.errorUnionLayout(payload_ty);
+ if (!eu_layout.payload_has_bits) {
+ return try self.lower(Type.anyerror, error_val);
+ }
- try self.lower(ptr_ty, slice.ptr);
- try self.addInt(Type.usize, slice.len);
- },
- else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}),
- },
- .Struct => {
- if (ty.isSimpleTupleOrAnonStruct()) {
- unreachable; // TODO
+ const payload_size = payload_ty.abiSize(mod);
+ const error_size = Type.anyerror.abiSize(mod);
+ const ty_size = ty.abiSize(mod);
+ const padding = ty_size - payload_size - error_size;
+
+ const payload_val = switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ .payload => |payload| payload,
+ }.toValue();
+
+ if (eu_layout.error_first) {
+ try self.lower(Type.anyerror, error_val);
+ try self.lower(payload_ty, payload_val);
} else {
- const struct_ty = ty.castTag(.@"struct").?.data;
+ try self.lower(payload_ty, payload_val);
+ try self.lower(Type.anyerror, error_val);
+ }
- if (struct_ty.layout == .Packed) {
- return dg.todo("packed struct constants", .{});
- }
+ try self.addUndef(padding);
+ },
+ .enum_tag => {
+ const int_val = try val.enumToInt(ty, mod);
- const struct_begin = self.size;
- const field_vals = val.castTag(.aggregate).?.data;
- for (struct_ty.fields.values(), 0..) |field, i| {
- if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
- try self.lower(field.ty, field_vals[i]);
+ const int_ty = ty.intTagType(mod);
- // Add padding if required.
- // TODO: Add to type generation as well?
- const unpadded_field_end = self.size - struct_begin;
- const padded_field_end = ty.structFieldOffset(i + 1, target);
- const padding = padded_field_end - unpadded_field_end;
- try self.addUndef(padding);
- }
+ try self.lower(int_ty, int_val);
+ },
+ .float => try self.addFloat(ty, val),
+ .ptr => |ptr| {
+ switch (ptr.addr) {
+ .decl => |decl| try self.addDeclRef(ty, decl),
+ .mut_decl => |mut_decl| try self.addDeclRef(ty, mut_decl.decl),
+ else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}),
+ }
+ if (ptr.len != .none) {
+ try self.addInt(Type.usize, ptr.len.toValue());
}
},
- .Optional => {
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&opt_buf);
- const has_payload = !val.isNull();
- const abi_size = ty.abiSize(target);
-
- if (!payload_ty.hasRuntimeBits()) {
- try self.addConstBool(has_payload);
+ .opt => {
+ const payload_ty = ty.optionalChild(mod);
+ const payload_val = val.optionalValue(mod);
+ const abi_size = ty.abiSize(mod);
+
+ if (!payload_ty.hasRuntimeBits(mod)) {
+ try self.addConstBool(payload_val != null);
return;
- } else if (ty.optionalReprIsPayload()) {
+ } else if (ty.optionalReprIsPayload(mod)) {
// Optional representation is a nullable pointer or slice.
- if (val.castTag(.opt_payload)) |payload| {
- try self.lower(payload_ty, payload.data);
- } else if (has_payload) {
- try self.lower(payload_ty, val);
+ if (payload_val) |pl_val| {
+ try self.lower(payload_ty, pl_val);
} else {
const ptr_ty_ref = try dg.resolveType(ty, .indirect);
try self.addNullPtr(ptr_ty_ref);
@@ -725,102 +738,98 @@ pub const DeclGen = struct {
// Subtract 1 for @sizeOf(bool).
// TODO: Make this not hardcoded.
- const payload_size = payload_ty.abiSize(target);
+ const payload_size = payload_ty.abiSize(mod);
const padding = abi_size - payload_size - 1;
- if (val.castTag(.opt_payload)) |payload| {
- try self.lower(payload_ty, payload.data);
+ if (payload_val) |pl_val| {
+ try self.lower(payload_ty, pl_val);
} else {
try self.addUndef(payload_size);
}
- try self.addConstBool(has_payload);
+ try self.addConstBool(payload_val != null);
try self.addUndef(padding);
},
- .Enum => {
- var int_val_buffer: Value.Payload.U64 = undefined;
- const int_val = val.enumToInt(ty, &int_val_buffer);
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .array_type => |array_type| {
+ const elem_ty = array_type.child.toType();
+ switch (aggregate.storage) {
+ .bytes => |bytes| try self.addBytes(bytes),
+ .elems, .repeated_elem => {
+ for (0..array_type.len) |i| {
+ try self.lower(elem_ty, switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(),
+ .repeated_elem => |elem_val| elem_val.toValue(),
+ });
+ }
+ },
+ }
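+ // The sentinel, when present, is part of the array's in-memory representation, so lower it too.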
+ if (array_type.sentinel != .none) {
+ try self.lower(elem_ty, array_type.sentinel.toValue());
+ }
+ },
+ .vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}),
+ .struct_type => {
+ const struct_ty = mod.typeToStruct(ty).?;
- var int_ty_buffer: Type.Payload.Bits = undefined;
- const int_ty = ty.intTagType(&int_ty_buffer);
+ if (struct_ty.layout == .Packed) {
+ return dg.todo("packed struct constants", .{});
+ }
- try self.lower(int_ty, int_val);
+ const struct_begin = self.size;
+ for (struct_ty.fields.values(), 0..) |field, i| {
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
+ try self.lower(field.ty, switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[i].toValue(),
+ .repeated_elem => |elem| elem.toValue(),
+ });
+
+ // Add padding if required.
+ // TODO: Add to type generation as well?
+ const unpadded_field_end = self.size - struct_begin;
+ const padded_field_end = ty.structFieldOffset(i + 1, mod);
+ const padding = padded_field_end - unpadded_field_end;
+ try self.addUndef(padding);
+ }
+ },
+ .anon_struct_type => unreachable, // TODO
+ else => unreachable,
},
- .Union => {
- const tag_and_val = val.castTag(.@"union").?.data;
- const layout = ty.unionGetLayout(target);
+ .un => |un| {
+ const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
- return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
+ return try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
}
- const union_ty = ty.cast(Type.Payload.Union).?.data;
+ const union_ty = mod.typeToUnion(ty).?;
if (union_ty.layout == .Packed) {
return dg.todo("packed union constants", .{});
}
- const active_field = ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?;
+ const active_field = ty.unionTagFieldIndex(un.tag.toValue(), dg.module).?;
const active_field_ty = union_ty.fields.values()[active_field].ty;
const has_tag = layout.tag_size != 0;
const tag_first = layout.tag_align >= layout.payload_align;
if (has_tag and tag_first) {
- try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
+ try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
}
- const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: {
- try self.lower(active_field_ty, tag_and_val.val);
- break :blk active_field_ty.abiSize(target);
+ const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
+ try self.lower(active_field_ty, un.val.toValue());
+ break :blk active_field_ty.abiSize(mod);
} else 0;
const payload_padding_len = layout.payload_size - active_field_size;
try self.addUndef(payload_padding_len);
if (has_tag and !tag_first) {
- try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
+ try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
}
try self.addUndef(layout.padding);
},
- .ErrorSet => switch (val.tag()) {
- .@"error" => {
- const err_name = val.castTag(.@"error").?.data.name;
- const kv = try dg.module.getErrorValue(err_name);
- try self.addConstInt(u16, @intCast(u16, kv.value));
- },
- .zero => {
- // Unactivated error set.
- try self.addConstInt(u16, 0);
- },
- else => unreachable,
- },
- .ErrorUnion => {
- const payload_ty = ty.errorUnionPayload();
- const is_pl = val.errorUnionIsPayload();
- const error_val = if (!is_pl) val else Value.initTag(.zero);
-
- const eu_layout = dg.errorUnionLayout(payload_ty);
- if (!eu_layout.payload_has_bits) {
- return try self.lower(Type.anyerror, error_val);
- }
-
- const payload_size = payload_ty.abiSize(target);
- const error_size = Type.anyerror.abiAlignment(target);
- const ty_size = ty.abiSize(target);
- const padding = ty_size - payload_size - error_size;
- const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef);
-
- if (eu_layout.error_first) {
- try self.lower(Type.anyerror, error_val);
- try self.lower(payload_ty, payload_val);
- } else {
- try self.lower(payload_ty, payload_val);
- try self.lower(Type.anyerror, error_val);
- }
-
- try self.addUndef(padding);
- },
- else => |tag| return dg.todo("indirect constant of type {s}", .{@tagName(tag)}),
+ .memoized_call => unreachable,
}
}
};
@@ -869,12 +878,12 @@ pub const DeclGen = struct {
const section = &self.spv.globals.section;
const ty_ref = try self.resolveType(ty, .indirect);
- const ptr_ty_ref = try self.spv.ptrType(ty_ref, storage_class, 0);
+ const ptr_ty_ref = try self.spv.ptrType(ty_ref, storage_class);
// const target = self.getTarget();
// TODO: Fix the resulting global linking for these paths.
- // if (val.isUndef()) {
+ // if (val.isUndef(mod)) {
// // Special case: the entire value is undefined. In this case, we can just
// // generate an OpVariable with no initializer.
// return try section.emit(self.spv.gpa, .OpVariable, .{
@@ -882,7 +891,7 @@ pub const DeclGen = struct {
// .id_result = result_id,
// .storage_class = storage_class,
// });
- // } else if (ty.abiSize(target) == 0) {
+ // } else if (ty.abiSize(mod) == 0) {
// // Special case: if the type has no size, then return an undefined pointer.
// return try section.emit(self.spv.gpa, .OpUndef, .{
// .id_result_type = self.typeId(ptr_ty_ref),
@@ -897,8 +906,7 @@ pub const DeclGen = struct {
var icl = IndirectConstantLowering{
.dg = self,
.u32_ty_ref = u32_ty_ref,
- .u32_ty_id = self.typeId(u32_ty_ref),
- .members = std.ArrayList(SpvType.Payload.Struct.Member).init(self.gpa),
+ .members = std.ArrayList(CacheRef).init(self.gpa),
.initializers = std.ArrayList(IdRef).init(self.gpa),
.decl_deps = std.AutoArrayHashMap(SpvModule.Decl.Index, void).init(self.gpa),
};
@@ -910,8 +918,10 @@ pub const DeclGen = struct {
try icl.lower(ty, val);
try icl.flush();
- const constant_struct_ty_ref = try self.spv.simpleStructType(icl.members.items);
- const ptr_constant_struct_ty_ref = try self.spv.ptrType(constant_struct_ty_ref, storage_class, 0);
+ const constant_struct_ty_ref = try self.spv.resolve(.{ .struct_type = .{
+ .member_types = icl.members.items,
+ } });
+ const ptr_constant_struct_ty_ref = try self.spv.ptrType(constant_struct_ty_ref, storage_class);
const constant_struct_id = self.spv.allocId();
try section.emit(self.spv.gpa, .OpSpecConstantComposite, .{
@@ -945,7 +955,7 @@ pub const DeclGen = struct {
});
if (cast_to_generic) {
- const generic_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic, 0);
+ const generic_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic);
try section.emitSpecConstantOp(self.spv.gpa, .OpPtrCastToGeneric, .{
.id_result_type = self.typeId(generic_ptr_ty_ref),
.id_result = result_id,
@@ -962,43 +972,34 @@ pub const DeclGen = struct {
/// the constant is more complicated however, it needs to be lowered to an indirect constant, which
/// is then loaded using OpLoad. Such values are loaded into the UniformConstant storage class by default.
/// This function should only be called during function code generation.
- fn constant(self: *DeclGen, ty: Type, val: Value) !IdRef {
- const target = self.getTarget();
- const section = &self.spv.sections.types_globals_constants;
- const result_ty_ref = try self.resolveType(ty, .direct);
- const result_ty_id = self.typeId(result_ty_ref);
- const result_id = self.spv.allocId();
+ fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
+ const mod = self.module;
+ const result_ty_ref = try self.resolveType(ty, repr);
- if (val.isUndef()) {
- try section.emit(self.spv.gpa, .OpUndef, .{
- .id_result_type = result_ty_id,
- .id_result = result_id,
- });
- return result_id;
+ log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) });
+
+ if (val.isUndef(mod)) {
+ return self.spv.constUndef(result_ty_ref);
}
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Int => {
- if (ty.isSignedInt()) {
- try self.genConstInt(result_ty_ref, result_id, val.toSignedInt(target));
+ if (ty.isSignedInt(mod)) {
+ return try self.spv.constInt(result_ty_ref, val.toSignedInt(mod));
} else {
- try self.genConstInt(result_ty_ref, result_id, val.toUnsignedInt(target));
+ return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod));
}
},
.Bool => {
- const operands = .{ .id_result_type = result_ty_id, .id_result = result_id };
- if (val.toBool()) {
- try section.emit(self.spv.gpa, .OpConstantTrue, operands);
- } else {
- try section.emit(self.spv.gpa, .OpConstantFalse, operands);
- }
+ const result_id = self.spv.allocId();
+ const section = &self.spv.sections.types_globals_constants;
+ const operands = .{ .id_result_type = self.typeId(result_ty_ref), .id_result = result_id };
+ if (val.toBool()) {
+ try section.emit(self.spv.gpa, .OpConstantTrue, operands);
+ } else {
+ try section.emit(self.spv.gpa, .OpConstantFalse, operands);
+ }
+ return result_id;
},
// TODO: We can handle most pointers here (decl refs etc), because now they emit an extra
// OpVariable that is not really required.
else => {
// The value cannot be generated directly, so generate it as an indirect constant,
// and then perform an OpLoad.
- const alignment = ty.abiAlignment(target);
+ const result_id = self.spv.allocId();
+ const alignment = ty.abiAlignment(mod);
const spv_decl_index = try self.spv.allocDecl(.global);
try self.lowerIndirectConstant(
@@ -1013,42 +1014,43 @@ pub const DeclGen = struct {
try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
- .id_result_type = result_ty_id,
+ .id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
.pointer = self.spv.declPtr(spv_decl_index).result_id,
});
// TODO: Convert bools? This logic should hook into `load`. It should be a dead
// path though considering .Bool is handled above.
+ return result_id;
},
}
-
- return result_id;
}
/// Turn a Zig type into a SPIR-V Type, and return its type result-id.
fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType {
const type_ref = try self.resolveType(ty, .direct);
- return self.typeId(type_ref);
+ return self.spv.resultId(type_ref);
}
- fn typeId(self: *DeclGen, ty_ref: SpvType.Ref) IdRef {
- return self.spv.typeId(ty_ref);
+ fn typeId(self: *DeclGen, ty_ref: CacheRef) IdRef {
+ return self.spv.resultId(ty_ref);
}
/// Create an integer type suitable for storing at least 'bits' bits.
- fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !SpvType.Ref {
+ /// The integer type returned by this function is the type used to perform actual operations
+ /// on (as well as to store) a Zig integer of a particular number of bits. To create
+ /// a type with an exact size, use SpvModule.intType.
+ fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
const backing_bits = self.backingIntBits(bits) orelse {
// TODO: Integers too big for any native type are represented as "composite integers":
// An array of largestSupportedIntBits.
return self.todo("Implement {s} composite int type of {} bits", .{ @tagName(signedness), bits });
};
-
- return try self.spv.resolveType(try SpvType.int(self.spv.arena, signedness, backing_bits));
+ return self.spv.intType(signedness, backing_bits);
}
/// Create an integer type that represents 'usize'.
- fn sizeType(self: *DeclGen) !SpvType.Ref {
- return try self.intType(.unsigned, self.getTarget().cpu.arch.ptrBitWidth());
+ fn sizeType(self: *DeclGen) !CacheRef {
+ return try self.intType(.unsigned, self.getTarget().ptrBitWidth());
}
/// Generate a union type, optionally with a known field. If the tag alignment is greater
@@ -1073,79 +1075,85 @@ pub const DeclGen = struct {
/// If any of the fields' size is 0, it will be omitted.
/// NOTE: When the active field is set to something other than the most aligned field, the
/// resulting struct will be *underaligned*.
- fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !SpvType.Ref {
- const target = self.getTarget();
- const layout = ty.unionGetLayout(target);
- const union_ty = ty.cast(Type.Payload.Union).?.data;
+ fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef {
+ const mod = self.module;
+ const layout = ty.unionGetLayout(mod);
+ const union_ty = mod.typeToUnion(ty).?;
if (union_ty.layout == .Packed) {
return self.todo("packed union types", .{});
}
- const tag_ty_ref = try self.resolveType(union_ty.tag_ty, .indirect);
if (layout.payload_size == 0) {
// No payload, so represent this as just the tag type.
- return tag_ty_ref;
+ return try self.resolveType(union_ty.tag_ty, .indirect);
}
- var members = std.BoundedArray(SpvType.Payload.Struct.Member, 4){};
+ var member_types = std.BoundedArray(CacheRef, 4){};
+ var member_names = std.BoundedArray(CacheString, 4){};
const has_tag = layout.tag_size != 0;
const tag_first = layout.tag_align >= layout.payload_align;
- const tag_member = .{ .name = "tag", .ty = tag_ty_ref };
const u8_ty_ref = try self.intType(.unsigned, 8); // TODO: What if Int8Type is not enabled?
if (has_tag and tag_first) {
- members.appendAssumeCapacity(tag_member);
+ const tag_ty_ref = try self.resolveType(union_ty.tag_ty, .indirect);
+ member_types.appendAssumeCapacity(tag_ty_ref);
+ member_names.appendAssumeCapacity(try self.spv.resolveString("tag"));
}
const active_field = maybe_active_field orelse layout.most_aligned_field;
const active_field_ty = union_ty.fields.values()[active_field].ty;
- const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: {
+ const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect);
- members.appendAssumeCapacity(.{ .name = "payload", .ty = active_payload_ty_ref });
- break :blk active_field_ty.abiSize(target);
+ member_types.appendAssumeCapacity(active_payload_ty_ref);
+ member_names.appendAssumeCapacity(try self.spv.resolveString("payload"));
+ break :blk active_field_ty.abiSize(mod);
} else 0;
const payload_padding_len = layout.payload_size - active_field_size;
if (payload_padding_len != 0) {
const payload_padding_ty_ref = try self.spv.arrayType(@intCast(u32, payload_padding_len), u8_ty_ref);
- members.appendAssumeCapacity(.{ .name = "padding_payload", .ty = payload_padding_ty_ref });
+ member_types.appendAssumeCapacity(payload_padding_ty_ref);
+ member_names.appendAssumeCapacity(try self.spv.resolveString("payload_padding"));
}
if (has_tag and !tag_first) {
- members.appendAssumeCapacity(tag_member);
+ const tag_ty_ref = try self.resolveType(union_ty.tag_ty, .indirect);
+ member_types.appendAssumeCapacity(tag_ty_ref);
+ member_names.appendAssumeCapacity(try self.spv.resolveString("tag"));
}
if (layout.padding != 0) {
const padding_ty_ref = try self.spv.arrayType(layout.padding, u8_ty_ref);
- members.appendAssumeCapacity(.{ .name = "padding", .ty = padding_ty_ref });
+ member_types.appendAssumeCapacity(padding_ty_ref);
+ member_names.appendAssumeCapacity(try self.spv.resolveString("padding"));
}
- return try self.spv.simpleStructType(members.slice());
+ return try self.spv.resolve(.{ .struct_type = .{
+ .member_types = member_types.slice(),
+ .member_names = member_names.slice(),
+ } });
}
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
- fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!SpvType.Ref {
+ fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
+ const mod = self.module;
log.debug("resolveType: ty = {}", .{ty.fmt(self.module)});
const target = self.getTarget();
- switch (ty.zigTypeTag()) {
- .Void, .NoReturn => return try self.spv.resolveType(SpvType.initTag(.void)),
+ switch (ty.zigTypeTag(mod)) {
+ .Void, .NoReturn => return try self.spv.resolve(.void_type),
.Bool => switch (repr) {
- .direct => return try self.spv.resolveType(SpvType.initTag(.bool)),
- // SPIR-V booleans are opaque, which is fine for operations, but they cant be stored.
- // This function returns the *stored* type, for values directly we convert this into a bool when
- // it is loaded, and convert it back to this type when stored.
+ .direct => return try self.spv.resolve(.bool_type),
.indirect => return try self.intType(.unsigned, 1),
},
.Int => {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
- var buffer: Type.Payload.Bits = undefined;
- const tag_ty = ty.intTagType(&buffer);
+ const tag_ty = ty.intTagType(mod);
return self.resolveType(tag_ty, repr);
},
.Float => {
@@ -1164,34 +1172,34 @@ pub const DeclGen = struct {
return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}
- return try self.spv.resolveType(SpvType.float(bits));
+ return try self.spv.resolve(.{ .float_type = .{ .bits = bits } });
},
.Array => {
- const elem_ty = ty.childType();
- const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
- const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse {
- return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()});
+ const elem_ty = ty.childType(mod);
+ const elem_ty_ref = try self.resolveType(elem_ty, .direct);
+ const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
+ return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
};
- return try self.spv.arrayType(total_len, elem_ty_ref);
+ return self.spv.arrayType(total_len, elem_ty_ref);
},
.Fn => switch (repr) {
.direct => {
+ const fn_info = mod.typeToFunc(ty).?;
// TODO: Put this somewhere in Sema.zig
- if (ty.fnIsVarArgs())
+ if (fn_info.is_var_args)
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
- // TODO: Parameter passing convention etc.
-
- const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
- for (param_types, 0..) |*param, i| {
- param.* = try self.resolveType(ty.fnParamType(i), .direct);
+ const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen());
+ defer self.gpa.free(param_ty_refs);
+ for (param_ty_refs, 0..) |*param_type, i| {
+ param_type.* = try self.resolveType(ty.fnParamType(i), .direct);
}
+ const return_ty_ref = try self.resolveType(ty.fnReturnType(), .direct);
- const return_type = try self.resolveType(ty.fnReturnType(), .direct);
-
- const payload = try self.spv.arena.create(SpvType.Payload.Function);
- payload.* = .{ .return_type = return_type, .parameters = param_types };
- return try self.spv.resolveType(SpvType.initPayload(&payload.base));
+ return try self.spv.resolve(.{ .function_type = .{
+ .return_type = return_ty_ref,
+ .parameters = param_ty_refs,
+ } });
},
.indirect => {
// TODO: Represent function pointers properly.
@@ -1200,20 +1208,26 @@ pub const DeclGen = struct {
},
},
.Pointer => {
- const ptr_info = ty.ptrInfo().data;
+ const ptr_info = ty.ptrInfo(mod);
const storage_class = spvStorageClass(ptr_info.@"addrspace");
const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect);
- const ptr_ty_ref = try self.spv.ptrType(child_ty_ref, storage_class, 0);
-
+ const ptr_ty_ref = try self.spv.resolve(.{ .ptr_type = .{
+ .storage_class = storage_class,
+ .child_type = child_ty_ref,
+ } });
if (ptr_info.size != .Slice) {
return ptr_ty_ref;
}
- return try self.spv.simpleStructType(&.{
- .{ .ty = ptr_ty_ref, .name = "ptr" },
- .{ .ty = try self.sizeType(), .name = "len" },
- });
+ const size_ty_ref = try self.sizeType();
+ return self.spv.resolve(.{ .struct_type = .{
+ .member_types = &.{ ptr_ty_ref, size_ty_ref },
+ .member_names = &.{
+ try self.spv.resolveString("ptr"),
+ try self.spv.resolveString("len"),
+ },
+ } });
},
.Vector => {
// Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
@@ -1225,89 +1239,90 @@ pub const DeclGen = struct {
// TODO: Properly verify sizes and child type.
- const payload = try self.spv.arena.create(SpvType.Payload.Vector);
- payload.* = .{
- .component_type = try self.resolveType(ty.elemType(), repr),
- .component_count = @intCast(u32, ty.vectorLen()),
- };
- return try self.spv.resolveType(SpvType.initPayload(&payload.base));
+ return try self.spv.resolve(.{ .vector_type = .{
+ .component_type = try self.resolveType(ty.childType(mod), repr),
+ .component_count = @intCast(u32, ty.vectorLen(mod)),
+ } });
},
.Struct => {
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
- const members = try self.spv.arena.alloc(SpvType.Payload.Struct.Member, tuple.types.len);
- var member_index: u32 = 0;
+ const member_types = try self.gpa.alloc(CacheRef, tuple.types.len);
+ defer self.gpa.free(member_types);
+
+ var member_index: usize = 0;
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
- members[member_index] = .{
- .ty = try self.resolveType(field_ty, .indirect),
- };
+ if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
+
+ member_types[member_index] = try self.resolveType(field_ty, .indirect);
member_index += 1;
}
- const payload = try self.spv.arena.create(SpvType.Payload.Struct);
- payload.* = .{
- .members = members[0..member_index],
- };
- return try self.spv.resolveType(SpvType.initPayload(&payload.base));
+
+ return try self.spv.resolve(.{ .struct_type = .{
+ .member_types = member_types[0..member_index],
+ } });
}
- const struct_ty = ty.castTag(.@"struct").?.data;
+ const struct_ty = mod.typeToStruct(ty).?;
if (struct_ty.layout == .Packed) {
- return try self.resolveType(struct_ty.backing_int_ty, .indirect);
+ return try self.resolveType(struct_ty.backing_int_ty, .direct);
}
- const members = try self.spv.arena.alloc(SpvType.Payload.Struct.Member, struct_ty.fields.count());
+ const member_types = try self.gpa.alloc(CacheRef, struct_ty.fields.count());
+ defer self.gpa.free(member_types);
+
+ const member_names = try self.gpa.alloc(CacheString, struct_ty.fields.count());
+ defer self.gpa.free(member_names);
+
var member_index: usize = 0;
- for (struct_ty.fields.values(), 0..) |field, i| {
- if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
+ for (struct_ty.fields.values(), 0..) |field, i| {
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
- members[member_index] = .{
- .ty = try self.resolveType(field.ty, .indirect),
- .name = struct_ty.fields.keys()[i],
- };
+ member_types[member_index] = try self.resolveType(field.ty, .indirect);
+ member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]);
member_index += 1;
}
- const name = try struct_ty.getFullyQualifiedName(self.module);
- defer self.module.gpa.free(name);
+ const name = mod.intern_pool.stringToSlice(try struct_ty.getFullyQualifiedName(self.module));
- const payload = try self.spv.arena.create(SpvType.Payload.Struct);
- payload.* = .{
- .members = members[0..member_index],
- .name = try self.spv.arena.dupe(u8, name),
- };
- return try self.spv.resolveType(SpvType.initPayload(&payload.base));
+ return try self.spv.resolve(.{ .struct_type = .{
+ .name = try self.spv.resolveString(name),
+ .member_types = member_types[0..member_index],
+ .member_names = member_names[0..member_index],
+ } });
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ const payload_ty = ty.optionalChild(mod);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// Just use a bool.
// Note: Always generate the bool with indirect format, to save on some sanity
- // Perform the converison to a direct bool when the field is extracted.
+ // Perform the conversion to a direct bool when the field is extracted.
return try self.resolveType(Type.bool, .indirect);
}
const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
// Optional is actually a pointer or a slice.
return payload_ty_ref;
}
const bool_ty_ref = try self.resolveType(Type.bool, .indirect);
- // its an actual optional
- return try self.spv.simpleStructType(&.{
- .{ .ty = payload_ty_ref, .name = "payload" },
- .{ .ty = bool_ty_ref, .name = "valid" },
- });
+ return try self.spv.resolve(.{ .struct_type = .{
+ .member_types = &.{ payload_ty_ref, bool_ty_ref },
+ .member_names = &.{
+ try self.spv.resolveString("payload"),
+ try self.spv.resolveString("valid"),
+ },
+ } });
},
.Union => return try self.resolveUnionType(ty, null),
.ErrorSet => return try self.intType(.unsigned, 16),
.ErrorUnion => {
- const payload_ty = ty.errorUnionPayload();
+ const payload_ty = ty.errorUnionPayload(mod);
const error_ty_ref = try self.resolveType(Type.anyerror, .indirect);
const eu_layout = self.errorUnionLayout(payload_ty);
@@ -1317,20 +1332,30 @@ pub const DeclGen = struct {
const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
- var members = std.BoundedArray(SpvType.Payload.Struct.Member, 2){};
+ var member_types: [2]CacheRef = undefined;
+ var member_names: [2]CacheString = undefined;
if (eu_layout.error_first) {
// Put the error first
- members.appendAssumeCapacity(.{ .ty = error_ty_ref, .name = "error" });
- members.appendAssumeCapacity(.{ .ty = payload_ty_ref, .name = "payload" });
+ member_types = .{ error_ty_ref, payload_ty_ref };
+ member_names = .{
+ try self.spv.resolveString("error"),
+ try self.spv.resolveString("payload"),
+ };
// TODO: ABI padding?
} else {
// Put the payload first.
- members.appendAssumeCapacity(.{ .ty = payload_ty_ref, .name = "payload" });
- members.appendAssumeCapacity(.{ .ty = error_ty_ref, .name = "error" });
+ member_types = .{ payload_ty_ref, error_ty_ref };
+ member_names = .{
+ try self.spv.resolveString("payload"),
+ try self.spv.resolveString("error"),
+ };
// TODO: ABI padding?
}
- return try self.spv.simpleStructType(members.slice());
+ return try self.spv.resolve(.{ .struct_type = .{
+ .member_types = &member_types,
+ .member_names = &member_names,
+ } });
},
.Null,
@@ -1382,14 +1407,14 @@ pub const DeclGen = struct {
};
fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout {
- const target = self.getTarget();
+ const mod = self.module;
- const error_align = Type.anyerror.abiAlignment(target);
- const payload_align = payload_ty.abiAlignment(target);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ const payload_align = payload_ty.abiAlignment(mod);
const error_first = error_align > payload_align;
return .{
- .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(),
+ .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod),
.error_first = error_first,
};
}
@@ -1414,17 +1439,13 @@ pub const DeclGen = struct {
/// the name of an error in the text executor.
fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
const anyerror_ty_ref = try self.resolveType(Type.anyerror, .direct);
- const ptr_anyerror_ty_ref = try self.spv.ptrType(anyerror_ty_ref, .CrossWorkgroup, 0);
+ const ptr_anyerror_ty_ref = try self.spv.ptrType(anyerror_ty_ref, .CrossWorkgroup);
const void_ty_ref = try self.resolveType(Type.void, .direct);
- const kernel_proto_ty_ref = blk: {
- const proto_payload = try self.spv.arena.create(SpvType.Payload.Function);
- proto_payload.* = .{
- .return_type = void_ty_ref,
- .parameters = try self.spv.arena.dupe(SpvType.Ref, &.{ptr_anyerror_ty_ref}),
- };
- break :blk try self.spv.resolveType(SpvType.initPayload(&proto_payload.base));
- };
+ const kernel_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
+ .return_type = void_ty_ref,
+ .parameters = &.{ptr_anyerror_ty_ref},
+ } });
const test_id = self.spv.declPtr(spv_test_decl_index).result_id;
@@ -1470,28 +1491,28 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
- const decl = self.module.declPtr(self.decl_index);
+ if (true) @panic("TODO: update SPIR-V backend for InternPool changes");
+ const mod = self.module;
+ const decl = mod.declPtr(self.decl_index);
const spv_decl_index = try self.resolveDecl(self.decl_index);
const decl_id = self.spv.declPtr(spv_decl_index).result_id;
- log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name });
- if (decl.val.castTag(.function)) |_| {
- assert(decl.ty.zigTypeTag() == .Fn);
+ if (decl.val.getFunction(mod)) |_| {
+ assert(decl.ty.zigTypeTag(mod) == .Fn);
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
+ .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)),
.id_result = decl_id,
.function_control = .{}, // TODO: We can set inline here if the type requires it.
.function_type = prototype_id,
});
- const params = decl.ty.fnParamLen();
- var i: usize = 0;
+ const fn_info = mod.typeToFunc(decl.ty).?;
- try self.args.ensureUnusedCapacity(self.gpa, params);
- while (i < params) : (i += 1) {
- const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i));
+ try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
+ for (fn_info.param_types) |param_type| {
+ const param_type_id = try self.resolveTypeId(param_type.toType());
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
@@ -1517,8 +1538,7 @@ pub const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
- const fqn = try decl.getFullyQualifiedName(self.module);
- defer self.module.gpa.free(fqn);
+ const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(self.module));
try self.spv.sections.debug_names.emit(self.gpa, .OpName, .{
.target = decl_id,
@@ -1530,12 +1550,12 @@ pub const DeclGen = struct {
try self.generateTestEntryPoint(fqn, spv_decl_index);
}
} else {
- const init_val = if (decl.val.castTag(.variable)) |payload|
- payload.data.init
+ const init_val = if (decl.val.getVariable(mod)) |payload|
+ payload.init.toValue()
else
decl.val;
- if (init_val.tag() == .unreachable_value) {
+ if (init_val.ip_index == .unreachable_value) {
return self.todo("importing extern variables", .{});
}
@@ -1558,15 +1578,29 @@ pub const DeclGen = struct {
}
}
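+ /// Convert a SPIR-V bool into an integer of type `result_ty_ref`: 1 when the condition holds, 0 otherwise.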
+ fn boolToInt(self: *DeclGen, result_ty_ref: CacheRef, condition_id: IdRef) !IdRef {
+ const zero_id = try self.spv.constInt(result_ty_ref, 0);
+ const one_id = try self.spv.constInt(result_ty_ref, 1);
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpSelect, .{
+ .id_result_type = self.typeId(result_ty_ref),
+ .id_result = result_id,
+ .condition = condition_id,
+ .object_1 = one_id,
+ .object_2 = zero_id,
+ });
+ return result_id;
+ }
+
/// Convert representation from indirect (in memory) to direct (in 'register')
/// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
- // const direct_ty_ref = try self.resolveType(ty, .direct);
- return switch (ty.zigTypeTag()) {
+ const mod = self.module;
+ return switch (ty.zigTypeTag(mod)) {
.Bool => blk: {
const direct_bool_ty_ref = try self.resolveType(ty, .direct);
const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
- const zero_id = try self.constInt(indirect_bool_ty_ref, 0);
+ const zero_id = try self.spv.constInt(indirect_bool_ty_ref, 0);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
.id_result_type = self.typeId(direct_bool_ty_ref),
@@ -1583,20 +1617,11 @@ pub const DeclGen = struct {
/// Convert representation from direct (in 'register) to direct (in memory)
/// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
- return switch (ty.zigTypeTag()) {
+ const mod = self.module;
+ return switch (ty.zigTypeTag(mod)) {
.Bool => blk: {
const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
- const zero_id = try self.constInt(indirect_bool_ty_ref, 0);
- const one_id = try self.constInt(indirect_bool_ty_ref, 1);
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpSelect, .{
- .id_result_type = self.typeId(indirect_bool_ty_ref),
- .id_result = result_id,
- .condition = operand_id,
- .object_1 = one_id,
- .object_2 = zero_id,
- });
- break :blk result_id;
+ break :blk self.boolToInt(indirect_bool_ty_ref, operand_id);
},
else => operand_id,
};
@@ -1617,11 +1642,12 @@ pub const DeclGen = struct {
}
fn load(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef) !IdRef {
- const value_ty = ptr_ty.childType();
+ const mod = self.module;
+ const value_ty = ptr_ty.childType(mod);
const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
- .Volatile = ptr_ty.isVolatilePtr(),
+ .Volatile = ptr_ty.isVolatilePtr(mod),
};
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
.id_result_type = self.typeId(indirect_value_ty_ref),
@@ -1633,10 +1659,11 @@ pub const DeclGen = struct {
}
fn store(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, value_id: IdRef) !void {
- const value_ty = ptr_ty.childType();
+ const mod = self.module;
+ const value_ty = ptr_ty.childType(mod);
const indirect_value_id = try self.convertToIndirect(value_ty, value_id);
const access = spec.MemoryAccess.Extended{
- .Volatile = ptr_ty.isVolatilePtr(),
+ .Volatile = ptr_ty.isVolatilePtr(mod),
};
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
@@ -1652,10 +1679,11 @@ pub const DeclGen = struct {
}
fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const mod = self.module;
+ const ip = &mod.intern_pool;
// TODO: remove now-redundant isUnused calls from AIR handler functions
- if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
return;
- }
const air_tags = self.air.instructions.items(.tag);
const maybe_result_id: ?IdRef = switch (air_tags[inst]) {
@@ -1680,6 +1708,9 @@ pub const DeclGen = struct {
.shuffle => try self.airShuffle(inst),
+ .ptr_add => try self.airPtrAdd(inst),
+ .ptr_sub => try self.airPtrSub(inst),
+
.bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
.bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
.xor => try self.airBinOpSimple(inst, .OpBitwiseXor),
@@ -1688,8 +1719,11 @@ pub const DeclGen = struct {
.shl => try self.airShift(inst, .OpShiftLeftLogical),
- .bitcast => try self.airBitcast(inst),
- .intcast, .trunc => try self.airIntcast(inst),
+ .bitcast => try self.airBitCast(inst),
+ .intcast, .trunc => try self.airIntCast(inst),
+ .ptrtoint => try self.airPtrToInt(inst),
+ .int_to_float => try self.airIntToFloat(inst),
+ .float_to_int => try self.airFloatToInt(inst),
.not => try self.airNot(inst),
.slice_ptr => try self.airSliceField(inst, 0),
@@ -1697,7 +1731,9 @@ pub const DeclGen = struct {
.slice_elem_ptr => try self.airSliceElemPtr(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
+ .ptr_elem_val => try self.airPtrElemVal(inst),
+ .get_union_tag => try self.airGetUnionTag(inst),
.struct_field_val => try self.airStructFieldVal(inst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
@@ -1705,12 +1741,12 @@ pub const DeclGen = struct {
.struct_field_ptr_index_2 => try self.airStructFieldPtrIndex(inst, 2),
.struct_field_ptr_index_3 => try self.airStructFieldPtrIndex(inst, 3),
- .cmp_eq => try self.airCmp(inst, .OpFOrdEqual, .OpLogicalEqual, .OpIEqual),
- .cmp_neq => try self.airCmp(inst, .OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual),
- .cmp_gt => try self.airCmp(inst, .OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan),
- .cmp_gte => try self.airCmp(inst, .OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual),
- .cmp_lt => try self.airCmp(inst, .OpFOrdLessThan, .OpSLessThan, .OpULessThan),
- .cmp_lte => try self.airCmp(inst, .OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual),
+ .cmp_eq => try self.airCmp(inst, .eq),
+ .cmp_neq => try self.airCmp(inst, .neq),
+ .cmp_gt => try self.airCmp(inst, .gt),
+ .cmp_gte => try self.airCmp(inst, .gte),
+ .cmp_lt => try self.airCmp(inst, .lt),
+ .cmp_lte => try self.airCmp(inst, .lte),
.arg => self.airArg(),
.alloc => try self.airAlloc(inst),
@@ -1724,8 +1760,6 @@ pub const DeclGen = struct {
.br => return self.airBr(inst),
.breakpoint => return,
.cond_br => return self.airCondBr(inst),
- .constant => unreachable,
- .const_ty => unreachable,
.dbg_stmt => return self.airDbgStmt(inst),
.loop => return self.airLoop(inst),
.ret => return self.airRet(inst),
@@ -1750,10 +1784,12 @@ pub const DeclGen = struct {
.call_never_tail => try self.airCall(inst, .never_tail),
.call_never_inline => try self.airCall(inst, .never_inline),
- .dbg_var_ptr => return,
- .dbg_var_val => return,
- .dbg_block_begin => return,
- .dbg_block_end => return,
+ .dbg_inline_begin => return,
+ .dbg_inline_end => return,
+ .dbg_var_ptr => return,
+ .dbg_var_val => return,
+ .dbg_block_begin => return,
+ .dbg_block_end => return,
// zig fmt: on
else => |tag| return self.todo("implement AIR tag {s}", .{@tagName(tag)}),
@@ -1769,7 +1805,7 @@ pub const DeclGen = struct {
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const result_id = self.spv.allocId();
- const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
+ const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst));
try self.func.body.emit(self.spv.gpa, opcode, .{
.id_result_type = result_type_id,
.id_result = result_id,
@@ -1784,7 +1820,7 @@ pub const DeclGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
- const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
+ const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst));
// the shift and the base must be the same type in SPIR-V, but in Zig the shift is a smaller int.
const shift_id = self.spv.allocId();
@@ -1804,10 +1840,10 @@ pub const DeclGen = struct {
return result_id;
}
- fn maskStrangeInt(self: *DeclGen, ty_ref: SpvType.Ref, value_id: IdRef, bits: u16) !IdRef {
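+ /// Mask off the unused high bits of a "strange" integer that is stored in a wider backing type,
+ /// so that only the low `bits` bits take part in the operation.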
+ fn maskStrangeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, bits: u16) !IdRef {
const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @intCast(u6, bits)) - 1;
const result_id = self.spv.allocId();
- const mask_id = try self.constInt(ty_ref, mask_value);
+ const mask_id = try self.spv.constInt(ty_ref, mask_value);
try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
.id_result_type = self.typeId(ty_ref),
.id_result = result_id,
@@ -1829,15 +1865,15 @@ pub const DeclGen = struct {
if (self.liveness.isUnused(inst)) return null;
// LHS and RHS are guaranteed to have the same type, and AIR guarantees
// the result to be the same as the LHS and RHS, which matches SPIR-V.
- const ty = self.air.typeOfIndex(inst);
+ const ty = self.typeOfIndex(inst);
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
var lhs_id = try self.resolve(bin_op.lhs);
var rhs_id = try self.resolve(bin_op.rhs);
const result_ty_ref = try self.resolveType(ty, .direct);
- assert(self.air.typeOf(bin_op.lhs).eql(ty, self.module));
- assert(self.air.typeOf(bin_op.rhs).eql(ty, self.module));
+ assert(self.typeOf(bin_op.lhs).eql(ty, self.module));
+ assert(self.typeOf(bin_op.rhs).eql(ty, self.module));
// Binary operations are generally applicable to both scalar and vector operations
// in SPIR-V, but int and float versions of operations require different opcodes.
@@ -1893,8 +1929,8 @@ pub const DeclGen = struct {
const lhs = try self.resolve(extra.lhs);
const rhs = try self.resolve(extra.rhs);
- const operand_ty = self.air.typeOf(extra.lhs);
- const result_ty = self.air.typeOfIndex(inst);
+ const operand_ty = self.typeOf(extra.lhs);
+ const result_ty = self.typeOfIndex(inst);
const info = try self.arithmeticTypeInfo(operand_ty);
switch (info.class) {
@@ -1904,76 +1940,96 @@ pub const DeclGen = struct {
.float, .bool => unreachable,
}
- // The operand type must be the same as the result type in SPIR-V.
+ // The operand type must be the same as the result type in SPIR-V, which
+ // is the same as in Zig.
const operand_ty_ref = try self.resolveType(operand_ty, .direct);
const operand_ty_id = self.typeId(operand_ty_ref);
- const op_result_id = blk: {
- // Construct the SPIR-V result type.
- // It is almost the same as the zig one, except that the fields must be the same type
- // and they must be unsigned.
- const overflow_result_ty_ref = try self.spv.simpleStructType(&.{
- .{ .ty = operand_ty_ref, .name = "res" },
- .{ .ty = operand_ty_ref, .name = "ov" },
- });
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpIAddCarry, .{
- .id_result_type = self.typeId(overflow_result_ty_ref),
- .id_result = result_id,
- .operand_1 = lhs,
- .operand_2 = rhs,
- });
- break :blk result_id;
- };
-
- // Now convert the SPIR-V flavor result into a Zig-flavor result.
- // First, extract the two fields.
- const unsigned_result = try self.extractField(operand_ty, op_result_id, 0);
- const overflow = try self.extractField(operand_ty, op_result_id, 1);
-
- // We need to convert the results to the types that Zig expects here.
- // The `result` is the same type except unsigned, so we can just bitcast that.
- // TODO: This can be removed in Kernels as there are only unsigned ints. Maybe for
- // shaders as well?
- const result = try self.bitcast(operand_ty_id, unsigned_result);
-
- // The overflow needs to be converted into whatever is used to represent it in Zig.
- const casted_overflow = blk: {
- const ov_ty = result_ty.tupleFields().types[1];
- const ov_ty_id = try self.resolveTypeId(ov_ty);
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
- .id_result_type = ov_ty_id,
- .id_result = result_id,
- .unsigned_value = overflow,
- });
- break :blk result_id;
- };
+ const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- // TODO: If copying this function for borrow, make sure to convert -1 to 1 as appropriate.
+ const ov_ty = result_ty.tupleFields().types[1];
+ // Note: result is stored in a struct, so indirect representation.
+ const ov_ty_ref = try self.resolveType(ov_ty, .indirect);
- // Finally, construct the Zig type.
- // Layout is result, overflow.
- const result_id = self.spv.allocId();
- const constituents = [_]IdRef{ result, casted_overflow };
- try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
+ // TODO: Operations other than addition.
+ const value_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpIAdd, .{
.id_result_type = operand_ty_id,
- .id_result = result_id,
- .constituents = &constituents,
+ .id_result = value_id,
+ .operand_1 = lhs,
+ .operand_2 = rhs,
+ });
+
+ const overflowed_id = switch (info.signedness) {
+ .unsigned => blk: {
+ // Overflow happened if the result is smaller than either of the operands. It doesn't matter which.
+ const overflowed_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpULessThan, .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = overflowed_id,
+ .operand_1 = value_id,
+ .operand_2 = lhs,
+ });
+ break :blk overflowed_id;
+ },
+ .signed => blk: {
+ // Overflow happened if:
+ // - rhs is negative and value > lhs, or
+ // - rhs is non-negative and value < lhs
+ // Because value == lhs can only happen when rhs == 0 (which never overflows),
+ // this is equivalent to (rhs < 0) != (value < lhs).
+ // Note that signed overflow is also wrapping in spir-v.
+
+ const rhs_lt_zero_id = self.spv.allocId();
+ const zero_id = try self.spv.constInt(operand_ty_ref, 0);
+ try self.func.body.emit(self.spv.gpa, .OpSLessThan, .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = rhs_lt_zero_id,
+ .operand_1 = rhs,
+ .operand_2 = zero_id,
+ });
+
+ const value_lt_lhs_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpSLessThan, .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = value_lt_lhs_id,
+ .operand_1 = value_id,
+ .operand_2 = lhs,
+ });
+
+ const overflowed_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpLogicalNotEqual, .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = overflowed_id,
+ .operand_1 = rhs_lt_zero_id,
+ .operand_2 = value_lt_lhs_id,
+ });
+ break :blk overflowed_id;
+ },
+ };
+
+ // Construct the struct that Zig expects as the result.
+ // The value should already be of the correct type.
+ const ov_id = try self.boolToInt(ov_ty_ref, overflowed_id);
+ const result_ty_ref = try self.resolveType(result_ty, .direct);
+ return try self.constructStruct(result_ty_ref, &.{
+ value_id,
+ ov_id,
});
- return result_id;
}
fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
if (self.liveness.isUnused(inst)) return null;
- const ty = self.air.typeOfIndex(inst);
+ const ty = self.typeOfIndex(inst);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolve(extra.a);
const b = try self.resolve(extra.b);
- const mask = self.air.values[extra.mask];
+ const mask = extra.mask.toValue();
const mask_len = extra.mask_len;
- const a_len = self.air.typeOf(extra.a).vectorLen();
+ const a_len = self.typeOf(extra.a).vectorLen(mod);
const result_id = self.spv.allocId();
const result_type_id = try self.resolveTypeId(ty);
@@ -1987,12 +2043,11 @@ pub const DeclGen = struct {
var i: usize = 0;
while (i < mask_len) : (i += 1) {
- var buf: Value.ElemValueBuffer = undefined;
- const elem = mask.elemValueBuffer(self.module, i, &buf);
- if (elem.isUndef()) {
+ const elem = try mask.elemValue(mod, i);
+ if (elem.isUndef(mod)) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
- const int = elem.toSignedInt(self.getTarget());
+ const int = elem.toSignedInt(mod);
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
self.func.body.writeOperand(spec.LiteralInteger, unsigned);
}
@@ -2000,96 +2055,342 @@ pub const DeclGen = struct {
return result_id;
}
- fn airCmp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) !?IdRef {
- if (self.liveness.isUnused(inst)) return null;
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- var lhs_id = try self.resolve(bin_op.lhs);
- var rhs_id = try self.resolve(bin_op.rhs);
+ /// AccessChain is essentially PtrAccessChain with 0 as the initial argument. The effective
+ /// difference is what the first index applies to: AccessChain indexes into the *dereferenced*
+ /// base pointer (selecting a member or element), whereas PtrAccessChain's extra `element`
+ /// operand first steps the base pointer itself, as if it pointed into an array.
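+ ///
+ /// Illustrative example (ids are hypothetical): for `base` of type `*Foo` where the first
+ /// field of `Foo` is an `i32`, `accessChain(i32_ptr_ty_ref, base, &.{zero_id})` yields a
+ /// pointer to that field, while `ptrAccessChain(foo_ptr_ty_ref, base, offset_id, &.{})`
+ /// behaves like the C expression `base + offset`.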
+ fn accessChain(
+ self: *DeclGen,
+ result_ty_ref: CacheRef,
+ base: IdRef,
+ indexes: []const IdRef,
+ ) !IdRef {
const result_id = self.spv.allocId();
- const result_type_id = try self.resolveTypeId(Type.bool);
- const op_ty = self.air.typeOf(bin_op.lhs);
- assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.module));
+ try self.func.body.emit(self.spv.gpa, .OpInBoundsAccessChain, .{
+ .id_result_type = self.typeId(result_ty_ref),
+ .id_result = result_id,
+ .base = base,
+ .indexes = indexes,
+ });
+ return result_id;
+ }
- // Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
- // but int and float versions of operations require different opcodes.
- const info = try self.arithmeticTypeInfo(op_ty);
+ fn ptrAccessChain(
+ self: *DeclGen,
+ result_ty_ref: CacheRef,
+ base: IdRef,
+ element: IdRef,
+ indexes: []const IdRef,
+ ) !IdRef {
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
+ .id_result_type = self.typeId(result_ty_ref),
+ .id_result = result_id,
+ .base = base,
+ .element = element,
+ .indexes = indexes,
+ });
+ return result_id;
+ }
- const opcode_index: usize = switch (info.class) {
- .composite_integer => {
- return self.todo("binary operations for composite integers", .{});
+ fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
+ const mod = self.module;
+ const result_ty_ref = try self.resolveType(result_ty, .direct);
+
+ switch (ptr_ty.ptrSize(mod)) {
+ .One => {
+ // Pointer to array
+ // TODO: Is this correct?
+ return try self.accessChain(result_ty_ref, ptr_id, &.{offset_id});
},
- .float => 0,
- .bool => 1,
- .strange_integer => blk: {
- const op_ty_ref = try self.resolveType(op_ty, .direct);
- lhs_id = try self.maskStrangeInt(op_ty_ref, lhs_id, info.bits);
- rhs_id = try self.maskStrangeInt(op_ty_ref, rhs_id, info.bits);
- break :blk switch (info.signedness) {
- .signed => @as(usize, 1),
- .unsigned => @as(usize, 2),
- };
+ .C, .Many => {
+ return try self.ptrAccessChain(result_ty_ref, ptr_id, offset_id, &.{});
},
- .integer => switch (info.signedness) {
- .signed => @as(usize, 1),
- .unsigned => @as(usize, 2),
+ .Slice => {
+ // TODO: This is probably incorrect. A slice should be returned here, though this is what the LLVM backend does.
+ const slice_ptr_id = try self.extractField(result_ty, ptr_id, 0);
+ return try self.ptrAccessChain(result_ty_ref, slice_ptr_id, offset_id, &.{});
},
- };
+ }
+ }
- const operands = .{
- .id_result_type = result_type_id,
- .id_result = result_id,
- .operand_1 = lhs_id,
- .operand_2 = rhs_id,
+ fn airPtrAdd(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr_id = try self.resolve(bin_op.lhs);
+ const offset_id = try self.resolve(bin_op.rhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const result_ty = self.typeOfIndex(inst);
+
+ return try self.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id);
+ }
+
+ fn airPtrSub(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr_id = try self.resolve(bin_op.lhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const offset_id = try self.resolve(bin_op.rhs);
+ const offset_ty = self.typeOf(bin_op.rhs);
+ const offset_ty_ref = try self.resolveType(offset_ty, .direct);
+ const result_ty = self.typeOfIndex(inst);
+
+ const negative_offset_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
+ .id_result_type = self.typeId(offset_ty_ref),
+ .id_result = negative_offset_id,
+ .operand = offset_id,
+ });
+ return try self.ptrAdd(result_ty, ptr_ty, ptr_id, negative_offset_id);
+ }
+
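+ /// Lowers a Zig comparison to the SPIR-V opcode appropriate for the operand class:
+ /// OpFOrd* for floats, OpIEqual/OpINotEqual for bools, and the signed or unsigned
+ /// integer variants otherwise. Enums are compared via their integer tag type and
+ /// error sets as u16. Pointers are first converted with OpConvertPtrToU and compared
+ /// as usize, and integers of a non-native bit width are masked before comparison.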
+ fn cmp(
+ self: *DeclGen,
+ comptime op: std.math.CompareOperator,
+ bool_ty_id: IdRef,
+ ty: Type,
+ lhs_id: IdRef,
+ rhs_id: IdRef,
+ ) !IdRef {
+ const mod = self.module;
+ var cmp_lhs_id = lhs_id;
+ var cmp_rhs_id = rhs_id;
+ const opcode: Opcode = opcode: {
+ const op_ty = switch (ty.zigTypeTag(mod)) {
+ .Int, .Bool, .Float => ty,
+ .Enum => ty.intTagType(),
+ .ErrorSet => Type.u16,
+ .Pointer => blk: {
+ // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
+ // currently not implemented in the SPIR-V LLVM translator. Thus, we emit these using
+ // OpConvertPtrToU...
+ cmp_lhs_id = self.spv.allocId();
+ cmp_rhs_id = self.spv.allocId();
+
+ const usize_ty_id = self.typeId(try self.sizeType());
+
+ try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
+ .id_result_type = usize_ty_id,
+ .id_result = cmp_lhs_id,
+ .pointer = lhs_id,
+ });
+
+ try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
+ .id_result_type = usize_ty_id,
+ .id_result = cmp_rhs_id,
+ .pointer = rhs_id,
+ });
+
+ break :blk Type.usize;
+ },
+ .Optional => unreachable, // TODO
+ else => unreachable,
+ };
+
+ const info = try self.arithmeticTypeInfo(op_ty);
+ const signedness = switch (info.class) {
+ .composite_integer => {
+ return self.todo("binary operations for composite integers", .{});
+ },
+ .float => break :opcode switch (op) {
+ .eq => .OpFOrdEqual,
+ .neq => .OpFOrdNotEqual,
+ .lt => .OpFOrdLessThan,
+ .lte => .OpFOrdLessThanEqual,
+ .gt => .OpFOrdGreaterThan,
+ .gte => .OpFOrdGreaterThanEqual,
+ },
+ .bool => break :opcode switch (op) {
+ .eq => .OpIEqual,
+ .neq => .OpINotEqual,
+ else => unreachable,
+ },
+ .strange_integer => sign: {
+ const op_ty_ref = try self.resolveType(op_ty, .direct);
+ // Mask operands before performing comparison.
+ cmp_lhs_id = try self.maskStrangeInt(op_ty_ref, cmp_lhs_id, info.bits);
+ cmp_rhs_id = try self.maskStrangeInt(op_ty_ref, cmp_rhs_id, info.bits);
+ break :sign info.signedness;
+ },
+ .integer => info.signedness,
+ };
+
+ break :opcode switch (signedness) {
+ .unsigned => switch (op) {
+ .eq => .OpIEqual,
+ .neq => .OpINotEqual,
+ .lt => .OpULessThan,
+ .lte => .OpULessThanEqual,
+ .gt => .OpUGreaterThan,
+ .gte => .OpUGreaterThanEqual,
+ },
+ .signed => switch (op) {
+ .eq => .OpIEqual,
+ .neq => .OpINotEqual,
+ .lt => .OpSLessThan,
+ .lte => .OpSLessThanEqual,
+ .gt => .OpSGreaterThan,
+ .gte => .OpSGreaterThanEqual,
+ },
+ };
};
- switch (opcode_index) {
- 0 => try self.func.body.emit(self.spv.gpa, fop, operands),
- 1 => try self.func.body.emit(self.spv.gpa, sop, operands),
- 2 => try self.func.body.emit(self.spv.gpa, uop, operands),
- else => unreachable,
+ const result_id = self.spv.allocId();
+ try self.func.body.emitRaw(self.spv.gpa, opcode, 4);
+ self.func.body.writeOperand(spec.IdResultType, bool_ty_id);
+ self.func.body.writeOperand(spec.IdResult, result_id);
+ self.func.body.writeOperand(spec.IdResultType, cmp_lhs_id);
+ self.func.body.writeOperand(spec.IdResultType, cmp_rhs_id);
+ return result_id;
+ }
+
+ fn airCmp(
+ self: *DeclGen,
+ inst: Air.Inst.Index,
+ comptime op: std.math.CompareOperator,
+ ) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs_id = try self.resolve(bin_op.lhs);
+ const rhs_id = try self.resolve(bin_op.rhs);
+ const bool_ty_id = try self.resolveTypeId(Type.bool);
+ const ty = self.typeOf(bin_op.lhs);
+ assert(ty.eql(self.typeOf(bin_op.rhs), self.module));
+
+ return try self.cmp(op, bool_ty_id, ty, lhs_id, rhs_id);
+ }
+
+ fn bitCast(
+ self: *DeclGen,
+ dst_ty: Type,
+ src_ty: Type,
+ src_id: IdRef,
+ ) !IdRef {
+ const mod = self.module;
+ const dst_ty_ref = try self.resolveType(dst_ty, .direct);
+ const result_id = self.spv.allocId();
+
+ // TODO: Some more cases are missing here
+ // See fn bitCast in llvm.zig
+
+ if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
+ try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
+ .id_result_type = self.typeId(dst_ty_ref),
+ .id_result = result_id,
+ .integer_value = src_id,
+ });
+ } else {
+ try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
+ .id_result_type = self.typeId(dst_ty_ref),
+ .id_result = result_id,
+ .operand = src_id,
+ });
}
+ return result_id;
+ }
+
+ fn airBitCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_id = try self.resolve(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
+ const result_ty = self.typeOfIndex(inst);
+ return try self.bitCast(result_ty, operand_ty, operand_id);
+ }
+
+ fn airIntCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_id = try self.resolve(ty_op.operand);
+ const dest_ty = self.typeOfIndex(inst);
+ const dest_ty_id = try self.resolveTypeId(dest_ty);
+
+ const mod = self.module;
+ const dest_info = dest_ty.intInfo(mod);
+ // TODO: Masking?
+
+ const result_id = self.spv.allocId();
+ switch (dest_info.signedness) {
+ .signed => try self.func.body.emit(self.spv.gpa, .OpSConvert, .{
+ .id_result_type = dest_ty_id,
+ .id_result = result_id,
+ .signed_value = operand_id,
+ }),
+ .unsigned => try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
+ .id_result_type = dest_ty_id,
+ .id_result = result_id,
+ .unsigned_value = operand_id,
+ }),
+ }
return result_id;
}
- fn bitcast(self: *DeclGen, target_type_id: IdResultType, value_id: IdRef) !IdRef {
+ fn airPtrToInt(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const operand_id = try self.resolve(un_op);
+ const result_type_id = try self.resolveTypeId(Type.usize);
+
const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = target_type_id,
+ try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
+ .id_result_type = result_type_id,
.id_result = result_id,
- .operand = value_id,
+ .pointer = operand_id,
});
return result_id;
}
- fn airBitcast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airIntToFloat(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand_ty = self.typeOf(ty_op.operand);
const operand_id = try self.resolve(ty_op.operand);
- const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
- return try self.bitcast(result_type_id, operand_id);
+ const operand_info = try self.arithmeticTypeInfo(operand_ty);
+ const dest_ty = self.typeOfIndex(inst);
+ const dest_ty_id = try self.resolveTypeId(dest_ty);
+
+ const result_id = self.spv.allocId();
+ switch (operand_info.signedness) {
+ .signed => try self.func.body.emit(self.spv.gpa, .OpConvertSToF, .{
+ .id_result_type = dest_ty_id,
+ .id_result = result_id,
+ .signed_value = operand_id,
+ }),
+ .unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertUToF, .{
+ .id_result_type = dest_ty_id,
+ .id_result = result_id,
+ .unsigned_value = operand_id,
+ }),
+ }
+ return result_id;
}
- fn airIntcast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ fn airFloatToInt(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
- const dest_ty = self.air.typeOfIndex(inst);
+ const dest_ty = self.typeOfIndex(inst);
const dest_info = try self.arithmeticTypeInfo(dest_ty);
const dest_ty_id = try self.resolveTypeId(dest_ty);
const result_id = self.spv.allocId();
switch (dest_info.signedness) {
- .signed => try self.func.body.emit(self.spv.gpa, .OpSConvert, .{
+ .signed => try self.func.body.emit(self.spv.gpa, .OpConvertFToS, .{
.id_result_type = dest_ty_id,
.id_result = result_id,
- .signed_value = operand_id,
+ .float_value = operand_id,
}),
- .unsigned => try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
+ .unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertFToU, .{
.id_result_type = dest_ty_id,
.id_result = result_id,
- .unsigned_value = operand_id,
+ .float_value = operand_id,
}),
}
return result_id;
@@ -2112,133 +2413,129 @@ pub const DeclGen = struct {
fn airSliceField(self: *DeclGen, inst: Air.Inst.Index, field: u32) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const field_ty = self.air.typeOfIndex(inst);
+ const field_ty = self.typeOfIndex(inst);
const operand_id = try self.resolve(ty_op.operand);
- return try self.extractField(
- field_ty,
- operand_id,
- field,
- );
+ return try self.extractField(field_ty, operand_id, field);
}
fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = self.air.typeOf(bin_op.lhs);
- if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
- const slice = try self.resolve(bin_op.lhs);
- const index = try self.resolve(bin_op.rhs);
+ const slice_ty = self.typeOf(bin_op.lhs);
+ if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
- const spv_ptr_ty = try self.resolveTypeId(self.air.typeOfIndex(inst));
+ const slice_id = try self.resolve(bin_op.lhs);
+ const index_id = try self.resolve(bin_op.rhs);
- const slice_ptr = blk: {
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpCompositeExtract, .{
- .id_result_type = spv_ptr_ty,
- .id_result = result_id,
- .composite = slice,
- .indexes = &.{0},
- });
- break :blk result_id;
- };
+ const ptr_ty = self.typeOfIndex(inst);
+ const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
- .id_result_type = spv_ptr_ty,
- .id_result = result_id,
- .base = slice_ptr,
- .element = index,
- });
- return result_id;
+ const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
+ return try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
}
fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = self.air.typeOf(bin_op.lhs);
- if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
+ const slice_ty = self.typeOf(bin_op.lhs);
+ if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
- const slice = try self.resolve(bin_op.lhs);
- const index = try self.resolve(bin_op.rhs);
+ const slice_id = try self.resolve(bin_op.lhs);
+ const index_id = try self.resolve(bin_op.rhs);
var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty_id = try self.resolveTypeId(slice_ty.slicePtrFieldType(&slice_buf));
-
- const slice_ptr = blk: {
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpCompositeExtract, .{
- .id_result_type = ptr_ty_id,
- .id_result = result_id,
- .composite = slice,
- .indexes = &.{0},
- });
- break :blk result_id;
- };
-
- const elem_ptr = blk: {
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
- .id_result_type = ptr_ty_id,
- .id_result = result_id,
- .base = slice_ptr,
- .element = index,
- });
- break :blk result_id;
- };
+ const ptr_ty = slice_ty.slicePtrFieldType(&slice_buf, mod);
+ const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
+ const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
+ const elem_ptr = try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
return try self.load(slice_ty, elem_ptr);
}
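+ /// Returns a pointer to the element at `index_id`. For a pointer-to-array (`*[N]T`) this
+ /// uses OpInBoundsAccessChain, and for other pointers (e.g. `[*]T`) OpInBoundsPtrAccessChain;
+ /// in both cases the result is a pointer to a single `T` in the base pointer's storage class.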
+ fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
+ const mod = self.module;
+ // Construct new pointer type for the resulting pointer
+ const elem_ty = ptr_ty.elemType2(mod); // use elemType2() so that we get T for *[N]T.
+ const elem_ty_ref = try self.resolveType(elem_ty, .direct);
+ const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
+ if (ptr_ty.isSinglePointer(mod)) {
+ // Pointer-to-array. In this case, the resulting pointer is not of the same type
+ // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
+ return try self.accessChain(elem_ptr_ty_ref, ptr_id, &.{index_id});
+ } else {
+ // Resulting pointer type is the same as the ptr_ty, so use ptrAccessChain
+ return try self.ptrAccessChain(elem_ptr_ty_ref, ptr_id, index_id, &.{});
+ }
+ }
+
fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const result_ty = self.air.typeOfIndex(inst);
- const elem_ty = ptr_ty.childType();
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const elem_ty = ptr_ty.childType(mod);
// TODO: Make this return a null ptr or something
- if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- const result_type_id = try self.resolveTypeId(result_ty);
- const base_ptr = try self.resolve(bin_op.lhs);
- const rhs = try self.resolve(bin_op.rhs);
+ const ptr_id = try self.resolve(bin_op.lhs);
+ const index_id = try self.resolve(bin_op.rhs);
+ return try self.ptrElemPtr(ptr_ty, ptr_id, index_id);
+ }
- const result_id = self.spv.allocId();
- const indexes = [_]IdRef{rhs};
- try self.func.body.emit(self.spv.gpa, .OpInBoundsAccessChain, .{
- .id_result_type = result_type_id,
- .id_result = result_id,
- .base = base_ptr,
- .indexes = &indexes,
- });
- return result_id;
+ fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const ptr_id = try self.resolve(bin_op.lhs);
+ const index_id = try self.resolve(bin_op.rhs);
+
+ const elem_ptr_id = try self.ptrElemPtr(ptr_ty, ptr_id, index_id);
+
+ // If we have a pointer-to-array, construct an element pointer to use with load()
+ // If we pass ptr_ty directly, it will attempt to load the entire array rather than
+ // just an element.
+ var elem_ptr_info = ptr_ty.ptrInfo(mod);
+ elem_ptr_info.size = .One;
+ const elem_ptr_ty = try Type.ptr(undefined, mod, elem_ptr_info);
+
+ return try self.load(elem_ptr_ty, elem_ptr_id);
+ }
+
+ fn airGetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const un_ty = self.typeOf(ty_op.operand);
+
+ const mod = self.module;
+ const layout = un_ty.unionGetLayout(mod);
+ if (layout.tag_size == 0) return null;
+
+ const union_handle = try self.resolve(ty_op.operand);
+ if (layout.payload_size == 0) return union_handle;
+
+ const tag_ty = un_ty.unionTagTypeSafety().?;
+ const tag_index = @boolToInt(layout.tag_align < layout.payload_align);
+ return try self.extractField(tag_ty, union_handle, tag_index);
}
fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
- const struct_ty = self.air.typeOf(struct_field.struct_operand);
- const object = try self.resolve(struct_field.struct_operand);
+ const struct_ty = self.typeOf(struct_field.struct_operand);
+ const object_id = try self.resolve(struct_field.struct_operand);
const field_index = struct_field.field_index;
- const field_ty = struct_ty.structFieldType(field_index);
- const field_ty_id = try self.resolveTypeId(field_ty);
+ const field_ty = struct_ty.structFieldType(field_index, mod);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- assert(struct_ty.zigTypeTag() == .Struct); // Cannot do unions yet.
+ assert(struct_ty.zigTypeTag(mod) == .Struct); // Cannot do unions yet.
- const result_id = self.spv.allocId();
- const indexes = [_]u32{field_index};
- try self.func.body.emit(self.spv.gpa, .OpCompositeExtract, .{
- .id_result_type = field_ty_id,
- .id_result = result_id,
- .composite = object,
- .indexes = &indexes,
- });
- return result_id;
+ return try self.extractField(field_ty, object_id, field_index);
}
fn structFieldPtr(
@@ -2248,24 +2545,16 @@ pub const DeclGen = struct {
object_ptr: IdRef,
field_index: u32,
) !?IdRef {
- const object_ty = object_ptr_ty.childType();
- switch (object_ty.zigTypeTag()) {
- .Struct => switch (object_ty.containerLayout()) {
+ const mod = self.module;
+ const object_ty = object_ptr_ty.childType(mod);
+ switch (object_ty.zigTypeTag(mod)) {
+ .Struct => switch (object_ty.containerLayout(mod)) {
.Packed => unreachable, // TODO
else => {
- const u32_ty_id = self.typeId(try self.intType(.unsigned, 32));
- const field_index_id = self.spv.allocId();
- try self.spv.emitConstant(u32_ty_id, field_index_id, .{ .uint32 = field_index });
- const result_id = self.spv.allocId();
- const result_type_id = try self.resolveTypeId(result_ptr_ty);
- const indexes = [_]IdRef{field_index_id};
- try self.func.body.emit(self.spv.gpa, .OpInBoundsAccessChain, .{
- .id_result_type = result_type_id,
- .id_result = result_id,
- .base = object_ptr,
- .indexes = &indexes,
- });
- return result_id;
+ const field_index_ty_ref = try self.intType(.unsigned, 32);
+ const field_index_id = try self.spv.constInt(field_index_ty_ref, field_index);
+ const result_ty_ref = try self.resolveType(result_ptr_ty, .direct);
+ return try self.accessChain(result_ty_ref, object_ptr, &.{field_index_id});
},
},
else => unreachable, // TODO
@@ -2276,8 +2565,8 @@ pub const DeclGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try self.resolve(ty_op.operand);
- const struct_ptr_ty = self.air.typeOf(ty_op.operand);
- const result_ptr_ty = self.air.typeOfIndex(inst);
+ const struct_ptr_ty = self.typeOf(ty_op.operand);
+ const result_ptr_ty = self.typeOfIndex(inst);
return try self.structFieldPtr(result_ptr_ty, struct_ptr_ty, struct_ptr, field_index);
}
@@ -2287,7 +2576,7 @@ pub const DeclGen = struct {
fn makePointerConstant(
self: *DeclGen,
section: *SpvSection,
- ptr_ty_ref: SpvType.Ref,
+ ptr_ty_ref: CacheRef,
ptr_id: IdRef,
) !IdRef {
const result_id = self.spv.allocId();
@@ -2299,76 +2588,46 @@ pub const DeclGen = struct {
return result_id;
}
- fn variable(
+ // Allocate a function-local variable, with an optional initializer.
+ // This function returns a pointer, in the Generic address space, to a
+ // variable of type `ty_ref`. The variable itself is placed in the
+ // Function address space.
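+ //
+ // For example (result names are illustrative), `alloc(u32_ty_ref, null)` emits roughly
+ //   %var    = OpVariable %ptr_Function_u32 Function
+ // into the prologue, followed in the body by
+ //   %result = OpPtrCastToGeneric %ptr_Generic_u32 %var
+ // and returns %result.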
+ fn alloc(
self: *DeclGen,
- comptime context: enum { function, global },
- result_id: IdRef,
- ptr_ty_ref: SpvType.Ref,
+ ty_ref: CacheRef,
initializer: ?IdRef,
- ) !void {
- const storage_class = self.spv.typeRefType(ptr_ty_ref).payload(.pointer).storage_class;
- const actual_storage_class = switch (storage_class) {
- .Generic => switch (context) {
- .function => .Function,
- .global => .CrossWorkgroup,
- },
- else => storage_class,
- };
- const actual_ptr_ty_ref = switch (storage_class) {
- .Generic => try self.spv.changePtrStorageClass(ptr_ty_ref, actual_storage_class),
- else => ptr_ty_ref,
- };
- const alloc_result_id = switch (storage_class) {
- .Generic => self.spv.allocId(),
- else => result_id,
- };
+ ) !IdRef {
+ const fn_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Function);
+ const general_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic);
- const section = switch (actual_storage_class) {
- .Generic => unreachable,
- // SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
- // directly generate them into func.prologue instead of the body.
- .Function => &self.func.prologue,
- else => &self.spv.sections.types_globals_constants,
- };
- try section.emit(self.spv.gpa, .OpVariable, .{
- .id_result_type = self.typeId(actual_ptr_ty_ref),
- .id_result = alloc_result_id,
- .storage_class = actual_storage_class,
+ // SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
+ // directly generate them into func.prologue instead of the body.
+ const var_id = self.spv.allocId();
+ try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
+ .id_result_type = self.typeId(fn_ptr_ty_ref),
+ .id_result = var_id,
+ .storage_class = .Function,
.initializer = initializer,
});
- if (storage_class != .Generic) {
- return;
- }
-
- // Now we need to convert the pointer.
- // If this is a function local, we need to perform the conversion at runtime. Otherwise, we can do
- // it ahead of time using OpSpecConstantOp.
- switch (actual_storage_class) {
- .Function => try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
- .id_result_type = self.typeId(ptr_ty_ref),
- .id_result = result_id,
- .pointer = alloc_result_id,
- }),
- // TODO: Can we do without this cast or move it to runtime?
- else => {
- const const_ptr_id = try self.makePointerConstant(section, actual_ptr_ty_ref, alloc_result_id);
- try section.emitSpecConstantOp(self.spv.gpa, .OpPtrCastToGeneric, .{
- .id_result_type = self.typeId(ptr_ty_ref),
- .id_result = result_id,
- .pointer = const_ptr_id,
- });
- },
- }
+ // Convert to a generic pointer
+ const result_id = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
+ .id_result_type = self.typeId(general_ptr_ty_ref),
+ .id_result = result_id,
+ .pointer = var_id,
+ });
+ return result_id;
}
fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
- const ty = self.air.typeOfIndex(inst);
- const result_ty_ref = try self.resolveType(ty, .direct);
- const result_id = self.spv.allocId();
- try self.variable(.function, result_id, result_ty_ref, null);
- return result_id;
+ const mod = self.module;
+ const ptr_ty = self.typeOfIndex(inst);
+ assert(ptr_ty.ptrAddressSpace(mod) == .generic);
+ const child_ty = ptr_ty.childType(mod);
+ const child_ty_ref = try self.resolveType(child_ty, .indirect);
+ return try self.alloc(child_ty_ref, null);
}
fn airArg(self: *DeclGen) IdRef {
@@ -2382,6 +2641,7 @@ pub const DeclGen = struct {
// the current block by first generating the code of the block, then a label, and then generate the rest of the current
// ir.Block in a different SPIR-V block.
+ const mod = self.module;
const label_id = self.spv.allocId();
// 4 chosen as arbitrary initial capacity.
@@ -2396,7 +2656,7 @@ pub const DeclGen = struct {
incoming_blocks.deinit(self.gpa);
}
- const ty = self.air.typeOfIndex(inst);
+ const ty = self.typeOfIndex(inst);
const inst_datas = self.air.instructions.items(.data);
const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
@@ -2405,7 +2665,7 @@ pub const DeclGen = struct {
try self.beginSpvBlock(label_id);
// If this block didn't produce a value, simply return here.
- if (!ty.hasRuntimeBitsIgnoreComptime())
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod))
return null;
// Combine the result from the blocks using the Phi instruction.
@@ -2429,9 +2689,10 @@ pub const DeclGen = struct {
fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
const br = self.air.instructions.items(.data)[inst].br;
const block = self.blocks.get(br.block_inst).?;
- const operand_ty = self.air.typeOf(br.operand);
+ const operand_ty = self.typeOf(br.operand);
- if (operand_ty.hasRuntimeBits()) {
+ const mod = self.module;
+ if (operand_ty.hasRuntimeBits(mod)) {
const operand_id = try self.resolve(br.operand);
// current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
@@ -2468,7 +2729,10 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
+ const src_fname_id = try self.spv.resolveSourceFileName(
+ self.module,
+ self.module.declPtr(self.decl_index),
+ );
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id,
.line = dbg_stmt.line,
@@ -2477,24 +2741,26 @@ pub const DeclGen = struct {
}
fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const ptr_ty = self.air.typeOf(ty_op.operand);
+ const ptr_ty = self.typeOf(ty_op.operand);
const operand = try self.resolve(ty_op.operand);
- if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
+ if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
return try self.load(ptr_ty, operand);
}
fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const mod = self.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
const ptr = try self.resolve(bin_op.lhs);
const value = try self.resolve(bin_op.rhs);
const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
- const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+ const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef) {
- const undef = try self.constUndef(ptr_ty_ref);
+ const undef = try self.spv.constUndef(ptr_ty_ref);
try self.store(ptr_ty, ptr, undef);
} else {
try self.store(ptr_ty, ptr, value);
@@ -2519,8 +2785,9 @@ pub const DeclGen = struct {
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = self.air.instructions.items(.data)[inst].un_op;
- const operand_ty = self.air.typeOf(operand);
- if (operand_ty.hasRuntimeBits()) {
+ const operand_ty = self.typeOf(operand);
+ const mod = self.module;
+ if (operand_ty.hasRuntimeBits(mod)) {
const operand_id = try self.resolve(operand);
try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
} else {
@@ -2529,11 +2796,12 @@ pub const DeclGen = struct {
}
fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const mod = self.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
- const ptr_ty = self.air.typeOf(un_op);
- const ret_ty = ptr_ty.childType();
+ const ptr_ty = self.typeOf(un_op);
+ const ret_ty = ptr_ty.childType(mod);
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
return;
}
@@ -2546,26 +2814,27 @@ pub const DeclGen = struct {
}
fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const err_union_id = try self.resolve(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = self.air.typeOf(pl_op.operand);
- const payload_ty = self.air.typeOfIndex(inst);
+ const err_union_ty = self.typeOf(pl_op.operand);
+ const payload_ty = self.typeOfIndex(inst);
const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
const bool_ty_ref = try self.resolveType(Type.bool, .direct);
const eu_layout = self.errorUnionLayout(payload_ty);
- if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
const err_id = if (eu_layout.payload_has_bits)
try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex())
else
err_union_id;
- const zero_id = try self.constInt(err_ty_ref, 0);
+ const zero_id = try self.spv.constInt(err_ty_ref, 0);
const is_err_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
.id_result_type = self.typeId(bool_ty_ref),
@@ -2607,17 +2876,18 @@ pub const DeclGen = struct {
fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
- const err_union_ty = self.air.typeOf(ty_op.operand);
+ const err_union_ty = self.typeOf(ty_op.operand);
const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
- if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+ if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
// No error possible, so just return undefined.
- return try self.constUndef(err_ty_ref);
+ return try self.spv.constUndef(err_ty_ref);
}
- const payload_ty = err_union_ty.errorUnionPayload();
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
const eu_layout = self.errorUnionLayout(payload_ty);
if (!eu_layout.payload_has_bits) {
@@ -2631,9 +2901,10 @@ pub const DeclGen = struct {
fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const err_union_ty = self.air.typeOfIndex(inst);
- const payload_ty = err_union_ty.errorUnionPayload();
+ const err_union_ty = self.typeOfIndex(inst);
+ const payload_ty = err_union_ty.errorUnionPayload(mod);
const operand_id = try self.resolve(ty_op.operand);
const eu_layout = self.errorUnionLayout(payload_ty);
@@ -2643,7 +2914,7 @@ pub const DeclGen = struct {
const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
var members = std.BoundedArray(IdRef, 2){};
- const payload_id = try self.constUndef(payload_ty_ref);
+ const payload_id = try self.spv.constUndef(payload_ty_ref);
if (eu_layout.error_first) {
members.appendAssumeCapacity(operand_id);
members.appendAssumeCapacity(payload_id);
@@ -2655,43 +2926,36 @@ pub const DeclGen = struct {
}
const err_union_ty_ref = try self.resolveType(err_union_ty, .direct);
- const result_id = self.spv.allocId();
- try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
- .id_result_type = self.typeId(err_union_ty_ref),
- .id_result = result_id,
- .constituents = members.slice(),
- });
- return result_id;
+ return try self.constructStruct(err_union_ty_ref, members.slice());
}
fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand_id = try self.resolve(un_op);
- const optional_ty = self.air.typeOf(un_op);
+ const optional_ty = self.typeOf(un_op);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
+ const payload_ty = optional_ty.optionalChild(mod);
const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// Pointer payload represents nullability: pointer or slice.
- var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_ty = if (payload_ty.isSlice())
- payload_ty.slicePtrFieldType(&ptr_buf)
+ const ptr_ty = if (payload_ty.isSlice(mod))
+ payload_ty.slicePtrFieldType(mod)
else
payload_ty;
- const ptr_id = if (payload_ty.isSlice())
+ const ptr_id = if (payload_ty.isSlice(mod))
try self.extractField(Type.bool, operand_id, 0)
else
operand_id;
const payload_ty_ref = try self.resolveType(ptr_ty, .direct);
- const null_id = try self.constNull(payload_ty_ref);
+ const null_id = try self.spv.constNull(payload_ty_ref);
const result_id = self.spv.allocId();
const operands = .{
.id_result_type = self.typeId(bool_ty_ref),
@@ -2706,7 +2970,7 @@ pub const DeclGen = struct {
return result_id;
}
- const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime())
+ const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod))
try self.extractField(Type.bool, operand_id, 1)
else
// Optional representation is bool indicating whether the optional is set
@@ -2730,14 +2994,15 @@ pub const DeclGen = struct {
fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
- const optional_ty = self.air.typeOf(ty_op.operand);
- const payload_ty = self.air.typeOfIndex(inst);
+ const optional_ty = self.typeOf(ty_op.operand);
+ const payload_ty = self.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
}
@@ -2747,55 +3012,49 @@ pub const DeclGen = struct {
fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const payload_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return try self.constBool(true, .direct);
}
const operand_id = try self.resolve(ty_op.operand);
- const optional_ty = self.air.typeOfIndex(inst);
- if (optional_ty.optionalReprIsPayload()) {
+ const optional_ty = self.typeOfIndex(inst);
+ if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
}
const optional_ty_ref = try self.resolveType(optional_ty, .direct);
- const result_id = self.spv.allocId();
const members = [_]IdRef{ operand_id, try self.constBool(true, .indirect) };
- try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
- .id_result_type = self.typeId(optional_ty_ref),
- .id_result = result_id,
- .constituents = &members,
- });
- return result_id;
+ return try self.constructStruct(optional_ty_ref, &members);
}
fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
- const target = self.getTarget();
+ const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolve(pl_op.operand);
- const cond_ty = self.air.typeOf(pl_op.operand);
+ const cond_ty = self.typeOf(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
- const cond_words: u32 = switch (cond_ty.zigTypeTag()) {
+ const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
.Int => blk: {
- const bits = cond_ty.intInfo(target).bits;
+ const bits = cond_ty.intInfo(mod).bits;
const backing_bits = self.backingIntBits(bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
.Enum => blk: {
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = cond_ty.intTagType(&buffer);
- const int_info = int_ty.intInfo(target);
+ const int_ty = cond_ty.intTagType(mod);
+ const int_info = int_ty.intInfo(mod);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
- else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag())}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
+ else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
};
const num_cases = switch_br.data.cases_len;
@@ -2839,15 +3098,14 @@ pub const DeclGen = struct {
const label = IdRef{ .id = first_case_label.id + case_i };
for (items) |item| {
- const value = self.air.value(item) orelse {
+ const value = (try self.air.value(item, mod)) orelse {
return self.todo("switch on runtime value???", .{});
};
- const int_val = switch (cond_ty.zigTypeTag()) {
- .Int => if (cond_ty.isSignedInt()) @bitCast(u64, value.toSignedInt(target)) else value.toUnsignedInt(target),
+ const int_val = switch (cond_ty.zigTypeTag(mod)) {
+ .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
.Enum => blk: {
- var int_buffer: Value.Payload.U64 = undefined;
// TODO: figure out if cond_ty is correct (something with enum literals)
- break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(target); // TODO: composite integer constants
+ break :blk (try value.enumToInt(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
},
else => unreachable,
};
@@ -2891,6 +3149,7 @@ pub const DeclGen = struct {
}
fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
@@ -2973,7 +3232,7 @@ pub const DeclGen = struct {
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
const loc = LazySrcLoc.nodeOffset(0);
- const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
+ const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index), mod);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
@@ -3021,19 +3280,20 @@ pub const DeclGen = struct {
fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef {
_ = modifier;
+ const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
- const callee_ty = self.air.typeOf(pl_op.operand);
- const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+ const callee_ty = self.typeOf(pl_op.operand);
+ const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
.Pointer => return self.fail("cannot call function pointers", .{}),
else => unreachable,
};
- const fn_info = zig_fn_ty.fnInfo();
+ const fn_info = mod.typeToFunc(zig_fn_ty).?;
const return_type = fn_info.return_type;
- const result_type_id = try self.resolveTypeId(return_type);
+ const result_type_id = try self.resolveTypeId(return_type.toType());
const result_id = self.spv.allocId();
const callee_id = try self.resolve(pl_op.operand);
@@ -3046,8 +3306,8 @@ pub const DeclGen = struct {
// before starting to emit OpFunctionCall instructions. Hence the
// temporary params buffer.
const arg_id = try self.resolve(arg);
- const arg_ty = self.air.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ const arg_ty = self.typeOf(arg);
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
params[n_params] = arg_id;
n_params += 1;
@@ -3060,14 +3320,24 @@ pub const DeclGen = struct {
.id_ref_3 = params[0..n_params],
});
- if (return_type.isNoReturn()) {
+ if (return_type == .noreturn_type) {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
- if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+ if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
return result_id;
}
+
+ fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
+ const mod = self.module;
+ return self.air.typeOf(inst, &mod.intern_pool);
+ }
+
+ fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type {
+ const mod = self.module;
+ return self.air.typeOfIndex(inst, &mod.intern_pool);
+ }
};
diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig
index eebf43866d..c7848bbc92 100644
--- a/src/codegen/spirv/Assembler.zig
+++ b/src/codegen/spirv/Assembler.zig
@@ -11,7 +11,8 @@ const IdRef = spec.IdRef;
const IdResult = spec.IdResult;
const SpvModule = @import("Module.zig");
-const SpvType = @import("type.zig").Type;
+const CacheRef = SpvModule.CacheRef;
+const CacheKey = SpvModule.CacheKey;
/// Represents a token in the assembly template.
const Token = struct {
@@ -126,7 +127,7 @@ const AsmValue = union(enum) {
value: IdRef,
/// This result-value represents a type registered into the module's type system.
- ty: SpvType.Ref,
+ ty: CacheRef,
/// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
/// is of a variant that allows the result to be obtained (not an unresolved
@@ -135,7 +136,7 @@ const AsmValue = union(enum) {
return switch (self) {
.just_declared, .unresolved_forward_reference => unreachable,
.value => |result| result,
- .ty => |ref| spv.typeId(ref),
+ .ty => |ref| spv.resultId(ref),
};
}
};
@@ -267,9 +268,9 @@ fn processInstruction(self: *Assembler) !void {
/// refers to the result.
fn processTypeInstruction(self: *Assembler) !AsmValue {
const operands = self.inst.operands.items;
- const ty = switch (self.inst.opcode) {
- .OpTypeVoid => SpvType.initTag(.void),
- .OpTypeBool => SpvType.initTag(.bool),
+ const ref = switch (self.inst.opcode) {
+ .OpTypeVoid => try self.spv.resolve(.void_type),
+ .OpTypeBool => try self.spv.resolve(.bool_type),
.OpTypeInt => blk: {
const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
0 => .unsigned,
@@ -282,7 +283,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
const width = std.math.cast(u16, operands[1].literal32) orelse {
return self.fail(0, "int type of {} bits is too large", .{operands[1].literal32});
};
- break :blk try SpvType.int(self.spv.arena, signedness, width);
+ break :blk try self.spv.intType(signedness, width);
},
.OpTypeFloat => blk: {
const bits = operands[1].literal32;
@@ -292,136 +293,36 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
},
}
- break :blk SpvType.float(@intCast(u16, bits));
- },
- .OpTypeVector => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.Vector);
- payload.* = .{
- .component_type = try self.resolveTypeRef(operands[1].ref_id),
- .component_count = operands[2].literal32,
- };
- break :blk SpvType.initPayload(&payload.base);
- },
- .OpTypeMatrix => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.Matrix);
- payload.* = .{
- .column_type = try self.resolveTypeRef(operands[1].ref_id),
- .column_count = operands[2].literal32,
- };
- break :blk SpvType.initPayload(&payload.base);
- },
- .OpTypeImage => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.Image);
- payload.* = .{
- .sampled_type = try self.resolveTypeRef(operands[1].ref_id),
- .dim = @intToEnum(spec.Dim, operands[2].value),
- .depth = switch (operands[3].literal32) {
- 0 => .no,
- 1 => .yes,
- 2 => .maybe,
- else => {
- return self.fail(0, "'{}' is not a valid image depth (expected 0, 1 or 2)", .{operands[3].literal32});
- },
- },
- .arrayed = switch (operands[4].literal32) {
- 0 => false,
- 1 => true,
- else => {
- return self.fail(0, "'{}' is not a valid image arrayed-ness (expected 0 or 1)", .{operands[4].literal32});
- },
- },
- .multisampled = switch (operands[5].literal32) {
- 0 => false,
- 1 => true,
- else => {
- return self.fail(0, "'{}' is not a valid image multisampled-ness (expected 0 or 1)", .{operands[5].literal32});
- },
- },
- .sampled = switch (operands[6].literal32) {
- 0 => .known_at_runtime,
- 1 => .with_sampler,
- 2 => .without_sampler,
- else => {
- return self.fail(0, "'{}' is not a valid image sampled-ness (expected 0, 1 or 2)", .{operands[6].literal32});
- },
- },
- .format = @intToEnum(spec.ImageFormat, operands[7].value),
- .access_qualifier = if (operands.len > 8)
- @intToEnum(spec.AccessQualifier, operands[8].value)
- else
- null,
- };
- break :blk SpvType.initPayload(&payload.base);
- },
- .OpTypeSampler => SpvType.initTag(.sampler),
- .OpTypeSampledImage => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.SampledImage);
- payload.* = .{
- .image_type = try self.resolveTypeRef(operands[1].ref_id),
- };
- break :blk SpvType.initPayload(&payload.base);
+ break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(u16, bits) } });
},
+ .OpTypeVector => try self.spv.resolve(.{ .vector_type = .{
+ .component_type = try self.resolveTypeRef(operands[1].ref_id),
+ .component_count = operands[2].literal32,
+ } }),
.OpTypeArray => {
// TODO: The length of an OpTypeArray is determined by a constant (which may be a spec constant),
// and so some care must be taken when entering it into the type system.
return self.todo("process OpTypeArray", .{});
},
- .OpTypeRuntimeArray => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.RuntimeArray);
- payload.* = .{
- .element_type = try self.resolveTypeRef(operands[1].ref_id),
- // TODO: Fetch array stride from decorations.
- .array_stride = 0,
- };
- break :blk SpvType.initPayload(&payload.base);
- },
- .OpTypeOpaque => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.Opaque);
- const name_offset = operands[1].string;
- payload.* = .{
- .name = std.mem.sliceTo(self.inst.string_bytes.items[name_offset..], 0),
- };
- break :blk SpvType.initPayload(&payload.base);
- },
- .OpTypePointer => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.Pointer);
- payload.* = .{
- .storage_class = @intToEnum(spec.StorageClass, operands[1].value),
- .child_type = try self.resolveTypeRef(operands[2].ref_id),
- // TODO: Fetch decorations
- };
- break :blk SpvType.initPayload(&payload.base);
- },
+ .OpTypePointer => try self.spv.ptrType(
+ try self.resolveTypeRef(operands[2].ref_id),
+ @intToEnum(spec.StorageClass, operands[1].value),
+ ),
.OpTypeFunction => blk: {
const param_operands = operands[2..];
- const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len);
+ const param_types = try self.spv.gpa.alloc(CacheRef, param_operands.len);
+ defer self.spv.gpa.free(param_types);
for (param_types, 0..) |*param, i| {
param.* = try self.resolveTypeRef(param_operands[i].ref_id);
}
- const payload = try self.spv.arena.create(SpvType.Payload.Function);
- payload.* = .{
+ break :blk try self.spv.resolve(.{ .function_type = .{
.return_type = try self.resolveTypeRef(operands[1].ref_id),
.parameters = param_types,
- };
- break :blk SpvType.initPayload(&payload.base);
+ } });
},
- .OpTypeEvent => SpvType.initTag(.event),
- .OpTypeDeviceEvent => SpvType.initTag(.device_event),
- .OpTypeReserveId => SpvType.initTag(.reserve_id),
- .OpTypeQueue => SpvType.initTag(.queue),
- .OpTypePipe => blk: {
- const payload = try self.spv.arena.create(SpvType.Payload.Pipe);
- payload.* = .{
- .qualifier = @intToEnum(spec.AccessQualifier, operands[1].value),
- };
- break :blk SpvType.initPayload(&payload.base);
- },
- .OpTypePipeStorage => SpvType.initTag(.pipe_storage),
- .OpTypeNamedBarrier => SpvType.initTag(.named_barrier),
else => return self.todo("process type instruction {s}", .{@tagName(self.inst.opcode)}),
};
- const ref = try self.spv.resolveType(ty);
return AsmValue{ .ty = ref };
}
@@ -528,7 +429,7 @@ fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
}
/// Resolve a value reference as type.
-fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !SpvType.Ref {
+fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !CacheRef {
const value = try self.resolveRef(ref);
switch (value) {
.just_declared, .unresolved_forward_reference => unreachable,
@@ -761,19 +662,20 @@ fn parseContextDependentNumber(self: *Assembler) !void {
const tok = self.currentToken();
const result_type_ref = try self.resolveTypeRef(self.inst.operands.items[0].ref_id);
- const result_type = self.spv.type_cache.keys()[@enumToInt(result_type_ref)];
- if (result_type.isInt()) {
- try self.parseContextDependentInt(result_type.intSignedness(), result_type.intFloatBits());
- } else if (result_type.isFloat()) {
- const width = result_type.intFloatBits();
- switch (width) {
- 16 => try self.parseContextDependentFloat(16),
- 32 => try self.parseContextDependentFloat(32),
- 64 => try self.parseContextDependentFloat(64),
- else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{width}),
- }
- } else {
- return self.fail(tok.start, "cannot parse literal constant {s}", .{@tagName(result_type.tag())});
+ const result_type = self.spv.cache.lookup(result_type_ref);
+ switch (result_type) {
+ .int_type => |int| {
+ try self.parseContextDependentInt(int.signedness, int.bits);
+ },
+ .float_type => |float| {
+ switch (float.bits) {
+ 16 => try self.parseContextDependentFloat(16),
+ 32 => try self.parseContextDependentFloat(32),
+ 64 => try self.parseContextDependentFloat(64),
+ else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{float.bits}),
+ }
+ },
+ else => return self.fail(tok.start, "cannot parse literal constant", .{}),
}
}
diff --git a/src/codegen/spirv/Cache.zig b/src/codegen/spirv/Cache.zig
new file mode 100644
index 0000000000..4c41bf583b
--- /dev/null
+++ b/src/codegen/spirv/Cache.zig
@@ -0,0 +1,1046 @@
+//! This file implements an InternPool-like structure that caches
+//! SPIR-V types and constants. Instead of generating type and
+//! constant instructions directly, we first keep a representation
+//! in a compressed database. This is then only later turned into
+//! actual SPIR-V instructions.
+//! Note: This cache is insertion-ordered. This means that we
+//! can materialize the SPIR-V instructions in the proper order,
+//! as SPIR-V requires that the type is emitted before use.
+//! Note: According to SPIR-V spec section 2.8, Types and Variables,
+//! non-pointer non-aggregate types (which include matrices and
+//! vectors) must have a _unique_ representation in the final binary.
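+//!
+//! Intended usage, roughly: a caller resolves a `Key` (for example
+//! `.{ .int_type = .{ .signedness = .unsigned, .bits = 32 } }`) to a stable `Ref`;
+//! resolving an identical key later returns the same `Ref`, so each distinct type or
+//! constant is materialized as at most one instruction in the final module.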
+
+const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+const Section = @import("Section.zig");
+const Module = @import("Module.zig");
+
+const spec = @import("spec.zig");
+const Opcode = spec.Opcode;
+const IdResult = spec.IdResult;
+const StorageClass = spec.StorageClass;
+
+const Self = @This();
+
+map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
+items: std.MultiArrayList(Item) = .{},
+extra: std.ArrayListUnmanaged(u32) = .{},
+
+string_bytes: std.ArrayListUnmanaged(u8) = .{},
+strings: std.AutoArrayHashMapUnmanaged(void, u32) = .{},
+
+const Item = struct {
+ tag: Tag,
+ /// The result-id that this item uses.
+ result_id: IdResult,
+ /// The Tag determines how this should be interpreted.
+ data: u32,
+};
+
+const Tag = enum {
+ // -- Types
+ /// Simple type that has no additional data.
+ /// data is SimpleType.
+ type_simple,
+ /// Signed integer type
+ /// data is number of bits
+ type_int_signed,
+ /// Unsigned integer type
+ /// data is number of bits
+ type_int_unsigned,
+ /// Floating point type
+ /// data is number of bits
+ type_float,
+ /// Vector type
+ /// data is payload to VectorType
+ type_vector,
+ /// Array type
+ /// data is payload to ArrayType
+ type_array,
+ /// Function (proto)type
+ /// data is payload to FunctionType
+ type_function,
+ /// Pointer type in the Generic storage class
+ /// data is child type
+ type_ptr_generic,
+ /// Pointer type in the CrossWorkgroup storage class
+ /// data is child type
+ type_ptr_crosswgp,
+ /// Pointer type in the Function storage class
+ /// data is child type
+ type_ptr_function,
+ /// Simple pointer type that does not have any decorations.
+ /// data is payload to SimplePointerType
+ type_ptr_simple,
+ /// Simple structure type that does not have any decorations.
+ /// data is payload to SimpleStructType
+ type_struct_simple,
+ /// Simple structure type that does not have any decorations, but does
+ /// have member names trailing.
+ /// data is payload to SimpleStructType
+ type_struct_simple_with_member_names,
+
+ // -- Values
+ /// Value of type u8
+ /// data is value
+ uint8,
+ /// Value of type u32
+ /// data is value
+ uint32,
+ // TODO: More specialized tags here.
+ /// Integer value for signed values that are smaller than 32 bits.
+ /// data is pointer to Int32
+ int_small,
+ /// Integer value for unsigned values that are smaller than 32 bits.
+ /// data is pointer to UInt32
+ uint_small,
+ /// Integer value for signed values that are between 32 and 64 bits.
+ /// data is pointer to Int64
+ int_large,
+ /// Integer value for unsinged values that are beteen 32 and 64 bits.
+ /// data is pointer to UInt64
+ uint_large,
+ /// Value of type f16
+ /// data is value
+ float16,
+ /// Value of type f32
+ /// data is value
+ float32,
+ /// Value of type f64
+ /// data is payload to Float64
+ float64,
+ /// Undefined value
+ /// data is type
+ undef,
+ /// Null value
+ /// data is type
+ null,
+ /// Bool value that is true
+ /// data is (bool) type
+ bool_true,
+ /// Bool value that is false
+ /// data is (bool) type
+ bool_false,
+
+ const SimpleType = enum { void, bool };
+
+ const VectorType = Key.VectorType;
+ const ArrayType = Key.ArrayType;
+
+ // Trailing:
+ // - [param_len]Ref: parameter types.
+ const FunctionType = struct {
+ param_len: u32,
+ return_type: Ref,
+ };
+
+ const SimplePointerType = struct {
+ storage_class: StorageClass,
+ child_type: Ref,
+ };
+
+ /// Trailing:
+ /// - [members_len]Ref: Member types.
+ /// - [members_len]String: Member names, -- ONLY if the tag is type_struct_simple_with_member_names
+ const SimpleStructType = struct {
+ /// (optional) The name of the struct.
+ name: String,
+ /// Number of members that this struct has.
+ members_len: u32,
+ };
+
+ const Float64 = struct {
+ // Low-order 32 bits of the value.
+ low: u32,
+ // High-order 32 bits of the value.
+ high: u32,
+
+ fn encode(value: f64) Float64 {
+ const bits = @bitCast(u64, value);
+ return .{
+ .low = @truncate(u32, bits),
+ .high = @truncate(u32, bits >> 32),
+ };
+ }
+
+ fn decode(self: Float64) f64 {
+ const bits = @as(u64, self.low) | (@as(u64, self.high) << 32);
+ return @bitCast(f64, bits);
+ }
+ };
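// Editor's note (illustrative, not part of this diff): Float64 splits the raw
// IEEE-754 bit pattern of an f64 across two u32 words, so the round trip
// Float64.encode(1.5).decode() == 1.5 holds exactly.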
+
+ const Int32 = struct {
+ ty: Ref,
+ value: i32,
+ };
+
+ const UInt32 = struct {
+ ty: Ref,
+ value: u32,
+ };
+
+ const UInt64 = struct {
+ ty: Ref,
+ low: u32,
+ high: u32,
+
+ fn encode(ty: Ref, value: u64) UInt64 {
+ return .{
+ .ty = ty,
+ .low = @truncate(u32, value),
+ .high = @truncate(u32, value >> 32),
+ };
+ }
+
+ fn decode(self: UInt64) u64 {
+ return @as(u64, self.low) | (@as(u64, self.high) << 32);
+ }
+ };
+
+ const Int64 = struct {
+ ty: Ref,
+ low: u32,
+ high: u32,
+
+ fn encode(ty: Ref, value: i64) Int64 {
+ return .{
+ .ty = ty,
+ .low = @truncate(u32, @bitCast(u64, value)),
+ .high = @truncate(u32, @bitCast(u64, value) >> 32),
+ };
+ }
+
+ fn decode(self: Int64) i64 {
+ return @bitCast(i64, @as(u64, self.low) | (@as(u64, self.high) << 32));
+ }
+ };
+};
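// Editor's note (illustrative, not part of this diff): resolve() maps each Key
// to exactly one Item. A key with inline data, e.g.
//     .{ .int_type = .{ .signedness = .unsigned, .bits = 16 } }
// becomes Item{ .tag = .type_int_unsigned, .result_id = <id>, .data = 16 },
// while keys with larger payloads store an index into `extra` in `data`.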
+
+pub const Ref = enum(u32) { _ };
+
+/// This union represents something that can be interned. This includes
+/// types and constants. This structure is used for interfacing with the
+/// database: values described by this structure are ephemeral and stored
+/// in a more memory-efficient manner internally.
+pub const Key = union(enum) {
+ // -- Types
+ void_type,
+ bool_type,
+ int_type: IntType,
+ float_type: FloatType,
+ vector_type: VectorType,
+ array_type: ArrayType,
+ function_type: FunctionType,
+ ptr_type: PointerType,
+ struct_type: StructType,
+
+ // -- values
+ int: Int,
+ float: Float,
+ undef: Undef,
+ null: Null,
+ bool: Bool,
+
+ pub const IntType = std.builtin.Type.Int;
+ pub const FloatType = std.builtin.Type.Float;
+
+ pub const VectorType = struct {
+ component_type: Ref,
+ component_count: u32,
+ };
+
+ pub const ArrayType = struct {
+ /// Child type of this array.
+ element_type: Ref,
+ /// Reference to a constant.
+ length: Ref,
+ /// Type has the 'ArrayStride' decoration.
+ /// If zero, no stride is present.
+ stride: u32 = 0,
+ };
+
+ pub const FunctionType = struct {
+ return_type: Ref,
+ parameters: []const Ref,
+ };
+
+ pub const PointerType = struct {
+ storage_class: StorageClass,
+ child_type: Ref,
+ // TODO: Decorations:
+ // - Alignment
+ // - ArrayStride,
+ // - MaxByteOffset,
+ };
+
+ pub const StructType = struct {
+ // TODO: Decorations.
+ /// The name of the structure. Can be `.none`.
+ name: String = .none,
+ /// The type of each member.
+ member_types: []const Ref,
+ /// Name for each member. May be omitted.
+ member_names: ?[]const String = null,
+
+ fn memberNames(self: @This()) []const String {
+ return if (self.member_names) |member_names| member_names else &.{};
+ }
+ };
+
+ pub const Int = struct {
+ /// The type: any bitness integer.
+ ty: Ref,
+ /// The actual value. Only 64-bit representations are
+ /// available here; values of smaller types are widened
+ /// into these fields.
+ value: Value,
+
+ pub const Value = union(enum) {
+ uint64: u64,
+ int64: i64,
+ };
+
+ /// Turns this value into the corresponding 32-bit literal, 2s complement signed.
+ fn toBits32(self: Int) u32 {
+ return switch (self.value) {
+ .uint64 => |val| @intCast(u32, val),
+ .int64 => |val| if (val < 0) @bitCast(u32, @intCast(i32, val)) else @intCast(u32, val),
+ };
+ }
+
+ fn toBits64(self: Int) u64 {
+ return switch (self.value) {
+ .uint64 => |val| val,
+ .int64 => |val| @bitCast(u64, val),
+ };
+ }
+
+ fn to(self: Int, comptime T: type) T {
+ return switch (self.value) {
+ inline else => |val| @intCast(T, val),
+ };
+ }
+ };
+
+ /// Represents a numeric value of some floating point type.
+ pub const Float = struct {
+ /// The type: 16, 32, or 64-bit float.
+ ty: Ref,
+ /// The actual value.
+ value: Value,
+
+ pub const Value = union(enum) {
+ float16: f16,
+ float32: f32,
+ float64: f64,
+ };
+ };
+
+ pub const Undef = struct {
+ ty: Ref,
+ };
+
+ pub const Null = struct {
+ ty: Ref,
+ };
+
+ pub const Bool = struct {
+ ty: Ref,
+ value: bool,
+ };
+
+ fn hash(self: Key) u32 {
+ var hasher = std.hash.Wyhash.init(0);
+ switch (self) {
+ .float => |float| {
+ std.hash.autoHash(&hasher, float.ty);
+ switch (float.value) {
+ .float16 => |value| std.hash.autoHash(&hasher, @bitCast(u16, value)),
+ .float32 => |value| std.hash.autoHash(&hasher, @bitCast(u32, value)),
+ .float64 => |value| std.hash.autoHash(&hasher, @bitCast(u64, value)),
+ }
+ },
+ .function_type => |func| {
+ std.hash.autoHash(&hasher, func.return_type);
+ for (func.parameters) |param_type| {
+ std.hash.autoHash(&hasher, param_type);
+ }
+ },
+ .struct_type => |struct_type| {
+ std.hash.autoHash(&hasher, struct_type.name);
+ for (struct_type.member_types) |member_type| {
+ std.hash.autoHash(&hasher, member_type);
+ }
+ for (struct_type.memberNames()) |member_name| {
+ std.hash.autoHash(&hasher, member_name);
+ }
+ },
+ inline else => |key| std.hash.autoHash(&hasher, key),
+ }
+ return @truncate(u32, hasher.final());
+ }
+
+ fn eql(a: Key, b: Key) bool {
+ const KeyTag = @typeInfo(Key).Union.tag_type.?;
+ const a_tag: KeyTag = a;
+ const b_tag: KeyTag = b;
+ if (a_tag != b_tag) {
+ return false;
+ }
+ return switch (a) {
+ .function_type => |a_func| {
+ const b_func = b.function_type;
+ return a_func.return_type == b_func.return_type and
+ std.mem.eql(Ref, a_func.parameters, b_func.parameters);
+ },
+ .struct_type => |a_struct| {
+ const b_struct = b.struct_type;
+ return a_struct.name == b_struct.name and
+ std.mem.eql(Ref, a_struct.member_types, b_struct.member_types) and
+ std.mem.eql(String, a_struct.memberNames(), b_struct.memberNames());
+ },
+ // TODO: Unroll?
+ else => std.meta.eql(a, b),
+ };
+ }
+
+ pub const Adapter = struct {
+ self: *const Self,
+
+ pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool {
+ _ = b_void;
+ return ctx.self.lookup(@intToEnum(Ref, b_index)).eql(a);
+ }
+
+ pub fn hash(ctx: @This(), a: Key) u32 {
+ _ = ctx;
+ return a.hash();
+ }
+ };
+
+ fn toSimpleType(self: Key) Tag.SimpleType {
+ return switch (self) {
+ .void_type => .void,
+ .bool_type => .bool,
+ else => unreachable,
+ };
+ }
+};
+
+pub fn deinit(self: *Self, spv: *const Module) void {
+ self.map.deinit(spv.gpa);
+ self.items.deinit(spv.gpa);
+ self.extra.deinit(spv.gpa);
+ self.string_bytes.deinit(spv.gpa);
+ self.strings.deinit(spv.gpa);
+}
+
+/// Actually materialize the database into spir-v instructions.
+/// This function returns a spir-v section of (only) constant and type instructions.
+/// Additionally, decorations, debug names, etc, are all directly emitted into the
+/// `spv` module. The section is allocated with `spv.gpa`.
+pub fn materialize(self: *const Self, spv: *Module) !Section {
+ var section = Section{};
+ errdefer section.deinit(spv.gpa);
+ for (self.items.items(.result_id), 0..) |result_id, index| {
+ try self.emit(spv, result_id, @intToEnum(Ref, index), &section);
+ }
+ return section;
+}
+
+fn emit(
+ self: *const Self,
+ spv: *Module,
+ result_id: IdResult,
+ ref: Ref,
+ section: *Section,
+) !void {
+ const key = self.lookup(ref);
+ const Lit = spec.LiteralContextDependentNumber;
+ switch (key) {
+ .void_type => {
+ try section.emit(spv.gpa, .OpTypeVoid, .{ .id_result = result_id });
+ try spv.debugName(result_id, "void", .{});
+ },
+ .bool_type => {
+ try section.emit(spv.gpa, .OpTypeBool, .{ .id_result = result_id });
+ try spv.debugName(result_id, "bool", .{});
+ },
+ .int_type => |int| {
+ try section.emit(spv.gpa, .OpTypeInt, .{
+ .id_result = result_id,
+ .width = int.bits,
+ .signedness = switch (int.signedness) {
+ .unsigned => @as(spec.Word, 0),
+ .signed => 1,
+ },
+ });
+ const ui: []const u8 = switch (int.signedness) {
+ .unsigned => "u",
+ .signed => "i",
+ };
+ try spv.debugName(result_id, "{s}{}", .{ ui, int.bits });
+ },
+ .float_type => |float| {
+ try section.emit(spv.gpa, .OpTypeFloat, .{
+ .id_result = result_id,
+ .width = float.bits,
+ });
+ try spv.debugName(result_id, "f{}", .{float.bits});
+ },
+ .vector_type => |vector| {
+ try section.emit(spv.gpa, .OpTypeVector, .{
+ .id_result = result_id,
+ .component_type = self.resultId(vector.component_type),
+ .component_count = vector.component_count,
+ });
+ },
+ .array_type => |array| {
+ try section.emit(spv.gpa, .OpTypeArray, .{
+ .id_result = result_id,
+ .element_type = self.resultId(array.element_type),
+ .length = self.resultId(array.length),
+ });
+ if (array.stride != 0) {
+ try spv.decorate(result_id, .{ .ArrayStride = .{ .array_stride = array.stride } });
+ }
+ },
+ .function_type => |function| {
+ try section.emitRaw(spv.gpa, .OpTypeFunction, 2 + function.parameters.len);
+ section.writeOperand(IdResult, result_id);
+ section.writeOperand(IdResult, self.resultId(function.return_type));
+ for (function.parameters) |param_type| {
+ section.writeOperand(IdResult, self.resultId(param_type));
+ }
+ },
+ .ptr_type => |ptr| {
+ try section.emit(spv.gpa, .OpTypePointer, .{
+ .id_result = result_id,
+ .storage_class = ptr.storage_class,
+ .type = self.resultId(ptr.child_type),
+ });
+ // TODO: Decorations?
+ },
+ .struct_type => |struct_type| {
+ try section.emitRaw(spv.gpa, .OpTypeStruct, 1 + struct_type.member_types.len);
+ section.writeOperand(IdResult, result_id);
+ for (struct_type.member_types) |member_type| {
+ section.writeOperand(IdResult, self.resultId(member_type));
+ }
+ if (self.getString(struct_type.name)) |name| {
+ try spv.debugName(result_id, "{s}", .{name});
+ }
+ for (struct_type.memberNames(), 0..) |member_name, i| {
+ if (self.getString(member_name)) |name| {
+ try spv.memberDebugName(result_id, @intCast(u32, i), "{s}", .{name});
+ }
+ }
+ // TODO: Decorations?
+ },
+ .int => |int| {
+ const int_type = self.lookup(int.ty).int_type;
+ const ty_id = self.resultId(int.ty);
+ const lit: Lit = switch (int_type.bits) {
+ 1...32 => .{ .uint32 = int.toBits32() },
+ 33...64 => .{ .uint64 = int.toBits64() },
+ else => unreachable,
+ };
+
+ try section.emit(spv.gpa, .OpConstant, .{
+ .id_result_type = ty_id,
+ .id_result = result_id,
+ .value = lit,
+ });
+ },
+ .float => |float| {
+ const ty_id = self.resultId(float.ty);
+ const lit: Lit = switch (float.value) {
+ .float16 => |value| .{ .uint32 = @bitCast(u16, value) },
+ .float32 => |value| .{ .float32 = value },
+ .float64 => |value| .{ .float64 = value },
+ };
+ try section.emit(spv.gpa, .OpConstant, .{
+ .id_result_type = ty_id,
+ .id_result = result_id,
+ .value = lit,
+ });
+ },
+ .undef => |undef| {
+ try section.emit(spv.gpa, .OpUndef, .{
+ .id_result_type = self.resultId(undef.ty),
+ .id_result = result_id,
+ });
+ },
+ .null => |null_info| {
+ try section.emit(spv.gpa, .OpConstantNull, .{
+ .id_result_type = self.resultId(null_info.ty),
+ .id_result = result_id,
+ });
+ },
+ .bool => |bool_info| switch (bool_info.value) {
+ true => {
+ try section.emit(spv.gpa, .OpConstantTrue, .{
+ .id_result_type = self.resultId(bool_info.ty),
+ .id_result = result_id,
+ });
+ },
+ false => {
+ try section.emit(spv.gpa, .OpConstantFalse, .{
+ .id_result_type = self.resultId(bool_info.ty),
+ .id_result = result_id,
+ });
+ },
+ },
+ }
+}
+
+/// Add a key to this cache. Returns a reference to the key that
+/// was added. The corresponding result-id can be queried using
+/// self.resultId with the result.
+pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
+ const adapter: Key.Adapter = .{ .self = self };
+ const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter);
+ if (entry.found_existing) {
+ return @intToEnum(Ref, entry.index);
+ }
+ const result_id = spv.allocId();
+ const item: Item = switch (key) {
+ inline .void_type, .bool_type => .{
+ .tag = .type_simple,
+ .result_id = result_id,
+ .data = @enumToInt(key.toSimpleType()),
+ },
+ .int_type => |int| blk: {
+ const t: Tag = switch (int.signedness) {
+ .signed => .type_int_signed,
+ .unsigned => .type_int_unsigned,
+ };
+ break :blk .{
+ .tag = t,
+ .result_id = result_id,
+ .data = int.bits,
+ };
+ },
+ .float_type => |float| .{
+ .tag = .type_float,
+ .result_id = result_id,
+ .data = float.bits,
+ },
+ .vector_type => |vector| .{
+ .tag = .type_vector,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, vector),
+ },
+ .array_type => |array| .{
+ .tag = .type_array,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, array),
+ },
+ .function_type => |function| blk: {
+ const extra = try self.addExtra(spv, Tag.FunctionType{
+ .param_len = @intCast(u32, function.parameters.len),
+ .return_type = function.return_type,
+ });
+ try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, function.parameters));
+ break :blk .{
+ .tag = .type_function,
+ .result_id = result_id,
+ .data = extra,
+ };
+ },
+ .ptr_type => |ptr| switch (ptr.storage_class) {
+ .Generic => Item{
+ .tag = .type_ptr_generic,
+ .result_id = result_id,
+ .data = @enumToInt(ptr.child_type),
+ },
+ .CrossWorkgroup => Item{
+ .tag = .type_ptr_crosswgp,
+ .result_id = result_id,
+ .data = @enumToInt(ptr.child_type),
+ },
+ .Function => Item{
+ .tag = .type_ptr_function,
+ .result_id = result_id,
+ .data = @enumToInt(ptr.child_type),
+ },
+ else => |storage_class| Item{
+ .tag = .type_ptr_simple,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, Tag.SimplePointerType{
+ .storage_class = storage_class,
+ .child_type = ptr.child_type,
+ }),
+ },
+ },
+ .struct_type => |struct_type| blk: {
+ const extra = try self.addExtra(spv, Tag.SimpleStructType{
+ .name = struct_type.name,
+ .members_len = @intCast(u32, struct_type.member_types.len),
+ });
+ try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, struct_type.member_types));
+
+ if (struct_type.member_names) |member_names| {
+ try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, member_names));
+ break :blk Item{
+ .tag = .type_struct_simple_with_member_names,
+ .result_id = result_id,
+ .data = extra,
+ };
+ } else {
+ break :blk Item{
+ .tag = .type_struct_simple,
+ .result_id = result_id,
+ .data = extra,
+ };
+ }
+ },
+ .int => |int| blk: {
+ const int_type = self.lookup(int.ty).int_type;
+ if (int_type.signedness == .unsigned and int_type.bits == 8) {
+ break :blk .{
+ .tag = .uint8,
+ .result_id = result_id,
+ .data = int.to(u8),
+ };
+ } else if (int_type.signedness == .unsigned and int_type.bits == 32) {
+ break :blk .{
+ .tag = .uint32,
+ .result_id = result_id,
+ .data = int.to(u32),
+ };
+ }
+
+ switch (int.value) {
+ inline else => |val| {
+ if (val >= 0 and val <= std.math.maxInt(u32)) {
+ break :blk .{
+ .tag = .uint_small,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, Tag.UInt32{
+ .ty = int.ty,
+ .value = @intCast(u32, val),
+ }),
+ };
+ } else if (val >= std.math.minInt(i32) and val <= std.math.maxInt(i32)) {
+ break :blk .{
+ .tag = .int_small,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, Tag.Int32{
+ .ty = int.ty,
+ .value = @intCast(i32, val),
+ }),
+ };
+ } else if (val < 0) {
+ break :blk .{
+ .tag = .int_large,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @intCast(i64, val))),
+ };
+ } else {
+ break :blk .{
+ .tag = .uint_large,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @intCast(u64, val))),
+ };
+ }
+ },
+ }
+ },
+ .float => |float| switch (self.lookup(float.ty).float_type.bits) {
+ 16 => .{
+ .tag = .float16,
+ .result_id = result_id,
+ .data = @bitCast(u16, float.value.float16),
+ },
+ 32 => .{
+ .tag = .float32,
+ .result_id = result_id,
+ .data = @bitCast(u32, float.value.float32),
+ },
+ 64 => .{
+ .tag = .float64,
+ .result_id = result_id,
+ .data = try self.addExtra(spv, Tag.Float64.encode(float.value.float64)),
+ },
+ else => unreachable,
+ },
+ .undef => |undef| .{
+ .tag = .undef,
+ .result_id = result_id,
+ .data = @enumToInt(undef.ty),
+ },
+ .null => |null_info| .{
+ .tag = .null,
+ .result_id = result_id,
+ .data = @enumToInt(null_info.ty),
+ },
+ .bool => |bool_info| .{
+ .tag = switch (bool_info.value) {
+ true => Tag.bool_true,
+ false => Tag.bool_false,
+ },
+ .result_id = result_id,
+ .data = @enumToInt(bool_info.ty),
+ },
+ };
+ try self.items.append(spv.gpa, item);
+
+ return @intToEnum(Ref, entry.index);
+}
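// Editor's usage sketch (illustrative, not part of this diff): how `resolve`,
// `lookup`, and `resultId` are meant to compose. Assumes a `Module` value `spv`
// that owns this cache; the helper name is hypothetical.
fn exampleResolveU32Const(cache: *Self, spv: *Module) !IdResult {
    // Intern the u32 type; repeated calls with the same key return the same Ref.
    const ty_ref = try cache.resolve(spv, .{ .int_type = .{ .signedness = .unsigned, .bits = 32 } });
    // Intern a constant of that type. Internally this picks the compact `uint32` tag.
    const val_ref = try cache.resolve(spv, .{ .int = .{
        .ty = ty_ref,
        .value = .{ .uint64 = 42 },
    } });
    // A Ref can be turned back into a Key, or into the SPIR-V result-id.
    assert(cache.lookup(ty_ref).int_type.bits == 32);
    return cache.resultId(val_ref);
}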
+
+/// Turn a Ref back into a Key.
+/// The Key is valid until the next call to resolve().
+pub fn lookup(self: *const Self, ref: Ref) Key {
+ const item = self.items.get(@enumToInt(ref));
+ const data = item.data;
+ return switch (item.tag) {
+ .type_simple => switch (@intToEnum(Tag.SimpleType, data)) {
+ .void => .void_type,
+ .bool => .bool_type,
+ },
+ .type_int_signed => .{ .int_type = .{
+ .signedness = .signed,
+ .bits = @intCast(u16, data),
+ } },
+ .type_int_unsigned => .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = @intCast(u16, data),
+ } },
+ .type_float => .{ .float_type = .{
+ .bits = @intCast(u16, data),
+ } },
+ .type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) },
+ .type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) },
+ .type_function => {
+ const payload = self.extraDataTrail(Tag.FunctionType, data);
+ return .{
+ .function_type = .{
+ .return_type = payload.data.return_type,
+ .parameters = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.param_len]),
+ },
+ };
+ },
+ .type_ptr_generic => .{
+ .ptr_type = .{
+ .storage_class = .Generic,
+ .child_type = @intToEnum(Ref, data),
+ },
+ },
+ .type_ptr_crosswgp => .{
+ .ptr_type = .{
+ .storage_class = .CrossWorkgroup,
+ .child_type = @intToEnum(Ref, data),
+ },
+ },
+ .type_ptr_function => .{
+ .ptr_type = .{
+ .storage_class = .Function,
+ .child_type = @intToEnum(Ref, data),
+ },
+ },
+ .type_ptr_simple => {
+ const payload = self.extraData(Tag.SimplePointerType, data);
+ return .{
+ .ptr_type = .{
+ .storage_class = payload.storage_class,
+ .child_type = payload.child_type,
+ },
+ };
+ },
+ .type_struct_simple => {
+ const payload = self.extraDataTrail(Tag.SimpleStructType, data);
+ const member_types = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.members_len]);
+ return .{
+ .struct_type = .{
+ .name = payload.data.name,
+ .member_types = member_types,
+ .member_names = null,
+ },
+ };
+ },
+ .type_struct_simple_with_member_names => {
+ const payload = self.extraDataTrail(Tag.SimpleStructType, data);
+ const trailing = self.extra.items[payload.trail..];
+ const member_types = @ptrCast([]const Ref, trailing[0..payload.data.members_len]);
+ const member_names = @ptrCast([]const String, trailing[payload.data.members_len..][0..payload.data.members_len]);
+ return .{
+ .struct_type = .{
+ .name = payload.data.name,
+ .member_types = member_types,
+ .member_names = member_names,
+ },
+ };
+ },
+ .float16 => .{ .float = .{
+ .ty = self.get(.{ .float_type = .{ .bits = 16 } }),
+ .value = .{ .float16 = @bitCast(f16, @intCast(u16, data)) },
+ } },
+ .float32 => .{ .float = .{
+ .ty = self.get(.{ .float_type = .{ .bits = 32 } }),
+ .value = .{ .float32 = @bitCast(f32, data) },
+ } },
+ .float64 => .{ .float = .{
+ .ty = self.get(.{ .float_type = .{ .bits = 64 } }),
+ .value = .{ .float64 = self.extraData(Tag.Float64, data).decode() },
+ } },
+ .uint8 => .{ .int = .{
+ .ty = self.get(.{ .int_type = .{ .signedness = .unsigned, .bits = 8 } }),
+ .value = .{ .uint64 = data },
+ } },
+ .uint32 => .{ .int = .{
+ .ty = self.get(.{ .int_type = .{ .signedness = .unsigned, .bits = 32 } }),
+ .value = .{ .uint64 = data },
+ } },
+ .int_small => {
+ const payload = self.extraData(Tag.Int32, data);
+ return .{ .int = .{
+ .ty = payload.ty,
+ .value = .{ .int64 = payload.value },
+ } };
+ },
+ .uint_small => {
+ const payload = self.extraData(Tag.UInt32, data);
+ return .{ .int = .{
+ .ty = payload.ty,
+ .value = .{ .uint64 = payload.value },
+ } };
+ },
+ .int_large => {
+ const payload = self.extraData(Tag.Int64, data);
+ return .{ .int = .{
+ .ty = payload.ty,
+ .value = .{ .int64 = payload.decode() },
+ } };
+ },
+ .uint_large => {
+ const payload = self.extraData(Tag.UInt64, data);
+ return .{ .int = .{
+ .ty = payload.ty,
+ .value = .{ .uint64 = payload.decode() },
+ } };
+ },
+ .undef => .{ .undef = .{
+ .ty = @intToEnum(Ref, data),
+ } },
+ .null => .{ .null = .{
+ .ty = @intToEnum(Ref, data),
+ } },
+ .bool_true => .{ .bool = .{
+ .ty = @intToEnum(Ref, data),
+ .value = true,
+ } },
+ .bool_false => .{ .bool = .{
+ .ty = @intToEnum(Ref, data),
+ .value = false,
+ } },
+ };
+}
+
+/// Look up the result-id that corresponds to a particular ref.
+pub fn resultId(self: Self, ref: Ref) IdResult {
+ return self.items.items(.result_id)[@enumToInt(ref)];
+}
+
+/// Get the ref for a key that has already been added to the cache.
+fn get(self: *const Self, key: Key) Ref {
+ const adapter: Key.Adapter = .{ .self = self };
+ const index = self.map.getIndexAdapted(key, adapter).?;
+ return @intToEnum(Ref, index);
+}
+
+fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
+ const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
+ try self.extra.ensureUnusedCapacity(spv.gpa, fields.len);
+ return try self.addExtraAssumeCapacity(extra);
+}
+
+fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 {
+ const payload_offset = @intCast(u32, self.extra.items.len);
+ inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
+ const field_val = @field(extra, field.name);
+ const word = switch (field.type) {
+ u32 => field_val,
+ i32 => @bitCast(u32, field_val),
+ Ref => @enumToInt(field_val),
+ StorageClass => @enumToInt(field_val),
+ String => @enumToInt(field_val),
+ else => @compileError("Invalid type: " ++ @typeName(field.type)),
+ };
+ self.extra.appendAssumeCapacity(word);
+ }
+ return payload_offset;
+}
+
+fn extraData(self: Self, comptime T: type, offset: u32) T {
+ return self.extraDataTrail(T, offset).data;
+}
+
+fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, trail: u32 } {
+ var result: T = undefined;
+ const fields = @typeInfo(T).Struct.fields;
+ inline for (fields, 0..) |field, i| {
+ const word = self.extra.items[offset + i];
+ @field(result, field.name) = switch (field.type) {
+ u32 => word,
+ i32 => @bitCast(i32, word),
+ Ref => @intToEnum(Ref, word),
+ StorageClass => @intToEnum(StorageClass, word),
+ String => @intToEnum(String, word),
+ else => @compileError("Invalid type: " ++ @typeName(field.type)),
+ };
+ }
+ return .{
+ .data = result,
+ .trail = offset + @intCast(u32, fields.len),
+ };
+}
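// Editor's note (illustrative, not part of this diff): extra payloads round-trip
// through addExtra/extraDataTrail as packed u32 words; trailing items start at
// the returned `trail` offset. A minimal sketch for a function type payload:
fn exampleExtraRoundTrip(self: *Self, spv: *Module, ret_ty: Ref, params: []const Ref) !void {
    const offset = try self.addExtra(spv, Tag.FunctionType{
        .param_len = @intCast(u32, params.len),
        .return_type = ret_ty,
    });
    try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, params));
    const payload = self.extraDataTrail(Tag.FunctionType, offset);
    assert(payload.data.return_type == ret_ty);
    // FunctionType has two fields, so the trailing parameter Refs start 2 words in.
    assert(payload.trail == offset + 2);
}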
+
+/// Represents a reference to some null-terminated string.
+pub const String = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub const Adapter = struct {
+ self: *const Self,
+
+ pub fn eql(ctx: @This(), a: []const u8, _: void, b_index: usize) bool {
+ const offset = ctx.self.strings.values()[b_index];
+ const b = std.mem.sliceTo(ctx.self.string_bytes.items[offset..], 0);
+ return std.mem.eql(u8, a, b);
+ }
+
+ pub fn hash(ctx: @This(), a: []const u8) u32 {
+ _ = ctx;
+ var hasher = std.hash.Wyhash.init(0);
+ hasher.update(a);
+ return @truncate(u32, hasher.final());
+ }
+ };
+};
+
+/// Add a string to the cache. Must not contain any 0 values.
+pub fn addString(self: *Self, spv: *Module, str: []const u8) !String {
+ assert(std.mem.indexOfScalar(u8, str, 0) == null);
+ const adapter = String.Adapter{ .self = self };
+ const entry = try self.strings.getOrPutAdapted(spv.gpa, str, adapter);
+ if (!entry.found_existing) {
+ const offset = self.string_bytes.items.len;
+ try self.string_bytes.ensureUnusedCapacity(spv.gpa, 1 + str.len);
+ self.string_bytes.appendSliceAssumeCapacity(str);
+ self.string_bytes.appendAssumeCapacity(0);
+ entry.value_ptr.* = @intCast(u32, offset);
+ }
+
+ return @intToEnum(String, entry.index);
+}
+
+pub fn getString(self: *const Self, ref: String) ?[]const u8 {
+ return switch (ref) {
+ .none => null,
+ else => std.mem.sliceTo(self.string_bytes.items[self.strings.values()[@enumToInt(ref)]..], 0),
+ };
+}
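// Editor's usage sketch (illustrative, not part of this diff): strings are
// interned once and can be attached to struct types as (member) names.
fn exampleStructWithNames(cache: *Self, spv: *Module, u32_ty: Ref) !Ref {
    const name = try cache.addString(spv, "Pair");
    const x = try cache.addString(spv, "x");
    const y = try cache.addString(spv, "y");
    assert(std.mem.eql(u8, cache.getString(name).?, "Pair"));
    // Member names are optional; passing them selects the
    // type_struct_simple_with_member_names representation.
    return try cache.resolve(spv, .{ .struct_type = .{
        .name = name,
        .member_types = &.{ u32_ty, u32_ty },
        .member_names = &.{ x, y },
    } });
}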
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index 4bd6c834ce..d53dcb4368 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -11,7 +11,8 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
-const ZigDecl = @import("../../Module.zig").Decl;
+const ZigModule = @import("../../Module.zig");
+const ZigDecl = ZigModule.Decl;
const spec = @import("spec.zig");
const Word = spec.Word;
@@ -20,11 +21,13 @@ const IdResult = spec.IdResult;
const IdResultType = spec.IdResultType;
const Section = @import("Section.zig");
-const Type = @import("type.zig").Type;
-const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
+const Cache = @import("Cache.zig");
+pub const CacheKey = Cache.Key;
+pub const CacheRef = Cache.Ref;
+pub const CacheString = Cache.String;
-/// This structure represents a function that is in-progress of being emitted.
+/// This structure represents a function that is in the process of being emitted.
/// Commonly, the contents of this structure will be merged with the appropriate
/// sections of the module and re-used. Note that the SPIR-V module system makes
/// no attempt of compacting result-id's, so any Fn instance should ultimately
@@ -126,7 +129,13 @@ sections: struct {
/// Annotation instructions (OpDecorate etc).
annotations: Section = .{},
/// Type declarations, constants, global variables
- /// Below this section, OpLine and OpNoLine is allowed.
+ /// From this section, OpLine and OpNoLine are allowed.
+ /// According to the SPIR-V documentation, this section normally
+ /// also holds type and constant instructions. These are managed
+ /// via the cache instead, which is solely responsible for
+ /// them; they are inserted between this and the previous
+ /// section when emitting the final binary.
+ /// TODO: Do we need this section? Globals are also managed with another mechanism.
types_globals_constants: Section = .{},
// Functions without a body - skip for now.
/// Regular function definitions.
@@ -141,11 +150,9 @@ next_result_id: Word,
/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
source_file_names: std.StringHashMapUnmanaged(IdRef) = .{},
-/// SPIR-V type cache. Note that according to SPIR-V spec section 2.8, Types and Variables, non-pointer
-/// non-aggrerate types (which includes matrices and vectors) must have a _unique_ representation in
-/// the final binary.
-/// Note: Uses ArrayHashMap which is insertion ordered, so that we may refer to other types by index (Type.Ref).
-type_cache: TypeCache = .{},
+/// SPIR-V type and constant cache. This structure stores type and constant
+/// information in a more memory-efficient, deduplicated manner.
+cache: Cache = .{},
/// Set of Decls, referred to by Decl.Index.
decls: std.ArrayListUnmanaged(Decl) = .{},
@@ -163,7 +170,7 @@ globals: struct {
globals: std.AutoArrayHashMapUnmanaged(Decl.Index, Global) = .{},
/// This pseudo-section contains the initialization code for all the globals. Instructions from
/// here are reordered when flushing the module. Its contents should be part of the
- /// `types_globals_constants` SPIR-V section.
+ /// `types_globals_constants` SPIR-V section when the module is emitted.
section: Section = .{},
} = .{},
@@ -182,11 +189,10 @@ pub fn deinit(self: *Module) void {
self.sections.debug_strings.deinit(self.gpa);
self.sections.debug_names.deinit(self.gpa);
self.sections.annotations.deinit(self.gpa);
- self.sections.types_globals_constants.deinit(self.gpa);
self.sections.functions.deinit(self.gpa);
self.source_file_names.deinit(self.gpa);
- self.type_cache.deinit(self.gpa);
+ self.cache.deinit(self);
self.decls.deinit(self.gpa);
self.decl_deps.deinit(self.gpa);
@@ -213,6 +219,22 @@ pub fn idBound(self: Module) Word {
return self.next_result_id;
}
+pub fn resolve(self: *Module, key: CacheKey) !CacheRef {
+ return self.cache.resolve(self, key);
+}
+
+pub fn resultId(self: *const Module, ref: CacheRef) IdResult {
+ return self.cache.resultId(ref);
+}
+
+pub fn resolveId(self: *Module, key: CacheKey) !IdResult {
+ return self.resultId(try self.resolve(key));
+}
+
+pub fn resolveString(self: *Module, str: []const u8) !CacheString {
+ return try self.cache.addString(self, str);
+}
+
fn orderGlobalsInto(
self: *Module,
decl_index: Decl.Index,
@@ -324,6 +346,9 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
var entry_points = try self.entryPoints();
defer entry_points.deinit(self.gpa);
+ var types_constants = try self.cache.materialize(self);
+ defer types_constants.deinit(self.gpa);
+
// Note: needs to be kept in order according to section 2.3!
const buffers = &[_][]const Word{
&header,
@@ -334,6 +359,7 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
self.sections.debug_strings.toWords(),
self.sections.debug_names.toWords(),
self.sections.annotations.toWords(),
+ types_constants.toWords(),
self.sections.types_globals_constants.toWords(),
globals.toWords(),
self.sections.functions.toWords(),
@@ -364,8 +390,8 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void {
/// Fetch the result-id of an OpString instruction that encodes the path of the source
/// file of the decl. This function may also emit an OpSource with source-level information regarding
/// the decl.
-pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
- const path = decl.getFileScope().sub_file_path;
+pub fn resolveSourceFileName(self: *Module, zig_module: *ZigModule, zig_decl: *ZigDecl) !IdRef {
+ const path = zig_decl.getFileScope(zig_module).sub_file_path;
const result = try self.source_file_names.getOrPut(self.gpa, path);
if (!result.found_existing) {
const file_result_id = self.allocId();
@@ -386,405 +412,71 @@ pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
return result.value_ptr.*;
}
-/// Fetch a result-id for a spir-v type. This function deduplicates the type as appropriate,
-/// and returns a cached version if that exists.
-/// Note: This function does not attempt to perform any validation on the type.
-/// The type is emitted in a shallow fashion; any child types should already
-/// be emitted at this point.
-pub fn resolveType(self: *Module, ty: Type) !Type.Ref {
- const result = try self.type_cache.getOrPut(self.gpa, ty);
- const index = @intToEnum(Type.Ref, result.index);
-
- if (!result.found_existing) {
- const ref = try self.emitType(ty);
- self.type_cache.values()[result.index] = ref;
- }
-
- return index;
-}
-
-pub fn resolveTypeId(self: *Module, ty: Type) !IdResultType {
- const ty_ref = try self.resolveType(ty);
- return self.typeId(ty_ref);
+pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
+ return try self.resolve(.{ .int_type = .{
+ .signedness = signedness,
+ .bits = bits,
+ } });
}
-pub fn typeRefType(self: Module, ty_ref: Type.Ref) Type {
- return self.type_cache.keys()[@enumToInt(ty_ref)];
+pub fn arrayType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
+ const len_ty_ref = try self.resolve(.{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 32,
+ } });
+ const len_ref = try self.resolve(.{ .int = .{
+ .ty = len_ty_ref,
+ .value = .{ .uint64 = len },
+ } });
+ return try self.resolve(.{ .array_type = .{
+ .element_type = elem_ty_ref,
+ .length = len_ref,
+ } });
}
-/// Get the result-id of a particular type, by reference. Asserts type_ref is valid.
-pub fn typeId(self: Module, ty_ref: Type.Ref) IdResultType {
- return self.type_cache.values()[@enumToInt(ty_ref)];
+pub fn ptrType(
+ self: *Module,
+ child: CacheRef,
+ storage_class: spec.StorageClass,
+) !CacheRef {
+ return try self.resolve(.{ .ptr_type = .{
+ .storage_class = storage_class,
+ .child_type = child,
+ } });
}
-/// Unconditionally emit a spir-v type into the appropriate section.
-/// Note: If this function is called with a type that is already generated, it may yield an invalid module
-/// as non-pointer non-aggregrate types must me unique!
-/// Note: This function does not attempt to perform any validation on the type.
-/// The type is emitted in a shallow fashion; any child types should already
-/// be emitted at this point.
-pub fn emitType(self: *Module, ty: Type) error{OutOfMemory}!IdResultType {
- const result_id = self.allocId();
- const ref_id = result_id;
- const types = &self.sections.types_globals_constants;
- const debug_names = &self.sections.debug_names;
- const result_id_operand = .{ .id_result = result_id };
-
- switch (ty.tag()) {
- .void => {
- try types.emit(self.gpa, .OpTypeVoid, result_id_operand);
- try debug_names.emit(self.gpa, .OpName, .{
- .target = result_id,
- .name = "void",
- });
- },
- .bool => {
- try types.emit(self.gpa, .OpTypeBool, result_id_operand);
- try debug_names.emit(self.gpa, .OpName, .{
- .target = result_id,
- .name = "bool",
- });
- },
- .u8,
- .u16,
- .u32,
- .u64,
- .i8,
- .i16,
- .i32,
- .i64,
- .int,
- => {
- // TODO: Kernels do not support OpTypeInt that is signed. We can probably
- // can get rid of the signedness all together, in Shaders also.
- const bits = ty.intFloatBits();
- const signedness: spec.LiteralInteger = switch (ty.intSignedness()) {
- .unsigned => 0,
- .signed => 1,
- };
-
- try types.emit(self.gpa, .OpTypeInt, .{
- .id_result = result_id,
- .width = bits,
- .signedness = signedness,
- });
-
- const ui: []const u8 = switch (signedness) {
- 0 => "u",
- 1 => "i",
- else => unreachable,
- };
- const name = try std.fmt.allocPrint(self.gpa, "{s}{}", .{ ui, bits });
- defer self.gpa.free(name);
-
- try debug_names.emit(self.gpa, .OpName, .{
- .target = result_id,
- .name = name,
- });
- },
- .f16, .f32, .f64 => {
- const bits = ty.intFloatBits();
- try types.emit(self.gpa, .OpTypeFloat, .{
- .id_result = result_id,
- .width = bits,
- });
-
- const name = try std.fmt.allocPrint(self.gpa, "f{}", .{bits});
- defer self.gpa.free(name);
- try debug_names.emit(self.gpa, .OpName, .{
- .target = result_id,
- .name = name,
- });
- },
- .vector => try types.emit(self.gpa, .OpTypeVector, .{
- .id_result = result_id,
- .component_type = self.typeId(ty.childType()),
- .component_count = ty.payload(.vector).component_count,
- }),
- .matrix => try types.emit(self.gpa, .OpTypeMatrix, .{
- .id_result = result_id,
- .column_type = self.typeId(ty.childType()),
- .column_count = ty.payload(.matrix).column_count,
- }),
- .image => {
- const info = ty.payload(.image);
- try types.emit(self.gpa, .OpTypeImage, .{
- .id_result = result_id,
- .sampled_type = self.typeId(ty.childType()),
- .dim = info.dim,
- .depth = @enumToInt(info.depth),
- .arrayed = @boolToInt(info.arrayed),
- .ms = @boolToInt(info.multisampled),
- .sampled = @enumToInt(info.sampled),
- .image_format = info.format,
- .access_qualifier = info.access_qualifier,
- });
- },
- .sampler => try types.emit(self.gpa, .OpTypeSampler, result_id_operand),
- .sampled_image => try types.emit(self.gpa, .OpTypeSampledImage, .{
- .id_result = result_id,
- .image_type = self.typeId(ty.childType()),
- }),
- .array => {
- const info = ty.payload(.array);
- assert(info.length != 0);
-
- const size_type = Type.initTag(.u32);
- const size_type_id = try self.resolveTypeId(size_type);
- const length_id = self.allocId();
- try self.emitConstant(size_type_id, length_id, .{ .uint32 = info.length });
-
- try types.emit(self.gpa, .OpTypeArray, .{
- .id_result = result_id,
- .element_type = self.typeId(ty.childType()),
- .length = length_id,
- });
- if (info.array_stride != 0) {
- try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
- }
- },
- .runtime_array => {
- const info = ty.payload(.runtime_array);
- try types.emit(self.gpa, .OpTypeRuntimeArray, .{
- .id_result = result_id,
- .element_type = self.typeId(ty.childType()),
- });
- if (info.array_stride != 0) {
- try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
- }
- },
- .@"struct" => {
- const info = ty.payload(.@"struct");
- try types.emitRaw(self.gpa, .OpTypeStruct, 1 + info.members.len);
- types.writeOperand(IdResult, result_id);
- for (info.members) |member| {
- types.writeOperand(IdRef, self.typeId(member.ty));
- }
- try self.decorateStruct(ref_id, info);
+pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
+ const ty = self.cache.lookup(ty_ref).int_type;
+ const Value = Cache.Key.Int.Value;
+ return try self.resolveId(.{ .int = .{
+ .ty = ty_ref,
+ .value = switch (ty.signedness) {
+ .signed => Value{ .int64 = @intCast(i64, value) },
+ .unsigned => Value{ .uint64 = @intCast(u64, value) },
},
- .@"opaque" => try types.emit(self.gpa, .OpTypeOpaque, .{
- .id_result = result_id,
- .literal_string = ty.payload(.@"opaque").name,
- }),
- .pointer => {
- const info = ty.payload(.pointer);
- try types.emit(self.gpa, .OpTypePointer, .{
- .id_result = result_id,
- .storage_class = info.storage_class,
- .type = self.typeId(ty.childType()),
- });
- if (info.array_stride != 0) {
- try self.decorate(ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
- }
- if (info.alignment != 0) {
- try self.decorate(ref_id, .{ .Alignment = .{ .alignment = info.alignment } });
- }
- if (info.max_byte_offset) |max_byte_offset| {
- try self.decorate(ref_id, .{ .MaxByteOffset = .{ .max_byte_offset = max_byte_offset } });
- }
- },
- .function => {
- const info = ty.payload(.function);
- try types.emitRaw(self.gpa, .OpTypeFunction, 2 + info.parameters.len);
- types.writeOperand(IdResult, result_id);
- types.writeOperand(IdRef, self.typeId(info.return_type));
- for (info.parameters) |parameter_type| {
- types.writeOperand(IdRef, self.typeId(parameter_type));
- }
- },
- .event => try types.emit(self.gpa, .OpTypeEvent, result_id_operand),
- .device_event => try types.emit(self.gpa, .OpTypeDeviceEvent, result_id_operand),
- .reserve_id => try types.emit(self.gpa, .OpTypeReserveId, result_id_operand),
- .queue => try types.emit(self.gpa, .OpTypeQueue, result_id_operand),
- .pipe => try types.emit(self.gpa, .OpTypePipe, .{
- .id_result = result_id,
- .qualifier = ty.payload(.pipe).qualifier,
- }),
- .pipe_storage => try types.emit(self.gpa, .OpTypePipeStorage, result_id_operand),
- .named_barrier => try types.emit(self.gpa, .OpTypeNamedBarrier, result_id_operand),
- }
-
- return result_id;
+ } });
}
-fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct) !void {
- const debug_names = &self.sections.debug_names;
-
- if (info.name.len != 0) {
- try debug_names.emit(self.gpa, .OpName, .{
- .target = target,
- .name = info.name,
- });
- }
-
- // Decorations for the struct type itself.
- if (info.decorations.block)
- try self.decorate(target, .Block);
- if (info.decorations.buffer_block)
- try self.decorate(target, .BufferBlock);
- if (info.decorations.glsl_shared)
- try self.decorate(target, .GLSLShared);
- if (info.decorations.glsl_packed)
- try self.decorate(target, .GLSLPacked);
- if (info.decorations.c_packed)
- try self.decorate(target, .CPacked);
-
- // Decorations for the struct members.
- const extra = info.member_decoration_extra;
- var extra_i: u32 = 0;
- for (info.members, 0..) |member, i| {
- const d = member.decorations;
- const index = @intCast(Word, i);
-
- if (member.name.len != 0) {
- try debug_names.emit(self.gpa, .OpMemberName, .{
- .type = target,
- .member = index,
- .name = member.name,
- });
- }
-
- switch (member.offset) {
- .none => {},
- else => try self.decorateMember(
- target,
- index,
- .{ .Offset = .{ .byte_offset = @enumToInt(member.offset) } },
- ),
- }
-
- switch (d.matrix_layout) {
- .row_major => try self.decorateMember(target, index, .RowMajor),
- .col_major => try self.decorateMember(target, index, .ColMajor),
- .none => {},
- }
- if (d.matrix_layout != .none) {
- try self.decorateMember(target, index, .{
- .MatrixStride = .{ .matrix_stride = extra[extra_i] },
- });
- extra_i += 1;
- }
-
- if (d.no_perspective)
- try self.decorateMember(target, index, .NoPerspective);
- if (d.flat)
- try self.decorateMember(target, index, .Flat);
- if (d.patch)
- try self.decorateMember(target, index, .Patch);
- if (d.centroid)
- try self.decorateMember(target, index, .Centroid);
- if (d.sample)
- try self.decorateMember(target, index, .Sample);
- if (d.invariant)
- try self.decorateMember(target, index, .Invariant);
- if (d.@"volatile")
- try self.decorateMember(target, index, .Volatile);
- if (d.coherent)
- try self.decorateMember(target, index, .Coherent);
- if (d.non_writable)
- try self.decorateMember(target, index, .NonWritable);
- if (d.non_readable)
- try self.decorateMember(target, index, .NonReadable);
-
- if (d.builtin) {
- try self.decorateMember(target, index, .{
- .BuiltIn = .{ .built_in = @intToEnum(spec.BuiltIn, extra[extra_i]) },
- });
- extra_i += 1;
- }
- if (d.stream) {
- try self.decorateMember(target, index, .{
- .Stream = .{ .stream_number = extra[extra_i] },
- });
- extra_i += 1;
- }
- if (d.location) {
- try self.decorateMember(target, index, .{
- .Location = .{ .location = extra[extra_i] },
- });
- extra_i += 1;
- }
- if (d.component) {
- try self.decorateMember(target, index, .{
- .Component = .{ .component = extra[extra_i] },
- });
- extra_i += 1;
- }
- if (d.xfb_buffer) {
- try self.decorateMember(target, index, .{
- .XfbBuffer = .{ .xfb_buffer_number = extra[extra_i] },
- });
- extra_i += 1;
- }
- if (d.xfb_stride) {
- try self.decorateMember(target, index, .{
- .XfbStride = .{ .xfb_stride = extra[extra_i] },
- });
- extra_i += 1;
- }
- if (d.user_semantic) {
- const len = extra[extra_i];
- extra_i += 1;
- const semantic = @ptrCast([*]const u8, &extra[extra_i])[0..len];
- try self.decorateMember(target, index, .{
- .UserSemantic = .{ .semantic = semantic },
- });
- extra_i += std.math.divCeil(u32, extra_i, @sizeOf(u32)) catch unreachable;
- }
- }
+pub fn constUndef(self: *Module, ty_ref: CacheRef) !IdRef {
+ return try self.resolveId(.{ .undef = .{ .ty = ty_ref } });
}
-pub fn simpleStructType(self: *Module, members: []const Type.Payload.Struct.Member) !Type.Ref {
- const payload = try self.arena.create(Type.Payload.Struct);
- payload.* = .{
- .members = try self.arena.dupe(Type.Payload.Struct.Member, members),
- .decorations = .{},
- };
- return try self.resolveType(Type.initPayload(&payload.base));
+pub fn constNull(self: *Module, ty_ref: CacheRef) !IdRef {
+ return try self.resolveId(.{ .null = .{ .ty = ty_ref } });
}
-pub fn arrayType(self: *Module, len: u32, ty: Type.Ref) !Type.Ref {
- const payload = try self.arena.create(Type.Payload.Array);
- payload.* = .{
- .element_type = ty,
- .length = len,
- };
- return try self.resolveType(Type.initPayload(&payload.base));
+pub fn constBool(self: *Module, ty_ref: CacheRef, value: bool) !IdRef {
+ return try self.resolveId(.{ .bool = .{ .ty = ty_ref, .value = value } });
}
-pub fn ptrType(
- self: *Module,
- child: Type.Ref,
- storage_class: spec.StorageClass,
- alignment: u32,
-) !Type.Ref {
- const ptr_payload = try self.arena.create(Type.Payload.Pointer);
- ptr_payload.* = .{
- .storage_class = storage_class,
- .child_type = child,
- .alignment = alignment,
- };
- return try self.resolveType(Type.initPayload(&ptr_payload.base));
-}
-
-pub fn changePtrStorageClass(self: *Module, ptr_ty_ref: Type.Ref, new_storage_class: spec.StorageClass) !Type.Ref {
- const payload = try self.arena.create(Type.Payload.Pointer);
- payload.* = self.typeRefType(ptr_ty_ref).payload(.pointer).*;
- payload.storage_class = new_storage_class;
- return try self.resolveType(Type.initPayload(&payload.base));
-}
-
-pub fn emitConstant(
- self: *Module,
- ty_id: IdRef,
- result_id: IdRef,
- value: spec.LiteralContextDependentNumber,
-) !void {
- try self.sections.types_globals_constants.emit(self.gpa, .OpConstant, .{
- .id_result_type = ty_id,
+pub fn constComposite(self: *Module, ty_ref: CacheRef, members: []const IdRef) !IdRef {
+ const result_id = self.allocId();
+ try self.sections.types_globals_constants.emit(self.gpa, .OpSpecConstantComposite, .{
+ .id_result_type = self.resultId(ty_ref),
.id_result = result_id,
- .value = value,
+ .constituents = members,
});
+ return result_id;
}
/// Decorate a result-id.
@@ -873,3 +565,22 @@ pub fn declareEntryPoint(self: *Module, decl_index: Decl.Index, name: []const u8
.name = try self.arena.dupe(u8, name),
});
}
+
+pub fn debugName(self: *Module, target: IdResult, comptime fmt: []const u8, args: anytype) !void {
+ const name = try std.fmt.allocPrint(self.gpa, fmt, args);
+ defer self.gpa.free(name);
+ try self.sections.debug_names.emit(self.gpa, .OpName, .{
+ .target = target,
+ .name = name,
+ });
+}
+
+pub fn memberDebugName(self: *Module, target: IdResult, member: u32, comptime fmt: []const u8, args: anytype) !void {
+ const name = try std.fmt.allocPrint(self.gpa, fmt, args);
+ defer self.gpa.free(name);
+ try self.sections.debug_names.emit(self.gpa, .OpMemberName, .{
+ .type = target,
+ .member = member,
+ .name = name,
+ });
+}
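// Editor's usage sketch (illustrative, not part of this diff): how the new
// Module helpers compose with the cache when lowering a simple type and
// constant. The function name and concrete values are hypothetical.
fn exampleLowerViaCache(self: *Module) !void {
    const u32_ref = try self.intType(.unsigned, 32); // CacheRef for u32
    const arr_ref = try self.arrayType(4, u32_ref); // [4 x u32]; length interned as a constant
    const zero_id = try self.constInt(u32_ref, 0); // deduplicated OpConstant
    try self.debugName(self.resultId(arr_ref), "arr4_u32", .{});
    _ = zero_id;
}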
diff --git a/src/codegen/spirv/spec.zig b/src/codegen/spirv/spec.zig
index 60d16461cb..f73487f41f 100644
--- a/src/codegen/spirv/spec.zig
+++ b/src/codegen/spirv/spec.zig
@@ -1,6 +1,6 @@
//! This file is auto-generated by tools/gen_spirv_spec.zig.
-const Version = @import("std").builtin.Version;
+const Version = @import("std").SemanticVersion;
pub const Word = u32;
pub const IdResult = struct {
diff --git a/src/codegen/spirv/type.zig b/src/codegen/spirv/type.zig
deleted file mode 100644
index 2e1661c14e..0000000000
--- a/src/codegen/spirv/type.zig
+++ /dev/null
@@ -1,567 +0,0 @@
-//! This module models a SPIR-V Type. These are distinct from Zig types, with some types
-//! which are not representable by Zig directly.
-
-const std = @import("std");
-const assert = std.debug.assert;
-const Signedness = std.builtin.Signedness;
-const Allocator = std.mem.Allocator;
-
-const spec = @import("spec.zig");
-
-pub const Type = extern union {
- tag_if_small_enough: Tag,
- ptr_otherwise: *Payload,
-
- /// A reference to another SPIR-V type.
- pub const Ref = enum(u32) { _ };
-
- pub fn initTag(comptime small_tag: Tag) Type {
- comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
- return .{ .tag_if_small_enough = small_tag };
- }
-
- pub fn initPayload(pl: *Payload) Type {
- assert(@enumToInt(pl.tag) >= Tag.no_payload_count);
- return .{ .ptr_otherwise = pl };
- }
-
- pub fn int(arena: Allocator, signedness: Signedness, bits: u16) !Type {
- const bits_and_signedness = switch (signedness) {
- .signed => -@as(i32, bits),
- .unsigned => @as(i32, bits),
- };
-
- return switch (bits_and_signedness) {
- 8 => initTag(.u8),
- 16 => initTag(.u16),
- 32 => initTag(.u32),
- 64 => initTag(.u64),
- -8 => initTag(.i8),
- -16 => initTag(.i16),
- -32 => initTag(.i32),
- -64 => initTag(.i64),
- else => {
- const int_payload = try arena.create(Payload.Int);
- int_payload.* = .{
- .width = bits,
- .signedness = signedness,
- };
- return initPayload(&int_payload.base);
- },
- };
- }
-
- pub fn float(bits: u16) Type {
- return switch (bits) {
- 16 => initTag(.f16),
- 32 => initTag(.f32),
- 64 => initTag(.f64),
- else => unreachable, // Enable more types if required.
- };
- }
-
- pub fn tag(self: Type) Tag {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
- return self.tag_if_small_enough;
- } else {
- return self.ptr_otherwise.tag;
- }
- }
-
- pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
- return null;
-
- if (self.ptr_otherwise.tag == t)
- return self.payload(t);
-
- return null;
- }
-
- /// Access the payload of a type directly.
- pub fn payload(self: Type, comptime t: Tag) *t.Type() {
- assert(self.tag() == t);
- return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
- }
-
- /// Perform a shallow equality test, comparing two types while assuming that any child types
- /// are equal only if their references are equal.
- pub fn eqlShallow(a: Type, b: Type) bool {
- if (a.tag_if_small_enough == b.tag_if_small_enough)
- return true;
-
- const tag_a = a.tag();
- const tag_b = b.tag();
- if (tag_a != tag_b)
- return false;
-
- inline for (@typeInfo(Tag).Enum.fields) |field| {
- const t = @field(Tag, field.name);
- if (t == tag_a) {
- return eqlPayloads(t, a, b);
- }
- }
-
- unreachable;
- }
-
- /// Compare the payload of two compatible tags, given that we already know the tag of both types.
- fn eqlPayloads(comptime t: Tag, a: Type, b: Type) bool {
- switch (t) {
- .void,
- .bool,
- .sampler,
- .event,
- .device_event,
- .reserve_id,
- .queue,
- .pipe_storage,
- .named_barrier,
- .u8,
- .u16,
- .u32,
- .u64,
- .i8,
- .i16,
- .i32,
- .i64,
- .f16,
- .f32,
- .f64,
- => return true,
- .int,
- .vector,
- .matrix,
- .sampled_image,
- .array,
- .runtime_array,
- .@"opaque",
- .pointer,
- .pipe,
- .image,
- => return std.meta.eql(a.payload(t).*, b.payload(t).*),
- .@"struct" => {
- const struct_a = a.payload(.@"struct");
- const struct_b = b.payload(.@"struct");
- if (struct_a.members.len != struct_b.members.len)
- return false;
- for (struct_a.members, 0..) |mem_a, i| {
- if (!std.meta.eql(mem_a, struct_b.members[i]))
- return false;
- }
- return true;
- },
- .function => {
- const fn_a = a.payload(.function);
- const fn_b = b.payload(.function);
- if (fn_a.return_type != fn_b.return_type)
- return false;
- return std.mem.eql(Ref, fn_a.parameters, fn_b.parameters);
- },
- }
- }
-
- /// Perform a shallow hash, which hashes the reference value of child types instead of recursing.
- pub fn hashShallow(self: Type) u64 {
- var hasher = std.hash.Wyhash.init(0);
- const t = self.tag();
- std.hash.autoHash(&hasher, t);
-
- inline for (@typeInfo(Tag).Enum.fields) |field| {
- if (@field(Tag, field.name) == t) {
- switch (@field(Tag, field.name)) {
- .void,
- .bool,
- .sampler,
- .event,
- .device_event,
- .reserve_id,
- .queue,
- .pipe_storage,
- .named_barrier,
- .u8,
- .u16,
- .u32,
- .u64,
- .i8,
- .i16,
- .i32,
- .i64,
- .f16,
- .f32,
- .f64,
- => {},
- else => self.hashPayload(@field(Tag, field.name), &hasher),
- }
- }
- }
-
- return hasher.final();
- }
-
- /// Perform a shallow hash, given that we know the tag of the field ahead of time.
- fn hashPayload(self: Type, comptime t: Tag, hasher: *std.hash.Wyhash) void {
- const fields = @typeInfo(t.Type()).Struct.fields;
- const pl = self.payload(t);
- comptime assert(std.mem.eql(u8, fields[0].name, "base"));
- inline for (fields[1..]) |field| { // Skip the 'base' field.
- std.hash.autoHashStrat(hasher, @field(pl, field.name), .DeepRecursive);
- }
- }
-
- /// Hash context that hashes and compares types in a shallow fashion, useful for type caches.
- pub const ShallowHashContext32 = struct {
- pub fn hash(self: @This(), t: Type) u32 {
- _ = self;
- return @truncate(u32, t.hashShallow());
- }
- pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool {
- _ = self;
- _ = b_index;
- return a.eqlShallow(b);
- }
- };
-
- /// Return the reference to any child type. Asserts the type is one of:
- /// - Vectors
- /// - Matrices
- /// - Images
- /// - SampledImages,
- /// - Arrays
- /// - RuntimeArrays
- /// - Pointers
- pub fn childType(self: Type) Ref {
- return switch (self.tag()) {
- .vector => self.payload(.vector).component_type,
- .matrix => self.payload(.matrix).column_type,
- .image => self.payload(.image).sampled_type,
- .sampled_image => self.payload(.sampled_image).image_type,
- .array => self.payload(.array).element_type,
- .runtime_array => self.payload(.runtime_array).element_type,
- .pointer => self.payload(.pointer).child_type,
- else => unreachable,
- };
- }
-
- pub fn isInt(self: Type) bool {
- return switch (self.tag()) {
- .u8,
- .u16,
- .u32,
- .u64,
- .i8,
- .i16,
- .i32,
- .i64,
- .int,
- => true,
- else => false,
- };
- }
-
- pub fn isFloat(self: Type) bool {
- return switch (self.tag()) {
- .f16, .f32, .f64 => true,
- else => false,
- };
- }
-
- /// Returns the number of bits that make up an int or float type.
- /// Asserts type is either int or float.
- pub fn intFloatBits(self: Type) u16 {
- return switch (self.tag()) {
- .u8, .i8 => 8,
- .u16, .i16, .f16 => 16,
- .u32, .i32, .f32 => 32,
- .u64, .i64, .f64 => 64,
- .int => self.payload(.int).width,
- else => unreachable,
- };
- }
-
- /// Returns the signedness of an integer type.
- /// Asserts that the type is an int.
- pub fn intSignedness(self: Type) Signedness {
- return switch (self.tag()) {
- .u8, .u16, .u32, .u64 => .unsigned,
- .i8, .i16, .i32, .i64 => .signed,
- .int => self.payload(.int).signedness,
- else => unreachable,
- };
- }
-
- pub const Tag = enum(usize) {
- void,
- bool,
- sampler,
- event,
- device_event,
- reserve_id,
- queue,
- pipe_storage,
- named_barrier,
- u8,
- u16,
- u32,
- u64,
- i8,
- i16,
- i32,
- i64,
- f16,
- f32,
- f64,
-
- // After this, the tag requires a payload.
- int,
- vector,
- matrix,
- image,
- sampled_image,
- array,
- runtime_array,
- @"struct",
- @"opaque",
- pointer,
- function,
- pipe,
-
- pub const last_no_payload_tag = Tag.f64;
- pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
-
- pub fn Type(comptime t: Tag) type {
- return switch (t) {
- .void,
- .bool,
- .sampler,
- .event,
- .device_event,
- .reserve_id,
- .queue,
- .pipe_storage,
- .named_barrier,
- .u8,
- .u16,
- .u32,
- .u64,
- .i8,
- .i16,
- .i32,
- .i64,
- .f16,
- .f32,
- .f64,
- => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
- .int => Payload.Int,
- .vector => Payload.Vector,
- .matrix => Payload.Matrix,
- .image => Payload.Image,
- .sampled_image => Payload.SampledImage,
- .array => Payload.Array,
- .runtime_array => Payload.RuntimeArray,
- .@"struct" => Payload.Struct,
- .@"opaque" => Payload.Opaque,
- .pointer => Payload.Pointer,
- .function => Payload.Function,
- .pipe => Payload.Pipe,
- };
- }
- };
-
- pub const Payload = struct {
- tag: Tag,
-
- pub const Int = struct {
- base: Payload = .{ .tag = .int },
- width: u16,
- signedness: Signedness,
- };
-
- pub const Vector = struct {
- base: Payload = .{ .tag = .vector },
- component_type: Ref,
- component_count: u32,
- };
-
- pub const Matrix = struct {
- base: Payload = .{ .tag = .matrix },
- column_type: Ref,
- column_count: u32,
- };
-
- pub const Image = struct {
- base: Payload = .{ .tag = .image },
- sampled_type: Ref,
- dim: spec.Dim,
- depth: enum(u2) {
- no = 0,
- yes = 1,
- maybe = 2,
- },
- arrayed: bool,
- multisampled: bool,
- sampled: enum(u2) {
- known_at_runtime = 0,
- with_sampler = 1,
- without_sampler = 2,
- },
- format: spec.ImageFormat,
- access_qualifier: ?spec.AccessQualifier,
- };
-
- pub const SampledImage = struct {
- base: Payload = .{ .tag = .sampled_image },
- image_type: Ref,
- };
-
- pub const Array = struct {
- base: Payload = .{ .tag = .array },
- element_type: Ref,
- /// Note: Must be emitted as constant, not as literal!
- length: u32,
- /// Type has the 'ArrayStride' decoration.
- /// If zero, no stride is present.
- array_stride: u32 = 0,
- };
-
- pub const RuntimeArray = struct {
- base: Payload = .{ .tag = .runtime_array },
- element_type: Ref,
- /// Type has the 'ArrayStride' decoration.
- /// If zero, no stride is present.
- array_stride: u32 = 0,
- };
-
- pub const Struct = struct {
- base: Payload = .{ .tag = .@"struct" },
- members: []Member,
- name: []const u8 = "",
- decorations: StructDecorations = .{},
-
- /// Extra information for decorations, packed for efficiency. Fields are stored sequentially by
- /// order of the `members` slice and `MemberDecorations` struct.
- member_decoration_extra: []u32 = &.{},
-
- pub const Member = struct {
- ty: Ref,
- name: []const u8 = "",
- offset: MemberOffset = .none,
- decorations: MemberDecorations = .{},
- };
-
- pub const MemberOffset = enum(u32) { none = 0xFFFF_FFFF, _ };
-
- pub const StructDecorations = packed struct {
- /// Type has the 'Block' decoration.
- block: bool = false,
- /// Type has the 'BufferBlock' decoration.
- buffer_block: bool = false,
- /// Type has the 'GLSLShared' decoration.
- glsl_shared: bool = false,
- /// Type has the 'GLSLPacked' decoration.
- glsl_packed: bool = false,
- /// Type has the 'CPacked' decoration.
- c_packed: bool = false,
- };
-
- pub const MemberDecorations = packed struct {
- /// Matrix layout for (arrays of) matrices. If this field is not .none,
- /// then there is also an extra field containing the matrix stride corresponding
- /// to the 'MatrixStride' decoration.
- matrix_layout: enum(u2) {
- /// Member has the 'RowMajor' decoration. The member type
- /// must be a matrix or an array of matrices.
- row_major,
- /// Member has the 'ColMajor' decoration. The member type
- /// must be a matrix or an array of matrices.
- col_major,
- /// Member is not a matrix or array of matrices.
- none,
- } = .none,
-
- // Regular decorations, these do not imply extra fields.
-
- /// Member has the 'NoPerspective' decoration.
- no_perspective: bool = false,
- /// Member has the 'Flat' decoration.
- flat: bool = false,
- /// Member has the 'Patch' decoration.
- patch: bool = false,
- /// Member has the 'Centroid' decoration.
- centroid: bool = false,
- /// Member has the 'Sample' decoration.
- sample: bool = false,
- /// Member has the 'Invariant' decoration.
- /// Note: requires parent struct to have 'Block'.
- invariant: bool = false,
- /// Member has the 'Volatile' decoration.
- @"volatile": bool = false,
- /// Member has the 'Coherent' decoration.
- coherent: bool = false,
- /// Member has the 'NonWritable' decoration.
- non_writable: bool = false,
- /// Member has the 'NonReadable' decoration.
- non_readable: bool = false,
-
- // The following decorations all imply extra field(s).
-
- /// Member has the 'BuiltIn' decoration.
- /// This decoration has an extra field of type `spec.BuiltIn`.
- /// Note: If any member of a struct has the BuiltIn decoration, all members must have one.
- /// Note: Each builtin may only be reachable once for a particular entry point.
- /// Note: The member type may be constrained by a particular built-in, defined in the client API specification.
- builtin: bool = false,
- /// Member has the 'Stream' decoration.
- /// This member has an extra field of type `u32`.
- stream: bool = false,
- /// Member has the 'Location' decoration.
- /// This member has an extra field of type `u32`.
- location: bool = false,
- /// Member has the 'Component' decoration.
- /// This member has an extra field of type `u32`.
- component: bool = false,
- /// Member has the 'XfbBuffer' decoration.
- /// This member has an extra field of type `u32`.
- xfb_buffer: bool = false,
- /// Member has the 'XfbStride' decoration.
- /// This member has an extra field of type `u32`.
- xfb_stride: bool = false,
- /// Member has the 'UserSemantic' decoration.
- /// This member has an extra field of type `[]u8`, encoded as a `u32`
- /// holding the exact number of characters, followed by the string padded
- /// with zeroes to a multiple of 4 bytes.
- user_semantic: bool = false,
- };
- };
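
Given the packing rule above (extra words stored per member, in the declaration order of `MemberDecorations`), the number of `u32` words a member contributes to `member_decoration_extra` follows from its flags. A minimal sketch; `extraWordCount` is a hypothetical helper and the variable-length `user_semantic` data is left out of the fixed count:

    fn extraWordCount(d: Type.Payload.Struct.MemberDecorations) u32 {
        var n: u32 = 0;
        if (d.matrix_layout != .none) n += 1; // MatrixStride literal
        if (d.builtin) n += 1; // spec.BuiltIn
        if (d.stream) n += 1;
        if (d.location) n += 1;
        if (d.component) n += 1;
        if (d.xfb_buffer) n += 1;
        if (d.xfb_stride) n += 1;
        // user_semantic adds a length word plus the zero-padded string;
        // see the encoding sketch at the end of this type.
        return n;
    }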
-
- pub const Opaque = struct {
- base: Payload = .{ .tag = .@"opaque" },
- name: []u8,
- };
-
- pub const Pointer = struct {
- base: Payload = .{ .tag = .pointer },
- storage_class: spec.StorageClass,
- child_type: Ref,
- /// Type has the 'ArrayStride' decoration.
- /// This is valid for pointers to elements of an array.
- /// If zero, no stride is present.
- array_stride: u32 = 0,
- /// If nonzero, type has the 'Alignment' decoration.
- alignment: u32 = 0,
- /// Type has the 'MaxByteOffset' decoration.
- max_byte_offset: ?u32 = null,
- };
-
- pub const Function = struct {
- base: Payload = .{ .tag = .function },
- return_type: Ref,
- parameters: []Ref,
- };
-
- pub const Pipe = struct {
- base: Payload = .{ .tag = .pipe },
- qualifier: spec.AccessQualifier,
- };
- };
-};
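
For the `UserSemantic` extra field described in `MemberDecorations` above, a rough sketch of the string encoding: a `u32` character count, then the bytes zero-padded to a multiple of 4 bytes. Packing the first character into the lowest byte of each word is an assumption here, following the usual SPIR-V string-literal convention, and `appendUserSemantic` is a hypothetical helper:

    const std = @import("std");

    fn appendUserSemantic(extra: *std.ArrayList(u32), s: []const u8) !void {
        // Exact character count, not including any padding.
        try extra.append(@intCast(u32, s.len));
        var i: usize = 0;
        while (i < s.len) : (i += 4) {
            var word: u32 = 0;
            var j: usize = 0;
            while (j < 4 and i + j < s.len) : (j += 1) {
                word |= @as(u32, s[i + j]) << @intCast(u5, j * 8);
            }
            // Unused trailing bytes stay zero, padding to a 4-byte boundary.
            try extra.append(word);
        }
    }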