Diffstat (limited to 'src/codegen')
src/codegen/llvm.zig | 366
1 file changed, 183 insertions(+), 183 deletions(-)
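Every hunk below applies the same mechanical rename inside the LLVM backend: DeclGen.llvmType becomes lowerType, llvmTypeFn becomes lowerTypeFn, and genTypedValue becomes lowerValue. Insertions and deletions match one-to-one because no behavior changes; a minimal sketch of the pattern (identifiers as used throughout this file):

    // Before:
    //   const llvm_ty  = try dg.llvmType(ty);
    //   const llvm_val = try dg.genTypedValue(.{ .ty = ty, .val = val });
    // After (identical semantics, "lower Zig X to LLVM X" naming):
    const llvm_ty = try dg.lowerType(ty);
    const llvm_val = try dg.lowerValue(.{ .ty = ty, .val = val });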
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index e76b2941c2..cf0188b060 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -745,7 +745,7 @@ pub const Object = struct {
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
- const param_llvm_ty = try dg.llvmType(param_ty);
+ const param_llvm_ty = try dg.lowerType(param_ty);
const abi_size = @intCast(c_uint, param_ty.abiSize(target));
const int_llvm_ty = dg.context.intType(abi_size * 8);
const int_ptr_llvm_ty = int_llvm_ty.pointerType(0);
@@ -775,7 +775,7 @@ pub const Object = struct {
.Struct => {
const fields = param_ty.structFields().values();
if (is_by_ref) {
- const param_llvm_ty = try dg.llvmType(param_ty);
+ const param_llvm_ty = try dg.lowerType(param_ty);
const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
arg_ptr.setAlignment(param_ty.abiAlignment(target));
@@ -2100,7 +2100,7 @@ pub const DeclGen = struct {
break :init_val decl.val;
};
if (init_val.tag() != .unreachable_value) {
- const llvm_init = try dg.genTypedValue(.{ .ty = decl.ty, .val = init_val });
+ const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val });
if (global.globalGetValueType() == llvm_init.typeOf()) {
global.setInitializer(llvm_init);
} else {
@@ -2171,7 +2171,7 @@ pub const DeclGen = struct {
const target = dg.module.getTarget();
const sret = firstParamSRet(fn_info, target);
- const fn_type = try dg.llvmType(zig_fn_type);
+ const fn_type = try dg.lowerType(zig_fn_type);
const fqn = try decl.getFullyQualifiedName(dg.module);
defer dg.gpa.free(fqn);
@@ -2198,7 +2198,7 @@ pub const DeclGen = struct {
dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
dg.addArgAttr(llvm_fn, 0, "noalias");
- const raw_llvm_ret_ty = try dg.llvmType(fn_info.return_type);
+ const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type);
llvm_fn.addSretAttr(0, raw_llvm_ret_ty);
}
@@ -2291,7 +2291,7 @@ pub const DeclGen = struct {
const fqn = try decl.getFullyQualifiedName(dg.module);
defer dg.gpa.free(fqn);
- const llvm_type = try dg.llvmType(decl.ty);
+ const llvm_type = try dg.lowerType(decl.ty);
const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(llvm_type, fqn, llvm_addrspace);
gop.value_ptr.* = llvm_global;
@@ -2345,15 +2345,15 @@ pub const DeclGen = struct {
}
fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool {
- // Once `llvmType` succeeds, successive calls to it with the same Zig type
- // are guaranteed to succeed. So if a call to `llvmType` fails here it means
+ // Once `lowerType` succeeds, successive calls to it with the same Zig type
+ // are guaranteed to succeed. So if a call to `lowerType` fails here it means
// it is the first time lowering the type, which means the value can't possibly
// have that type.
- const llvm_ty = dg.llvmType(ty) catch return true;
+ const llvm_ty = dg.lowerType(ty) catch return true;
return val.typeOf() != llvm_ty;
}
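A hedged illustration of the guarantee the comment above leans on (hypothetical caller; dg and some_ty assumed in scope): a failure from lowerType implies the type was never successfully lowered before, so a value cannot already carry it.

    // The first lowering may allocate and can fail:
    const first = try dg.lowerType(some_ty);
    // Later calls with the same Zig type are guaranteed to succeed:
    const again = dg.lowerType(some_ty) catch unreachable;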
- fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type {
+ fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type {
const gpa = dg.gpa;
const target = dg.module.getTarget();
switch (t.zigTypeTag()) {
@@ -2385,8 +2385,8 @@ pub const DeclGen = struct {
const ptr_type = t.slicePtrFieldType(&buf);
const fields: [2]*const llvm.Type = .{
- try dg.llvmType(ptr_type),
- try dg.llvmType(Type.usize),
+ try dg.lowerType(ptr_type),
+ try dg.lowerType(Type.usize),
};
return dg.context.structType(&fields, fields.len, .False);
}
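That is, a slice lowers to a plain two-member struct of data pointer plus length; for example (assuming a 64-bit target):

    // []u8         =>  { i8*,  i64 }
    // []const u32  =>  { i32*, i64 }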
@@ -2402,7 +2402,7 @@ pub const DeclGen = struct {
else => elem_ty.hasRuntimeBitsIgnoreComptime(),
};
const llvm_elem_ty = if (lower_elem_ty)
- try dg.llvmType(elem_ty)
+ try dg.lowerType(elem_ty)
else
dg.context.intType(8);
return llvm_elem_ty.pointerType(llvm_addrspace);
@@ -2430,12 +2430,12 @@ pub const DeclGen = struct {
.Array => {
const elem_ty = t.childType();
assert(elem_ty.onePossibleValue() == null);
- const elem_llvm_ty = try dg.llvmType(elem_ty);
+ const elem_llvm_ty = try dg.lowerType(elem_ty);
const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null);
return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
},
.Vector => {
- const elem_type = try dg.llvmType(t.childType());
+ const elem_type = try dg.lowerType(t.childType());
return elem_type.vectorType(t.vectorLen());
},
.Optional => {
@@ -2444,7 +2444,7 @@ pub const DeclGen = struct {
if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
return dg.context.intType(1);
}
- const payload_llvm_ty = try dg.llvmType(child_ty);
+ const payload_llvm_ty = try dg.lowerType(child_ty);
if (t.optionalReprIsPayload()) {
return payload_llvm_ty;
}
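For orientation, the three optional representations selected above (a summary, not part of the diff; the general-case field order matches the buildInsertValue indices used later in this file):

    // ?void  (payload has no runtime bits)  =>  i1
    // ?*T    (optionalReprIsPayload)        =>  the pointer itself; null == 0
    // ?u32   (general case)                 =>  { u32, i1 }  payload, then non-null flag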
@@ -2458,13 +2458,13 @@ pub const DeclGen = struct {
const error_type = t.errorUnionSet();
const payload_type = t.errorUnionPayload();
if (error_type.errorSetCardinality() == .zero) {
- return dg.llvmType(payload_type);
+ return dg.lowerType(payload_type);
}
if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
- return try dg.llvmType(Type.anyerror);
+ return try dg.lowerType(Type.anyerror);
}
- const llvm_error_type = try dg.llvmType(error_type);
- const llvm_payload_type = try dg.llvmType(payload_type);
+ const llvm_error_type = try dg.lowerType(error_type);
+ const llvm_payload_type = try dg.lowerType(payload_type);
const payload_align = payload_type.abiAlignment(target);
const error_align = Type.anyerror.abiAlignment(target);
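These two alignments drive the field order chosen just past this hunk; a sketch of the rule (an assumption on my part, consistent with errUnionPayloadOffset used later in this diff) is that the more-aligned member comes first:

    // anyerror!u8   =>  { anyerror (u16), u8, padding... }   error first
    // anyerror!u64  =>  { u64, anyerror (u16), padding... }  payload first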
@@ -2515,7 +2515,7 @@ pub const DeclGen = struct {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
try llvm_field_types.append(gpa, llvm_array_ty);
}
- const field_llvm_ty = try dg.llvmType(field_ty);
+ const field_llvm_ty = try dg.lowerType(field_ty);
try llvm_field_types.append(gpa, field_llvm_ty);
offset += field_ty.abiSize(target);
@@ -2544,7 +2544,7 @@ pub const DeclGen = struct {
if (struct_obj.layout == .Packed) {
var buf: Type.Payload.Bits = undefined;
const int_ty = struct_obj.packedIntegerType(target, &buf);
- const int_llvm_ty = try dg.llvmType(int_ty);
+ const int_llvm_ty = try dg.lowerType(int_ty);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
}
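So a packed struct is lowered as its backing integer, with field access handled elsewhere via shifts and masks; for example:

    // packed struct { a: u4, b: u12 }          =>  i16
    // packed struct { flag: bool, rest: u15 }  =>  i16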
@@ -2579,7 +2579,7 @@ pub const DeclGen = struct {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
try llvm_field_types.append(gpa, llvm_array_ty);
}
- const field_llvm_ty = try dg.llvmType(field.ty);
+ const field_llvm_ty = try dg.lowerType(field.ty);
try llvm_field_types.append(gpa, field_llvm_ty);
offset += field.ty.abiSize(target);
@@ -2614,7 +2614,7 @@ pub const DeclGen = struct {
const union_obj = t.cast(Type.Payload.Union).?.data;
if (layout.payload_size == 0) {
- const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty);
+ const enum_tag_llvm_ty = try dg.lowerType(union_obj.tag_ty);
gop.value_ptr.* = enum_tag_llvm_ty;
return enum_tag_llvm_ty;
}
@@ -2626,7 +2626,7 @@ pub const DeclGen = struct {
gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls
const aligned_field = union_obj.fields.values()[layout.most_aligned_field];
- const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty);
+ const llvm_aligned_field_ty = try dg.lowerType(aligned_field.ty);
const llvm_payload_ty = t: {
if (layout.most_aligned_field_size == layout.payload_size) {
@@ -2645,7 +2645,7 @@ pub const DeclGen = struct {
llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
return llvm_union_ty;
}
- const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty);
+ const enum_tag_llvm_ty = try dg.lowerType(union_obj.tag_ty);
// Put the tag before or after the payload depending on which one's
// alignment is greater.
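Following that comment, the lowered union is a two-member struct whose field order is alignment-driven; a sketch (the concrete example is my own):

    // union(enum) { big: u64, small: u8 } with a u8 tag:
    //   payload_align (8) > tag_align (1)  =>  { payload, tag, padding... }
    // Were the tag the more aligned member, the order would flip to { tag, payload }.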
@@ -2667,7 +2667,7 @@ pub const DeclGen = struct {
llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False);
return llvm_union_ty;
},
- .Fn => return llvmTypeFn(dg, t),
+ .Fn => return lowerTypeFn(dg, t),
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
.Type => unreachable,
@@ -2682,7 +2682,7 @@ pub const DeclGen = struct {
}
}
- fn llvmTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*const llvm.Type {
+ fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*const llvm.Type {
const target = dg.module.getTarget();
const fn_info = fn_ty.fnInfo();
const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);
@@ -2691,7 +2691,7 @@ pub const DeclGen = struct {
defer llvm_params.deinit();
if (firstParamSRet(fn_info, target)) {
- const llvm_sret_ty = try dg.llvmType(fn_info.return_type);
+ const llvm_sret_ty = try dg.lowerType(fn_info.return_type);
try llvm_params.append(llvm_sret_ty.pointerType(0));
}
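Context for firstParamSRet: when the result is returned by reference, the callee receives a hidden first parameter pointing at a caller-allocated return slot. A rough sketch of the lowered shape (LLVM signature shown in a comment; names hypothetical):

    // fn get() BigStruct  lowers to approximately:
    //   void @get(%BigStruct* sret(%BigStruct) %ret)
    // with callers passing the address of a stack slot as argument 0.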
@@ -2703,7 +2703,7 @@ pub const DeclGen = struct {
.data = dg.object.getStackTraceType(),
};
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
- try llvm_params.append(try dg.llvmType(ptr_ty));
+ try llvm_params.append(try dg.lowerType(ptr_ty));
}
var it = iterateParamTypes(dg, fn_info);
@@ -2711,11 +2711,11 @@ pub const DeclGen = struct {
.no_bits => continue,
.byval => {
const param_ty = fn_info.param_types[it.zig_index - 1];
- try llvm_params.append(try dg.llvmType(param_ty));
+ try llvm_params.append(try dg.lowerType(param_ty));
},
.byref => {
const param_ty = fn_info.param_types[it.zig_index - 1];
- const raw_llvm_ty = try dg.llvmType(param_ty);
+ const raw_llvm_ty = try dg.lowerType(param_ty);
try llvm_params.append(raw_llvm_ty.pointerType(0));
},
.abi_sized_int => {
@@ -2757,7 +2757,7 @@ pub const DeclGen = struct {
// one field; in this case keep the type information
// to avoid the potentially costly ptrtoint/bitcast.
if (bits_used == 0 and field_abi_bits == int_bits) {
- const llvm_field_ty = try dg.llvmType(field.ty);
+ const llvm_field_ty = try dg.lowerType(field.ty);
llvm_params.appendAssumeCapacity(llvm_field_ty);
field_i += 1;
if (field_i >= fields.len) {
@@ -2795,16 +2795,16 @@ pub const DeclGen = struct {
);
}
- fn genTypedValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value {
+ fn lowerValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value {
if (tv.val.isUndef()) {
- const llvm_type = try dg.llvmType(tv.ty);
+ const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.getUndef();
}
const target = dg.module.getTarget();
switch (tv.ty.zigTypeTag()) {
.Bool => {
- const llvm_type = try dg.llvmType(tv.ty);
+ const llvm_type = try dg.lowerType(tv.ty);
return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
},
// TODO this duplicates code with Pointer but they should share the handling
@@ -2865,7 +2865,7 @@ pub const DeclGen = struct {
return unsigned_val;
},
.Float => {
- const llvm_ty = try dg.llvmType(tv.ty);
+ const llvm_ty = try dg.lowerType(tv.ty);
switch (tv.ty.floatBits(target)) {
16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)),
80 => {
@@ -2902,7 +2902,7 @@ pub const DeclGen = struct {
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
const val = try dg.resolveGlobalDecl(decl_index);
- const llvm_var_type = try dg.llvmType(tv.ty);
+ const llvm_var_type = try dg.lowerType(tv.ty);
const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
const llvm_type = llvm_var_type.pointerType(llvm_addrspace);
return val.constBitCast(llvm_type);
@@ -2911,11 +2911,11 @@ pub const DeclGen = struct {
const slice = tv.val.castTag(.slice).?.data;
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const fields: [2]*const llvm.Value = .{
- try dg.genTypedValue(.{
+ try dg.lowerValue(.{
.ty = tv.ty.slicePtrFieldType(&buf),
.val = slice.ptr,
}),
- try dg.genTypedValue(.{
+ try dg.lowerValue(.{
.ty = Type.usize,
.val = slice.len,
}),
@@ -2923,15 +2923,15 @@ pub const DeclGen = struct {
return dg.context.constStruct(&fields, fields.len, .False);
},
.int_u64, .one, .int_big_positive => {
- const llvm_usize = try dg.llvmType(Type.usize);
+ const llvm_usize = try dg.lowerType(Type.usize);
const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False);
- return llvm_int.constIntToPtr(try dg.llvmType(tv.ty));
+ return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
},
.field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
return dg.lowerParentPtr(tv.val, tv.ty.childType());
},
.null_value, .zero => {
- const llvm_type = try dg.llvmType(tv.ty);
+ const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.constNull();
},
else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{
@@ -2986,7 +2986,7 @@ pub const DeclGen = struct {
defer gpa.free(llvm_elems);
var need_unnamed = false;
for (elem_vals[0..len]) |elem_val, i| {
- llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
+ llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
}
if (need_unnamed) {
@@ -2996,7 +2996,7 @@ pub const DeclGen = struct {
.True,
);
} else {
- const llvm_elem_ty = try dg.llvmType(elem_ty);
+ const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
@@ -3016,13 +3016,13 @@ pub const DeclGen = struct {
var need_unnamed = false;
if (len != 0) {
for (llvm_elems[0..len]) |*elem| {
- elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
+ elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
}
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
}
if (sentinel) |sent| {
- llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent });
+ llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
}
@@ -3033,7 +3033,7 @@ pub const DeclGen = struct {
.True,
);
} else {
- const llvm_elem_ty = try dg.llvmType(elem_ty);
+ const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
@@ -3043,13 +3043,13 @@ pub const DeclGen = struct {
.empty_array_sentinel => {
const elem_ty = tv.ty.elemType();
const sent_val = tv.ty.sentinel().?;
- const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val });
+ const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val });
const llvm_elems: [1]*const llvm.Value = .{sentinel};
const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
if (need_unnamed) {
return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True);
} else {
- const llvm_elem_ty = try dg.llvmType(elem_ty);
+ const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
}
},
@@ -3066,17 +3066,17 @@ pub const DeclGen = struct {
}
if (tv.ty.optionalReprIsPayload()) {
if (tv.val.castTag(.opt_payload)) |payload| {
- return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data });
+ return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data });
} else if (is_pl) {
- return dg.genTypedValue(.{ .ty = payload_ty, .val = tv.val });
+ return dg.lowerValue(.{ .ty = payload_ty, .val = tv.val });
} else {
- const llvm_ty = try dg.llvmType(tv.ty);
+ const llvm_ty = try dg.lowerType(tv.ty);
return llvm_ty.constNull();
}
}
assert(payload_ty.zigTypeTag() != .Fn);
const fields: [2]*const llvm.Value = .{
- try dg.genTypedValue(.{
+ try dg.lowerValue(.{
.ty = payload_ty,
.val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef),
}),
@@ -3095,7 +3095,7 @@ pub const DeclGen = struct {
return dg.resolveLlvmFunction(fn_decl_index);
},
.ErrorSet => {
- const llvm_ty = try dg.llvmType(tv.ty);
+ const llvm_ty = try dg.lowerType(tv.ty);
switch (tv.val.tag()) {
.@"error" => {
const err_name = tv.val.castTag(.@"error").?.data.name;
@@ -3113,23 +3113,23 @@ pub const DeclGen = struct {
const payload_type = tv.ty.errorUnionPayload();
if (error_type.errorSetCardinality() == .zero) {
const payload_val = tv.val.castTag(.eu_payload).?.data;
- return dg.genTypedValue(.{ .ty = payload_type, .val = payload_val });
+ return dg.lowerValue(.{ .ty = payload_type, .val = payload_val });
}
const is_pl = tv.val.errorUnionIsPayload();
if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
// We use the error type directly as the type.
const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
- return dg.genTypedValue(.{ .ty = error_type, .val = err_val });
+ return dg.lowerValue(.{ .ty = error_type, .val = err_val });
}
const payload_align = payload_type.abiAlignment(target);
const error_align = Type.anyerror.abiAlignment(target);
- const llvm_error_value = try dg.genTypedValue(.{
+ const llvm_error_value = try dg.lowerValue(.{
.ty = error_type,
.val = if (is_pl) Value.initTag(.zero) else tv.val,
});
- const llvm_payload_value = try dg.genTypedValue(.{
+ const llvm_payload_value = try dg.lowerValue(.{
.ty = payload_type,
.val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef),
});
@@ -3142,7 +3142,7 @@ pub const DeclGen = struct {
}
},
.Struct => {
- const llvm_struct_ty = try dg.llvmType(tv.ty);
+ const llvm_struct_ty = try dg.lowerType(tv.ty);
const field_vals = tv.val.castTag(.aggregate).?.data;
const gpa = dg.gpa;
@@ -3175,7 +3175,7 @@ pub const DeclGen = struct {
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
- const field_llvm_val = try dg.genTypedValue(.{
+ const field_llvm_val = try dg.lowerValue(.{
.ty = field_ty,
.val = field_vals[i],
});
@@ -3223,7 +3223,7 @@ pub const DeclGen = struct {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
- const non_int_val = try dg.genTypedValue(.{
+ const non_int_val = try dg.lowerValue(.{
.ty = field.ty,
.val = field_val,
});
@@ -3267,7 +3267,7 @@ pub const DeclGen = struct {
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
- const field_llvm_val = try dg.genTypedValue(.{
+ const field_llvm_val = try dg.lowerValue(.{
.ty = field.ty,
.val = field_vals[i],
});
@@ -3302,13 +3302,13 @@ pub const DeclGen = struct {
}
},
.Union => {
- const llvm_union_ty = try dg.llvmType(tv.ty);
+ const llvm_union_ty = try dg.lowerType(tv.ty);
const tag_and_val = tv.val.castTag(.@"union").?.data;
const layout = tv.ty.unionGetLayout(target);
if (layout.payload_size == 0) {
- return genTypedValue(dg, .{
+ return lowerValue(dg, .{
.ty = tv.ty.unionTagType().?,
.val = tag_and_val.tag,
});
@@ -3322,7 +3322,7 @@ pub const DeclGen = struct {
const padding_len = @intCast(c_uint, layout.payload_size);
break :p dg.context.intType(8).arrayType(padding_len).getUndef();
}
- const field = try genTypedValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
+ const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
const field_size = field_ty.abiSize(target);
if (field_size == layout.payload_size) {
break :p field;
@@ -3348,7 +3348,7 @@ pub const DeclGen = struct {
return llvm_union_ty.constNamedStruct(&fields, fields.len);
}
}
- const llvm_tag_value = try genTypedValue(dg, .{
+ const llvm_tag_value = try lowerValue(dg, .{
.ty = tv.ty.unionTagType().?,
.val = tag_and_val.tag,
});
@@ -3385,7 +3385,7 @@ pub const DeclGen = struct {
.data = bytes[i],
};
- elem.* = try dg.genTypedValue(.{
+ elem.* = try dg.lowerValue(.{
.ty = elem_ty,
.val = Value.initPayload(&byte_payload.base),
});
@@ -3405,7 +3405,7 @@ pub const DeclGen = struct {
const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems) |*elem, i| {
- elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
+ elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
}
return llvm.constVector(
llvm_elems.ptr,
@@ -3420,7 +3420,7 @@ pub const DeclGen = struct {
const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems) |*elem| {
- elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
+ elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
}
return llvm.constVector(
llvm_elems.ptr,
@@ -3470,7 +3470,7 @@ pub const DeclGen = struct {
if (ptr_child_ty.eql(decl.ty, dg.module)) {
return llvm_ptr;
} else {
- return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0));
+ return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0));
}
}
@@ -3492,15 +3492,15 @@ pub const DeclGen = struct {
},
.int_i64 => {
const int = ptr_val.castTag(.int_i64).?.data;
- const llvm_usize = try dg.llvmType(Type.usize);
+ const llvm_usize = try dg.lowerType(Type.usize);
const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False);
- return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0));
+ return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0));
},
.int_u64 => {
const int = ptr_val.castTag(.int_u64).?.data;
- const llvm_usize = try dg.llvmType(Type.usize);
+ const llvm_usize = try dg.lowerType(Type.usize);
const llvm_int = llvm_usize.constInt(int, .False);
- return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0));
+ return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0));
},
.field_ptr => blk: {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
@@ -3549,7 +3549,7 @@ pub const DeclGen = struct {
const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty);
bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module);
- const llvm_usize = try dg.llvmType(Type.usize);
+ const llvm_usize = try dg.lowerType(Type.usize);
const indices: [1]*const llvm.Value = .{
llvm_usize.constInt(elem_ptr.index, .False),
};
@@ -3602,7 +3602,7 @@ pub const DeclGen = struct {
else => unreachable,
};
if (bitcast_needed) {
- return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0));
+ return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0));
} else {
return llvm_ptr;
}
@@ -3621,11 +3621,11 @@ pub const DeclGen = struct {
.data = tv.val.sliceLen(self.module),
};
const fields: [2]*const llvm.Value = .{
- try self.genTypedValue(.{
+ try self.lowerValue(.{
.ty = ptr_ty,
.val = tv.val,
}),
- try self.genTypedValue(.{
+ try self.lowerValue(.{
.ty = Type.usize,
.val = Value.initPayload(&slice_len.base),
}),
@@ -3657,7 +3657,7 @@ pub const DeclGen = struct {
else
try self.resolveGlobalDecl(decl_index);
- const llvm_type = try self.llvmType(tv.ty);
+ const llvm_type = try self.lowerType(tv.ty);
if (tv.ty.zigTypeTag() == .Int) {
return llvm_val.constPtrToInt(llvm_type);
} else {
@@ -3672,8 +3672,8 @@ pub const DeclGen = struct {
// The value cannot be undefined, because we use the `nonnull` annotation
// for non-optional pointers. We also need to respect the alignment, even though
// the address will never be dereferenced.
- const llvm_usize = try dg.llvmType(Type.usize);
- const llvm_ptr_ty = try dg.llvmType(ptr_ty);
+ const llvm_usize = try dg.lowerType(Type.usize);
+ const llvm_ptr_ty = try dg.lowerType(ptr_ty);
if (alignment != 0) {
return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty);
}
@@ -3852,7 +3852,7 @@ pub const FuncGen = struct {
const val = self.air.value(inst).?;
const ty = self.air.typeOf(inst);
- const llvm_val = try self.dg.genTypedValue(.{ .ty = ty, .val = val });
+ const llvm_val = try self.dg.lowerValue(.{ .ty = ty, .val = val });
if (!isByRef(ty)) {
gop.value_ptr.* = llvm_val;
return llvm_val;
@@ -3870,7 +3870,7 @@ pub const FuncGen = struct {
// Because of LLVM limitations for lowering certain types such as unions,
// the type of global constants might not match the type it is supposed to
// be, and so we must bitcast the pointer at the usage sites.
- const wanted_llvm_ty = try self.dg.llvmType(ty);
+ const wanted_llvm_ty = try self.dg.lowerType(ty);
const wanted_llvm_ptr_ty = wanted_llvm_ty.pointerType(0);
const casted_ptr = global.constBitCast(wanted_llvm_ptr_ty);
gop.value_ptr.* = casted_ptr;
@@ -4094,7 +4094,7 @@ pub const FuncGen = struct {
defer llvm_args.deinit();
const ret_ptr = if (!sret) null else blk: {
- const llvm_ret_ty = try self.dg.llvmType(return_type);
+ const llvm_ret_ty = try self.dg.lowerType(return_type);
const ret_ptr = self.buildAlloca(llvm_ret_ty);
ret_ptr.setAlignment(return_type.abiAlignment(target));
try llvm_args.append(ret_ptr);
@@ -4126,7 +4126,7 @@ pub const FuncGen = struct {
// which is always lowered to an LLVM type of `*i8`.
// 2. The argument is a global which does act as a pointer, however
// a bitcast is needed in order for the LLVM types to match.
- const llvm_param_ty = try self.dg.llvmType(param_ty);
+ const llvm_param_ty = try self.dg.lowerType(param_ty);
const casted_ptr = self.builder.buildBitCast(llvm_arg, llvm_param_ty, "");
try llvm_args.append(casted_ptr);
} else {
@@ -4173,7 +4173,7 @@ pub const FuncGen = struct {
);
const int_ptr = self.buildAlloca(int_llvm_ty);
int_ptr.setAlignment(alignment);
- const param_llvm_ty = try self.dg.llvmType(param_ty);
+ const param_llvm_ty = try self.dg.lowerType(param_ty);
const casted_ptr = self.builder.buildBitCast(int_ptr, param_llvm_ty.pointerType(0), "");
const store_inst = self.builder.buildStore(llvm_arg, casted_ptr);
store_inst.setAlignment(alignment);
@@ -4284,7 +4284,7 @@ pub const FuncGen = struct {
return null;
}
- const llvm_ret_ty = try self.dg.llvmType(return_type);
+ const llvm_ret_ty = try self.dg.lowerType(return_type);
if (ret_ptr) |rp| {
call.setCallSret(llvm_ret_ty);
@@ -4354,7 +4354,7 @@ pub const FuncGen = struct {
// Functions with an empty error set are emitted with an error code
// return type and return zero so that, as function pointers, they can
// be coerced to functions that return anyerror.
- const err_int = try self.dg.llvmType(Type.anyerror);
+ const err_int = try self.dg.lowerType(Type.anyerror);
_ = self.builder.buildRet(err_int.constInt(0, .False));
} else {
_ = self.builder.buildRetVoid();
@@ -4393,7 +4393,7 @@ pub const FuncGen = struct {
// Functions with an empty error set are emitted with an error code
// return type and return zero so that, as function pointers, they can
// be coerced to functions that return anyerror.
- const err_int = try self.dg.llvmType(Type.anyerror);
+ const err_int = try self.dg.lowerType(Type.anyerror);
_ = self.builder.buildRet(err_int.constInt(0, .False));
} else {
_ = self.builder.buildRetVoid();
@@ -4407,7 +4407,7 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(un_op);
const target = self.dg.module.getTarget();
const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
- const llvm_ret_ty = try self.dg.llvmType(ret_ty);
+ const llvm_ret_ty = try self.dg.lowerType(ret_ty);
const casted_ptr = if (abi_ret_ty == llvm_ret_ty) ptr else p: {
const ptr_abi_ty = abi_ret_ty.pointerType(0);
break :p self.builder.buildBitCast(ptr, ptr_abi_ty, "");
@@ -4588,7 +4588,7 @@ pub const FuncGen = struct {
const is_body = inst_ty.zigTypeTag() == .Fn;
if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null;
- const raw_llvm_ty = try self.dg.llvmType(inst_ty);
+ const raw_llvm_ty = try self.dg.lowerType(inst_ty);
const llvm_ty = ty: {
// If the zig tag type is a function, this represents an actual function body; not
@@ -4728,9 +4728,9 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const array_ty = operand_ty.childType();
- const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_usize = try self.dg.lowerType(Type.usize);
const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
- const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
if (!array_ty.hasRuntimeBitsIgnoreComptime()) {
return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, "");
}
@@ -4755,7 +4755,7 @@ pub const FuncGen = struct {
const dest_ty = self.air.typeOfIndex(inst);
const dest_scalar_ty = dest_ty.scalarType();
- const dest_llvm_ty = try self.dg.llvmType(dest_ty);
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
const target = self.dg.module.getTarget();
if (intrinsicsAllowed(dest_scalar_ty, target)) {
@@ -4806,7 +4806,7 @@ pub const FuncGen = struct {
const dest_ty = self.air.typeOfIndex(inst);
const dest_scalar_ty = dest_ty.scalarType();
- const dest_llvm_ty = try self.dg.llvmType(dest_ty);
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
if (intrinsicsAllowed(operand_scalar_ty, target)) {
// TODO set fast math flag
@@ -4833,7 +4833,7 @@ pub const FuncGen = struct {
compiler_rt_dest_abbrev,
}) catch unreachable;
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const param_types = [1]*const llvm.Type{operand_llvm_ty};
const libc_fn = self.getLibcFunction(fn_name, &param_types, libc_ret_ty);
const params = [1]*const llvm.Value{operand};
@@ -4994,7 +4994,7 @@ pub const FuncGen = struct {
const containing_int = struct_llvm_val;
const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
- const elem_llvm_ty = try self.dg.llvmType(field_ty);
+ const elem_llvm_ty = try self.dg.lowerType(field_ty);
if (field_ty.zigTypeTag() == .Float) {
const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
const same_size_int = self.context.intType(elem_bits);
@@ -5026,7 +5026,7 @@ pub const FuncGen = struct {
return self.load(field_ptr, field_ptr_ty);
},
.Union => {
- const llvm_field_ty = try self.dg.llvmType(field_ty);
+ const llvm_field_ty = try self.dg.lowerType(field_ty);
const layout = struct_ty.unionGetLayout(target);
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, "");
@@ -5053,7 +5053,7 @@ pub const FuncGen = struct {
const struct_ty = self.air.getRefType(ty_pl.ty).childType();
const field_offset = struct_ty.structFieldOffset(extra.field_index, target);
- const res_ty = try self.dg.llvmType(self.air.getRefType(ty_pl.ty));
+ const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
if (field_offset == 0) {
return self.builder.buildBitCast(field_ptr, res_ty, "");
}
@@ -5383,7 +5383,7 @@ pub const FuncGen = struct {
}
const ret_ty = self.air.typeOfIndex(inst);
- const ret_llvm_ty = try self.dg.llvmType(ret_ty);
+ const ret_llvm_ty = try self.dg.lowerType(ret_ty);
const llvm_fn_ty = llvm.functionType(
ret_llvm_ty,
llvm_param_types.ptr,
@@ -5425,7 +5425,7 @@ pub const FuncGen = struct {
const operand_ty = self.air.typeOf(un_op);
const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
if (optional_ty.optionalReprIsPayload()) {
- const optional_llvm_ty = try self.dg.llvmType(optional_ty);
+ const optional_llvm_ty = try self.dg.lowerType(optional_ty);
const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand;
return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), "");
}
@@ -5462,7 +5462,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(un_op);
const err_union_ty = self.air.typeOf(un_op);
const payload_ty = err_union_ty.errorUnionPayload();
- const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror));
+ const err_set_ty = try self.dg.lowerType(Type.initTag(.anyerror));
const zero = err_set_ty.constNull();
if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) {
@@ -5506,7 +5506,7 @@ pub const FuncGen = struct {
// a pointer to a zero-bit value.
// TODO once we update to LLVM 14 this bitcast won't be necessary.
- const res_ptr_ty = try self.dg.llvmType(result_ty);
+ const res_ptr_ty = try self.dg.lowerType(result_ty);
return self.builder.buildBitCast(operand, res_ptr_ty, "");
}
if (optional_ty.optionalReprIsPayload()) {
@@ -5534,7 +5534,7 @@ pub const FuncGen = struct {
_ = self.builder.buildStore(non_null_bit, operand);
// TODO once we update to LLVM 14 this bitcast won't be necessary.
- const res_ptr_ty = try self.dg.llvmType(result_ty);
+ const res_ptr_ty = try self.dg.lowerType(result_ty);
return self.builder.buildBitCast(operand, res_ptr_ty, "");
}
if (optional_ty.optionalReprIsPayload()) {
@@ -5604,7 +5604,7 @@ pub const FuncGen = struct {
if (!operand_is_ptr) return null;
// TODO once we update to LLVM 14 this bitcast won't be necessary.
- const res_ptr_ty = try self.dg.llvmType(result_ty);
+ const res_ptr_ty = try self.dg.lowerType(result_ty);
return self.builder.buildBitCast(operand, res_ptr_ty, "");
}
if (operand_is_ptr or isByRef(payload_ty)) {
@@ -5626,7 +5626,7 @@ pub const FuncGen = struct {
const operand_ty = self.air.typeOf(ty_op.operand);
const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) {
- const err_llvm_ty = try self.dg.llvmType(Type.anyerror);
+ const err_llvm_ty = try self.dg.lowerType(Type.anyerror);
if (operand_is_ptr) {
return self.builder.buildBitCast(operand, err_llvm_ty.pointerType(0), "");
} else {
@@ -5662,7 +5662,7 @@ pub const FuncGen = struct {
return operand;
}
const payload_ty = error_union_ty.errorUnionPayload();
- const non_error_val = try self.dg.genTypedValue(.{ .ty = error_ty, .val = Value.zero });
+ const non_error_val = try self.dg.lowerValue(.{ .ty = error_ty, .val = Value.zero });
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
_ = self.builder.buildStore(non_error_val, operand);
return operand;
@@ -5715,7 +5715,7 @@ pub const FuncGen = struct {
if (optional_ty.optionalReprIsPayload()) {
return operand;
}
- const llvm_optional_ty = try self.dg.llvmType(optional_ty);
+ const llvm_optional_ty = try self.dg.lowerType(optional_ty);
if (isByRef(optional_ty)) {
const optional_ptr = self.buildAlloca(llvm_optional_ty);
const payload_ptr = self.builder.buildStructGEP(optional_ptr, 0, "");
@@ -5746,8 +5746,8 @@ pub const FuncGen = struct {
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
return operand;
}
- const ok_err_code = (try self.dg.llvmType(Type.anyerror)).constNull();
- const err_un_llvm_ty = try self.dg.llvmType(inst_ty);
+ const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull();
+ const err_un_llvm_ty = try self.dg.lowerType(inst_ty);
const target = self.dg.module.getTarget();
const payload_offset = errUnionPayloadOffset(payload_ty, target);
@@ -5781,7 +5781,7 @@ pub const FuncGen = struct {
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
return operand;
}
- const err_un_llvm_ty = try self.dg.llvmType(err_un_ty);
+ const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);
const target = self.dg.module.getTarget();
const payload_offset = errUnionPayloadOffset(payload_ty, target);
@@ -5866,7 +5866,7 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(bin_op.lhs);
const len = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const llvm_slice_ty = try self.dg.llvmType(inst_ty);
+ const llvm_slice_ty = try self.dg.lowerType(inst_ty);
// In case of slicing a global, the result type looks something like `{ i8*, i64 }`
// but `ptr` is pointing to the global directly. If it's an array, we would want to
@@ -5874,7 +5874,7 @@ pub const FuncGen = struct {
// This prevents an assertion failure.
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = inst_ty.slicePtrFieldType(&buf);
- const ptr_llvm_ty = try self.dg.llvmType(ptr_ty);
+ const ptr_llvm_ty = try self.dg.lowerType(ptr_ty);
const casted_ptr = self.builder.buildBitCast(ptr, ptr_llvm_ty, "");
const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), casted_ptr, 0, "");
return self.builder.buildInsertValue(partial, len, 1, "");
@@ -6040,7 +6040,7 @@ pub const FuncGen = struct {
// const d = @divTrunc(a, b);
// const r = @rem(a, b);
// return if (r == 0) d else d - ((a < 0) ^ (b < 0));
- const result_llvm_ty = try self.dg.llvmType(inst_ty);
+ const result_llvm_ty = try self.dg.lowerType(inst_ty);
const zero = result_llvm_ty.constNull();
const div_trunc = self.builder.buildSDiv(lhs, rhs, "");
const rem = self.builder.buildSRem(lhs, rhs, "");
@@ -6090,7 +6090,7 @@ pub const FuncGen = struct {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const inst_llvm_ty = try self.dg.llvmType(inst_ty);
+ const inst_llvm_ty = try self.dg.lowerType(inst_ty);
const scalar_ty = inst_ty.scalarType();
if (scalar_ty.isRuntimeFloat()) {
@@ -6174,8 +6174,8 @@ pub const FuncGen = struct {
const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic;
- const llvm_lhs_ty = try self.dg.llvmType(lhs_ty);
- const llvm_dest_ty = try self.dg.llvmType(dest_ty);
+ const llvm_lhs_ty = try self.dg.lowerType(lhs_ty);
+ const llvm_dest_ty = try self.dg.lowerType(dest_ty);
const tg = self.dg.module.getTarget();
@@ -6283,7 +6283,7 @@ pub const FuncGen = struct {
) !*const llvm.Value {
const target = self.dg.module.getTarget();
const scalar_ty = ty.scalarType();
- const scalar_llvm_ty = try self.dg.llvmType(scalar_ty);
+ const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
if (intrinsicsAllowed(scalar_ty, target)) {
const llvm_predicate: llvm.RealPredicate = switch (pred) {
@@ -6383,8 +6383,8 @@ pub const FuncGen = struct {
) !*const llvm.Value {
const target = self.dg.module.getTarget();
const scalar_ty = ty.scalarType();
- const llvm_ty = try self.dg.llvmType(ty);
- const scalar_llvm_ty = try self.dg.llvmType(scalar_ty);
+ const llvm_ty = try self.dg.lowerType(ty);
+ const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target);
var fn_name_buf: [64]u8 = undefined;
@@ -6478,12 +6478,12 @@ pub const FuncGen = struct {
const rhs_scalar_ty = rhs_ty.scalarType();
const dest_ty = self.air.typeOfIndex(inst);
- const llvm_dest_ty = try self.dg.llvmType(dest_ty);
+ const llvm_dest_ty = try self.dg.lowerType(dest_ty);
const tg = self.dg.module.getTarget();
const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
- self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "")
+ self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
@@ -6543,7 +6543,7 @@ pub const FuncGen = struct {
const tg = self.dg.module.getTarget();
const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
- self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "")
+ self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, "");
@@ -6566,7 +6566,7 @@ pub const FuncGen = struct {
const tg = self.dg.module.getTarget();
const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
- self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "")
+ self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "")
else
rhs;
return self.builder.buildShl(lhs, casted_rhs, "");
@@ -6588,7 +6588,7 @@ pub const FuncGen = struct {
const tg = self.dg.module.getTarget();
const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
- self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "")
+ self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
if (lhs_scalar_ty.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, "");
@@ -6611,7 +6611,7 @@ pub const FuncGen = struct {
const tg = self.dg.module.getTarget();
const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
- self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "")
+ self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
const is_signed_int = lhs_scalar_ty.isSignedInt();
@@ -6639,7 +6639,7 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dest_ty = self.air.typeOfIndex(inst);
const dest_info = dest_ty.intInfo(target);
- const dest_llvm_ty = try self.dg.llvmType(dest_ty);
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
const operand_info = operand_ty.intInfo(target);
@@ -6661,7 +6661,7 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
return self.builder.buildTrunc(operand, dest_llvm_ty, "");
}
@@ -6679,7 +6679,7 @@ pub const FuncGen = struct {
if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) {
return softF80TruncOrExt(self, operand, src_bits, dest_bits);
}
- const dest_llvm_ty = try self.dg.llvmType(dest_ty);
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
return self.builder.buildFPTrunc(operand, dest_llvm_ty, "");
}
@@ -6697,7 +6697,7 @@ pub const FuncGen = struct {
if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) {
return softF80TruncOrExt(self, operand, src_bits, dest_bits);
}
- const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
return self.builder.buildFPExt(operand, dest_llvm_ty, "");
}
@@ -6707,7 +6707,7 @@ pub const FuncGen = struct {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
- const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
return self.builder.buildPtrToInt(operand, dest_llvm_ty, "");
}
@@ -6720,7 +6720,7 @@ pub const FuncGen = struct {
const inst_ty = self.air.typeOfIndex(inst);
const operand_is_ref = isByRef(operand_ty);
const result_is_ref = isByRef(inst_ty);
- const llvm_dest_ty = try self.dg.llvmType(inst_ty);
+ const llvm_dest_ty = try self.dg.lowerType(inst_ty);
const target = self.dg.module.getTarget();
if (operand_is_ref and result_is_ref) {
@@ -6740,14 +6740,14 @@ pub const FuncGen = struct {
const array_ptr = self.buildAlloca(llvm_dest_ty);
const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
if (bitcast_ok) {
- const llvm_vector_ty = try self.dg.llvmType(operand_ty);
+ const llvm_vector_ty = try self.dg.lowerType(operand_ty);
const casted_ptr = self.builder.buildBitCast(array_ptr, llvm_vector_ty.pointerType(0), "");
const llvm_store = self.builder.buildStore(operand, casted_ptr);
llvm_store.setAlignment(inst_ty.abiAlignment(target));
} else {
// If the element type's bit size does not equal 8 times its ABI size,
// a simple bitcast will not work, and we fall back to extractelement.
- const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_u32 = self.context.intType(32);
const zero = llvm_usize.constNull();
const vector_len = operand_ty.arrayLen();
@@ -6764,7 +6764,7 @@ pub const FuncGen = struct {
return array_ptr;
} else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
const elem_ty = operand_ty.childType();
- const llvm_vector_ty = try self.dg.llvmType(inst_ty);
+ const llvm_vector_ty = try self.dg.lowerType(inst_ty);
if (!operand_is_ref) {
return self.dg.todo("implement bitcast non-ref array to vector", .{});
}
@@ -6781,7 +6781,7 @@ pub const FuncGen = struct {
} else {
// If the element type's bit size does not equal 8 times its ABI size,
// a simple bitcast will not work, and we fall back to extractelement.
- const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_u32 = self.context.intType(32);
const zero = llvm_usize.constNull();
const vector_len = operand_ty.arrayLen();
@@ -6813,7 +6813,7 @@ pub const FuncGen = struct {
const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
const result_ptr = self.buildAlloca(llvm_dest_ty);
result_ptr.setAlignment(alignment);
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), "");
const store_inst = self.builder.buildStore(operand, casted_ptr);
store_inst.setAlignment(alignment);
@@ -6827,7 +6827,7 @@ pub const FuncGen = struct {
const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
const result_ptr = self.buildAlloca(llvm_dest_ty);
result_ptr.setAlignment(alignment);
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), "");
const store_inst = self.builder.buildStore(operand, casted_ptr);
store_inst.setAlignment(alignment);
@@ -6901,7 +6901,7 @@ pub const FuncGen = struct {
const pointee_type = ptr_ty.childType();
if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
- const pointee_llvm_ty = try self.dg.llvmType(pointee_type);
+ const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
const alloca_inst = self.buildAlloca(pointee_llvm_ty);
const target = self.dg.module.getTarget();
const alignment = ptr_ty.ptrAlignment(target);
@@ -6915,7 +6915,7 @@ pub const FuncGen = struct {
const ret_ty = ptr_ty.childType();
if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
if (self.ret_ptr) |ret_ptr| return ret_ptr;
- const ret_llvm_ty = try self.dg.llvmType(ret_ty);
+ const ret_llvm_ty = try self.dg.lowerType(ret_ty);
const target = self.dg.module.getTarget();
const alloca_inst = self.buildAlloca(ret_llvm_ty);
alloca_inst.setAlignment(ptr_ty.ptrAlignment(target));
@@ -6946,7 +6946,7 @@ pub const FuncGen = struct {
const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
const fill_char = u8_llvm_ty.constInt(0xaa, .False);
const dest_ptr_align = ptr_ty.ptrAlignment(target);
- const usize_llvm_ty = try self.dg.llvmType(Type.usize);
+ const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const len = usize_llvm_ty.constInt(operand_size, .False);
_ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
if (self.dg.module.comp.bin_file.options.valgrind) {
@@ -6983,7 +6983,7 @@ pub const FuncGen = struct {
const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{});
const params = [_]*const llvm.Value{llvm_i32.constNull()};
const ptr_val = self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, "");
- const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_usize = try self.dg.lowerType(Type.usize);
return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
}
@@ -7001,7 +7001,7 @@ pub const FuncGen = struct {
const params = [_]*const llvm.Value{llvm_i32.constNull()};
const ptr_val = self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, "");
- const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_usize = try self.dg.lowerType(Type.usize);
return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
}
@@ -7046,7 +7046,7 @@ pub const FuncGen = struct {
var payload = self.builder.buildExtractValue(result, 0, "");
if (opt_abi_ty != null) {
- payload = self.builder.buildTrunc(payload, try self.dg.llvmType(operand_ty), "");
+ payload = self.builder.buildTrunc(payload, try self.dg.lowerType(operand_ty), "");
}
const success_bit = self.builder.buildExtractValue(result, 1, "");
@@ -7054,7 +7054,7 @@ pub const FuncGen = struct {
return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
}
- const optional_llvm_ty = try self.dg.llvmType(optional_ty);
+ const optional_llvm_ty = try self.dg.lowerType(optional_ty);
const non_null_bit = self.builder.buildNot(success_bit, "");
const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, "");
return self.builder.buildInsertValue(partial, non_null_bit, 1, "");
@@ -7090,7 +7090,7 @@ pub const FuncGen = struct {
ordering,
single_threaded,
);
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
if (is_float) {
return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, "");
} else {
@@ -7103,7 +7103,7 @@ pub const FuncGen = struct {
}
// It's a pointer but we need to treat it as an int.
- const usize_llvm_ty = try self.dg.llvmType(Type.usize);
+ const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), "");
const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, "");
const uncasted_result = self.builder.buildAtomicRmw(
@@ -7113,7 +7113,7 @@ pub const FuncGen = struct {
ordering,
single_threaded,
);
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, "");
}
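A sketch of the pointer-as-integer round trip above (assuming a 64-bit target, so usize is i64):

    // 1. bitcast the destination:  T** -> i64*
    // 2. ptrtoint the operand:     T*  -> i64
    // 3. atomicrmw on the i64 slot
    // 4. inttoptr the i64 result back to T*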
@@ -7132,7 +7132,7 @@ pub const FuncGen = struct {
const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
const load_inst = (try self.load(casted_ptr, ptr_ty)).?;
load_inst.setOrdering(ordering);
- return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), "");
+ return self.builder.buildTrunc(load_inst, try self.dg.lowerType(operand_ty), "");
}
const load_inst = (try self.load(ptr, ptr_ty)).?;
load_inst.setOrdering(ordering);
@@ -7273,13 +7273,13 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const llvm_i1 = self.context.intType(1);
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() };
const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
const result_ty = self.air.typeOfIndex(inst);
- const result_llvm_ty = try self.dg.llvmType(result_ty);
+ const result_llvm_ty = try self.dg.lowerType(result_ty);
const target = self.dg.module.getTarget();
const bits = operand_ty.intInfo(target).bits;
@@ -7301,12 +7301,12 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const params = [_]*const llvm.Value{operand};
- const operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
const result_ty = self.air.typeOfIndex(inst);
- const result_llvm_ty = try self.dg.llvmType(result_ty);
+ const result_llvm_ty = try self.dg.lowerType(result_ty);
const target = self.dg.module.getTarget();
const bits = operand_ty.intInfo(target).bits;
@@ -7330,7 +7330,7 @@ pub const FuncGen = struct {
assert(bits % 8 == 0);
var operand = try self.resolveInst(ty_op.operand);
- var operand_llvm_ty = try self.dg.llvmType(operand_ty);
+ var operand_llvm_ty = try self.dg.lowerType(operand_ty);
if (bits % 16 == 8) {
// If not an even byte-multiple, we need zero-extend + shift-left 1 byte
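Concretely, for a u24 operand the fixup sketched in that comment works out to (my worked example):

    // x: u24 = 0xABCDEF
    // zext to u32, shl by 8  =>  0xABCDEF00
    // @byteSwap as u32       =>  0x00EFCDAB
    // trunc to u24           =>  0xEFCDAB  (bytes reversed)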
@@ -7364,7 +7364,7 @@ pub const FuncGen = struct {
const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, "");
const result_ty = self.air.typeOfIndex(inst);
- const result_llvm_ty = try self.dg.llvmType(result_ty);
+ const result_llvm_ty = try self.dg.lowerType(result_ty);
const result_bits = result_ty.intInfo(target).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
@@ -7407,14 +7407,14 @@ pub const FuncGen = struct {
}
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const llvm_ret_ty = try self.dg.llvmType(slice_ty);
- const usize_llvm_ty = try self.dg.llvmType(Type.usize);
+ const llvm_ret_ty = try self.dg.lowerType(slice_ty);
+ const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const target = self.dg.module.getTarget();
const slice_alignment = slice_ty.abiAlignment(target);
var int_tag_type_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
- const param_types = [_]*const llvm.Type{try self.dg.llvmType(int_tag_ty)};
+ const param_types = [_]*const llvm.Type{try self.dg.lowerType(int_tag_ty)};
const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
@@ -7471,7 +7471,7 @@ pub const FuncGen = struct {
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, field_index),
};
- break :int try self.dg.genTypedValue(.{
+ break :int try self.dg.lowerValue(.{
.ty = enum_ty,
.val = Value.initPayload(&tag_val_payload.base),
});
@@ -7496,8 +7496,8 @@ pub const FuncGen = struct {
// Function signature: fn (anyerror) bool
- const ret_llvm_ty = try self.dg.llvmType(Type.bool);
- const anyerror_llvm_ty = try self.dg.llvmType(Type.anyerror);
+ const ret_llvm_ty = try self.dg.lowerType(Type.bool);
+ const anyerror_llvm_ty = try self.dg.lowerType(Type.anyerror);
const param_types = [_]*const llvm.Type{anyerror_llvm_ty};
const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False);
@@ -7606,7 +7606,7 @@ pub const FuncGen = struct {
.Add => switch (scalar_ty.zigTypeTag()) {
.Int => return self.builder.buildAddReduce(operand),
.Float => {
- const scalar_llvm_ty = try self.dg.llvmType(scalar_ty);
+ const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const neutral_value = scalar_llvm_ty.constReal(-0.0);
return self.builder.buildFPAddReduce(neutral_value, operand);
},
@@ -7615,7 +7615,7 @@ pub const FuncGen = struct {
.Mul => switch (scalar_ty.zigTypeTag()) {
.Int => return self.builder.buildMulReduce(operand),
.Float => {
- const scalar_llvm_ty = try self.dg.llvmType(scalar_ty);
+ const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const neutral_value = scalar_llvm_ty.constReal(1.0);
return self.builder.buildFPMulReduce(neutral_value, operand);
},
@@ -7631,7 +7631,7 @@ pub const FuncGen = struct {
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
- const llvm_result_ty = try self.dg.llvmType(result_ty);
+ const llvm_result_ty = try self.dg.lowerType(result_ty);
const target = self.dg.module.getTarget();
switch (result_ty.zigTypeTag()) {
@@ -7719,7 +7719,7 @@ pub const FuncGen = struct {
.Array => {
assert(isByRef(result_ty));
- const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_usize = try self.dg.lowerType(Type.usize);
const alloca_inst = self.buildAlloca(llvm_result_ty);
alloca_inst.setAlignment(result_ty.abiAlignment(target));
@@ -7754,7 +7754,7 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = self.air.typeOfIndex(inst);
- const union_llvm_ty = try self.dg.llvmType(union_ty);
+ const union_llvm_ty = try self.dg.lowerType(union_ty);
const target = self.dg.module.getTarget();
const layout = union_ty.unionGetLayout(target);
if (layout.payload_size == 0) {
@@ -7774,8 +7774,8 @@ pub const FuncGen = struct {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
assert(union_obj.haveFieldTypes());
const field = union_obj.fields.values()[extra.field_index];
- const field_llvm_ty = try self.dg.llvmType(field.ty);
- const tag_llvm_ty = try self.dg.llvmType(union_obj.tag_ty);
+ const field_llvm_ty = try self.dg.lowerType(field.ty);
+ const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty);
const field_size = field.ty.abiSize(target);
const field_align = field.normalAlignment(target);
@@ -8011,7 +8011,7 @@ pub const FuncGen = struct {
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget());
- const llvm_slice_ty = try self.dg.llvmType(slice_ty);
+ const llvm_slice_ty = try self.dg.lowerType(slice_ty);
const llvm_slice_ptr_ty = llvm_slice_ty.pointerType(0); // TODO: Address space
const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
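The table declared here is later indexed by an error's integer value to recover its name; a sketch of the access pattern (an assumption on my part):

    // __zig_err_name_table holds one [:0]const u8 slice per error value,
    // so @errorName(err) is roughly table[@errorToInt(err)].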
@@ -8075,7 +8075,7 @@ pub const FuncGen = struct {
// out the relevant bits when accessing the pointee.
// Here we perform a bitcast because we want to use the host_size
// as the llvm pointer element type.
- const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
// TODO this can be removed if we change host_size to be bits instead
// of bytes.
return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
@@ -8090,7 +8090,7 @@ pub const FuncGen = struct {
// end of the struct. Treat our struct pointer as an array of two and get
// the index to the element at index `1` to get a pointer to the end of
// the struct.
- const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_index = llvm_usize.constInt(1, .False);
const indices: [1]*const llvm.Value = .{llvm_index};
return self.builder.buildInBoundsGEP(struct_ptr, &indices, indices.len, "");
@@ -8111,7 +8111,7 @@ pub const FuncGen = struct {
) !?*const llvm.Value {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field = &union_obj.fields.values()[field_index];
- const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+ const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
if (!field.ty.hasRuntimeBitsIgnoreComptime()) {
return null;
}
@@ -8150,7 +8150,7 @@ pub const FuncGen = struct {
const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr());
if (info.host_size == 0) {
if (isByRef(info.pointee_type)) {
- const elem_llvm_ty = try self.dg.llvmType(info.pointee_type);
+ const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
const result_align = info.pointee_type.abiAlignment(target);
const max_align = @maximum(result_align, ptr_alignment);
const result_ptr = self.buildAlloca(elem_llvm_ty);
@@ -8183,7 +8183,7 @@ pub const FuncGen = struct {
const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
- const elem_llvm_ty = try self.dg.llvmType(info.pointee_type);
+ const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
if (isByRef(info.pointee_type)) {
const result_align = info.pointee_type.abiAlignment(target);
@@ -8625,7 +8625,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
if (fn_info.return_type.isError()) {
- return dg.llvmType(Type.anyerror);
+ return dg.lowerType(Type.anyerror);
} else {
return dg.context.voidType();
}
@@ -8636,7 +8636,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
if (isByRef(fn_info.return_type)) {
return dg.context.voidType();
} else {
- return dg.llvmType(fn_info.return_type);
+ return dg.lowerType(fn_info.return_type);
}
},
.C => {
@@ -8657,24 +8657,24 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
else => false,
};
switch (target.cpu.arch) {
- .mips, .mipsel => return dg.llvmType(fn_info.return_type),
+ .mips, .mipsel => return dg.lowerType(fn_info.return_type),
.x86_64 => switch (target.os.tag) {
.windows => switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
.integer => {
if (is_scalar) {
- return dg.llvmType(fn_info.return_type);
+ return dg.lowerType(fn_info.return_type);
} else {
const abi_size = fn_info.return_type.abiSize(target);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
},
.memory => return dg.context.voidType(),
- .sse => return dg.llvmType(fn_info.return_type),
+ .sse => return dg.lowerType(fn_info.return_type),
else => unreachable,
},
else => {
if (is_scalar) {
- return dg.llvmType(fn_info.return_type);
+ return dg.lowerType(fn_info.return_type);
}
const classes = x86_64_abi.classifySystemV(fn_info.return_type, target);
if (classes[0] == .memory) {
@@ -8715,10 +8715,10 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
},
},
// TODO investigate C ABI for other architectures
- else => return dg.llvmType(fn_info.return_type),
+ else => return dg.lowerType(fn_info.return_type),
}
},
- else => return dg.llvmType(fn_info.return_type),
+ else => return dg.lowerType(fn_info.return_type),
}
}
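Taken together, lowerFnRetTy only departs from plain lowerType(return_type) for ABI reasons under the C calling convention; a sketch of the x86_64-windows .integer case above (hypothetical example):

    // extern struct { a: u32, b: u32 } returned from a callconv(.C) fn:
    //   classifyWindows => .integer, abi_size = 8  =>  declared to return i64,
    //   with the call site casting back to the Zig-level struct.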