Diffstat (limited to 'src')
-rw-r--r--  src/arch/aarch64/abi.zig     |  62
-rw-r--r--  src/arch/arm/abi.zig         | 154
-rw-r--r--  src/arch/riscv64/abi.zig     |  69
-rw-r--r--  src/arch/x86_64/CodeGen.zig  |   2
-rw-r--r--  src/arch/x86_64/abi.zig      |  24
-rw-r--r--  src/codegen/llvm.zig         | 194
6 files changed, 426 insertions(+), 79 deletions(-)
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 7c92d4e91c..e83cc0444a 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -5,41 +5,54 @@ const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
-pub const Class = enum(u8) { memory, integer, none, float_array, _ };
+pub const Class = union(enum) {
+ memory,
+ byval,
+ integer,
+ double_integer,
+ float_array: u8,
+};
-/// For `float_array` the second element will be the amount of floats.
+/// For `float_array` the payload is the number of floats.
-pub fn classifyType(ty: Type, target: std.Target) [2]Class {
- var maybe_float_bits: ?u16 = null;
- const float_count = countFloats(ty, target, &maybe_float_bits);
- if (float_count <= sret_float_count) return .{ .float_array, @intToEnum(Class, float_count) };
- return classifyTypeInner(ty, target);
-}
+pub fn classifyType(ty: Type, target: std.Target) Class {
+ std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
-fn classifyTypeInner(ty: Type, target: std.Target) [2]Class {
- if (!ty.hasRuntimeBitsIgnoreComptime()) return .{ .none, .none };
+ var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag()) {
.Struct => {
- if (ty.containerLayout() == .Packed) return .{ .integer, .none };
+ if (ty.containerLayout() == .Packed) return .byval;
+ const float_count = countFloats(ty, target, &maybe_float_bits);
+ if (float_count <= sret_float_count) return .{ .float_array = float_count };
+
const bit_size = ty.bitSize(target);
- if (bit_size > 128) return .{ .memory, .none };
- if (bit_size > 64) return .{ .integer, .integer };
- return .{ .integer, .none };
+ if (bit_size > 128) return .memory;
+ if (bit_size > 64) return .double_integer;
+ return .integer;
},
.Union => {
+ if (ty.containerLayout() == .Packed) return .byval;
+ const float_count = countFloats(ty, target, &maybe_float_bits);
+ if (float_count <= sret_float_count) return .{ .float_array = float_count };
+
+ const bit_size = ty.bitSize(target);
+ if (bit_size > 128) return .memory;
+ if (bit_size > 64) return .double_integer;
+ return .integer;
+ },
+ .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
+ .Vector => {
const bit_size = ty.bitSize(target);
- if (bit_size > 128) return .{ .memory, .none };
- if (bit_size > 64) return .{ .integer, .integer };
- return .{ .integer, .none };
+ // TODO is this controlled by a cpu feature?
+ if (bit_size > 128) return .memory;
+ return .byval;
},
- .Int, .Enum, .ErrorSet, .Vector, .Float, .Bool => return .{ .integer, .none },
- .Array => return .{ .memory, .none },
.Optional => {
std.debug.assert(ty.isPtrLikeOptional());
- return .{ .integer, .none };
+ return .byval;
},
.Pointer => {
std.debug.assert(!ty.isSlice());
- return .{ .integer, .none };
+ return .byval;
},
.ErrorUnion,
.Frame,
@@ -55,17 +68,18 @@ fn classifyTypeInner(ty: Type, target: std.Target) [2]Class {
.Fn,
.Opaque,
.EnumLiteral,
+ .Array,
=> unreachable,
}
}
const sret_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
- const invalid = std.math.maxInt(u32);
+fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
+ const invalid = std.math.maxInt(u8);
switch (ty.zigTypeTag()) {
.Union => {
const fields = ty.unionFields();
- var max_count: u32 = 0;
+ var max_count: u8 = 0;
for (fields.values()) |field| {
const field_count = countFloats(field.ty, target, maybe_float_bits);
if (field_count == invalid) return invalid;
@@ -76,7 +90,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
},
.Struct => {
const fields_len = ty.structFieldCount();
- var count: u32 = 0;
+ var count: u8 = 0;
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
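
Illustration (not part of the commit; type names are hypothetical): what the new aarch64 classifyType returns for a few extern aggregates under the rules above.

    const Hfa = extern struct { x: f32, y: f32 };        // 2 floats <= sret_float_count (4) -> .{ .float_array = 2 }
    const Pair = extern struct { a: u64, b: u64 };       // no floats, 65..128 bits          -> .double_integer
    const Big = extern struct { a: u64, b: u64, c: u8 }; // no floats, more than 128 bits    -> .memory
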
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index 3fcfb0e561..9659ca13d7 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -2,6 +2,160 @@ const std = @import("std");
const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
+const Type = @import("../../type.zig").Type;
+
+pub const Class = union(enum) {
+ memory,
+ byval,
+ i32_array: u8,
+ i64_array: u8,
+
+ fn arrSize(total_size: u64, arr_size: u64) Class {
+ const count = @intCast(u8, std.mem.alignForward(total_size, arr_size) / arr_size);
+ if (arr_size == 32) {
+ return .{ .i32_array = count };
+ } else {
+ return .{ .i64_array = count };
+ }
+ }
+};
+
+pub const Context = enum { ret, arg };
+
+pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
+ std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+
+ var maybe_float_bits: ?u16 = null;
+ const max_byval_size = 512;
+ switch (ty.zigTypeTag()) {
+ .Struct => {
+ const bit_size = ty.bitSize(target);
+ if (ty.containerLayout() == .Packed) {
+ if (bit_size > 64) return .memory;
+ return .byval;
+ }
+ if (bit_size > max_byval_size) return .memory;
+ const float_count = countFloats(ty, target, &maybe_float_bits);
+ if (float_count <= byval_float_count) return .byval;
+
+ const fields = ty.structFieldCount();
+ var i: u32 = 0;
+ while (i < fields) : (i += 1) {
+ const field_ty = ty.structFieldType(i);
+ const field_alignment = ty.structFieldAlign(i, target);
+ const field_size = field_ty.bitSize(target);
+ if (field_size > 32 or field_alignment > 32) {
+ return Class.arrSize(bit_size, 64);
+ }
+ }
+ return Class.arrSize(bit_size, 32);
+ },
+ .Union => {
+ const bit_size = ty.bitSize(target);
+ if (ty.containerLayout() == .Packed) {
+ if (bit_size > 64) return .memory;
+ return .byval;
+ }
+ if (bit_size > max_byval_size) return .memory;
+ const float_count = countFloats(ty, target, &maybe_float_bits);
+ if (float_count <= byval_float_count) return .byval;
+
+ for (ty.unionFields().values()) |field| {
+ if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) {
+ return Class.arrSize(bit_size, 64);
+ }
+ }
+ return Class.arrSize(bit_size, 32);
+ },
+ .Bool, .Float => return .byval,
+ .Int => {
+ // TODO this is incorrect for _BitInt(128), but handling it
+ // correctly would make implementing compiler-rt impossible.
+ // const bit_size = ty.bitSize(target);
+ // if (bit_size > 64) return .memory;
+ return .byval;
+ },
+ .Enum, .ErrorSet => {
+ const bit_size = ty.bitSize(target);
+ if (bit_size > 64) return .memory;
+ return .byval;
+ },
+ .Vector => {
+ const bit_size = ty.bitSize(target);
+ // TODO is this controlled by a cpu feature?
+ if (ctx == .ret and bit_size > 128) return .memory;
+ if (bit_size > 512) return .memory;
+ return .byval;
+ },
+ .Optional => {
+ std.debug.assert(ty.isPtrLikeOptional());
+ return .byval;
+ },
+ .Pointer => {
+ std.debug.assert(!ty.isSlice());
+ return .byval;
+ },
+ .ErrorUnion,
+ .Frame,
+ .AnyFrame,
+ .NoReturn,
+ .Void,
+ .Type,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .BoundFn,
+ .Fn,
+ .Opaque,
+ .EnumLiteral,
+ .Array,
+ => unreachable,
+ }
+}
+
+const byval_float_count = 4;
+fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
+ const invalid = std.math.maxInt(u32);
+ switch (ty.zigTypeTag()) {
+ .Union => {
+ const fields = ty.unionFields();
+ var max_count: u32 = 0;
+ for (fields.values()) |field| {
+ const field_count = countFloats(field.ty, target, maybe_float_bits);
+ if (field_count == invalid) return invalid;
+ if (field_count > max_count) max_count = field_count;
+ if (max_count > byval_float_count) return invalid;
+ }
+ return max_count;
+ },
+ .Struct => {
+ const fields_len = ty.structFieldCount();
+ var count: u32 = 0;
+ var i: u32 = 0;
+ while (i < fields_len) : (i += 1) {
+ const field_ty = ty.structFieldType(i);
+ const field_count = countFloats(field_ty, target, maybe_float_bits);
+ if (field_count == invalid) return invalid;
+ count += field_count;
+ if (count > byval_float_count) return invalid;
+ }
+ return count;
+ },
+ .Float => {
+ const float_bits = maybe_float_bits.* orelse {
+ const float_bits = ty.floatBits(target);
+ if (float_bits != 32 and float_bits != 64) return invalid;
+ maybe_float_bits.* = float_bits;
+ return 1;
+ };
+ if (ty.floatBits(target) == float_bits) return 1;
+ return invalid;
+ },
+ .Void => return 0,
+ else => return invalid,
+ }
+}
pub const callee_preserved_regs = [_]Register{ .r4, .r5, .r6, .r7, .r8, .r10 };
pub const caller_preserved_regs = [_]Register{ .r0, .r1, .r2, .r3 };
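
Illustration (not part of the commit; S and takes are hypothetical): how the new ARM classification feeds the i32_array lowering added to llvm.zig below.

    const S = extern struct { a: i32, b: i32, c: i32 };
    // 96 bits; every field is <= 32 bits with <= 32-bit alignment -> .{ .i32_array = 3 }
    extern fn takes(s: S) void; // the LLVM parameter type becomes [3 x i32]
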
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index 3792c4ab18..8a560f4596 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -2,6 +2,75 @@ const std = @import("std");
const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
+const Type = @import("../../type.zig").Type;
+
+pub const Class = enum { memory, byval, integer, double_integer };
+
+pub fn classifyType(ty: Type, target: std.Target) Class {
+ std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+
+ const max_byval_size = target.cpu.arch.ptrBitWidth() * 2;
+ switch (ty.zigTypeTag()) {
+ .Struct => {
+ const bit_size = ty.bitSize(target);
+ if (ty.containerLayout() == .Packed) {
+ if (bit_size > max_byval_size) return .memory;
+ return .byval;
+ }
+ // TODO this doesn't exactly match what clang produces, but it's better than nothing
+ if (bit_size > max_byval_size) return .memory;
+ if (bit_size > max_byval_size / 2) return .double_integer;
+ return .integer;
+ },
+ .Union => {
+ const bit_size = ty.bitSize(target);
+ if (ty.containerLayout() == .Packed) {
+ if (bit_size > max_byval_size) return .memory;
+ return .byval;
+ }
+ // TODO this doesn't exactly match what clang produces, but it's better than nothing
+ if (bit_size > max_byval_size) return .memory;
+ if (bit_size > max_byval_size / 2) return .double_integer;
+ return .integer;
+ },
+ .Bool => return .integer,
+ .Float => return .byval,
+ .Int, .Enum, .ErrorSet => {
+ const bit_size = ty.bitSize(target);
+ if (bit_size > max_byval_size) return .memory;
+ return .byval;
+ },
+ .Vector => {
+ const bit_size = ty.bitSize(target);
+ if (bit_size > max_byval_size) return .memory;
+ return .integer;
+ },
+ .Optional => {
+ std.debug.assert(ty.isPtrLikeOptional());
+ return .byval;
+ },
+ .Pointer => {
+ std.debug.assert(!ty.isSlice());
+ return .byval;
+ },
+ .ErrorUnion,
+ .Frame,
+ .AnyFrame,
+ .NoReturn,
+ .Void,
+ .Type,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .BoundFn,
+ .Fn,
+ .Opaque,
+ .EnumLiteral,
+ .Array,
+ => unreachable,
+ }
+}
pub const callee_preserved_regs = [_]Register{
.s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
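
Illustration (not part of the commit; types are hypothetical): with max_byval_size = 64 * 2 = 128 bits on riscv64, the rules above classify:

    const Small = extern struct { a: u32, b: u32 }; //  64 bits -> .integer
    const Pair = extern struct { a: u64, b: u64 };  // 128 bits -> .double_integer
    const Big = extern struct { a: [4]u64 };        // 256 bits -> .memory
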
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index a1b354482b..5f793aaeb9 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -7149,7 +7149,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const classes: []const abi.Class = switch (self.target.os.tag) {
.windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)},
- else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*), .none),
+ else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none),
};
if (classes.len > 1) {
return self.fail("TODO handle multiple classes per type", .{});
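
Note (not part of the commit): classifySystemV now takes the calling context explicitly, so argument and return classification can differ (see the Vector case below). A call site picks one:

    const arg_classes = abi.classifySystemV(ty, self.target.*, .arg);
    const ret_classes = abi.classifySystemV(ty, self.target.*, .ret);
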
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 45c5760540..a428bcacdd 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -60,9 +60,11 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
}
}
+pub const Context = enum { ret, arg };
+
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystemV(ty: Type, target: Target) [8]Class {
+pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
@@ -134,6 +136,22 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
},
.Vector => {
const elem_ty = ty.childType();
+ if (ctx == .arg) {
+ const bit_size = ty.bitSize(target);
+ if (bit_size > 128) return memory_class;
+ if (bit_size > 80) return .{
+ .integer, .integer, .none, .none,
+ .none, .none, .none, .none,
+ };
+ if (bit_size > 64) return .{
+ .x87, .none, .none, .none,
+ .none, .none, .none, .none,
+ };
+ return .{
+ .integer, .none, .none, .none,
+ .none, .none, .none, .none,
+ };
+ }
const bits = elem_ty.bitSize(target) * ty.arrayLen();
if (bits <= 64) return .{
.sse, .none, .none, .none,
@@ -201,7 +219,7 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
}
}
const field_size = field.ty.abiSize(target);
- const field_class_array = classifySystemV(field.ty, target);
+ const field_class_array = classifySystemV(field.ty, target, .arg);
const field_class = std.mem.sliceTo(&field_class_array, .none);
if (byte_i + field_size <= 8) {
// Combine this field with the previous one.
@@ -315,7 +333,7 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
}
}
// Combine this field with the previous one.
- const field_class = classifySystemV(field.ty, target);
+ const field_class = classifySystemV(field.ty, target, .arg);
for (result) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 3ebca13c20..ffc19cb6f6 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -24,6 +24,8 @@ const CType = @import("../type.zig").CType;
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
+const arm_c_abi = @import("../arch/arm/abi.zig");
+const riscv_c_abi = @import("../arch/riscv64/abi.zig");
const Error = error{ OutOfMemory, CodegenFail };
@@ -1138,6 +1140,25 @@ pub const Object = struct {
try args.append(load_inst);
}
},
+ .i32_array, .i64_array => {
+ const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_llvm_ty = try dg.lowerType(param_ty);
+ const param = llvm_func.getParam(llvm_arg_i);
+ llvm_arg_i += 1;
+
+ const alignment = param_ty.abiAlignment(target);
+ const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target);
+ const casted_ptr = builder.buildBitCast(arg_ptr, param.typeOf().pointerType(0), "");
+ _ = builder.buildStore(param, casted_ptr);
+
+ if (isByRef(param_ty)) {
+ try args.append(arg_ptr);
+ } else {
+ const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
+ load_inst.setAlignment(alignment);
+ try args.append(load_inst);
+ }
+ },
};
}
@@ -2578,6 +2599,8 @@ pub const DeclGen = struct {
.multiple_llvm_float,
.as_u16,
.float_array,
+ .i32_array,
+ .i64_array,
=> continue,
.slice => unreachable, // extern functions do not support slice types.
@@ -3132,6 +3155,11 @@ pub const DeclGen = struct {
const arr_ty = float_ty.arrayType(field_count);
try llvm_params.append(arr_ty);
},
+ .i32_array, .i64_array => |arr_len| {
+ const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
+ const arr_ty = dg.context.intType(elem_size).arrayType(arr_len);
+ try llvm_params.append(arr_ty);
+ },
};
return llvm.functionType(
@@ -4822,6 +4850,25 @@ pub const FuncGen = struct {
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
},
+ .i32_array, .i64_array => |arr_len| {
+ const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
+ const arg = args[it.zig_index - 1];
+ const arg_ty = self.air.typeOf(arg);
+ var llvm_arg = try self.resolveInst(arg);
+ if (!isByRef(arg_ty)) {
+ const p = self.buildAlloca(llvm_arg.typeOf(), null);
+ const store_inst = self.builder.buildStore(llvm_arg, p);
+ store_inst.setAlignment(arg_ty.abiAlignment(target));
+ llvm_arg = p; // point at the alloca; buildStore returns the store instruction, not a pointer
+ }
+
+ const array_llvm_ty = self.dg.context.intType(elem_size).arrayType(arr_len);
+ const casted = self.builder.buildBitCast(llvm_arg, array_llvm_ty.pointerType(0), "");
+ const alignment = arg_ty.abiAlignment(target);
+ const load_inst = self.builder.buildLoad(array_llvm_ty, casted, "");
+ load_inst.setAlignment(alignment);
+ try llvm_args.append(load_inst);
+ },
};
const call = self.builder.buildCall(
@@ -10083,10 +10130,16 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
.mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) {
.windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
- else => return x86_64_abi.classifySystemV(fn_info.return_type, target)[0] == .memory,
+ else => return x86_64_abi.classifySystemV(fn_info.return_type, target, .ret)[0] == .memory,
},
.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect,
- .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target)[0] == .memory,
+ .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory,
+ .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+ .memory, .i64_array => return true,
+ .i32_array => |size| return size != 1,
+ .byval => return false,
+ },
+ .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory,
else => return false, // TODO investigate C ABI for other architectures
},
else => return false,
@@ -10139,7 +10192,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
if (is_scalar) {
return dg.lowerType(fn_info.return_type);
}
- const classes = x86_64_abi.classifySystemV(fn_info.return_type, target);
+ const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
if (classes[0] == .memory) {
return dg.context.voidType();
}
@@ -10197,22 +10250,44 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
return dg.context.intType(@intCast(c_uint, abi_size * 8));
},
.aarch64, .aarch64_be => {
- if (is_scalar) {
- return dg.lowerType(fn_info.return_type);
- }
- const classes = aarch64_c_abi.classifyType(fn_info.return_type, target);
- if (classes[0] == .memory or classes[0] == .none) {
- return dg.context.voidType();
+ switch (aarch64_c_abi.classifyType(fn_info.return_type, target)) {
+ .memory => return dg.context.voidType(),
+ .float_array => return dg.lowerType(fn_info.return_type),
+ .byval => return dg.lowerType(fn_info.return_type),
+ .integer => {
+ const bit_size = fn_info.return_type.bitSize(target);
+ return dg.context.intType(@intCast(c_uint, bit_size));
+ },
+ .double_integer => return dg.context.intType(64).arrayType(2),
}
- if (classes[0] == .float_array) {
- return dg.lowerType(fn_info.return_type);
+ },
+ .arm, .armeb => {
+ switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+ .memory, .i64_array => return dg.context.voidType(),
+ .i32_array => |len| if (len == 1) {
+ return dg.context.intType(32);
+ } else {
+ return dg.context.voidType();
+ },
+ .byval => return dg.lowerType(fn_info.return_type),
}
- if (classes[1] == .none) {
- const bit_size = fn_info.return_type.bitSize(target);
- return dg.context.intType(@intCast(c_uint, bit_size));
+ },
+ .riscv32, .riscv64 => {
+ switch (riscv_c_abi.classifyType(fn_info.return_type, target)) {
+ .memory => return dg.context.voidType(),
+ .integer => {
+ const bit_size = fn_info.return_type.bitSize(target);
+ return dg.context.intType(@intCast(c_uint, bit_size));
+ },
+ .double_integer => {
+ var llvm_types_buffer: [2]*llvm.Type = .{
+ dg.context.intType(64),
+ dg.context.intType(64),
+ };
+ return dg.context.structType(&llvm_types_buffer, 2, .False);
+ },
+ .byval => return dg.lowerType(fn_info.return_type),
}
-
- return dg.context.intType(64).arrayType(2);
},
// TODO investigate C ABI for other architectures
else => return dg.lowerType(fn_info.return_type),
@@ -10242,6 +10317,8 @@ const ParamTypeIterator = struct {
slice,
as_u16,
float_array: u8,
+ i32_array: u8,
+ i64_array: u8,
};
pub fn next(it: *ParamTypeIterator) ?Lowering {
@@ -10288,15 +10365,6 @@ const ParamTypeIterator = struct {
.C => {
const is_scalar = isScalar(ty);
switch (it.target.cpu.arch) {
- .riscv32, .riscv64 => {
- it.zig_index += 1;
- it.llvm_index += 1;
- if (ty.tag() == .f16) {
- return .as_u16;
- } else {
- return .byval;
- }
- },
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
@@ -10334,18 +10402,18 @@ const ParamTypeIterator = struct {
else => unreachable,
},
else => {
- if (is_scalar) {
- it.zig_index += 1;
- it.llvm_index += 1;
- return .byval;
- }
- const classes = x86_64_abi.classifySystemV(ty, it.target);
+ const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
+ if (is_scalar) {
+ it.zig_index += 1;
+ it.llvm_index += 1;
+ return .byval;
+ }
var llvm_types_buffer: [8]u16 = undefined;
var llvm_types_index: u32 = 0;
for (classes) |class| {
@@ -10383,11 +10451,6 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
return .abi_sized_int;
}
- if (classes[0] == .sse and classes[1] == .none) {
- it.zig_index += 1;
- it.llvm_index += 1;
- return .byval;
- }
it.llvm_types_buffer = llvm_types_buffer;
it.llvm_types_len = llvm_types_index;
it.llvm_index += llvm_types_index;
@@ -10410,24 +10473,45 @@ const ParamTypeIterator = struct {
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
- if (is_scalar) {
- return .byval;
+ switch (aarch64_c_abi.classifyType(ty, it.target)) {
+ .memory => return .byref,
+ .float_array => |len| return Lowering{ .float_array = len },
+ .byval => return .byval,
+ .integer => {
+ it.llvm_types_len = 1;
+ it.llvm_types_buffer[0] = 64;
+ return .multiple_llvm_ints;
+ },
+ .double_integer => return Lowering{ .i64_array = 2 },
}
- const classes = aarch64_c_abi.classifyType(ty, it.target);
- if (classes[0] == .memory) {
- return .byref;
+ },
+ .arm, .armeb => {
+ it.zig_index += 1;
+ it.llvm_index += 1;
+ switch (arm_c_abi.classifyType(ty, it.target, .arg)) {
+ .memory => {
+ it.byval_attr = true;
+ return .byref;
+ },
+ .byval => return .byval,
+ .i32_array => |size| return Lowering{ .i32_array = size },
+ .i64_array => |size| return Lowering{ .i64_array = size },
}
- if (classes[0] == .float_array) {
- return Lowering{ .float_array = @enumToInt(classes[1]) };
+ },
+ .riscv32, .riscv64 => {
+ it.zig_index += 1;
+ it.llvm_index += 1;
+ if (ty.tag() == .f16) {
+ return .as_u16;
}
- if (classes[1] == .none) {
- it.llvm_types_len = 1;
- } else {
- it.llvm_types_len = 2;
+ switch (riscv_c_abi.classifyType(ty, it.target)) {
+ .memory => {
+ return .byref;
+ },
+ .byval => return .byval,
+ .integer => return .abi_sized_int,
+ .double_integer => return Lowering{ .i64_array = 2 },
}
- it.llvm_types_buffer[0] = 64;
- it.llvm_types_buffer[1] = 64;
- return .multiple_llvm_ints;
},
// TODO investigate C ABI for other architectures
else => {
@@ -10475,8 +10559,16 @@ fn ccAbiPromoteInt(
};
if (int_info.bits <= 16) return int_info.signedness;
switch (target.cpu.arch) {
+ .riscv64 => {
+ if (int_info.bits == 32) {
+ // The RISC-V psABI passes 32-bit scalars sign-extended to 64 bits, even when unsigned, and LLVM follows it.
+ return .signed;
+ }
+ if (int_info.bits < 64) {
+ return int_info.signedness;
+ }
+ },
.sparc64,
- .riscv64,
.powerpc64,
.powerpc64le,
=> {
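
Illustration (not part of the commit; takes is hypothetical): effect of the riscv64 branch added to ccAbiPromoteInt above.

    extern fn takes(x: u32) void;
    // riscv64: x is promoted with sign extension even though u32 is unsigned,
    // matching the psABI; other int widths below 64 bits keep their own signedness.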