author    Andrew Kelley <andrew@ziglang.org>    2022-07-13 19:15:19 -0400
committer GitHub <noreply@github.com>           2022-07-13 19:15:19 -0400
commit    1653a9b2597c66cbcc88ea75d8a4b88c163584a5 (patch)
tree      9cbe5d66e5088006ac4b5d5b4861a3b8b25a7a54 /src
parent    fad95741db7529bbad873fb330c25d64ac765340 (diff)
parent    92bc3cbe27792be0300fb5f104c011a11f3cf40f (diff)
Merge pull request #12098 from ziglang/llvm-riscv64
LLVM: implement signext/zeroext attributes
Diffstat (limited to 'src')
-rw-r--r--  src/Sema.zig          42
-rw-r--r--  src/codegen/llvm.zig  65
-rw-r--r--  src/type.zig          10
-rw-r--r--  src/value.zig         17
4 files changed, 123 insertions(+), 11 deletions(-)
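
Note: this merge teaches the LLVM backend to emit the `signext`/`zeroext` parameter and return attributes that C ABIs such as riscv64's require for integer values narrower than a register. As a rough sketch of the kind of declaration affected (the extern symbol below is illustrative, not part of this change):

    // With the C calling convention on riscv64, `raw` is expected to arrive
    // zero-extended and the i16 result to be returned sign-extended, so the
    // backend now lowers this to roughly:
    //   declare signext i16 @clamp_sample(i8 zeroext)
    extern fn clamp_sample(raw: u8) i16;
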
diff --git a/src/Sema.zig b/src/Sema.zig
index b139c3f89e..29840820d0 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -22571,6 +22571,48 @@ fn bitCastVal(
const target = sema.mod.getTarget();
if (old_ty.eql(new_ty, sema.mod)) return val;
+ // Some conversions have a bitwise definition that ignores in-memory layout,
+ // such as converting between f80 and u80.
+
+ if (old_ty.eql(Type.f80, sema.mod) and new_ty.isAbiInt()) {
+ const float = val.toFloat(f80);
+ switch (new_ty.intInfo(target).signedness) {
+ .signed => {
+ const int = @bitCast(i80, float);
+ const limbs = try sema.arena.alloc(std.math.big.Limb, 2);
+ const big_int = std.math.big.int.Mutable.init(limbs, int);
+ return Value.fromBigInt(sema.arena, big_int.toConst());
+ },
+ .unsigned => {
+ const int = @bitCast(u80, float);
+ const limbs = try sema.arena.alloc(std.math.big.Limb, 2);
+ const big_int = std.math.big.int.Mutable.init(limbs, int);
+ return Value.fromBigInt(sema.arena, big_int.toConst());
+ },
+ }
+ }
+
+ if (new_ty.eql(Type.f80, sema.mod) and old_ty.isAbiInt()) {
+ var bigint_space: Value.BigIntSpace = undefined;
+ var bigint = try val.toBigIntAdvanced(&bigint_space, target, sema.kit(block, src));
+ switch (old_ty.intInfo(target).signedness) {
+ .signed => {
+ // This conversion cannot fail because we already checked bit size before
+ // calling bitCastVal.
+ const int = bigint.to(i80) catch unreachable;
+ const float = @bitCast(f80, int);
+ return Value.Tag.float_80.create(sema.arena, float);
+ },
+ .unsigned => {
+ // This conversion cannot fail because we already checked bit size before
+ // calling bitCastVal.
+ const int = bigint.to(u80) catch unreachable;
+ const float = @bitCast(f80, int);
+ return Value.Tag.float_80.create(sema.arena, float);
+ },
+ }
+ }
+
// For types with well-defined memory layouts, we serialize them to a byte buffer,
// then deserialize to the new type.
const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
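
The new Sema branches above rely on `f80` and `u80`/`i80` sharing an 80-bit representation, so a comptime value can be converted bitwise without round-tripping through the byte buffer. A minimal sketch of that relationship, using the same two-argument `@bitCast` form the diff uses (the test itself is illustrative):

    const std = @import("std");

    test "f80 round-trips through its u80 bit pattern" {
        const x: f80 = 1.5;
        const bits = @bitCast(u80, x); // reinterpret the 80 bits as an unsigned integer
        const back = @bitCast(f80, bits); // and back again, losslessly
        try std.testing.expectEqual(x, back);
    }
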
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 8857c96bc1..fe35620d38 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -717,6 +717,11 @@ pub const Object = struct {
const ret_ptr = if (sret) llvm_func.getParam(0) else null;
const gpa = dg.gpa;
+ if (ccAbiPromoteInt(fn_info.cc, target, fn_info.return_type)) |s| switch (s) {
+ .signed => dg.addAttr(llvm_func, 0, "signext"),
+ .unsigned => dg.addAttr(llvm_func, 0, "zeroext"),
+ };
+
const err_return_tracing = fn_info.return_type.isError() and
dg.module.comp.bin_file.options.error_return_tracing;
@@ -774,7 +779,10 @@ pub const Object = struct {
);
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
}
- }
+ } else if (ccAbiPromoteInt(fn_info.cc, target, param_ty)) |s| switch (s) {
+ .signed => dg.addArgAttr(llvm_func, llvm_arg_i, "signext"),
+ .unsigned => dg.addArgAttr(llvm_func, llvm_arg_i, "zeroext"),
+ };
}
llvm_arg_i += 1;
},
@@ -887,6 +895,13 @@ pub const Object = struct {
};
try args.append(loaded);
},
+ .as_u16 => {
+ const param = llvm_func.getParam(llvm_arg_i);
+ llvm_arg_i += 1;
+ const casted = builder.buildBitCast(param, dg.context.halfType(), "");
+ try args.ensureUnusedCapacity(1);
+ args.appendAssumeCapacity(casted);
+ },
};
}
@@ -2794,6 +2809,9 @@ pub const DeclGen = struct {
llvm_params.appendAssumeCapacity(big_int_ty);
}
},
+ .as_u16 => {
+ try llvm_params.append(dg.context.intType(16));
+ },
};
return llvm.functionType(
@@ -4234,6 +4252,12 @@ pub const FuncGen = struct {
llvm_args.appendAssumeCapacity(load_inst);
}
},
+ .as_u16 => {
+ const arg = args[it.zig_index - 1];
+ const llvm_arg = try self.resolveInst(arg);
+ const casted = self.builder.buildBitCast(llvm_arg, self.dg.context.intType(16), "");
+ try llvm_args.append(casted);
+ },
};
const call = self.builder.buildCall(
@@ -8965,6 +8989,7 @@ const ParamTypeIterator = struct {
abi_sized_int,
multiple_llvm_ints,
slice,
+ as_u16,
};
pub fn next(it: *ParamTypeIterator) ?Lowering {
@@ -9025,6 +9050,15 @@ const ParamTypeIterator = struct {
else => false,
};
switch (it.target.cpu.arch) {
+ .riscv32, .riscv64 => {
+ it.zig_index += 1;
+ it.llvm_index += 1;
+ if (ty.tag() == .f16) {
+ return .as_u16;
+ } else {
+ return .byval;
+ }
+ },
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
@@ -9135,6 +9169,35 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
};
}
+fn ccAbiPromoteInt(
+ cc: std.builtin.CallingConvention,
+ target: std.Target,
+ ty: Type,
+) ?std.builtin.Signedness {
+ switch (cc) {
+ .Unspecified, .Inline, .Async => return null,
+ else => {},
+ }
+ const int_info = switch (ty.zigTypeTag()) {
+ .Int, .Enum, .ErrorSet => ty.intInfo(target),
+ else => return null,
+ };
+ if (int_info.bits <= 16) return int_info.signedness;
+ switch (target.cpu.arch) {
+ .sparc64,
+ .riscv64,
+ .powerpc64,
+ .powerpc64le,
+ => {
+ if (int_info.bits < 64) {
+ return int_info.signedness;
+ }
+ },
+ else => {},
+ }
+ return null;
+}
+
fn isByRef(ty: Type) bool {
// For tuples and structs, if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
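
`ccAbiPromoteInt` above is the rule the new attributes hang off: for any non-Zig calling convention, integer-like values of 16 bits or fewer are always extended, and on the listed 64-bit targets anything narrower than 64 bits is too. A standalone sketch of that decision table (a simplified stand-in, not the compiler's own code):

    const std = @import("std");

    // Simplified stand-in for the promotion rule ccAbiPromoteInt encodes.
    fn promoteInt(
        arch: std.Target.Cpu.Arch,
        bits: u16,
        signedness: std.builtin.Signedness,
    ) ?std.builtin.Signedness {
        if (bits <= 16) return signedness;
        return switch (arch) {
            .sparc64, .riscv64, .powerpc64, .powerpc64le => if (bits < 64) signedness else null,
            else => null,
        };
    }

    test "u8 promotes everywhere, u32 only on the 64-bit promoting targets" {
        try std.testing.expect(promoteInt(.x86_64, 8, .unsigned) != null);
        try std.testing.expect(promoteInt(.x86_64, 32, .unsigned) == null);
        try std.testing.expect(promoteInt(.riscv64, 32, .unsigned) != null);
    }
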
diff --git a/src/type.zig b/src/type.zig
index 765f1da18c..0744a50579 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4439,6 +4439,16 @@ pub const Type = extern union {
};
}
+ /// Returns true for integers, enums, error sets, and packed structs.
+ /// If this function returns true, then intInfo() can be called on the type.
+ pub fn isAbiInt(ty: Type) bool {
+ return switch (ty.zigTypeTag()) {
+ .Int, .Enum, .ErrorSet => true,
+ .Struct => ty.containerLayout() == .Packed,
+ else => false,
+ };
+ }
+
/// Asserts the type is an integer, enum, error set, or vector of one of them.
pub fn intInfo(self: Type, target: Target) struct { signedness: std.builtin.Signedness, bits: u16 } {
var ty = self;
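
`isAbiInt` widens the bitcast fast path to anything with an integer ABI, including packed structs, whose bit pattern is equivalent to a backing integer of the same width. A small illustrative check of that equivalence (the struct here is made up for the example):

    const std = @import("std");

    test "a packed struct bit-casts to and from an integer of the same width" {
        const Flags = packed struct { a: bool, b: bool, pad: u6 }; // 8 bits total
        const f = Flags{ .a = true, .b = false, .pad = 0 };
        const bits = @bitCast(u8, f);
        const back = @bitCast(Flags, bits);
        try std.testing.expect(back.a and !back.b);
    }
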
diff --git a/src/value.zig b/src/value.zig
index 04999c778a..b52e67e31c 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1468,8 +1468,7 @@ pub const Value = extern union {
const repr = std.math.break_f80(f);
std.mem.writeInt(u64, buffer[0..8], repr.fraction, endian);
std.mem.writeInt(u16, buffer[8..10], repr.exp, endian);
- // TODO set the rest of the bytes to undefined. should we use 0xaa
- // or is there a different way?
+ std.mem.set(u8, buffer[10..], 0);
return;
}
const Int = @Type(.{ .Int = .{
@@ -1481,20 +1480,18 @@ pub const Value = extern union {
}
fn floatReadFromMemory(comptime F: type, target: Target, buffer: []const u8) F {
+ const endian = target.cpu.arch.endian();
if (F == f80) {
- switch (target.cpu.arch) {
- .i386, .x86_64 => return std.math.make_f80(.{
- .fraction = std.mem.readIntLittle(u64, buffer[0..8]),
- .exp = std.mem.readIntLittle(u16, buffer[8..10]),
- }),
- else => {},
- }
+ return std.math.make_f80(.{
+ .fraction = readInt(u64, buffer[0..8], endian),
+ .exp = readInt(u16, buffer[8..10], endian),
+ });
}
const Int = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(F).Float.bits,
} });
- const int = readInt(Int, buffer[0..@sizeOf(Int)], target.cpu.arch.endian());
+ const int = readInt(Int, buffer[0..@sizeOf(Int)], endian);
return @bitCast(F, int);
}
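
The value.zig change replaces the x86-only f80 path with an endian-aware one and zeroes the padding bytes instead of leaving them undefined. A minimal round-trip sketch using the same `std.math.break_f80`/`make_f80` helpers this code relies on (the 16-byte buffer is just an assumed ABI-sized slot for illustration):

    const std = @import("std");

    test "f80 survives the fraction/exp encoding used here" {
        const x: f80 = 3.25;
        const repr = std.math.break_f80(x); // { fraction: u64, exp: u16 }
        var buffer = [_]u8{0xaa} ** 16;
        std.mem.writeIntLittle(u64, buffer[0..8], repr.fraction);
        std.mem.writeIntLittle(u16, buffer[8..10], repr.exp);
        std.mem.set(u8, buffer[10..], 0); // padding bytes are now zeroed, per this change
        const back = std.math.make_f80(.{
            .fraction = std.mem.readIntLittle(u64, buffer[0..8]),
            .exp = std.mem.readIntLittle(u16, buffer[8..10]),
        });
        try std.testing.expectEqual(x, back);
    }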