author    Alex Rønne Petersen <alex@alexrp.com>    2024-07-27 03:52:19 +0200
committer Andrew Kelley <andrew@ziglang.org>       2024-07-28 19:44:52 -0700
commit    d1d95294fd657f771657ea671a6984b860347fb0 (patch)
tree      d54c1098e7f0ec8c2b65ba7a6207bf4095c4b20e /src/codegen
parent    c15755092821c5c27727ebf416689084eab5b73e (diff)
std.Target.Cpu.Arch: Remove the `aarch64_32` tag.
This is a misfeature that we inherited from LLVM:

* https://reviews.llvm.org/D61259
* https://reviews.llvm.org/D61939

(`aarch64_32` and `arm64_32` are equivalent.)

I truly have no idea why this triple passed review in LLVM. It is, to date, the *only* tag in the architecture component that is not, in fact, an architecture. In reality, it is just an ILP32 ABI for AArch64 (*not* AArch32).

The triples that use `aarch64_32` look like `aarch64_32-apple-watchos`. Yes, that triple is exactly what you think; it has no ABI component. They really, seriously did this.

Since only Apple could come up with silliness like this, it should come as no surprise that no one else uses `aarch64_32`. Later on, a GNU ILP32 ABI for AArch64 was developed, and support was added to LLVM:

* https://reviews.llvm.org/D94143
* https://reviews.llvm.org/D104931

Here, sanity seems to have prevailed, and a triple using this ABI looks like `aarch64-linux-gnu_ilp32`, as you would expect.

As can be seen from the diffs in this commit, there was plenty of confusion throughout the Zig codebase about what exactly `aarch64_32` was. So let's just remove it. In its place, we'll use `aarch64-watchos-ilp32`, `aarch64-linux-gnuilp32`, and so on. We'll then translate these appropriately when talking to LLVM. Hence, this commit adds the `ilp32` ABI tag (we already have `gnuilp32`).
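For readers skimming the diff below, here is a minimal, self-contained sketch of the translation that `targetTriple` now performs, under the mapping shown in this patch and in the commit message. The `Arch` and `Abi` enums are trimmed-down stand-ins for `std.Target.Cpu.Arch` and `std.Target.Abi`, and `llvmArchName`/`llvmAbiName` are illustrative helpers, not functions from the codebase:

const std = @import("std");

const Arch = enum { aarch64, aarch64_be, x86_64 };
const Abi = enum { none, ilp32, gnu, gnuilp32 };

// LLVM spells the Apple ILP32 AArch64 target as the pseudo-arch
// `aarch64_32`, so the Zig-side `ilp32` ABI tag folds into the arch
// component of the LLVM triple.
fn llvmArchName(arch: Arch, abi: Abi) []const u8 {
    return switch (arch) {
        .aarch64 => if (abi == .ilp32) "aarch64_32" else "aarch64",
        .aarch64_be => "aarch64_be",
        .x86_64 => "x86_64",
    };
}

// On the ABI side, the Apple tag vanishes entirely (the watchOS triple
// carries no ABI component), while the GNU ILP32 ABI stays in the
// environment field as `gnu_ilp32`.
fn llvmAbiName(abi: Abi) []const u8 {
    return switch (abi) {
        .none, .ilp32 => "unknown",
        .gnu => "gnu",
        .gnuilp32 => "gnu_ilp32",
    };
}

test "ilp32 folds into the LLVM arch component" {
    try std.testing.expectEqualStrings("aarch64_32", llvmArchName(.aarch64, .ilp32));
    try std.testing.expectEqualStrings("aarch64", llvmArchName(.aarch64, .gnu));
    try std.testing.expectEqualStrings("unknown", llvmAbiName(.ilp32));
}

In other words, the Apple ILP32 target keeps the `aarch64_32` spelling only at the LLVM boundary, while the GNU ILP32 target uses plain `aarch64` and expresses the ABI in the environment component, matching the two upstream conventions described above.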
Diffstat (limited to 'src/codegen')
-rw-r--r-- src/codegen/llvm.zig | 17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index f83d115b18..23f423ab2c 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -43,9 +43,8 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
- .aarch64 => "aarch64",
+ .aarch64 => if (target.abi == .ilp32) "aarch64_32" else "aarch64",
.aarch64_be => "aarch64_be",
- .aarch64_32 => "aarch64_32",
.arc => "arc",
.avr => "avr",
.bpfel => "bpfel",
@@ -157,7 +156,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
try llvm_triple.append('-');
const llvm_abi = switch (target.abi) {
- .none => "unknown",
+ .none, .ilp32 => "unknown",
.gnu => "gnu",
.gnuabin32 => "gnuabin32",
.gnuabi64 => "gnuabi64",
@@ -259,7 +258,6 @@ pub fn targetArch(arch_tag: std.Target.Cpu.Arch) llvm.ArchType {
.armeb => .armeb,
.aarch64 => .aarch64,
.aarch64_be => .aarch64_be,
- .aarch64_32 => .aarch64_32,
.arc => .arc,
.avr => .avr,
.bpfel => .bpfel,
@@ -393,7 +391,6 @@ const DataLayoutBuilder = struct {
.pref = pref,
.idx = idx,
};
- if (self.target.cpu.arch == .aarch64_32) continue;
if (!info.force_in_data_layout and matches_default and
self.target.cpu.arch != .riscv64 and
self.target.cpu.arch != .loongarch64 and
@@ -483,7 +480,6 @@ const DataLayoutBuilder = struct {
=> &.{32},
.aarch64,
.aarch64_be,
- .aarch64_32,
.amdgcn,
.bpfeb,
.bpfel,
@@ -587,7 +583,6 @@ const DataLayoutBuilder = struct {
switch (self.target.cpu.arch) {
.aarch64,
.aarch64_be,
- .aarch64_32,
=> if (size == 128) {
abi = size;
pref = size;
@@ -705,7 +700,7 @@ const DataLayoutBuilder = struct {
force_pref = true;
},
.float => switch (self.target.cpu.arch) {
- .aarch64_32, .amdgcn => if (size == 128) {
+ .amdgcn => if (size == 128) {
abi = size;
pref = size;
},
@@ -10860,7 +10855,7 @@ pub const FuncGen = struct {
,
.constraints = "={rdx},{rax},0,~{cc},~{memory}",
},
- .aarch64, .aarch64_32, .aarch64_be => .{
+ .aarch64, .aarch64_be => .{
.template =
\\ror x12, x12, #3 ; ror x12, x12, #13
\\ror x12, x12, #51 ; ror x12, x12, #61
@@ -10932,7 +10927,7 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) Builder
.Fastcall => .x86_fastcallcc,
.Vectorcall => return switch (target.cpu.arch) {
.x86, .x86_64 => .x86_vectorcallcc,
- .aarch64, .aarch64_be, .aarch64_32 => .aarch64_vector_pcs,
+ .aarch64, .aarch64_be => .aarch64_vector_pcs,
else => unreachable,
},
.Thiscall => .x86_thiscallcc,
@@ -11929,7 +11924,7 @@ fn constraintAllowsRegister(constraint: []const u8) bool {
pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
switch (arch) {
- .aarch64, .aarch64_be, .aarch64_32 => {
+ .aarch64, .aarch64_be => {
llvm.LLVMInitializeAArch64Target();
llvm.LLVMInitializeAArch64TargetInfo();
llvm.LLVMInitializeAArch64TargetMC();