author    Rue <78876133+IOKG04@users.noreply.github.com>  2025-07-28 14:54:52 +0200
committer GitHub <noreply@github.com>  2025-07-28 14:54:52 +0200
commit    5381e7891dcdd7b6a9e74250cdcce221fe464cdc (patch)
tree      4c74744ed84120dccae6dc9811ce945911108a17 /lib/compiler_rt
parent    84ae54fbe64a15301317716e7f901d81585332d5 (diff)
parent    dea3ed7f59347e87a1b8fa237202873988084ae8 (diff)
download  zig-5381e7891dcdd7b6a9e74250cdcce221fe464cdc.tar.gz
          zig-5381e7891dcdd7b6a9e74250cdcce221fe464cdc.zip
Merge branch 'ziglang:master' into some-documentation-updates-0
Diffstat (limited to 'lib/compiler_rt')
-rw-r--r--  lib/compiler_rt/addo.zig              4
-rw-r--r--  lib/compiler_rt/addoti4_test.zig      3
-rw-r--r--  lib/compiler_rt/clear_cache.zig      14
-rw-r--r--  lib/compiler_rt/cmp.zig               1
-rw-r--r--  lib/compiler_rt/common.zig            7
-rw-r--r--  lib/compiler_rt/comparedf2_test.zig   1
-rw-r--r--  lib/compiler_rt/comparesf2_test.zig   1
-rw-r--r--  lib/compiler_rt/count0bits.zig        1
-rw-r--r--  lib/compiler_rt/divdf3.zig            1
-rw-r--r--  lib/compiler_rt/divmodei4.zig         4
-rw-r--r--  lib/compiler_rt/fixint_test.zig       1
-rw-r--r--  lib/compiler_rt/int.zig               1
-rw-r--r--  lib/compiler_rt/memcpy.zig            4
-rw-r--r--  lib/compiler_rt/memmove.zig          16
-rw-r--r--  lib/compiler_rt/mulf3.zig             4
-rw-r--r--  lib/compiler_rt/rem_pio2_large.zig    2
-rw-r--r--  lib/compiler_rt/stack_probe.zig       1
-rw-r--r--  lib/compiler_rt/suboti4_test.zig      3
-rw-r--r--  lib/compiler_rt/udivmod.zig          10
-rw-r--r--  lib/compiler_rt/udivmodei4.zig        5
20 files changed, 42 insertions, 42 deletions
diff --git a/lib/compiler_rt/addo.zig b/lib/compiler_rt/addo.zig
index beb6249223..610d620690 100644
--- a/lib/compiler_rt/addo.zig
+++ b/lib/compiler_rt/addo.zig
@@ -1,6 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("./common.zig");
pub const panic = @import("common.zig").panic;
@@ -16,7 +14,7 @@ comptime {
// - addoXi4_generic as default
inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
overflow.* = 0;
const sum: ST = a +% b;
// Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
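
Note (not part of the diff): the "Hackers Delight" comment above refers to the classic signed-overflow test on the wrapping sum. A minimal Zig sketch of that check, using hypothetical names, for reference only:

const std = @import("std");

// Overflow occurred iff both operands differ in sign from the wrapping sum.
fn addOverflows(comptime T: type, a: T, b: T) bool {
    const sum = a +% b; // wrapping add, mirroring addoXi4_generic
    return ((sum ^ a) & (sum ^ b)) < 0;
}

test "addOverflows sketch" {
    try std.testing.expect(addOverflows(i32, std.math.maxInt(i32), 1));
    try std.testing.expect(!addOverflows(i32, 1, 2));
}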
diff --git a/lib/compiler_rt/addoti4_test.zig b/lib/compiler_rt/addoti4_test.zig
index dc85830df9..d031d1d428 100644
--- a/lib/compiler_rt/addoti4_test.zig
+++ b/lib/compiler_rt/addoti4_test.zig
@@ -1,4 +1,5 @@
const addv = @import("addo.zig");
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@@ -23,6 +24,8 @@ fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "addoti4" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;
diff --git a/lib/compiler_rt/clear_cache.zig b/lib/compiler_rt/clear_cache.zig
index e4a0a9d00d..c43d35602c 100644
--- a/lib/compiler_rt/clear_cache.zig
+++ b/lib/compiler_rt/clear_cache.zig
@@ -97,8 +97,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
.nbytes = end - start,
.whichcache = 3, // ICACHE | DCACHE
};
- asm volatile (
- \\ syscall
+ asm volatile ("syscall"
:
: [_] "{$2}" (165), // nr = SYS_sysarch
[_] "{$4}" (0), // op = MIPS_CACHEFLUSH
@@ -116,11 +115,8 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
} else if (arm64 and !apple) {
// Get Cache Type Info.
// TODO memoize this?
- var ctr_el0: u64 = 0;
- asm volatile (
- \\mrs %[x], ctr_el0
- \\
- : [x] "=r" (ctr_el0),
+ const ctr_el0 = asm volatile ("mrs %[ctr_el0], ctr_el0"
+ : [ctr_el0] "=r" (-> u64),
);
// The DC and IC instructions must use 64-bit registers so we don't use
// uintptr_t in case this runs in an IPL32 environment.
@@ -187,9 +183,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
exportIt();
} else if (os == .linux and loongarch) {
// See: https://github.com/llvm/llvm-project/blob/cf54cae26b65fc3201eff7200ffb9b0c9e8f9a13/compiler-rt/lib/builtins/clear_cache.c#L94-L95
- asm volatile (
- \\ ibar 0
- );
+ asm volatile ("ibar 0");
exportIt();
}
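
Note (not part of the diff): the clear_cache.zig hunks switch from writing asm results into a pre-declared mutable variable to letting the asm expression itself yield the value via a `-> u64` output constraint. A minimal standalone sketch of that pattern, with a hypothetical wrapper name:

const builtin = @import("builtin");

// Read the AArch64 cache-type register as the result of the asm expression,
// instead of declaring `var ctr_el0` and writing to it from the asm body.
fn readCtrEl0() u64 {
    return asm volatile ("mrs %[ctr_el0], ctr_el0"
        : [ctr_el0] "=r" (-> u64),
    );
}

test "readCtrEl0 sketch" {
    if (builtin.cpu.arch != .aarch64) return error.SkipZigTest;
    _ = readCtrEl0();
}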
diff --git a/lib/compiler_rt/cmp.zig b/lib/compiler_rt/cmp.zig
index e1273aa622..67cb5b0938 100644
--- a/lib/compiler_rt/cmp.zig
+++ b/lib/compiler_rt/cmp.zig
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig
index f5423019f1..1160b1c718 100644
--- a/lib/compiler_rt/common.zig
+++ b/lib/compiler_rt/common.zig
@@ -102,9 +102,14 @@ pub const gnu_f16_abi = switch (builtin.cpu.arch) {
pub const want_sparc_abi = builtin.cpu.arch.isSPARC();
+pub const test_safety = switch (builtin.zig_backend) {
+ .stage2_aarch64 => false,
+ else => builtin.is_test,
+};
+
// Avoid dragging in the runtime safety mechanisms into this .o file, unless
// we're trying to test compiler-rt.
-pub const panic = if (builtin.is_test) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
+pub const panic = if (test_safety) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
/// This seems to mostly correspond to `clang::TargetInfo::HasFloat16`.
pub fn F16T(comptime OtherType: type) type {
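
Note (not part of the diff): common.zig now centralizes the "enable safety only when testing compiler-rt, and never on the stage2_aarch64 backend" decision in `test_safety`, and the remaining hunks replace direct uses of `builtin.is_test` with it. A hypothetical caller, sketching the intended usage pattern:

const common = @import("common.zig");

pub const panic = common.panic;

// Safety checks compile in only when common.test_safety is true.
fn someIntrinsic(a: i32, b: i32) i32 {
    @setRuntimeSafety(common.test_safety);
    return a +% b;
}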
diff --git a/lib/compiler_rt/comparedf2_test.zig b/lib/compiler_rt/comparedf2_test.zig
index 9444c6adf7..dbae6bbeec 100644
--- a/lib/compiler_rt/comparedf2_test.zig
+++ b/lib/compiler_rt/comparedf2_test.zig
@@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;
const __ledf2 = @import("./cmpdf2.zig").__ledf2;
diff --git a/lib/compiler_rt/comparesf2_test.zig b/lib/compiler_rt/comparesf2_test.zig
index 40b1324cfa..65e78da99e 100644
--- a/lib/compiler_rt/comparesf2_test.zig
+++ b/lib/compiler_rt/comparesf2_test.zig
@@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const __eqsf2 = @import("./cmpsf2.zig").__eqsf2;
const __lesf2 = @import("./cmpsf2.zig").__lesf2;
diff --git a/lib/compiler_rt/count0bits.zig b/lib/compiler_rt/count0bits.zig
index c9bdfb7c23..874604eb2c 100644
--- a/lib/compiler_rt/count0bits.zig
+++ b/lib/compiler_rt/count0bits.zig
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
diff --git a/lib/compiler_rt/divdf3.zig b/lib/compiler_rt/divdf3.zig
index 0340404a69..7b47cd3a70 100644
--- a/lib/compiler_rt/divdf3.zig
+++ b/lib/compiler_rt/divdf3.zig
@@ -5,7 +5,6 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
const common = @import("common.zig");
const normalize = common.normalize;
diff --git a/lib/compiler_rt/divmodei4.zig b/lib/compiler_rt/divmodei4.zig
index 3f12e8697d..ab11452206 100644
--- a/lib/compiler_rt/divmodei4.zig
+++ b/lib/compiler_rt/divmodei4.zig
@@ -34,7 +34,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
}
pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -43,7 +43,7 @@ pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) vo
}
pub fn __modei4(r_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
diff --git a/lib/compiler_rt/fixint_test.zig b/lib/compiler_rt/fixint_test.zig
index 57b4093809..198167ab86 100644
--- a/lib/compiler_rt/fixint_test.zig
+++ b/lib/compiler_rt/fixint_test.zig
@@ -1,4 +1,3 @@
-const is_test = @import("builtin").is_test;
const std = @import("std");
const math = std.math;
const testing = std.testing;
diff --git a/lib/compiler_rt/int.zig b/lib/compiler_rt/int.zig
index 4a89d0799d..16c504ee66 100644
--- a/lib/compiler_rt/int.zig
+++ b/lib/compiler_rt/int.zig
@@ -6,7 +6,6 @@ const testing = std.testing;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
const common = @import("common.zig");
const udivmod = @import("udivmod.zig").udivmod;
const __divti3 = @import("divti3.zig").__divti3;
diff --git a/lib/compiler_rt/memcpy.zig b/lib/compiler_rt/memcpy.zig
index 30971677ab..424e92954d 100644
--- a/lib/compiler_rt/memcpy.zig
+++ b/lib/compiler_rt/memcpy.zig
@@ -11,7 +11,7 @@ comptime {
.visibility = common.visibility,
};
- if (builtin.mode == .ReleaseSmall)
+ if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memcpySmall, export_options)
else
@export(&memcpyFast, export_options);
@@ -195,6 +195,8 @@ inline fn copyRange4(
}
test "memcpy" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const S = struct {
fn testFunc(comptime copy_func: anytype) !void {
const max_len = 1024;
diff --git a/lib/compiler_rt/memmove.zig b/lib/compiler_rt/memmove.zig
index 71289a50ae..46c5a631cb 100644
--- a/lib/compiler_rt/memmove.zig
+++ b/lib/compiler_rt/memmove.zig
@@ -14,7 +14,7 @@ comptime {
.visibility = common.visibility,
};
- if (builtin.mode == .ReleaseSmall)
+ if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memmoveSmall, export_options)
else
@export(&memmoveFast, export_options);
@@ -39,7 +39,7 @@ fn memmoveSmall(opt_dest: ?[*]u8, opt_src: ?[*]const u8, len: usize) callconv(.c
}
fn memmoveFast(dest: ?[*]u8, src: ?[*]u8, len: usize) callconv(.c) ?[*]u8 {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const small_limit = @max(2 * @sizeOf(Element), @sizeOf(Element));
if (copySmallLength(small_limit, dest.?, src.?, len)) return dest;
@@ -79,7 +79,7 @@ inline fn copyLessThan16(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
if (len < 4) {
if (len == 0) return;
const b = len / 2;
@@ -100,7 +100,7 @@ inline fn copy16ToSmallLimit(
src: [*]const u8,
len: usize,
) bool {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
inline for (2..(std.math.log2(small_limit) + 1) / 2 + 1) |p| {
const limit = 1 << (2 * p);
if (len < limit) {
@@ -119,7 +119,7 @@ inline fn copyRange4(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
comptime assert(std.math.isPowerOfTwo(copy_len));
assert(len >= copy_len);
assert(len < 4 * copy_len);
@@ -147,7 +147,7 @@ inline fn copyForwards(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
assert(len >= 2 * @sizeOf(Element));
const head = src[0..@sizeOf(Element)].*;
@@ -181,7 +181,7 @@ inline fn copyBlocks(
src: anytype,
max_bytes: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const T = @typeInfo(@TypeOf(dest)).pointer.child;
comptime assert(T == @typeInfo(@TypeOf(src)).pointer.child);
@@ -217,6 +217,8 @@ inline fn copyBackwards(
}
test memmoveFast {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const max_len = 1024;
var buffer: [max_len + @alignOf(Element) - 1]u8 = undefined;
for (&buffer, 0..) |*b, i| {
diff --git a/lib/compiler_rt/mulf3.zig b/lib/compiler_rt/mulf3.zig
index ad60ec41a5..34d39fb9b7 100644
--- a/lib/compiler_rt/mulf3.zig
+++ b/lib/compiler_rt/mulf3.zig
@@ -6,7 +6,7 @@ const common = @import("./common.zig");
/// Ported from:
/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
pub inline fn mulf3(comptime T: type, a: T, b: T) T {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(T).float.bits;
const significandBits = math.floatMantissaBits(T);
const fractionalBits = math.floatFractionalBits(T);
@@ -163,7 +163,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
///
/// This is analogous to an shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(Z).int.bits;
var inexact = false;
if (count < typeWidth) {
diff --git a/lib/compiler_rt/rem_pio2_large.zig b/lib/compiler_rt/rem_pio2_large.zig
index b107a0fabb..f15e0d71f6 100644
--- a/lib/compiler_rt/rem_pio2_large.zig
+++ b/lib/compiler_rt/rem_pio2_large.zig
@@ -251,7 +251,7 @@ const PIo2 = [_]f64{
/// compiler will convert from decimal to binary accurately enough
/// to produce the hexadecimal values shown.
///
-pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
+pub fn rem_pio2_large(x: []const f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
var jz: i32 = undefined;
var jx: i32 = undefined;
var jv: i32 = undefined;
diff --git a/lib/compiler_rt/stack_probe.zig b/lib/compiler_rt/stack_probe.zig
index 94212b7a23..21259ec435 100644
--- a/lib/compiler_rt/stack_probe.zig
+++ b/lib/compiler_rt/stack_probe.zig
@@ -4,7 +4,6 @@ const common = @import("common.zig");
const os_tag = builtin.os.tag;
const arch = builtin.cpu.arch;
const abi = builtin.abi;
-const is_test = builtin.is_test;
pub const panic = common.panic;
diff --git a/lib/compiler_rt/suboti4_test.zig b/lib/compiler_rt/suboti4_test.zig
index 68ad0ff72f..65018bc966 100644
--- a/lib/compiler_rt/suboti4_test.zig
+++ b/lib/compiler_rt/suboti4_test.zig
@@ -1,4 +1,5 @@
const subo = @import("subo.zig");
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@@ -27,6 +28,8 @@ pub fn simple_suboti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "suboti3" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;
diff --git a/lib/compiler_rt/udivmod.zig b/lib/compiler_rt/udivmod.zig
index a9705f317d..bf6aaadeae 100644
--- a/lib/compiler_rt/udivmod.zig
+++ b/lib/compiler_rt/udivmod.zig
@@ -1,8 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const Log2Int = std.math.Log2Int;
-const HalveInt = @import("common.zig").HalveInt;
+const common = @import("common.zig");
+const HalveInt = common.HalveInt;
const lo = switch (builtin.cpu.arch.endian()) {
.big => 1,
@@ -14,7 +14,7 @@ const hi = 1 - lo;
// Returns U / v_ and sets r = U % v_.
fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
const HalfT = HalveInt(T, false).HalfT;
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
var v = v_;
const b = @as(T, 1) << (@bitSizeOf(T) / 2);
@@ -70,7 +70,7 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
}
fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
if (T == u64 and builtin.target.cpu.arch == .x86_64 and builtin.target.os.tag != .windows) {
var rem: T = undefined;
const quo = asm (
@@ -90,7 +90,7 @@ fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
// Returns a_ / b_ and sets maybe_rem = a_ % b.
pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
const HalfT = HalveInt(T, false).HalfT;
const SignedT = std.meta.Int(.signed, @bitSizeOf(T));
diff --git a/lib/compiler_rt/udivmodei4.zig b/lib/compiler_rt/udivmodei4.zig
index 6d6f6c1b65..0923f3f222 100644
--- a/lib/compiler_rt/udivmodei4.zig
+++ b/lib/compiler_rt/udivmodei4.zig
@@ -113,7 +113,7 @@ pub fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
}
pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -122,7 +122,7 @@ pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -131,6 +131,7 @@ pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
test "__udivei4/__umodei4" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;