author     Andrew Kelley <andrew@ziglang.org>    2022-01-03 16:50:29 -0500
committer  GitHub <noreply@github.com>           2022-01-03 16:50:29 -0500
commit     81fa31c05456facea1d1963a1e7f665351fb248d (patch)
tree       8ea6dba0c5b8ec9e8ba9cdff48189da68c198dfd /lib/std
parent     850b053ea6b7d6f0f5e0e8dbcf37080ca012024f (diff)
parent     d94303be2bcee33e7efba22a186fd06eaa809707 (diff)
Merge pull request #10451 from ziglang/cache-mode
stage2: introduce CacheMode
Diffstat (limited to 'lib/std')
-rw-r--r--  lib/std/fs.zig                                |    2
-rw-r--r--  lib/std/fs/watch.zig                          |    2
-rw-r--r--  lib/std/os.zig                                |   11
-rw-r--r--  lib/std/os/windows.zig                        |   23
-rw-r--r--  lib/std/special/compiler_rt.zig               | 1050
-rw-r--r--  lib/std/special/compiler_rt/absv.zig          |   53
-rw-r--r--  lib/std/special/compiler_rt/atomics.zig       |  468
-rw-r--r--  lib/std/special/compiler_rt/bswap.zig         |  110
-rw-r--r--  lib/std/special/compiler_rt/cmp.zig           |   56
-rw-r--r--  lib/std/special/compiler_rt/count0bits.zig    |  238
-rw-r--r--  lib/std/special/compiler_rt/divdf3.zig        |    2
-rw-r--r--  lib/std/special/compiler_rt/divsf3.zig        |    2
-rw-r--r--  lib/std/special/compiler_rt/divtf3.zig        |    2
-rw-r--r--  lib/std/special/compiler_rt/fixuint.zig       |    2
-rw-r--r--  lib/std/special/compiler_rt/floatXisf.zig     |   11
-rw-r--r--  lib/std/special/compiler_rt/floatsiXf.zig     |   12
-rw-r--r--  lib/std/special/compiler_rt/floatundisf.zig   |    9
-rw-r--r--  lib/std/special/compiler_rt/floatunsidf.zig   |    9
-rw-r--r--  lib/std/special/compiler_rt/floatunsisf.zig   |    9
-rw-r--r--  lib/std/special/compiler_rt/mulXf3.zig        |    2
-rw-r--r--  lib/std/special/compiler_rt/negXi2.zig        |   24
-rw-r--r--  lib/std/special/compiler_rt/negv.zig          |   43
-rw-r--r--  lib/std/special/compiler_rt/parity.zig        |   48
-rw-r--r--  lib/std/special/compiler_rt/popcount.zig      |   52
-rw-r--r--  lib/std/special/compiler_rt/shift.zig         |   24
25 files changed, 1206 insertions(+), 1058 deletions(-)
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 6e1821178f..a09f9f2ed2 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -1361,7 +1361,7 @@ pub const Dir = struct {
.share_access = share_access,
.creation = creation,
.io_mode = .blocking,
- .open_dir = true,
+ .filter = .dir_only,
}) catch |er| switch (er) {
error.WouldBlock => unreachable,
else => |e2| return e2,
diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig
index c103925bdd..e2ec8b8061 100644
--- a/lib/std/fs/watch.zig
+++ b/lib/std/fs/watch.zig
@@ -401,7 +401,7 @@ pub fn Watch(comptime V: type) type {
.access_mask = windows.FILE_LIST_DIRECTORY,
.creation = windows.FILE_OPEN,
.io_mode = .evented,
- .open_dir = true,
+ .filter = .dir_only,
});
errdefer windows.CloseHandle(dir_handle);
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 1728c2ac0d..7be8825fcc 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -1353,7 +1353,7 @@ fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions {
access_mask |= w.GENERIC_READ | w.GENERIC_WRITE;
}
- const open_dir: bool = flags & O.DIRECTORY != 0;
+ const filter: windows.OpenFileOptions.Filter = if (flags & O.DIRECTORY != 0) .dir_only else .file_only;
const follow_symlinks: bool = flags & O.NOFOLLOW == 0;
const creation: w.ULONG = blk: {
@@ -1369,7 +1369,7 @@ fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions {
.access_mask = access_mask,
.io_mode = .blocking,
.creation = creation,
- .open_dir = open_dir,
+ .filter = filter,
.follow_symlinks = follow_symlinks,
};
}
@@ -2324,6 +2324,7 @@ pub fn renameatW(
.access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE,
.creation = windows.FILE_OPEN,
.io_mode = .blocking,
+ .filter = .any, // This function is supposed to rename both files and directories.
}) catch |err| switch (err) {
error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`.
else => |e| return e,
@@ -2435,7 +2436,7 @@ pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!v
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.creation = windows.FILE_CREATE,
.io_mode = .blocking,
- .open_dir = true,
+ .filter = .dir_only,
}) catch |err| switch (err) {
error.IsDir => unreachable,
error.PipeBusy => unreachable,
@@ -2511,7 +2512,7 @@ pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.creation = windows.FILE_CREATE,
.io_mode = .blocking,
- .open_dir = true,
+ .filter = .dir_only,
}) catch |err| switch (err) {
error.IsDir => unreachable,
error.PipeBusy => unreachable,
@@ -4693,7 +4694,7 @@ pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPat
.share_access = share_access,
.creation = creation,
.io_mode = .blocking,
- .open_dir = true,
+ .filter = .dir_only,
}) catch |er| switch (er) {
error.WouldBlock => unreachable,
else => |e2| return e2,
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 59e65ed54c..0d9907893c 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -53,17 +53,26 @@ pub const OpenFileOptions = struct {
io_mode: std.io.ModeOverride,
/// If true, tries to open path as a directory.
/// Defaults to false.
- open_dir: bool = false,
+ filter: Filter = .file_only,
/// If false, tries to open path as a reparse point without dereferencing it.
/// Defaults to true.
follow_symlinks: bool = true,
+
+ pub const Filter = enum {
+ /// Causes `OpenFile` to return `error.IsDir` if the opened handle would be a directory.
+ file_only,
+ /// Causes `OpenFile` to return `error.NotDir` if the opened handle would be a file.
+ dir_only,
+ /// `OpenFile` does not discriminate between opening files and directories.
+ any,
+ };
};
pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HANDLE {
- if (mem.eql(u16, sub_path_w, &[_]u16{'.'}) and !options.open_dir) {
+ if (mem.eql(u16, sub_path_w, &[_]u16{'.'}) and options.filter == .file_only) {
return error.IsDir;
}
- if (mem.eql(u16, sub_path_w, &[_]u16{ '.', '.' }) and !options.open_dir) {
+ if (mem.eql(u16, sub_path_w, &[_]u16{ '.', '.' }) and options.filter == .file_only) {
return error.IsDir;
}
@@ -87,7 +96,11 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
};
var io: IO_STATUS_BLOCK = undefined;
const blocking_flag: ULONG = if (options.io_mode == .blocking) FILE_SYNCHRONOUS_IO_NONALERT else 0;
- const file_or_dir_flag: ULONG = if (options.open_dir) FILE_DIRECTORY_FILE else FILE_NON_DIRECTORY_FILE;
+ const file_or_dir_flag: ULONG = switch (options.filter) {
+ .file_only => FILE_NON_DIRECTORY_FILE,
+ .dir_only => FILE_DIRECTORY_FILE,
+ .any => 0,
+ };
// If we're not following symlinks, we need to ensure we don't pass in any synchronization flags such as FILE_SYNCHRONOUS_IO_NONALERT.
const flags: ULONG = if (options.follow_symlinks) file_or_dir_flag | blocking_flag else file_or_dir_flag | FILE_OPEN_REPARSE_POINT;
@@ -695,7 +708,7 @@ pub fn CreateSymbolicLink(
.dir = dir,
.creation = FILE_CREATE,
.io_mode = .blocking,
- .open_dir = is_directory,
+ .filter = if (is_directory) .dir_only else .file_only,
}) catch |err| switch (err) {
error.IsDir => return error.PathAlreadyExists,
error.NotDir => unreachable,
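
The windows.zig hunk above replaces the boolean open_dir option with the three-state OpenFileOptions.Filter, so call sites can ask for a file (.file_only, the default), a directory (.dir_only), or either kind of handle (.any, which renameatW now uses). The sketch below is a minimal standalone rendering of that mapping: the enum mirrors the diff, the two NT create-option constants carry their usual Windows SDK values, and createOptionFlag is a made-up helper standing in for the switch added to OpenFile.

    const std = @import("std");

    // Same three states as windows.OpenFileOptions.Filter above.
    const Filter = enum { file_only, dir_only, any };

    // NT create-option flags; stand-ins for the constants OpenFile takes
    // from std.os.windows (values as defined by the Windows SDK).
    const FILE_DIRECTORY_FILE: u32 = 0x00000001;
    const FILE_NON_DIRECTORY_FILE: u32 = 0x00000040;

    // Hypothetical helper mirroring the switch added to OpenFile.
    fn createOptionFlag(filter: Filter) u32 {
        return switch (filter) {
            .file_only => FILE_NON_DIRECTORY_FILE, // directories are rejected (error.IsDir)
            .dir_only => FILE_DIRECTORY_FILE, // plain files are rejected (error.NotDir)
            .any => 0, // no constraint, as renameatW needs
        };
    }

    test "filter maps to an NT create option" {
        try std.testing.expectEqual(@as(u32, 0), createOptionFlag(.any));
        try std.testing.expectEqual(FILE_DIRECTORY_FILE, createOptionFlag(.dir_only));
        try std.testing.expectEqual(FILE_NON_DIRECTORY_FILE, createOptionFlag(.file_only));
    }
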
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index 297fd1cb39..7801772647 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -22,6 +22,12 @@ else
const long_double_is_f128 = builtin.target.longDoubleIsF128();
comptime {
+ // These files do their own comptime exporting logic.
+ if (!builtin.zig_is_stage2) {
+ _ = @import("compiler_rt/atomics.zig");
+ }
+ _ = @import("compiler_rt/clear_cache.zig").clear_cache;
+
const __extenddftf2 = @import("compiler_rt/extendXfYf2.zig").__extenddftf2;
@export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = linkage });
const __extendsftf2 = @import("compiler_rt/extendXfYf2.zig").__extendsftf2;
@@ -171,16 +177,16 @@ comptime {
const __truncdfsf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfsf2;
@export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage });
- if (!builtin.zig_is_stage2) {
- if (!long_double_is_f128) {
- // TODO implement these
- //const __extendxftf2 = @import("compiler_rt/extendXfYf2.zig").__extendxftf2;
- //@export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = linkage });
+ if (!long_double_is_f128) {
+ // TODO implement these
+ //const __extendxftf2 = @import("compiler_rt/extendXfYf2.zig").__extendxftf2;
+ //@export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = linkage });
- //const __trunctfxf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfxf2;
- //@export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = linkage });
- }
+ //const __trunctfxf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfxf2;
+ //@export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = linkage });
+ }
+ if (!builtin.zig_is_stage2) {
switch (arch) {
.i386,
.x86_64,
@@ -193,531 +199,533 @@ comptime {
},
else => {},
}
+ }
- // __clear_cache manages its own logic about whether to be exported or not.
- _ = @import("compiler_rt/clear_cache.zig").clear_cache;
-
- const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2;
- @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage });
- const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2;
- @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage });
- const __unordtf2 = @import("compiler_rt/compareXf2.zig").__unordtf2;
- @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage });
-
- const __addsf3 = @import("compiler_rt/addXf3.zig").__addsf3;
- @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage });
- const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3;
- @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
- const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
- @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
- const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3;
- @export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
- const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3;
- @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
- const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
- @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
-
- const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3;
- @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
- const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3;
- @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage });
- const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3;
- @export(__multf3, .{ .name = "__multf3", .linkage = linkage });
-
- const __divsf3 = @import("compiler_rt/divsf3.zig").__divsf3;
- @export(__divsf3, .{ .name = "__divsf3", .linkage = linkage });
- const __divdf3 = @import("compiler_rt/divdf3.zig").__divdf3;
- @export(__divdf3, .{ .name = "__divdf3", .linkage = linkage });
- const __divtf3 = @import("compiler_rt/divtf3.zig").__divtf3;
- @export(__divtf3, .{ .name = "__divtf3", .linkage = linkage });
-
- // Integral bit manipulation
- const __ashldi3 = @import("compiler_rt/shift.zig").__ashldi3;
- @export(__ashldi3, .{ .name = "__ashldi3", .linkage = linkage });
- const __ashlti3 = @import("compiler_rt/shift.zig").__ashlti3;
- @export(__ashlti3, .{ .name = "__ashlti3", .linkage = linkage });
- const __ashrdi3 = @import("compiler_rt/shift.zig").__ashrdi3;
- @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = linkage });
- const __ashrti3 = @import("compiler_rt/shift.zig").__ashrti3;
- @export(__ashrti3, .{ .name = "__ashrti3", .linkage = linkage });
- const __lshrdi3 = @import("compiler_rt/shift.zig").__lshrdi3;
- @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = linkage });
- const __lshrti3 = @import("compiler_rt/shift.zig").__lshrti3;
- @export(__lshrti3, .{ .name = "__lshrti3", .linkage = linkage });
-
- const __clzsi2 = @import("compiler_rt/count0bits.zig").__clzsi2;
- @export(__clzsi2, .{ .name = "__clzsi2", .linkage = linkage });
- const __clzdi2 = @import("compiler_rt/count0bits.zig").__clzdi2;
- @export(__clzdi2, .{ .name = "__clzdi2", .linkage = linkage });
- const __clzti2 = @import("compiler_rt/count0bits.zig").__clzti2;
- @export(__clzti2, .{ .name = "__clzti2", .linkage = linkage });
- const __ctzsi2 = @import("compiler_rt/count0bits.zig").__ctzsi2;
- @export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = linkage });
- const __ctzdi2 = @import("compiler_rt/count0bits.zig").__ctzdi2;
- @export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = linkage });
- const __ctzti2 = @import("compiler_rt/count0bits.zig").__ctzti2;
- @export(__ctzti2, .{ .name = "__ctzti2", .linkage = linkage });
- const __ffssi2 = @import("compiler_rt/count0bits.zig").__ffssi2;
- @export(__ffssi2, .{ .name = "__ffssi2", .linkage = linkage });
- const __ffsdi2 = @import("compiler_rt/count0bits.zig").__ffsdi2;
- @export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = linkage });
- const __ffsti2 = @import("compiler_rt/count0bits.zig").__ffsti2;
- @export(__ffsti2, .{ .name = "__ffsti2", .linkage = linkage });
-
- const __paritysi2 = @import("compiler_rt/parity.zig").__paritysi2;
- @export(__paritysi2, .{ .name = "__paritysi2", .linkage = linkage });
- const __paritydi2 = @import("compiler_rt/parity.zig").__paritydi2;
- @export(__paritydi2, .{ .name = "__paritydi2", .linkage = linkage });
- const __parityti2 = @import("compiler_rt/parity.zig").__parityti2;
- @export(__parityti2, .{ .name = "__parityti2", .linkage = linkage });
- const __popcountsi2 = @import("compiler_rt/popcount.zig").__popcountsi2;
- @export(__popcountsi2, .{ .name = "__popcountsi2", .linkage = linkage });
- const __popcountdi2 = @import("compiler_rt/popcount.zig").__popcountdi2;
- @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = linkage });
- const __popcountti2 = @import("compiler_rt/popcount.zig").__popcountti2;
- @export(__popcountti2, .{ .name = "__popcountti2", .linkage = linkage });
- const __bswapsi2 = @import("compiler_rt/bswap.zig").__bswapsi2;
- @export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = linkage });
- const __bswapdi2 = @import("compiler_rt/bswap.zig").__bswapdi2;
- @export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = linkage });
- const __bswapti2 = @import("compiler_rt/bswap.zig").__bswapti2;
- @export(__bswapti2, .{ .name = "__bswapti2", .linkage = linkage });
-
- // Integral / floating point conversion (part 1/2)
- const __floatsidf = @import("compiler_rt/floatsiXf.zig").__floatsidf;
- @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage });
- const __floatsisf = @import("compiler_rt/floatsiXf.zig").__floatsisf;
- @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage });
- const __floatdidf = @import("compiler_rt/floatdidf.zig").__floatdidf;
- @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage });
- const __floatsitf = @import("compiler_rt/floatsiXf.zig").__floatsitf;
- @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage });
-
- const __floatunsisf = @import("compiler_rt/floatunsisf.zig").__floatunsisf;
- @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage });
+ const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2;
+ @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage });
+ const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2;
+ @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage });
+ const __unordtf2 = @import("compiler_rt/compareXf2.zig").__unordtf2;
+ @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage });
+
+ const __addsf3 = @import("compiler_rt/addXf3.zig").__addsf3;
+ @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage });
+ const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3;
+ @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
+ const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
+ @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
+ const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3;
+ @export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
+ const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3;
+ @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
+ const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
+ @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
+
+ const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3;
+ @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
+ const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3;
+ @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage });
+ const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3;
+ @export(__multf3, .{ .name = "__multf3", .linkage = linkage });
+
+ const __divsf3 = @import("compiler_rt/divsf3.zig").__divsf3;
+ @export(__divsf3, .{ .name = "__divsf3", .linkage = linkage });
+ const __divdf3 = @import("compiler_rt/divdf3.zig").__divdf3;
+ @export(__divdf3, .{ .name = "__divdf3", .linkage = linkage });
+ const __divtf3 = @import("compiler_rt/divtf3.zig").__divtf3;
+ @export(__divtf3, .{ .name = "__divtf3", .linkage = linkage });
+
+ // Integral bit manipulation
+ const __ashldi3 = @import("compiler_rt/shift.zig").__ashldi3;
+ @export(__ashldi3, .{ .name = "__ashldi3", .linkage = linkage });
+ const __ashlti3 = @import("compiler_rt/shift.zig").__ashlti3;
+ @export(__ashlti3, .{ .name = "__ashlti3", .linkage = linkage });
+ const __ashrdi3 = @import("compiler_rt/shift.zig").__ashrdi3;
+ @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = linkage });
+ const __ashrti3 = @import("compiler_rt/shift.zig").__ashrti3;
+ @export(__ashrti3, .{ .name = "__ashrti3", .linkage = linkage });
+ const __lshrdi3 = @import("compiler_rt/shift.zig").__lshrdi3;
+ @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = linkage });
+ const __lshrti3 = @import("compiler_rt/shift.zig").__lshrti3;
+ @export(__lshrti3, .{ .name = "__lshrti3", .linkage = linkage });
+
+ const __clzsi2 = @import("compiler_rt/count0bits.zig").__clzsi2;
+ @export(__clzsi2, .{ .name = "__clzsi2", .linkage = linkage });
+ const __clzdi2 = @import("compiler_rt/count0bits.zig").__clzdi2;
+ @export(__clzdi2, .{ .name = "__clzdi2", .linkage = linkage });
+ const __clzti2 = @import("compiler_rt/count0bits.zig").__clzti2;
+ @export(__clzti2, .{ .name = "__clzti2", .linkage = linkage });
+ const __ctzsi2 = @import("compiler_rt/count0bits.zig").__ctzsi2;
+ @export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = linkage });
+ const __ctzdi2 = @import("compiler_rt/count0bits.zig").__ctzdi2;
+ @export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = linkage });
+ const __ctzti2 = @import("compiler_rt/count0bits.zig").__ctzti2;
+ @export(__ctzti2, .{ .name = "__ctzti2", .linkage = linkage });
+ const __ffssi2 = @import("compiler_rt/count0bits.zig").__ffssi2;
+ @export(__ffssi2, .{ .name = "__ffssi2", .linkage = linkage });
+ const __ffsdi2 = @import("compiler_rt/count0bits.zig").__ffsdi2;
+ @export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = linkage });
+ const __ffsti2 = @import("compiler_rt/count0bits.zig").__ffsti2;
+ @export(__ffsti2, .{ .name = "__ffsti2", .linkage = linkage });
+
+ const __paritysi2 = @import("compiler_rt/parity.zig").__paritysi2;
+ @export(__paritysi2, .{ .name = "__paritysi2", .linkage = linkage });
+ const __paritydi2 = @import("compiler_rt/parity.zig").__paritydi2;
+ @export(__paritydi2, .{ .name = "__paritydi2", .linkage = linkage });
+ const __parityti2 = @import("compiler_rt/parity.zig").__parityti2;
+ @export(__parityti2, .{ .name = "__parityti2", .linkage = linkage });
+
+ const __popcountsi2 = @import("compiler_rt/popcount.zig").__popcountsi2;
+ @export(__popcountsi2, .{ .name = "__popcountsi2", .linkage = linkage });
+ const __popcountdi2 = @import("compiler_rt/popcount.zig").__popcountdi2;
+ @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = linkage });
+ const __popcountti2 = @import("compiler_rt/popcount.zig").__popcountti2;
+ @export(__popcountti2, .{ .name = "__popcountti2", .linkage = linkage });
+
+ const __bswapsi2 = @import("compiler_rt/bswap.zig").__bswapsi2;
+ @export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = linkage });
+ const __bswapdi2 = @import("compiler_rt/bswap.zig").__bswapdi2;
+ @export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = linkage });
+ const __bswapti2 = @import("compiler_rt/bswap.zig").__bswapti2;
+ @export(__bswapti2, .{ .name = "__bswapti2", .linkage = linkage });
+
+ // Integral / floating point conversion (part 1/2)
+ const __floatsidf = @import("compiler_rt/floatsiXf.zig").__floatsidf;
+ @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage });
+ const __floatsisf = @import("compiler_rt/floatsiXf.zig").__floatsisf;
+ @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage });
+ const __floatdidf = @import("compiler_rt/floatdidf.zig").__floatdidf;
+ @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage });
+ const __floatsitf = @import("compiler_rt/floatsiXf.zig").__floatsitf;
+ @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage });
+
+ const __floatunsisf = @import("compiler_rt/floatunsisf.zig").__floatunsisf;
+ @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage });
+ if (!builtin.zig_is_stage2) {
const __floatundisf = @import("compiler_rt/floatundisf.zig").__floatundisf;
@export(__floatundisf, .{ .name = "__floatundisf", .linkage = linkage });
- const __floatunsidf = @import("compiler_rt/floatunsidf.zig").__floatunsidf;
- @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage });
- const __floatundidf = @import("compiler_rt/floatundidf.zig").__floatundidf;
- @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage });
-
- const __floatditf = @import("compiler_rt/floatditf.zig").__floatditf;
- @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage });
- const __floattitf = @import("compiler_rt/floattitf.zig").__floattitf;
- @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage });
- const __floattidf = @import("compiler_rt/floattidf.zig").__floattidf;
- @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage });
- const __floattisf = @import("compiler_rt/floatXisf.zig").__floattisf;
- @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage });
- const __floatdisf = @import("compiler_rt/floatXisf.zig").__floatdisf;
- @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage });
-
- const __floatunditf = @import("compiler_rt/floatunditf.zig").__floatunditf;
- @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage });
- const __floatunsitf = @import("compiler_rt/floatunsitf.zig").__floatunsitf;
- @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage });
-
- const __floatuntitf = @import("compiler_rt/floatuntitf.zig").__floatuntitf;
- @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage });
- const __floatuntidf = @import("compiler_rt/floatuntidf.zig").__floatuntidf;
- @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage });
- const __floatuntisf = @import("compiler_rt/floatuntisf.zig").__floatuntisf;
- @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage });
-
- const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2;
- @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage });
- if (!is_test) {
- @export(__truncsfhf2, .{ .name = "__gnu_f2h_ieee", .linkage = linkage });
- }
- const __extendsfdf2 = @import("compiler_rt/extendXfYf2.zig").__extendsfdf2;
- @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage });
-
- // Integral / floating point conversion (part 2/2)
- const __fixunssfsi = @import("compiler_rt/fixunssfsi.zig").__fixunssfsi;
- @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage });
- const __fixunssfdi = @import("compiler_rt/fixunssfdi.zig").__fixunssfdi;
- @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage });
- const __fixunssfti = @import("compiler_rt/fixunssfti.zig").__fixunssfti;
- @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage });
-
- const __fixunsdfsi = @import("compiler_rt/fixunsdfsi.zig").__fixunsdfsi;
- @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage });
- const __fixunsdfdi = @import("compiler_rt/fixunsdfdi.zig").__fixunsdfdi;
- @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage });
- const __fixunsdfti = @import("compiler_rt/fixunsdfti.zig").__fixunsdfti;
- @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage });
-
- const __fixunstfsi = @import("compiler_rt/fixunstfsi.zig").__fixunstfsi;
- @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage });
- const __fixunstfdi = @import("compiler_rt/fixunstfdi.zig").__fixunstfdi;
- @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage });
- const __fixunstfti = @import("compiler_rt/fixunstfti.zig").__fixunstfti;
- @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage });
-
- const __fixdfdi = @import("compiler_rt/fixdfdi.zig").__fixdfdi;
- @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage });
- const __fixdfsi = @import("compiler_rt/fixdfsi.zig").__fixdfsi;
- @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage });
- const __fixdfti = @import("compiler_rt/fixdfti.zig").__fixdfti;
- @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage });
- const __fixsfdi = @import("compiler_rt/fixsfdi.zig").__fixsfdi;
- @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage });
- const __fixsfsi = @import("compiler_rt/fixsfsi.zig").__fixsfsi;
- @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage });
- const __fixsfti = @import("compiler_rt/fixsfti.zig").__fixsfti;
- @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage });
- const __fixtfdi = @import("compiler_rt/fixtfdi.zig").__fixtfdi;
- @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage });
- const __fixtfsi = @import("compiler_rt/fixtfsi.zig").__fixtfsi;
- @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage });
- const __fixtfti = @import("compiler_rt/fixtfti.zig").__fixtfti;
- @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage });
-
- const __udivmoddi4 = @import("compiler_rt/int.zig").__udivmoddi4;
- @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = linkage });
-
- if (is_darwin) {
- const __isPlatformVersionAtLeast = @import("compiler_rt/os_version_check.zig").__isPlatformVersionAtLeast;
- @export(__isPlatformVersionAtLeast, .{ .name = "__isPlatformVersionAtLeast", .linkage = linkage });
- }
+ }
+ const __floatunsidf = @import("compiler_rt/floatunsidf.zig").__floatunsidf;
+ @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage });
+ const __floatundidf = @import("compiler_rt/floatundidf.zig").__floatundidf;
+ @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage });
+
+ const __floatditf = @import("compiler_rt/floatditf.zig").__floatditf;
+ @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage });
+ const __floattitf = @import("compiler_rt/floattitf.zig").__floattitf;
+ @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage });
+ const __floattidf = @import("compiler_rt/floattidf.zig").__floattidf;
+ @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage });
+ const __floattisf = @import("compiler_rt/floatXisf.zig").__floattisf;
+ @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage });
+ const __floatdisf = @import("compiler_rt/floatXisf.zig").__floatdisf;
+ @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage });
+
+ const __floatunditf = @import("compiler_rt/floatunditf.zig").__floatunditf;
+ @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage });
+ const __floatunsitf = @import("compiler_rt/floatunsitf.zig").__floatunsitf;
+ @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage });
+
+ const __floatuntitf = @import("compiler_rt/floatuntitf.zig").__floatuntitf;
+ @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage });
+ const __floatuntidf = @import("compiler_rt/floatuntidf.zig").__floatuntidf;
+ @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage });
+ const __floatuntisf = @import("compiler_rt/floatuntisf.zig").__floatuntisf;
+ @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage });
+
+ const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2;
+ @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage });
+ if (!is_test) {
+ @export(__truncsfhf2, .{ .name = "__gnu_f2h_ieee", .linkage = linkage });
+ }
+ const __extendsfdf2 = @import("compiler_rt/extendXfYf2.zig").__extendsfdf2;
+ @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage });
+
+ // Integral / floating point conversion (part 2/2)
+ const __fixunssfsi = @import("compiler_rt/fixunssfsi.zig").__fixunssfsi;
+ @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage });
+ const __fixunssfdi = @import("compiler_rt/fixunssfdi.zig").__fixunssfdi;
+ @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage });
+ const __fixunssfti = @import("compiler_rt/fixunssfti.zig").__fixunssfti;
+ @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage });
+
+ const __fixunsdfsi = @import("compiler_rt/fixunsdfsi.zig").__fixunsdfsi;
+ @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage });
+ const __fixunsdfdi = @import("compiler_rt/fixunsdfdi.zig").__fixunsdfdi;
+ @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage });
+ const __fixunsdfti = @import("compiler_rt/fixunsdfti.zig").__fixunsdfti;
+ @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage });
+
+ const __fixunstfsi = @import("compiler_rt/fixunstfsi.zig").__fixunstfsi;
+ @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage });
+ const __fixunstfdi = @import("compiler_rt/fixunstfdi.zig").__fixunstfdi;
+ @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage });
+ const __fixunstfti = @import("compiler_rt/fixunstfti.zig").__fixunstfti;
+ @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage });
+
+ const __fixdfdi = @import("compiler_rt/fixdfdi.zig").__fixdfdi;
+ @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage });
+ const __fixdfsi = @import("compiler_rt/fixdfsi.zig").__fixdfsi;
+ @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage });
+ const __fixdfti = @import("compiler_rt/fixdfti.zig").__fixdfti;
+ @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage });
+ const __fixsfdi = @import("compiler_rt/fixsfdi.zig").__fixsfdi;
+ @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage });
+ const __fixsfsi = @import("compiler_rt/fixsfsi.zig").__fixsfsi;
+ @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage });
+ const __fixsfti = @import("compiler_rt/fixsfti.zig").__fixsfti;
+ @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage });
+ const __fixtfdi = @import("compiler_rt/fixtfdi.zig").__fixtfdi;
+ @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage });
+ const __fixtfsi = @import("compiler_rt/fixtfsi.zig").__fixtfsi;
+ @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage });
+ const __fixtfti = @import("compiler_rt/fixtfti.zig").__fixtfti;
+ @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage });
+
+ const __udivmoddi4 = @import("compiler_rt/int.zig").__udivmoddi4;
+ @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = linkage });
+
+ if (is_darwin) {
+ const __isPlatformVersionAtLeast = @import("compiler_rt/os_version_check.zig").__isPlatformVersionAtLeast;
+ @export(__isPlatformVersionAtLeast, .{ .name = "__isPlatformVersionAtLeast", .linkage = linkage });
+ }
- // Integral arithmetic
- const __negsi2 = @import("compiler_rt/negXi2.zig").__negsi2;
- @export(__negsi2, .{ .name = "__negsi2", .linkage = linkage });
- const __negdi2 = @import("compiler_rt/negXi2.zig").__negdi2;
- @export(__negdi2, .{ .name = "__negdi2", .linkage = linkage });
- const __negti2 = @import("compiler_rt/negXi2.zig").__negti2;
- @export(__negti2, .{ .name = "__negti2", .linkage = linkage });
- const __mulsi3 = @import("compiler_rt/int.zig").__mulsi3;
- @export(__mulsi3, .{ .name = "__mulsi3", .linkage = linkage });
- const __muldi3 = @import("compiler_rt/muldi3.zig").__muldi3;
- @export(__muldi3, .{ .name = "__muldi3", .linkage = linkage });
- const __divmoddi4 = @import("compiler_rt/int.zig").__divmoddi4;
- @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = linkage });
- const __divsi3 = @import("compiler_rt/int.zig").__divsi3;
- @export(__divsi3, .{ .name = "__divsi3", .linkage = linkage });
- const __divdi3 = @import("compiler_rt/int.zig").__divdi3;
- @export(__divdi3, .{ .name = "__divdi3", .linkage = linkage });
- const __udivsi3 = @import("compiler_rt/int.zig").__udivsi3;
- @export(__udivsi3, .{ .name = "__udivsi3", .linkage = linkage });
- const __udivdi3 = @import("compiler_rt/int.zig").__udivdi3;
- @export(__udivdi3, .{ .name = "__udivdi3", .linkage = linkage });
- const __modsi3 = @import("compiler_rt/int.zig").__modsi3;
- @export(__modsi3, .{ .name = "__modsi3", .linkage = linkage });
- const __moddi3 = @import("compiler_rt/int.zig").__moddi3;
- @export(__moddi3, .{ .name = "__moddi3", .linkage = linkage });
- const __umodsi3 = @import("compiler_rt/int.zig").__umodsi3;
- @export(__umodsi3, .{ .name = "__umodsi3", .linkage = linkage });
- const __umoddi3 = @import("compiler_rt/int.zig").__umoddi3;
- @export(__umoddi3, .{ .name = "__umoddi3", .linkage = linkage });
- const __divmodsi4 = @import("compiler_rt/int.zig").__divmodsi4;
- @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = linkage });
- const __udivmodsi4 = @import("compiler_rt/int.zig").__udivmodsi4;
- @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = linkage });
-
- // Integral arithmetic with trapping overflow
- const __absvsi2 = @import("compiler_rt/absv.zig").__absvsi2;
- @export(__absvsi2, .{ .name = "__absvsi2", .linkage = linkage });
- const __absvdi2 = @import("compiler_rt/absv.zig").__absvdi2;
- @export(__absvdi2, .{ .name = "__absvdi2", .linkage = linkage });
- const __absvti2 = @import("compiler_rt/absv.zig").__absvti2;
- @export(__absvti2, .{ .name = "__absvti2", .linkage = linkage });
- const __negvsi2 = @import("compiler_rt/negv.zig").__negvsi2;
- @export(__negvsi2, .{ .name = "__negvsi2", .linkage = linkage });
- const __negvdi2 = @import("compiler_rt/negv.zig").__negvdi2;
- @export(__negvdi2, .{ .name = "__negvdi2", .linkage = linkage });
- const __negvti2 = @import("compiler_rt/negv.zig").__negvti2;
- @export(__negvti2, .{ .name = "__negvti2", .linkage = linkage });
-
- // missing: Integral arithmetic which returns if overflow
-
- // Integral comparison
- // (a < b) => 0
- // (a == b) => 1
- // (a > b) => 2
- const __cmpsi2 = @import("compiler_rt/cmp.zig").__cmpsi2;
- @export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = linkage });
- const __cmpdi2 = @import("compiler_rt/cmp.zig").__cmpdi2;
- @export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = linkage });
- const __cmpti2 = @import("compiler_rt/cmp.zig").__cmpti2;
- @export(__cmpti2, .{ .name = "__cmpti2", .linkage = linkage });
- const __ucmpsi2 = @import("compiler_rt/cmp.zig").__ucmpsi2;
- @export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = linkage });
- const __ucmpdi2 = @import("compiler_rt/cmp.zig").__ucmpdi2;
- @export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = linkage });
- const __ucmpti2 = @import("compiler_rt/cmp.zig").__ucmpti2;
- @export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = linkage });
-
- // missing: Floating point raised to integer power
-
- // missing: Complex arithmetic
- // (a + ib) * (c + id)
- // (a + ib) / (c + id)
-
- const __negsf2 = @import("compiler_rt/negXf2.zig").__negsf2;
- @export(__negsf2, .{ .name = "__negsf2", .linkage = linkage });
- const __negdf2 = @import("compiler_rt/negXf2.zig").__negdf2;
- @export(__negdf2, .{ .name = "__negdf2", .linkage = linkage });
-
- if (builtin.link_libc and os_tag == .openbsd) {
- const __emutls_get_address = @import("compiler_rt/emutls.zig").__emutls_get_address;
- @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = linkage });
- }
+ // Integral arithmetic
+ const __negsi2 = @import("compiler_rt/negXi2.zig").__negsi2;
+ @export(__negsi2, .{ .name = "__negsi2", .linkage = linkage });
+ const __negdi2 = @import("compiler_rt/negXi2.zig").__negdi2;
+ @export(__negdi2, .{ .name = "__negdi2", .linkage = linkage });
+ const __negti2 = @import("compiler_rt/negXi2.zig").__negti2;
+ @export(__negti2, .{ .name = "__negti2", .linkage = linkage });
+ const __mulsi3 = @import("compiler_rt/int.zig").__mulsi3;
+ @export(__mulsi3, .{ .name = "__mulsi3", .linkage = linkage });
+ const __muldi3 = @import("compiler_rt/muldi3.zig").__muldi3;
+ @export(__muldi3, .{ .name = "__muldi3", .linkage = linkage });
+ const __divmoddi4 = @import("compiler_rt/int.zig").__divmoddi4;
+ @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = linkage });
+ const __divsi3 = @import("compiler_rt/int.zig").__divsi3;
+ @export(__divsi3, .{ .name = "__divsi3", .linkage = linkage });
+ const __divdi3 = @import("compiler_rt/int.zig").__divdi3;
+ @export(__divdi3, .{ .name = "__divdi3", .linkage = linkage });
+ const __udivsi3 = @import("compiler_rt/int.zig").__udivsi3;
+ @export(__udivsi3, .{ .name = "__udivsi3", .linkage = linkage });
+ const __udivdi3 = @import("compiler_rt/int.zig").__udivdi3;
+ @export(__udivdi3, .{ .name = "__udivdi3", .linkage = linkage });
+ const __modsi3 = @import("compiler_rt/int.zig").__modsi3;
+ @export(__modsi3, .{ .name = "__modsi3", .linkage = linkage });
+ const __moddi3 = @import("compiler_rt/int.zig").__moddi3;
+ @export(__moddi3, .{ .name = "__moddi3", .linkage = linkage });
+ const __umodsi3 = @import("compiler_rt/int.zig").__umodsi3;
+ @export(__umodsi3, .{ .name = "__umodsi3", .linkage = linkage });
+ const __umoddi3 = @import("compiler_rt/int.zig").__umoddi3;
+ @export(__umoddi3, .{ .name = "__umoddi3", .linkage = linkage });
+ const __divmodsi4 = @import("compiler_rt/int.zig").__divmodsi4;
+ @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = linkage });
+ const __udivmodsi4 = @import("compiler_rt/int.zig").__udivmodsi4;
+ @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = linkage });
+
+ // Integral arithmetic with trapping overflow
+ const __absvsi2 = @import("compiler_rt/absv.zig").__absvsi2;
+ @export(__absvsi2, .{ .name = "__absvsi2", .linkage = linkage });
+ const __absvdi2 = @import("compiler_rt/absv.zig").__absvdi2;
+ @export(__absvdi2, .{ .name = "__absvdi2", .linkage = linkage });
+ const __absvti2 = @import("compiler_rt/absv.zig").__absvti2;
+ @export(__absvti2, .{ .name = "__absvti2", .linkage = linkage });
+ const __negvsi2 = @import("compiler_rt/negv.zig").__negvsi2;
+ @export(__negvsi2, .{ .name = "__negvsi2", .linkage = linkage });
+ const __negvdi2 = @import("compiler_rt/negv.zig").__negvdi2;
+ @export(__negvdi2, .{ .name = "__negvdi2", .linkage = linkage });
+ const __negvti2 = @import("compiler_rt/negv.zig").__negvti2;
+ @export(__negvti2, .{ .name = "__negvti2", .linkage = linkage });
+
+ // missing: Integral arithmetic which returns if overflow
+
+ // Integral comparison
+ // (a < b) => 0
+ // (a == b) => 1
+ // (a > b) => 2
+ const __cmpsi2 = @import("compiler_rt/cmp.zig").__cmpsi2;
+ @export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = linkage });
+ const __cmpdi2 = @import("compiler_rt/cmp.zig").__cmpdi2;
+ @export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = linkage });
+ const __cmpti2 = @import("compiler_rt/cmp.zig").__cmpti2;
+ @export(__cmpti2, .{ .name = "__cmpti2", .linkage = linkage });
+ const __ucmpsi2 = @import("compiler_rt/cmp.zig").__ucmpsi2;
+ @export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = linkage });
+ const __ucmpdi2 = @import("compiler_rt/cmp.zig").__ucmpdi2;
+ @export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = linkage });
+ const __ucmpti2 = @import("compiler_rt/cmp.zig").__ucmpti2;
+ @export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = linkage });
+
+ // missing: Floating point raised to integer power
+
+ // missing: Complex arithmetic
+ // (a + ib) * (c + id)
+ // (a + ib) / (c + id)
+
+ const __negsf2 = @import("compiler_rt/negXf2.zig").__negsf2;
+ @export(__negsf2, .{ .name = "__negsf2", .linkage = linkage });
+ const __negdf2 = @import("compiler_rt/negXf2.zig").__negdf2;
+ @export(__negdf2, .{ .name = "__negdf2", .linkage = linkage });
+
+ if (builtin.link_libc and os_tag == .openbsd) {
+ const __emutls_get_address = @import("compiler_rt/emutls.zig").__emutls_get_address;
+ @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = linkage });
+ }
- if ((arch.isARM() or arch.isThumb()) and !is_test) {
- const __aeabi_unwind_cpp_pr0 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr0;
- @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = linkage });
- const __aeabi_unwind_cpp_pr1 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr1;
- @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = linkage });
- const __aeabi_unwind_cpp_pr2 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr2;
- @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = linkage });
-
- @export(__muldi3, .{ .name = "__aeabi_lmul", .linkage = linkage });
-
- const __aeabi_ldivmod = @import("compiler_rt/arm.zig").__aeabi_ldivmod;
- @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = linkage });
- const __aeabi_uldivmod = @import("compiler_rt/arm.zig").__aeabi_uldivmod;
- @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = linkage });
-
- @export(__divsi3, .{ .name = "__aeabi_idiv", .linkage = linkage });
- const __aeabi_idivmod = @import("compiler_rt/arm.zig").__aeabi_idivmod;
- @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = linkage });
- @export(__udivsi3, .{ .name = "__aeabi_uidiv", .linkage = linkage });
- const __aeabi_uidivmod = @import("compiler_rt/arm.zig").__aeabi_uidivmod;
- @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = linkage });
-
- const __aeabi_memcpy = @import("compiler_rt/arm.zig").__aeabi_memcpy;
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = linkage });
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy4", .linkage = linkage });
- @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy8", .linkage = linkage });
-
- const __aeabi_memmove = @import("compiler_rt/arm.zig").__aeabi_memmove;
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = linkage });
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove4", .linkage = linkage });
- @export(__aeabi_memmove, .{ .name = "__aeabi_memmove8", .linkage = linkage });
-
- const __aeabi_memset = @import("compiler_rt/arm.zig").__aeabi_memset;
- @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = linkage });
- @export(__aeabi_memset, .{ .name = "__aeabi_memset4", .linkage = linkage });
- @export(__aeabi_memset, .{ .name = "__aeabi_memset8", .linkage = linkage });
-
- const __aeabi_memclr = @import("compiler_rt/arm.zig").__aeabi_memclr;
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = linkage });
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr4", .linkage = linkage });
- @export(__aeabi_memclr, .{ .name = "__aeabi_memclr8", .linkage = linkage });
-
- if (os_tag == .linux) {
- const __aeabi_read_tp = @import("compiler_rt/arm.zig").__aeabi_read_tp;
- @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = linkage });
- }
-
- const __aeabi_f2d = @import("compiler_rt/extendXfYf2.zig").__aeabi_f2d;
- @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage });
- const __aeabi_i2d = @import("compiler_rt/floatsiXf.zig").__aeabi_i2d;
- @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage });
- const __aeabi_l2d = @import("compiler_rt/floatdidf.zig").__aeabi_l2d;
- @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage });
- const __aeabi_l2f = @import("compiler_rt/floatXisf.zig").__aeabi_l2f;
- @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage });
- const __aeabi_ui2d = @import("compiler_rt/floatunsidf.zig").__aeabi_ui2d;
- @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage });
- const __aeabi_ul2d = @import("compiler_rt/floatundidf.zig").__aeabi_ul2d;
- @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage });
- const __aeabi_ui2f = @import("compiler_rt/floatunsisf.zig").__aeabi_ui2f;
- @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage });
- const __aeabi_ul2f = @import("compiler_rt/floatundisf.zig").__aeabi_ul2f;
- @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage });
-
- const __aeabi_fneg = @import("compiler_rt/negXf2.zig").__aeabi_fneg;
- @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = linkage });
- const __aeabi_dneg = @import("compiler_rt/negXf2.zig").__aeabi_dneg;
- @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = linkage });
-
- const __aeabi_fmul = @import("compiler_rt/mulXf3.zig").__aeabi_fmul;
- @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = linkage });
- const __aeabi_dmul = @import("compiler_rt/mulXf3.zig").__aeabi_dmul;
- @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage });
-
- const __aeabi_d2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2h;
- @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage });
-
- const __aeabi_f2ulz = @import("compiler_rt/fixunssfdi.zig").__aeabi_f2ulz;
- @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage });
- const __aeabi_d2ulz = @import("compiler_rt/fixunsdfdi.zig").__aeabi_d2ulz;
- @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage });
-
- const __aeabi_f2lz = @import("compiler_rt/fixsfdi.zig").__aeabi_f2lz;
- @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage });
- const __aeabi_d2lz = @import("compiler_rt/fixdfdi.zig").__aeabi_d2lz;
- @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage });
-
- const __aeabi_d2uiz = @import("compiler_rt/fixunsdfsi.zig").__aeabi_d2uiz;
- @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage });
-
- const __aeabi_h2f = @import("compiler_rt/extendXfYf2.zig").__aeabi_h2f;
- @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage });
- const __aeabi_f2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_f2h;
- @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage });
-
- const __aeabi_i2f = @import("compiler_rt/floatsiXf.zig").__aeabi_i2f;
- @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage });
- const __aeabi_d2f = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2f;
- @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage });
-
- const __aeabi_fadd = @import("compiler_rt/addXf3.zig").__aeabi_fadd;
- @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage });
- const __aeabi_dadd = @import("compiler_rt/addXf3.zig").__aeabi_dadd;
- @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage });
- const __aeabi_fsub = @import("compiler_rt/addXf3.zig").__aeabi_fsub;
- @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage });
- const __aeabi_dsub = @import("compiler_rt/addXf3.zig").__aeabi_dsub;
- @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage });
-
- const __aeabi_f2uiz = @import("compiler_rt/fixunssfsi.zig").__aeabi_f2uiz;
- @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage });
-
- const __aeabi_f2iz = @import("compiler_rt/fixsfsi.zig").__aeabi_f2iz;
- @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage });
- const __aeabi_d2iz = @import("compiler_rt/fixdfsi.zig").__aeabi_d2iz;
- @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage });
-
- const __aeabi_fdiv = @import("compiler_rt/divsf3.zig").__aeabi_fdiv;
- @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = linkage });
- const __aeabi_ddiv = @import("compiler_rt/divdf3.zig").__aeabi_ddiv;
- @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = linkage });
-
- const __aeabi_llsl = @import("compiler_rt/shift.zig").__aeabi_llsl;
- @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = linkage });
- const __aeabi_lasr = @import("compiler_rt/shift.zig").__aeabi_lasr;
- @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = linkage });
- const __aeabi_llsr = @import("compiler_rt/shift.zig").__aeabi_llsr;
- @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = linkage });
-
- const __aeabi_fcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpeq;
- @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = linkage });
- const __aeabi_fcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmplt;
- @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage });
- const __aeabi_fcmple = @import("compiler_rt/compareXf2.zig").__aeabi_fcmple;
- @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage });
- const __aeabi_fcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpge;
- @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage });
- const __aeabi_fcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpgt;
- @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage });
- const __aeabi_fcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpun;
- @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage });
-
- const __aeabi_dcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpeq;
- @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage });
- const __aeabi_dcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmplt;
- @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage });
- const __aeabi_dcmple = @import("compiler_rt/compareXf2.zig").__aeabi_dcmple;
- @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage });
- const __aeabi_dcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpge;
- @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = linkage });
- const __aeabi_dcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpgt;
- @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage });
- const __aeabi_dcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpun;
- @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage });
+ if ((arch.isARM() or arch.isThumb()) and !is_test) {
+ const __aeabi_unwind_cpp_pr0 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr0;
+ @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = linkage });
+ const __aeabi_unwind_cpp_pr1 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr1;
+ @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = linkage });
+ const __aeabi_unwind_cpp_pr2 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr2;
+ @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = linkage });
+
+ @export(__muldi3, .{ .name = "__aeabi_lmul", .linkage = linkage });
+
+ const __aeabi_ldivmod = @import("compiler_rt/arm.zig").__aeabi_ldivmod;
+ @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = linkage });
+ const __aeabi_uldivmod = @import("compiler_rt/arm.zig").__aeabi_uldivmod;
+ @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = linkage });
+
+ @export(__divsi3, .{ .name = "__aeabi_idiv", .linkage = linkage });
+ const __aeabi_idivmod = @import("compiler_rt/arm.zig").__aeabi_idivmod;
+ @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = linkage });
+ @export(__udivsi3, .{ .name = "__aeabi_uidiv", .linkage = linkage });
+ const __aeabi_uidivmod = @import("compiler_rt/arm.zig").__aeabi_uidivmod;
+ @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = linkage });
+
+ const __aeabi_memcpy = @import("compiler_rt/arm.zig").__aeabi_memcpy;
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = linkage });
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy4", .linkage = linkage });
+ @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy8", .linkage = linkage });
+
+ const __aeabi_memmove = @import("compiler_rt/arm.zig").__aeabi_memmove;
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = linkage });
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove4", .linkage = linkage });
+ @export(__aeabi_memmove, .{ .name = "__aeabi_memmove8", .linkage = linkage });
+
+ const __aeabi_memset = @import("compiler_rt/arm.zig").__aeabi_memset;
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = linkage });
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset4", .linkage = linkage });
+ @export(__aeabi_memset, .{ .name = "__aeabi_memset8", .linkage = linkage });
+
+ const __aeabi_memclr = @import("compiler_rt/arm.zig").__aeabi_memclr;
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = linkage });
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr4", .linkage = linkage });
+ @export(__aeabi_memclr, .{ .name = "__aeabi_memclr8", .linkage = linkage });
+
+ if (os_tag == .linux) {
+ const __aeabi_read_tp = @import("compiler_rt/arm.zig").__aeabi_read_tp;
+ @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = linkage });
}
- if (arch == .i386 and abi == .msvc) {
- // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
- const _alldiv = @import("compiler_rt/aulldiv.zig")._alldiv;
- @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = strong_linkage });
- const _aulldiv = @import("compiler_rt/aulldiv.zig")._aulldiv;
- @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = strong_linkage });
- const _allrem = @import("compiler_rt/aullrem.zig")._allrem;
- @export(_allrem, .{ .name = "\x01__allrem", .linkage = strong_linkage });
- const _aullrem = @import("compiler_rt/aullrem.zig")._aullrem;
- @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = strong_linkage });
- }
+ const __aeabi_f2d = @import("compiler_rt/extendXfYf2.zig").__aeabi_f2d;
+ @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage });
+ const __aeabi_i2d = @import("compiler_rt/floatsiXf.zig").__aeabi_i2d;
+ @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage });
+ const __aeabi_l2d = @import("compiler_rt/floatdidf.zig").__aeabi_l2d;
+ @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage });
+ const __aeabi_l2f = @import("compiler_rt/floatXisf.zig").__aeabi_l2f;
+ @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage });
+ const __aeabi_ui2d = @import("compiler_rt/floatunsidf.zig").__aeabi_ui2d;
+ @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage });
+ const __aeabi_ul2d = @import("compiler_rt/floatundidf.zig").__aeabi_ul2d;
+ @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage });
+ const __aeabi_ui2f = @import("compiler_rt/floatunsisf.zig").__aeabi_ui2f;
+ @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage });
+ const __aeabi_ul2f = @import("compiler_rt/floatundisf.zig").__aeabi_ul2f;
+ @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage });
+
+ const __aeabi_fneg = @import("compiler_rt/negXf2.zig").__aeabi_fneg;
+ @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = linkage });
+ const __aeabi_dneg = @import("compiler_rt/negXf2.zig").__aeabi_dneg;
+ @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = linkage });
+
+ const __aeabi_fmul = @import("compiler_rt/mulXf3.zig").__aeabi_fmul;
+ @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = linkage });
+ const __aeabi_dmul = @import("compiler_rt/mulXf3.zig").__aeabi_dmul;
+ @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage });
+
+ const __aeabi_d2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2h;
+ @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage });
+
+ const __aeabi_f2ulz = @import("compiler_rt/fixunssfdi.zig").__aeabi_f2ulz;
+ @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage });
+ const __aeabi_d2ulz = @import("compiler_rt/fixunsdfdi.zig").__aeabi_d2ulz;
+ @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage });
+
+ const __aeabi_f2lz = @import("compiler_rt/fixsfdi.zig").__aeabi_f2lz;
+ @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage });
+ const __aeabi_d2lz = @import("compiler_rt/fixdfdi.zig").__aeabi_d2lz;
+ @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage });
+
+ const __aeabi_d2uiz = @import("compiler_rt/fixunsdfsi.zig").__aeabi_d2uiz;
+ @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage });
+
+ const __aeabi_h2f = @import("compiler_rt/extendXfYf2.zig").__aeabi_h2f;
+ @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage });
+ const __aeabi_f2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_f2h;
+ @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage });
+
+ const __aeabi_i2f = @import("compiler_rt/floatsiXf.zig").__aeabi_i2f;
+ @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage });
+ const __aeabi_d2f = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2f;
+ @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage });
+
+ const __aeabi_fadd = @import("compiler_rt/addXf3.zig").__aeabi_fadd;
+ @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage });
+ const __aeabi_dadd = @import("compiler_rt/addXf3.zig").__aeabi_dadd;
+ @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage });
+ const __aeabi_fsub = @import("compiler_rt/addXf3.zig").__aeabi_fsub;
+ @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage });
+ const __aeabi_dsub = @import("compiler_rt/addXf3.zig").__aeabi_dsub;
+ @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage });
+
+ const __aeabi_f2uiz = @import("compiler_rt/fixunssfsi.zig").__aeabi_f2uiz;
+ @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage });
+
+ const __aeabi_f2iz = @import("compiler_rt/fixsfsi.zig").__aeabi_f2iz;
+ @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage });
+ const __aeabi_d2iz = @import("compiler_rt/fixdfsi.zig").__aeabi_d2iz;
+ @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage });
+
+ const __aeabi_fdiv = @import("compiler_rt/divsf3.zig").__aeabi_fdiv;
+ @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = linkage });
+ const __aeabi_ddiv = @import("compiler_rt/divdf3.zig").__aeabi_ddiv;
+ @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = linkage });
+
+ const __aeabi_llsl = @import("compiler_rt/shift.zig").__aeabi_llsl;
+ @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = linkage });
+ const __aeabi_lasr = @import("compiler_rt/shift.zig").__aeabi_lasr;
+ @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = linkage });
+ const __aeabi_llsr = @import("compiler_rt/shift.zig").__aeabi_llsr;
+ @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = linkage });
+
+ const __aeabi_fcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpeq;
+ @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = linkage });
+ const __aeabi_fcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmplt;
+ @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage });
+ const __aeabi_fcmple = @import("compiler_rt/compareXf2.zig").__aeabi_fcmple;
+ @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage });
+ const __aeabi_fcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpge;
+ @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage });
+ const __aeabi_fcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpgt;
+ @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage });
+ const __aeabi_fcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpun;
+ @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage });
+
+ const __aeabi_dcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpeq;
+ @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage });
+ const __aeabi_dcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmplt;
+ @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage });
+ const __aeabi_dcmple = @import("compiler_rt/compareXf2.zig").__aeabi_dcmple;
+ @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage });
+ const __aeabi_dcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpge;
+ @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = linkage });
+ const __aeabi_dcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpgt;
+ @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage });
+ const __aeabi_dcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpun;
+ @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage });
+ }
- if (arch.isSPARC()) {
- // SPARC systems use a different naming scheme
- const _Qp_add = @import("compiler_rt/sparc.zig")._Qp_add;
- @export(_Qp_add, .{ .name = "_Qp_add", .linkage = linkage });
- const _Qp_div = @import("compiler_rt/sparc.zig")._Qp_div;
- @export(_Qp_div, .{ .name = "_Qp_div", .linkage = linkage });
- const _Qp_mul = @import("compiler_rt/sparc.zig")._Qp_mul;
- @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = linkage });
- const _Qp_sub = @import("compiler_rt/sparc.zig")._Qp_sub;
- @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = linkage });
-
- const _Qp_cmp = @import("compiler_rt/sparc.zig")._Qp_cmp;
- @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = linkage });
- const _Qp_feq = @import("compiler_rt/sparc.zig")._Qp_feq;
- @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = linkage });
- const _Qp_fne = @import("compiler_rt/sparc.zig")._Qp_fne;
- @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = linkage });
- const _Qp_flt = @import("compiler_rt/sparc.zig")._Qp_flt;
- @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = linkage });
- const _Qp_fle = @import("compiler_rt/sparc.zig")._Qp_fle;
- @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = linkage });
- const _Qp_fgt = @import("compiler_rt/sparc.zig")._Qp_fgt;
- @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage });
- const _Qp_fge = @import("compiler_rt/sparc.zig")._Qp_fge;
- @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage });
-
- const _Qp_itoq = @import("compiler_rt/sparc.zig")._Qp_itoq;
- @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage });
- const _Qp_uitoq = @import("compiler_rt/sparc.zig")._Qp_uitoq;
- @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage });
- const _Qp_xtoq = @import("compiler_rt/sparc.zig")._Qp_xtoq;
- @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage });
- const _Qp_uxtoq = @import("compiler_rt/sparc.zig")._Qp_uxtoq;
- @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage });
- const _Qp_stoq = @import("compiler_rt/sparc.zig")._Qp_stoq;
- @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage });
- const _Qp_dtoq = @import("compiler_rt/sparc.zig")._Qp_dtoq;
- @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage });
- const _Qp_qtoi = @import("compiler_rt/sparc.zig")._Qp_qtoi;
- @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage });
- const _Qp_qtoui = @import("compiler_rt/sparc.zig")._Qp_qtoui;
- @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage });
- const _Qp_qtox = @import("compiler_rt/sparc.zig")._Qp_qtox;
- @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage });
- const _Qp_qtoux = @import("compiler_rt/sparc.zig")._Qp_qtoux;
- @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage });
- const _Qp_qtos = @import("compiler_rt/sparc.zig")._Qp_qtos;
- @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage });
- const _Qp_qtod = @import("compiler_rt/sparc.zig")._Qp_qtod;
- @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage });
- }
+ if (arch == .i386 and abi == .msvc) {
+ // Don't let LLVM apply the stdcall name mangling on those MSVC builtins
+ const _alldiv = @import("compiler_rt/aulldiv.zig")._alldiv;
+ @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = strong_linkage });
+ const _aulldiv = @import("compiler_rt/aulldiv.zig")._aulldiv;
+ @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = strong_linkage });
+ const _allrem = @import("compiler_rt/aullrem.zig")._allrem;
+ @export(_allrem, .{ .name = "\x01__allrem", .linkage = strong_linkage });
+ const _aullrem = @import("compiler_rt/aullrem.zig")._aullrem;
+ @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = strong_linkage });
+ }
- if ((arch == .powerpc or arch.isPPC64()) and !is_test) {
- @export(__addtf3, .{ .name = "__addkf3", .linkage = linkage });
- @export(__subtf3, .{ .name = "__subkf3", .linkage = linkage });
- @export(__multf3, .{ .name = "__mulkf3", .linkage = linkage });
- @export(__divtf3, .{ .name = "__divkf3", .linkage = linkage });
- @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = linkage });
- @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = linkage });
- @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = linkage });
- @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = linkage });
- @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = linkage });
- @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = linkage });
- @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = linkage });
- @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = linkage });
- @export(__floatsitf, .{ .name = "__floatsikf", .linkage = linkage });
- @export(__floatditf, .{ .name = "__floatdikf", .linkage = linkage });
- @export(__floatunditf, .{ .name = "__floatundikf", .linkage = linkage });
- @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = linkage });
-
- @export(__letf2, .{ .name = "__eqkf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__nekf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gekf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__ltkf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__lekf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__gtkf2", .linkage = linkage });
- @export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
- }
+ if (arch.isSPARC()) {
+ // SPARC systems use a different naming scheme
+ const _Qp_add = @import("compiler_rt/sparc.zig")._Qp_add;
+ @export(_Qp_add, .{ .name = "_Qp_add", .linkage = linkage });
+ const _Qp_div = @import("compiler_rt/sparc.zig")._Qp_div;
+ @export(_Qp_div, .{ .name = "_Qp_div", .linkage = linkage });
+ const _Qp_mul = @import("compiler_rt/sparc.zig")._Qp_mul;
+ @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = linkage });
+ const _Qp_sub = @import("compiler_rt/sparc.zig")._Qp_sub;
+ @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = linkage });
+
+ const _Qp_cmp = @import("compiler_rt/sparc.zig")._Qp_cmp;
+ @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = linkage });
+ const _Qp_feq = @import("compiler_rt/sparc.zig")._Qp_feq;
+ @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = linkage });
+ const _Qp_fne = @import("compiler_rt/sparc.zig")._Qp_fne;
+ @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = linkage });
+ const _Qp_flt = @import("compiler_rt/sparc.zig")._Qp_flt;
+ @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = linkage });
+ const _Qp_fle = @import("compiler_rt/sparc.zig")._Qp_fle;
+ @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = linkage });
+ const _Qp_fgt = @import("compiler_rt/sparc.zig")._Qp_fgt;
+ @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage });
+ const _Qp_fge = @import("compiler_rt/sparc.zig")._Qp_fge;
+ @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage });
+
+ const _Qp_itoq = @import("compiler_rt/sparc.zig")._Qp_itoq;
+ @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage });
+ const _Qp_uitoq = @import("compiler_rt/sparc.zig")._Qp_uitoq;
+ @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage });
+ const _Qp_xtoq = @import("compiler_rt/sparc.zig")._Qp_xtoq;
+ @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage });
+ const _Qp_uxtoq = @import("compiler_rt/sparc.zig")._Qp_uxtoq;
+ @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage });
+ const _Qp_stoq = @import("compiler_rt/sparc.zig")._Qp_stoq;
+ @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage });
+ const _Qp_dtoq = @import("compiler_rt/sparc.zig")._Qp_dtoq;
+ @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage });
+ const _Qp_qtoi = @import("compiler_rt/sparc.zig")._Qp_qtoi;
+ @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage });
+ const _Qp_qtoui = @import("compiler_rt/sparc.zig")._Qp_qtoui;
+ @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage });
+ const _Qp_qtox = @import("compiler_rt/sparc.zig")._Qp_qtox;
+ @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage });
+ const _Qp_qtoux = @import("compiler_rt/sparc.zig")._Qp_qtoux;
+ @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage });
+ const _Qp_qtos = @import("compiler_rt/sparc.zig")._Qp_qtos;
+ @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage });
+ const _Qp_qtod = @import("compiler_rt/sparc.zig")._Qp_qtod;
+ @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage });
+ }
- _ = @import("compiler_rt/atomics.zig");
+ if ((arch == .powerpc or arch.isPPC64()) and !is_test) {
+ @export(__addtf3, .{ .name = "__addkf3", .linkage = linkage });
+ @export(__subtf3, .{ .name = "__subkf3", .linkage = linkage });
+ @export(__multf3, .{ .name = "__mulkf3", .linkage = linkage });
+ @export(__divtf3, .{ .name = "__divkf3", .linkage = linkage });
+ @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = linkage });
+ @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = linkage });
+ @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = linkage });
+ @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = linkage });
+ @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = linkage });
+ @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = linkage });
+ @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = linkage });
+ @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = linkage });
+ @export(__floatsitf, .{ .name = "__floatsikf", .linkage = linkage });
+ @export(__floatditf, .{ .name = "__floatdikf", .linkage = linkage });
+ @export(__floatunditf, .{ .name = "__floatundikf", .linkage = linkage });
+ @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = linkage });
+
+ @export(__letf2, .{ .name = "__eqkf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__nekf2", .linkage = linkage });
+ @export(__getf2, .{ .name = "__gekf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__ltkf2", .linkage = linkage });
+ @export(__letf2, .{ .name = "__lekf2", .linkage = linkage });
+ @export(__getf2, .{ .name = "__gtkf2", .linkage = linkage });
+ @export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
+ }
+
+ @export(floorf, .{ .name = "floorf", .linkage = linkage });
+ @export(floor, .{ .name = "floor", .linkage = linkage });
+ @export(floorl, .{ .name = "floorl", .linkage = linkage });
+ if (!builtin.zig_is_stage2) {
@export(fmaq, .{ .name = "fmaq", .linkage = linkage });
- @export(floorf, .{ .name = "floorf", .linkage = linkage });
- @export(floor, .{ .name = "floor", .linkage = linkage });
- @export(floorl, .{ .name = "floorl", .linkage = linkage });
}
}
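
Note: every entry in the export lists above follows the same shape: resolve a named implementation from its compiler_rt source file and bind it to the C-level symbol name with `@export`. A minimal sketch of that pattern, with an illustrative function, symbol name, and hardcoded linkage (the real file derives `linkage` from the build configuration):

    const std = @import("std");
    const linkage: std.builtin.GlobalLinkage = .Strong;

    fn __example_negsi2(a: i32) callconv(.C) i32 {
        return -a;
    }

    comptime {
        // Bind the Zig function to the external symbol name that
        // compiler-generated calls are lowered to.
        @export(__example_negsi2, .{ .name = "__example_negsi2", .linkage = linkage });
    }
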
diff --git a/lib/std/special/compiler_rt/absv.zig b/lib/std/special/compiler_rt/absv.zig
index 5c46d028a9..f14497daf2 100644
--- a/lib/std/special/compiler_rt/absv.zig
+++ b/lib/std/special/compiler_rt/absv.zig
@@ -2,31 +2,36 @@
// * @panic, if value can not be represented
// - absvXi4_generic for unoptimized version
-fn absvXi_generic(comptime ST: type) fn (a: ST) callconv(.C) ST {
- return struct {
- fn f(a: ST) callconv(.C) ST {
- const UT = switch (ST) {
- i32 => u32,
- i64 => u64,
- i128 => u128,
- else => unreachable,
- };
- // taken from Bit Twiddling Hacks
- // compute the integer absolute value (abs) without branching
- var x: ST = a;
- const N: UT = @bitSizeOf(ST);
- const sign: ST = a >> N - 1;
- x +%= sign;
- x ^= sign;
- if (x < 0)
- @panic("compiler_rt absv: overflow");
- return x;
- }
- }.f;
+inline fn absvXi(comptime ST: type, a: ST) ST {
+ const UT = switch (ST) {
+ i32 => u32,
+ i64 => u64,
+ i128 => u128,
+ else => unreachable,
+ };
+ // taken from Bit Twiddling Hacks
+ // compute the integer absolute value (abs) without branching
+ var x: ST = a;
+ const N: UT = @bitSizeOf(ST);
+ const sign: ST = a >> N - 1;
+ x +%= sign;
+ x ^= sign;
+ if (x < 0)
+ @panic("compiler_rt absv: overflow");
+ return x;
+}
+
+pub fn __absvsi2(a: i32) callconv(.C) i32 {
+ return absvXi(i32, a);
+}
+
+pub fn __absvdi2(a: i64) callconv(.C) i64 {
+ return absvXi(i64, a);
+}
+
+pub fn __absvti2(a: i128) callconv(.C) i128 {
+ return absvXi(i128, a);
}
-pub const __absvsi2 = absvXi_generic(i32);
-pub const __absvdi2 = absvXi_generic(i64);
-pub const __absvti2 = absvXi_generic(i128);
test {
_ = @import("absvsi2_test.zig");
diff --git a/lib/std/special/compiler_rt/atomics.zig b/lib/std/special/compiler_rt/atomics.zig
index 3d93dc33b1..7727d7af3d 100644
--- a/lib/std/special/compiler_rt/atomics.zig
+++ b/lib/std/special/compiler_rt/atomics.zig
@@ -119,225 +119,311 @@ fn __atomic_compare_exchange(
return 0;
}
-comptime {
- if (supports_atomic_ops) {
- @export(__atomic_load, .{ .name = "__atomic_load", .linkage = linkage });
- @export(__atomic_store, .{ .name = "__atomic_store", .linkage = linkage });
- @export(__atomic_exchange, .{ .name = "__atomic_exchange", .linkage = linkage });
- @export(__atomic_compare_exchange, .{ .name = "__atomic_compare_exchange", .linkage = linkage });
- }
-}
-
// Specialized versions of the GCC atomic builtin functions.
// LLVM emits those iff the object size is known and the pointers are correctly
// aligned.
+inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
+ _ = model;
+ if (@sizeOf(T) > largest_atomic_size) {
+ var sl = spinlocks.get(@ptrToInt(src));
+ defer sl.release();
+ return src.*;
+ } else {
+ return @atomicLoad(T, src, .SeqCst);
+ }
+}
-fn atomicLoadFn(comptime T: type) fn (*T, i32) callconv(.C) T {
- return struct {
- fn atomic_load_N(src: *T, model: i32) callconv(.C) T {
- _ = model;
- if (@sizeOf(T) > largest_atomic_size) {
- var sl = spinlocks.get(@ptrToInt(src));
- defer sl.release();
- return src.*;
- } else {
- return @atomicLoad(T, src, .SeqCst);
- }
- }
- }.atomic_load_N;
+fn __atomic_load_1(src: *u8, model: i32) callconv(.C) u8 {
+ return atomic_load_N(u8, src, model);
}
-comptime {
- if (supports_atomic_ops) {
- const atomicLoad_u8 = atomicLoadFn(u8);
- const atomicLoad_u16 = atomicLoadFn(u16);
- const atomicLoad_u32 = atomicLoadFn(u32);
- const atomicLoad_u64 = atomicLoadFn(u64);
- @export(atomicLoad_u8, .{ .name = "__atomic_load_1", .linkage = linkage });
- @export(atomicLoad_u16, .{ .name = "__atomic_load_2", .linkage = linkage });
- @export(atomicLoad_u32, .{ .name = "__atomic_load_4", .linkage = linkage });
- @export(atomicLoad_u64, .{ .name = "__atomic_load_8", .linkage = linkage });
- }
+fn __atomic_load_2(src: *u16, model: i32) callconv(.C) u16 {
+ return atomic_load_N(u16, src, model);
}
-fn atomicStoreFn(comptime T: type) fn (*T, T, i32) callconv(.C) void {
- return struct {
- fn atomic_store_N(dst: *T, value: T, model: i32) callconv(.C) void {
- _ = model;
- if (@sizeOf(T) > largest_atomic_size) {
- var sl = spinlocks.get(@ptrToInt(dst));
- defer sl.release();
- dst.* = value;
- } else {
- @atomicStore(T, dst, value, .SeqCst);
- }
- }
- }.atomic_store_N;
+fn __atomic_load_4(src: *u32, model: i32) callconv(.C) u32 {
+ return atomic_load_N(u32, src, model);
}
-comptime {
- if (supports_atomic_ops) {
- const atomicStore_u8 = atomicStoreFn(u8);
- const atomicStore_u16 = atomicStoreFn(u16);
- const atomicStore_u32 = atomicStoreFn(u32);
- const atomicStore_u64 = atomicStoreFn(u64);
- @export(atomicStore_u8, .{ .name = "__atomic_store_1", .linkage = linkage });
- @export(atomicStore_u16, .{ .name = "__atomic_store_2", .linkage = linkage });
- @export(atomicStore_u32, .{ .name = "__atomic_store_4", .linkage = linkage });
- @export(atomicStore_u64, .{ .name = "__atomic_store_8", .linkage = linkage });
+fn __atomic_load_8(src: *u64, model: i32) callconv(.C) u64 {
+ return atomic_load_N(u64, src, model);
+}
+
+inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
+ _ = model;
+ if (@sizeOf(T) > largest_atomic_size) {
+ var sl = spinlocks.get(@ptrToInt(dst));
+ defer sl.release();
+ dst.* = value;
+ } else {
+ @atomicStore(T, dst, value, .SeqCst);
}
}
-fn atomicExchangeFn(comptime T: type) fn (*T, T, i32) callconv(.C) T {
- return struct {
- fn atomic_exchange_N(ptr: *T, val: T, model: i32) callconv(.C) T {
- _ = model;
- if (@sizeOf(T) > largest_atomic_size) {
- var sl = spinlocks.get(@ptrToInt(ptr));
- defer sl.release();
- const value = ptr.*;
- ptr.* = val;
- return value;
- } else {
- return @atomicRmw(T, ptr, .Xchg, val, .SeqCst);
- }
- }
- }.atomic_exchange_N;
+fn __atomic_store_1(dst: *u8, value: u8, model: i32) callconv(.C) void {
+ return atomic_store_N(u8, dst, value, model);
}
-comptime {
- if (supports_atomic_ops) {
- const atomicExchange_u8 = atomicExchangeFn(u8);
- const atomicExchange_u16 = atomicExchangeFn(u16);
- const atomicExchange_u32 = atomicExchangeFn(u32);
- const atomicExchange_u64 = atomicExchangeFn(u64);
- @export(atomicExchange_u8, .{ .name = "__atomic_exchange_1", .linkage = linkage });
- @export(atomicExchange_u16, .{ .name = "__atomic_exchange_2", .linkage = linkage });
- @export(atomicExchange_u32, .{ .name = "__atomic_exchange_4", .linkage = linkage });
- @export(atomicExchange_u64, .{ .name = "__atomic_exchange_8", .linkage = linkage });
+fn __atomic_store_2(dst: *u16, value: u16, model: i32) callconv(.C) void {
+ return atomic_store_N(u16, dst, value, model);
+}
+
+fn __atomic_store_4(dst: *u32, value: u32, model: i32) callconv(.C) void {
+ return atomic_store_N(u32, dst, value, model);
+}
+
+fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.C) void {
+ return atomic_store_N(u64, dst, value, model);
+}
+
+inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
+ _ = model;
+ if (@sizeOf(T) > largest_atomic_size) {
+ var sl = spinlocks.get(@ptrToInt(ptr));
+ defer sl.release();
+ const value = ptr.*;
+ ptr.* = val;
+ return value;
+ } else {
+ return @atomicRmw(T, ptr, .Xchg, val, .SeqCst);
}
}
-fn atomicCompareExchangeFn(comptime T: type) fn (*T, *T, T, i32, i32) callconv(.C) i32 {
- return struct {
- fn atomic_compare_exchange_N(ptr: *T, expected: *T, desired: T, success: i32, failure: i32) callconv(.C) i32 {
- _ = success;
- _ = failure;
- if (@sizeOf(T) > largest_atomic_size) {
- var sl = spinlocks.get(@ptrToInt(ptr));
- defer sl.release();
- const value = ptr.*;
- if (value == expected.*) {
- ptr.* = desired;
- return 1;
- }
- expected.* = value;
- return 0;
- } else {
- if (@cmpxchgStrong(T, ptr, expected.*, desired, .SeqCst, .SeqCst)) |old_value| {
- expected.* = old_value;
- return 0;
- }
- return 1;
- }
+fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+ return atomic_exchange_N(u8, ptr, val, model);
+}
+
+fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+ return atomic_exchange_N(u16, ptr, val, model);
+}
+
+fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+ return atomic_exchange_N(u32, ptr, val, model);
+}
+
+fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+ return atomic_exchange_N(u64, ptr, val, model);
+}
+
+inline fn atomic_compare_exchange_N(
+ comptime T: type,
+ ptr: *T,
+ expected: *T,
+ desired: T,
+ success: i32,
+ failure: i32,
+) i32 {
+ _ = success;
+ _ = failure;
+ if (@sizeOf(T) > largest_atomic_size) {
+ var sl = spinlocks.get(@ptrToInt(ptr));
+ defer sl.release();
+ const value = ptr.*;
+ if (value == expected.*) {
+ ptr.* = desired;
+ return 1;
}
- }.atomic_compare_exchange_N;
+ expected.* = value;
+ return 0;
+ } else {
+ if (@cmpxchgStrong(T, ptr, expected.*, desired, .SeqCst, .SeqCst)) |old_value| {
+ expected.* = old_value;
+ return 0;
+ }
+ return 1;
+ }
}
-comptime {
- if (supports_atomic_ops) {
- const atomicCompareExchange_u8 = atomicCompareExchangeFn(u8);
- const atomicCompareExchange_u16 = atomicCompareExchangeFn(u16);
- const atomicCompareExchange_u32 = atomicCompareExchangeFn(u32);
- const atomicCompareExchange_u64 = atomicCompareExchangeFn(u64);
- @export(atomicCompareExchange_u8, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage });
- @export(atomicCompareExchange_u16, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage });
- @export(atomicCompareExchange_u32, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage });
- @export(atomicCompareExchange_u64, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage });
+fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.C) i32 {
+ return atomic_compare_exchange_N(u8, ptr, expected, desired, success, failure);
+}
+
+fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.C) i32 {
+ return atomic_compare_exchange_N(u16, ptr, expected, desired, success, failure);
+}
+
+fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.C) i32 {
+ return atomic_compare_exchange_N(u32, ptr, expected, desired, success, failure);
+}
+
+fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success: i32, failure: i32) callconv(.C) i32 {
+ return atomic_compare_exchange_N(u64, ptr, expected, desired, success, failure);
+}
+
+inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr: *T, val: T, model: i32) T {
+ _ = model;
+ if (@sizeOf(T) > largest_atomic_size) {
+ var sl = spinlocks.get(@ptrToInt(ptr));
+ defer sl.release();
+
+ const value = ptr.*;
+ ptr.* = switch (op) {
+ .Add => value +% val,
+ .Sub => value -% val,
+ .And => value & val,
+ .Nand => ~(value & val),
+ .Or => value | val,
+ .Xor => value ^ val,
+ else => @compileError("unsupported atomic op"),
+ };
+
+ return value;
}
+
+ return @atomicRmw(T, ptr, op, val, .SeqCst);
}
-fn fetchFn(comptime T: type, comptime op: std.builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
- return struct {
- pub fn fetch_op_N(ptr: *T, val: T, model: i32) callconv(.C) T {
- _ = model;
- if (@sizeOf(T) > largest_atomic_size) {
- var sl = spinlocks.get(@ptrToInt(ptr));
- defer sl.release();
-
- const value = ptr.*;
- ptr.* = switch (op) {
- .Add => value +% val,
- .Sub => value -% val,
- .And => value & val,
- .Nand => ~(value & val),
- .Or => value | val,
- .Xor => value ^ val,
- else => @compileError("unsupported atomic op"),
- };
-
- return value;
- }
+fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+ return fetch_op_N(u8, .Add, ptr, val, model);
+}
- return @atomicRmw(T, ptr, op, val, .SeqCst);
- }
- }.fetch_op_N;
+fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+ return fetch_op_N(u16, .Add, ptr, val, model);
+}
+
+fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+ return fetch_op_N(u32, .Add, ptr, val, model);
+}
+
+fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+ return fetch_op_N(u64, .Add, ptr, val, model);
+}
+
+fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+ return fetch_op_N(u8, .Sub, ptr, val, model);
+}
+
+fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+ return fetch_op_N(u16, .Sub, ptr, val, model);
+}
+
+fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+ return fetch_op_N(u32, .Sub, ptr, val, model);
+}
+
+fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+ return fetch_op_N(u64, .Sub, ptr, val, model);
+}
+
+fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+ return fetch_op_N(u8, .And, ptr, val, model);
+}
+
+fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+ return fetch_op_N(u16, .And, ptr, val, model);
+}
+
+fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+ return fetch_op_N(u32, .And, ptr, val, model);
+}
+
+fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+ return fetch_op_N(u64, .And, ptr, val, model);
+}
+
+fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+ return fetch_op_N(u8, .Or, ptr, val, model);
+}
+
+fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+ return fetch_op_N(u16, .Or, ptr, val, model);
+}
+
+fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+ return fetch_op_N(u32, .Or, ptr, val, model);
+}
+
+fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+ return fetch_op_N(u64, .Or, ptr, val, model);
+}
+
+fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+ return fetch_op_N(u8, .Xor, ptr, val, model);
+}
+
+fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+ return fetch_op_N(u16, .Xor, ptr, val, model);
+}
+
+fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+ return fetch_op_N(u32, .Xor, ptr, val, model);
+}
+
+fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+ return fetch_op_N(u64, .Xor, ptr, val, model);
+}
+
+fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+ return fetch_op_N(u8, .Nand, ptr, val, model);
+}
+
+fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+ return fetch_op_N(u16, .Nand, ptr, val, model);
+}
+
+fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+ return fetch_op_N(u32, .Nand, ptr, val, model);
+}
+
+fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+ return fetch_op_N(u64, .Nand, ptr, val, model);
}
comptime {
if (supports_atomic_ops) {
- const fetch_add_u8 = fetchFn(u8, .Add);
- const fetch_add_u16 = fetchFn(u16, .Add);
- const fetch_add_u32 = fetchFn(u32, .Add);
- const fetch_add_u64 = fetchFn(u64, .Add);
- @export(fetch_add_u8, .{ .name = "__atomic_fetch_add_1", .linkage = linkage });
- @export(fetch_add_u16, .{ .name = "__atomic_fetch_add_2", .linkage = linkage });
- @export(fetch_add_u32, .{ .name = "__atomic_fetch_add_4", .linkage = linkage });
- @export(fetch_add_u64, .{ .name = "__atomic_fetch_add_8", .linkage = linkage });
-
- const fetch_sub_u8 = fetchFn(u8, .Sub);
- const fetch_sub_u16 = fetchFn(u16, .Sub);
- const fetch_sub_u32 = fetchFn(u32, .Sub);
- const fetch_sub_u64 = fetchFn(u64, .Sub);
- @export(fetch_sub_u8, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage });
- @export(fetch_sub_u16, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage });
- @export(fetch_sub_u32, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage });
- @export(fetch_sub_u64, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage });
-
- const fetch_and_u8 = fetchFn(u8, .And);
- const fetch_and_u16 = fetchFn(u16, .And);
- const fetch_and_u32 = fetchFn(u32, .And);
- const fetch_and_u64 = fetchFn(u64, .And);
- @export(fetch_and_u8, .{ .name = "__atomic_fetch_and_1", .linkage = linkage });
- @export(fetch_and_u16, .{ .name = "__atomic_fetch_and_2", .linkage = linkage });
- @export(fetch_and_u32, .{ .name = "__atomic_fetch_and_4", .linkage = linkage });
- @export(fetch_and_u64, .{ .name = "__atomic_fetch_and_8", .linkage = linkage });
-
- const fetch_or_u8 = fetchFn(u8, .Or);
- const fetch_or_u16 = fetchFn(u16, .Or);
- const fetch_or_u32 = fetchFn(u32, .Or);
- const fetch_or_u64 = fetchFn(u64, .Or);
- @export(fetch_or_u8, .{ .name = "__atomic_fetch_or_1", .linkage = linkage });
- @export(fetch_or_u16, .{ .name = "__atomic_fetch_or_2", .linkage = linkage });
- @export(fetch_or_u32, .{ .name = "__atomic_fetch_or_4", .linkage = linkage });
- @export(fetch_or_u64, .{ .name = "__atomic_fetch_or_8", .linkage = linkage });
-
- const fetch_xor_u8 = fetchFn(u8, .Xor);
- const fetch_xor_u16 = fetchFn(u16, .Xor);
- const fetch_xor_u32 = fetchFn(u32, .Xor);
- const fetch_xor_u64 = fetchFn(u64, .Xor);
- @export(fetch_xor_u8, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage });
- @export(fetch_xor_u16, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage });
- @export(fetch_xor_u32, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage });
- @export(fetch_xor_u64, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage });
-
- const fetch_nand_u8 = fetchFn(u8, .Nand);
- const fetch_nand_u16 = fetchFn(u16, .Nand);
- const fetch_nand_u32 = fetchFn(u32, .Nand);
- const fetch_nand_u64 = fetchFn(u64, .Nand);
- @export(fetch_nand_u8, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage });
- @export(fetch_nand_u16, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage });
- @export(fetch_nand_u32, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage });
- @export(fetch_nand_u64, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage });
+ @export(__atomic_load, .{ .name = "__atomic_load", .linkage = linkage });
+ @export(__atomic_store, .{ .name = "__atomic_store", .linkage = linkage });
+ @export(__atomic_exchange, .{ .name = "__atomic_exchange", .linkage = linkage });
+ @export(__atomic_compare_exchange, .{ .name = "__atomic_compare_exchange", .linkage = linkage });
+
+ @export(__atomic_fetch_add_1, .{ .name = "__atomic_fetch_add_1", .linkage = linkage });
+ @export(__atomic_fetch_add_2, .{ .name = "__atomic_fetch_add_2", .linkage = linkage });
+ @export(__atomic_fetch_add_4, .{ .name = "__atomic_fetch_add_4", .linkage = linkage });
+ @export(__atomic_fetch_add_8, .{ .name = "__atomic_fetch_add_8", .linkage = linkage });
+
+ @export(__atomic_fetch_sub_1, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage });
+ @export(__atomic_fetch_sub_2, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage });
+ @export(__atomic_fetch_sub_4, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage });
+ @export(__atomic_fetch_sub_8, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage });
+
+ @export(__atomic_fetch_and_1, .{ .name = "__atomic_fetch_and_1", .linkage = linkage });
+ @export(__atomic_fetch_and_2, .{ .name = "__atomic_fetch_and_2", .linkage = linkage });
+ @export(__atomic_fetch_and_4, .{ .name = "__atomic_fetch_and_4", .linkage = linkage });
+ @export(__atomic_fetch_and_8, .{ .name = "__atomic_fetch_and_8", .linkage = linkage });
+
+ @export(__atomic_fetch_or_1, .{ .name = "__atomic_fetch_or_1", .linkage = linkage });
+ @export(__atomic_fetch_or_2, .{ .name = "__atomic_fetch_or_2", .linkage = linkage });
+ @export(__atomic_fetch_or_4, .{ .name = "__atomic_fetch_or_4", .linkage = linkage });
+ @export(__atomic_fetch_or_8, .{ .name = "__atomic_fetch_or_8", .linkage = linkage });
+
+ @export(__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage });
+ @export(__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage });
+ @export(__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage });
+ @export(__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage });
+
+ @export(__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage });
+ @export(__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage });
+ @export(__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage });
+ @export(__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage });
+
+ @export(__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage });
+ @export(__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage });
+ @export(__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage });
+ @export(__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage });
+
+ @export(__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage });
+ @export(__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage });
+ @export(__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage });
+ @export(__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage });
+
+ @export(__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage });
+ @export(__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage });
+ @export(__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage });
+ @export(__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage });
+
+ @export(__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage });
+ @export(__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage });
+ @export(__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage });
+ @export(__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage });
}
}
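
Note: the refactor keeps the behaviour of the old generated functions: each `__atomic_*_N` symbol is now a thin `callconv(.C)` wrapper over a comptime-generic inline helper, and the spinlock path is taken only when the object is wider than `largest_atomic_size`. A minimal sketch of the wrapper shape on the lock-free path (these names are illustrative, not exports from the real file):

    const std = @import("std");

    inline fn fetchAdd(comptime T: type, ptr: *T, val: T) T {
        // Lock-free path only; the real helper falls back to a spinlock
        // for objects wider than the largest native atomic.
        return @atomicRmw(T, ptr, .Add, val, .SeqCst);
    }

    fn example_fetch_add_4(ptr: *u32, val: u32) callconv(.C) u32 {
        return fetchAdd(u32, ptr, val);
    }

    test "fetch-add returns the previous value" {
        var x: u32 = 40;
        try std.testing.expectEqual(@as(u32, 40), example_fetch_add_4(&x, 2));
        try std.testing.expectEqual(@as(u32, 42), x);
    }
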
diff --git a/lib/std/special/compiler_rt/bswap.zig b/lib/std/special/compiler_rt/bswap.zig
index 0646f68c1a..f1d2138811 100644
--- a/lib/std/special/compiler_rt/bswap.zig
+++ b/lib/std/special/compiler_rt/bswap.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
// bswap - byteswap
-// - bswapXi2_generic for unoptimized big and little endian
+// - bswapXi2 for unoptimized big and little endian
// ie for u32
// DE AD BE EF <- little|big endian
// EF BE AD DE <- big|little endian
@@ -11,64 +11,64 @@ const builtin = @import("builtin");
// 00 00 ff 00 << 1*8 (2nd right byte)
// 00 00 00 ff << 3*8 (rightmost byte)
-fn bswapXi2_generic(comptime T: type) fn (a: T) callconv(.C) T {
- return struct {
- fn f(a: T) callconv(.C) T {
- @setRuntimeSafety(builtin.is_test);
- switch (@bitSizeOf(T)) {
- 32 => {
- // zig fmt: off
- return (((a & 0xff000000) >> 24)
- | ((a & 0x00ff0000) >> 8 )
- | ((a & 0x0000ff00) << 8 )
- | ((a & 0x000000ff) << 24));
- // zig fmt: on
- },
- 64 => {
- // zig fmt: off
- return (((a & 0xff00000000000000) >> 56)
- | ((a & 0x00ff000000000000) >> 40 )
- | ((a & 0x0000ff0000000000) >> 24 )
- | ((a & 0x000000ff00000000) >> 8 )
- | ((a & 0x00000000ff000000) << 8 )
- | ((a & 0x0000000000ff0000) << 24 )
- | ((a & 0x000000000000ff00) << 40 )
- | ((a & 0x00000000000000ff) << 56));
- // zig fmt: on
- },
- 128 => {
- // zig fmt: off
- return (((a & 0xff000000000000000000000000000000) >> 120)
- | ((a & 0x00ff0000000000000000000000000000) >> 104)
- | ((a & 0x0000ff00000000000000000000000000) >> 88 )
- | ((a & 0x000000ff000000000000000000000000) >> 72 )
- | ((a & 0x00000000ff0000000000000000000000) >> 56 )
- | ((a & 0x0000000000ff00000000000000000000) >> 40 )
- | ((a & 0x000000000000ff000000000000000000) >> 24 )
- | ((a & 0x00000000000000ff0000000000000000) >> 8 )
- | ((a & 0x0000000000000000ff00000000000000) << 8 )
- | ((a & 0x000000000000000000ff000000000000) << 24 )
- | ((a & 0x00000000000000000000ff0000000000) << 40 )
- | ((a & 0x0000000000000000000000ff00000000) << 56 )
- | ((a & 0x000000000000000000000000ff000000) << 72 )
- | ((a & 0x00000000000000000000000000ff0000) << 88 )
- | ((a & 0x0000000000000000000000000000ff00) << 104)
- | ((a & 0x000000000000000000000000000000ff) << 120));
- // zig fmt: on
- },
- else => {
- unreachable;
- },
- }
- }
- }.f;
+inline fn bswapXi2(comptime T: type, a: T) T {
+ @setRuntimeSafety(builtin.is_test);
+ switch (@bitSizeOf(T)) {
+ 32 => {
+ // zig fmt: off
+ return (((a & 0xff000000) >> 24)
+ | ((a & 0x00ff0000) >> 8 )
+ | ((a & 0x0000ff00) << 8 )
+ | ((a & 0x000000ff) << 24));
+ // zig fmt: on
+ },
+ 64 => {
+ // zig fmt: off
+ return (((a & 0xff00000000000000) >> 56)
+ | ((a & 0x00ff000000000000) >> 40 )
+ | ((a & 0x0000ff0000000000) >> 24 )
+ | ((a & 0x000000ff00000000) >> 8 )
+ | ((a & 0x00000000ff000000) << 8 )
+ | ((a & 0x0000000000ff0000) << 24 )
+ | ((a & 0x000000000000ff00) << 40 )
+ | ((a & 0x00000000000000ff) << 56));
+ // zig fmt: on
+ },
+ 128 => {
+ // zig fmt: off
+ return (((a & 0xff000000000000000000000000000000) >> 120)
+ | ((a & 0x00ff0000000000000000000000000000) >> 104)
+ | ((a & 0x0000ff00000000000000000000000000) >> 88 )
+ | ((a & 0x000000ff000000000000000000000000) >> 72 )
+ | ((a & 0x00000000ff0000000000000000000000) >> 56 )
+ | ((a & 0x0000000000ff00000000000000000000) >> 40 )
+ | ((a & 0x000000000000ff000000000000000000) >> 24 )
+ | ((a & 0x00000000000000ff0000000000000000) >> 8 )
+ | ((a & 0x0000000000000000ff00000000000000) << 8 )
+ | ((a & 0x000000000000000000ff000000000000) << 24 )
+ | ((a & 0x00000000000000000000ff0000000000) << 40 )
+ | ((a & 0x0000000000000000000000ff00000000) << 56 )
+ | ((a & 0x000000000000000000000000ff000000) << 72 )
+ | ((a & 0x00000000000000000000000000ff0000) << 88 )
+ | ((a & 0x0000000000000000000000000000ff00) << 104)
+ | ((a & 0x000000000000000000000000000000ff) << 120));
+ // zig fmt: on
+ },
+ else => unreachable,
+ }
}
-pub const __bswapsi2 = bswapXi2_generic(u32);
+pub fn __bswapsi2(a: u32) callconv(.C) u32 {
+ return bswapXi2(u32, a);
+}
-pub const __bswapdi2 = bswapXi2_generic(u64);
+pub fn __bswapdi2(a: u64) callconv(.C) u64 {
+ return bswapXi2(u64, a);
+}
-pub const __bswapti2 = bswapXi2_generic(u128);
+pub fn __bswapti2(a: u128) callconv(.C) u128 {
+ return bswapXi2(u128, a);
+}
test {
_ = @import("bswapsi2_test.zig");
diff --git a/lib/std/special/compiler_rt/cmp.zig b/lib/std/special/compiler_rt/cmp.zig
index 630948ba28..9eb4227527 100644
--- a/lib/std/special/compiler_rt/cmp.zig
+++ b/lib/std/special/compiler_rt/cmp.zig
@@ -11,28 +11,40 @@ const builtin = @import("builtin");
// a == b => 1
// a > b => 2
-fn XcmpXi2_generic(comptime T: type) fn (a: T, b: T) callconv(.C) i32 {
- return struct {
- fn f(a: T, b: T) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- var cmp1: i32 = 0;
- var cmp2: i32 = 0;
- if (a > b)
- cmp1 = 1;
- if (a < b)
- cmp2 = 1;
- return cmp1 - cmp2 + 1;
- }
- }.f;
-}
-
-pub const __cmpsi2 = XcmpXi2_generic(i32);
-pub const __cmpdi2 = XcmpXi2_generic(i64);
-pub const __cmpti2 = XcmpXi2_generic(i128);
-
-pub const __ucmpsi2 = XcmpXi2_generic(u32);
-pub const __ucmpdi2 = XcmpXi2_generic(u64);
-pub const __ucmpti2 = XcmpXi2_generic(u128);
+inline fn XcmpXi2(comptime T: type, a: T, b: T) i32 {
+ @setRuntimeSafety(builtin.is_test);
+ var cmp1: i32 = 0;
+ var cmp2: i32 = 0;
+ if (a > b)
+ cmp1 = 1;
+ if (a < b)
+ cmp2 = 1;
+ return cmp1 - cmp2 + 1;
+}
+
+pub fn __cmpsi2(a: i32, b: i32) callconv(.C) i32 {
+ return XcmpXi2(i32, a, b);
+}
+
+pub fn __cmpdi2(a: i64, b: i64) callconv(.C) i32 {
+ return XcmpXi2(i64, a, b);
+}
+
+pub fn __cmpti2(a: i128, b: i128) callconv(.C) i32 {
+ return XcmpXi2(i128, a, b);
+}
+
+pub fn __ucmpsi2(a: u32, b: u32) callconv(.C) i32 {
+ return XcmpXi2(u32, a, b);
+}
+
+pub fn __ucmpdi2(a: u64, b: u64) callconv(.C) i32 {
+ return XcmpXi2(u64, a, b);
+}
+
+pub fn __ucmpti2(a: u128, b: u128) callconv(.C) i32 {
+ return XcmpXi2(u128, a, b);
+}
test {
_ = @import("cmpsi2_test.zig");
diff --git a/lib/std/special/compiler_rt/count0bits.zig b/lib/std/special/compiler_rt/count0bits.zig
index 29ba800b43..1f6d28ae0b 100644
--- a/lib/std/special/compiler_rt/count0bits.zig
+++ b/lib/std/special/compiler_rt/count0bits.zig
@@ -2,44 +2,40 @@ const std = @import("std");
const builtin = @import("builtin");
// clz - count leading zeroes
-// - clzXi2_generic for unoptimized little and big endian
+// - clzXi2 for unoptimized little and big endian
// - __clzsi2_thumb1: assume a != 0
// - __clzsi2_arm32: assume a != 0
// ctz - count trailing zeroes
-// - ctzXi2_generic for unoptimized little and big endian
+// - ctzXi2 for unoptimized little and big endian
// ffs - find first set
// * ffs = (a == 0) => 0, (a != 0) => ctz + 1
// * don't pay for `if (x == 0) return shift;` inside ctz
-// - ffsXi2_generic for unoptimized little and big endian
-
-fn clzXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
- return struct {
- fn f(a: T) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
- var x = switch (@bitSizeOf(T)) {
- 32 => @bitCast(u32, a),
- 64 => @bitCast(u64, a),
- 128 => @bitCast(u128, a),
- else => unreachable,
- };
- var n: T = @bitSizeOf(T);
- // Count first bit set using binary search, from Hacker's Delight
- var y: @TypeOf(x) = 0;
- comptime var shift: u8 = @bitSizeOf(T);
- inline while (shift > 0) {
- shift = shift >> 1;
- y = x >> shift;
- if (y != 0) {
- n = n - shift;
- x = y;
- }
- }
- return @intCast(i32, n - @bitCast(T, x));
+// - ffsXi2 for unoptimized little and big endian
+
+inline fn clzXi2(comptime T: type, a: T) i32 {
+ @setRuntimeSafety(builtin.is_test);
+
+ var x = switch (@bitSizeOf(T)) {
+ 32 => @bitCast(u32, a),
+ 64 => @bitCast(u64, a),
+ 128 => @bitCast(u128, a),
+ else => unreachable,
+ };
+ var n: T = @bitSizeOf(T);
+ // Count first bit set using binary search, from Hacker's Delight
+ var y: @TypeOf(x) = 0;
+ comptime var shift: u8 = @bitSizeOf(T);
+ inline while (shift > 0) {
+ shift = shift >> 1;
+ y = x >> shift;
+ if (y != 0) {
+ n = n - shift;
+ x = y;
}
- }.f;
+ }
+ return @intCast(i32, n - @bitCast(T, x));
}
fn __clzsi2_thumb1() callconv(.Naked) void {
@@ -125,103 +121,113 @@ fn __clzsi2_arm32() callconv(.Naked) void {
unreachable;
}
-pub const __clzsi2 = impl: {
- switch (builtin.cpu.arch) {
- .arm, .armeb, .thumb, .thumbeb => {
- const use_thumb1 =
- (builtin.cpu.arch.isThumb() or
- std.Target.arm.featureSetHas(builtin.cpu.features, .noarm)) and
- !std.Target.arm.featureSetHas(builtin.cpu.features, .thumb2);
-
- if (use_thumb1) {
- break :impl __clzsi2_thumb1;
- }
- // From here on we're either targeting Thumb2 or ARM.
- else if (!builtin.cpu.arch.isThumb()) {
- break :impl __clzsi2_arm32;
- }
- // Use the generic implementation otherwise.
- else break :impl clzXi2_generic(i32);
- },
- else => break :impl clzXi2_generic(i32),
- }
+fn clzsi2_generic(a: i32) callconv(.C) i32 {
+ return clzXi2(i32, a);
+}
+
+pub const __clzsi2 = switch (builtin.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => impl: {
+ const use_thumb1 =
+ (builtin.cpu.arch.isThumb() or
+ std.Target.arm.featureSetHas(builtin.cpu.features, .noarm)) and
+ !std.Target.arm.featureSetHas(builtin.cpu.features, .thumb2);
+
+ if (use_thumb1) {
+ break :impl __clzsi2_thumb1;
+ }
+ // From here on we're either targeting Thumb2 or ARM.
+ else if (!builtin.cpu.arch.isThumb()) {
+ break :impl __clzsi2_arm32;
+ }
+ // Use the generic implementation otherwise.
+ else break :impl clzsi2_generic;
+ },
+ else => clzsi2_generic,
};
-pub const __clzdi2 = clzXi2_generic(i64);
-
-pub const __clzti2 = clzXi2_generic(i128);
-
-fn ctzXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
- return struct {
- fn f(a: T) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
- var x = switch (@bitSizeOf(T)) {
- 32 => @bitCast(u32, a),
- 64 => @bitCast(u64, a),
- 128 => @bitCast(u128, a),
- else => unreachable,
- };
- var n: T = 1;
- // Number of trailing zeroes as binary search, from Hacker's Delight
- var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
- comptime var shift = @bitSizeOf(T);
- if (x == 0) return shift;
- inline while (shift > 1) {
- shift = shift >> 1;
- mask = mask >> shift;
- if ((x & mask) == 0) {
- n = n + shift;
- x = x >> shift;
- }
- }
- return @intCast(i32, n - @bitCast(T, (x & 1)));
+pub fn __clzdi2(a: i64) callconv(.C) i32 {
+ return clzXi2(i64, a);
+}
+
+pub fn __clzti2(a: i128) callconv(.C) i32 {
+ return clzXi2(i128, a);
+}
+
+inline fn ctzXi2(comptime T: type, a: T) i32 {
+ @setRuntimeSafety(builtin.is_test);
+
+ var x = switch (@bitSizeOf(T)) {
+ 32 => @bitCast(u32, a),
+ 64 => @bitCast(u64, a),
+ 128 => @bitCast(u128, a),
+ else => unreachable,
+ };
+ var n: T = 1;
+ // Number of trailing zeroes as binary search, from Hacker's Delight
+ var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
+ comptime var shift = @bitSizeOf(T);
+ if (x == 0) return shift;
+ inline while (shift > 1) {
+ shift = shift >> 1;
+ mask = mask >> shift;
+ if ((x & mask) == 0) {
+ n = n + shift;
+ x = x >> shift;
}
- }.f;
+ }
+ return @intCast(i32, n - @bitCast(T, (x & 1)));
+}
+
+pub fn __ctzsi2(a: i32) callconv(.C) i32 {
+ return ctzXi2(i32, a);
}
-pub const __ctzsi2 = ctzXi2_generic(i32);
-
-pub const __ctzdi2 = ctzXi2_generic(i64);
-
-pub const __ctzti2 = ctzXi2_generic(i128);
-
-fn ffsXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
- return struct {
- fn f(a: T) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
-
- var x = switch (@bitSizeOf(T)) {
- 32 => @bitCast(u32, a),
- 64 => @bitCast(u64, a),
- 128 => @bitCast(u128, a),
- else => unreachable,
- };
- var n: T = 1;
- // adapted from Number of trailing zeroes (see ctzXi2_generic)
- var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
- comptime var shift = @bitSizeOf(T);
- // In contrast to ctz return 0
- if (x == 0) return 0;
- inline while (shift > 1) {
- shift = shift >> 1;
- mask = mask >> shift;
- if ((x & mask) == 0) {
- n = n + shift;
- x = x >> shift;
- }
- }
- // return ctz + 1
- return @intCast(i32, n - @bitCast(T, (x & 1))) + @as(i32, 1);
+pub fn __ctzdi2(a: i64) callconv(.C) i32 {
+ return ctzXi2(i64, a);
+}
+
+pub fn __ctzti2(a: i128) callconv(.C) i32 {
+ return ctzXi2(i128, a);
+}
+
+inline fn ffsXi2(comptime T: type, a: T) i32 {
+ @setRuntimeSafety(builtin.is_test);
+
+ var x = switch (@bitSizeOf(T)) {
+ 32 => @bitCast(u32, a),
+ 64 => @bitCast(u64, a),
+ 128 => @bitCast(u128, a),
+ else => unreachable,
+ };
+ var n: T = 1;
+ // adapted from Number of trailing zeroes (see ctzXi2)
+ var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
+ comptime var shift = @bitSizeOf(T);
+ // In contrast to ctz return 0
+ if (x == 0) return 0;
+ inline while (shift > 1) {
+ shift = shift >> 1;
+ mask = mask >> shift;
+ if ((x & mask) == 0) {
+ n = n + shift;
+ x = x >> shift;
}
- }.f;
+ }
+ // return ctz + 1
+ return @intCast(i32, n - @bitCast(T, (x & 1))) + @as(i32, 1);
}
-pub const __ffssi2 = ffsXi2_generic(i32);
+pub fn __ffssi2(a: i32) callconv(.C) i32 {
+ return ffsXi2(i32, a);
+}
-pub const __ffsdi2 = ffsXi2_generic(i64);
+pub fn __ffsdi2(a: i64) callconv(.C) i32 {
+ return ffsXi2(i64, a);
+}
-pub const __ffsti2 = ffsXi2_generic(i128);
+pub fn __ffsti2(a: i128) callconv(.C) i32 {
+ return ffsXi2(i128, a);
+}
test {
_ = @import("clzsi2_test.zig");
diff --git a/lib/std/special/compiler_rt/divdf3.zig b/lib/std/special/compiler_rt/divdf3.zig
index ebb139f324..2148902de2 100644
--- a/lib/std/special/compiler_rt/divdf3.zig
+++ b/lib/std/special/compiler_rt/divdf3.zig
@@ -35,7 +35,7 @@ pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
- if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
+ if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;
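
Note: this change only touches the comptime-known constant; the wrapping `-%` stays on the runtime exponent, where it is what lets a single unsigned comparison classify both extremes: exponent 0 (zero/denormal) wraps around to the integer maximum, and exponent `maxExponent` (infinity/NaN) lands at `maxExponent - 1`. The same check appears in divsf3.zig, divtf3.zig, and mulXf3.zig below. A small check using f32-sized numbers (this hunk is the f64 variant, so the 8-bit field width here is only illustrative):

    const std = @import("std");

    test "exponent -% 1 >= maxExponent - 1 catches both extremes" {
        const maxExponent: u32 = 0xff; // 8-bit biased exponent field of f32
        const special = [_]u32{ 0x00, 0xff }; // zero/denormal and inf/NaN
        const normal = [_]u32{ 0x01, 0x7f, 0xfe };
        for (special) |e| try std.testing.expect(e -% 1 >= maxExponent - 1);
        for (normal) |e| try std.testing.expect(e -% 1 < maxExponent - 1);
    }
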
diff --git a/lib/std/special/compiler_rt/divsf3.zig b/lib/std/special/compiler_rt/divsf3.zig
index af4cfaaa86..5e7dc7bb44 100644
--- a/lib/std/special/compiler_rt/divsf3.zig
+++ b/lib/std/special/compiler_rt/divsf3.zig
@@ -34,7 +34,7 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
- if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
+ if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;
diff --git a/lib/std/special/compiler_rt/divtf3.zig b/lib/std/special/compiler_rt/divtf3.zig
index 4dce86087d..fc26c60266 100644
--- a/lib/std/special/compiler_rt/divtf3.zig
+++ b/lib/std/special/compiler_rt/divtf3.zig
@@ -33,7 +33,7 @@ pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
- if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
+ if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;
diff --git a/lib/std/special/compiler_rt/fixuint.zig b/lib/std/special/compiler_rt/fixuint.zig
index c51b80fbdb..6bfbcf6d65 100644
--- a/lib/std/special/compiler_rt/fixuint.zig
+++ b/lib/std/special/compiler_rt/fixuint.zig
@@ -1,7 +1,7 @@
const is_test = @import("builtin").is_test;
const Log2Int = @import("std").math.Log2Int;
-pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t {
+pub inline fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t {
@setRuntimeSafety(is_test);
const rep_t = switch (fp_t) {
diff --git a/lib/std/special/compiler_rt/floatXisf.zig b/lib/std/special/compiler_rt/floatXisf.zig
index 9fc4c71100..de3f4495cb 100644
--- a/lib/std/special/compiler_rt/floatXisf.zig
+++ b/lib/std/special/compiler_rt/floatXisf.zig
@@ -4,7 +4,7 @@ const maxInt = std.math.maxInt;
const FLT_MANT_DIG = 24;
-fn __floatXisf(comptime T: type, arg: T) f32 {
+inline fn floatXisf(comptime T: type, arg: T) f32 {
@setRuntimeSafety(builtin.is_test);
const bits = @typeInfo(T).Int.bits;
@@ -71,18 +71,15 @@ fn __floatXisf(comptime T: type, arg: T) f32 {
}
pub fn __floatdisf(arg: i64) callconv(.C) f32 {
- @setRuntimeSafety(builtin.is_test);
- return @call(.{ .modifier = .always_inline }, __floatXisf, .{ i64, arg });
+ return floatXisf(i64, arg);
}
pub fn __floattisf(arg: i128) callconv(.C) f32 {
- @setRuntimeSafety(builtin.is_test);
- return @call(.{ .modifier = .always_inline }, __floatXisf, .{ i128, arg });
+ return floatXisf(i128, arg);
}
pub fn __aeabi_l2f(arg: i64) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __floatdisf, .{arg});
+ return floatXisf(i64, arg);
}
test {
diff --git a/lib/std/special/compiler_rt/floatsiXf.zig b/lib/std/special/compiler_rt/floatsiXf.zig
index 23d5cb1e3c..ef551d1911 100644
--- a/lib/std/special/compiler_rt/floatsiXf.zig
+++ b/lib/std/special/compiler_rt/floatsiXf.zig
@@ -2,7 +2,7 @@ const builtin = @import("builtin");
const std = @import("std");
const maxInt = std.math.maxInt;
-fn floatsiXf(comptime T: type, a: i32) T {
+inline fn floatsiXf(comptime T: type, a: i32) T {
@setRuntimeSafety(builtin.is_test);
const bits = @typeInfo(T).Float.bits;
@@ -56,27 +56,27 @@ fn floatsiXf(comptime T: type, a: i32) T {
pub fn __floatsisf(arg: i32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
- return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f32, arg });
+ return floatsiXf(f32, arg);
}
pub fn __floatsidf(arg: i32) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
- return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f64, arg });
+ return floatsiXf(f64, arg);
}
pub fn __floatsitf(arg: i32) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
- return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f128, arg });
+ return floatsiXf(f128, arg);
}
pub fn __aeabi_i2d(arg: i32) callconv(.AAPCS) f64 {
@setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __floatsidf, .{arg});
+ return floatsiXf(f64, arg);
}
pub fn __aeabi_i2f(arg: i32) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __floatsisf, .{arg});
+ return floatsiXf(f32, arg);
}
fn test_one_floatsitf(a: i32, expected: u128) !void {
diff --git a/lib/std/special/compiler_rt/floatundisf.zig b/lib/std/special/compiler_rt/floatundisf.zig
index aca30ee309..ffbe3ef252 100644
--- a/lib/std/special/compiler_rt/floatundisf.zig
+++ b/lib/std/special/compiler_rt/floatundisf.zig
@@ -4,7 +4,7 @@ const maxInt = std.math.maxInt;
const FLT_MANT_DIG = 24;
-pub fn __floatundisf(arg: u64) callconv(.C) f32 {
+inline fn floatundisf(arg: u64) f32 {
@setRuntimeSafety(builtin.is_test);
if (arg == 0) return 0;
@@ -56,9 +56,12 @@ pub fn __floatundisf(arg: u64) callconv(.C) f32 {
return @bitCast(f32, result);
}
+pub fn __floatundisf(arg: u64) callconv(.C) f32 {
+ return floatundisf(arg);
+}
+
pub fn __aeabi_ul2f(arg: u64) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __floatundisf, .{arg});
+ return floatundisf(arg);
}
fn test__floatundisf(a: u64, expected: f32) !void {
diff --git a/lib/std/special/compiler_rt/floatunsidf.zig b/lib/std/special/compiler_rt/floatunsidf.zig
index 555d4f5657..f474c1de8f 100644
--- a/lib/std/special/compiler_rt/floatunsidf.zig
+++ b/lib/std/special/compiler_rt/floatunsidf.zig
@@ -4,7 +4,7 @@ const maxInt = std.math.maxInt;
const implicitBit = @as(u64, 1) << 52;
-pub fn __floatunsidf(arg: u32) callconv(.C) f64 {
+inline fn floatunsidf(arg: u32) f64 {
@setRuntimeSafety(builtin.is_test);
if (arg == 0) return 0.0;
@@ -18,9 +18,12 @@ pub fn __floatunsidf(arg: u32) callconv(.C) f64 {
return @bitCast(f64, mant | (exp + 1023) << 52);
}
+pub fn __floatunsidf(arg: u32) callconv(.C) f64 {
+ return floatunsidf(arg);
+}
+
pub fn __aeabi_ui2d(arg: u32) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __floatunsidf, .{arg});
+ return floatunsidf(arg);
}
fn test_one_floatunsidf(a: u32, expected: u64) !void {
diff --git a/lib/std/special/compiler_rt/floatunsisf.zig b/lib/std/special/compiler_rt/floatunsisf.zig
index c8a654ff7a..d267baee01 100644
--- a/lib/std/special/compiler_rt/floatunsisf.zig
+++ b/lib/std/special/compiler_rt/floatunsisf.zig
@@ -6,7 +6,7 @@ const significandBits = 23;
const exponentBias = 127;
const implicitBit = @as(u32, 1) << significandBits;
-pub fn __floatunsisf(arg: u32) callconv(.C) f32 {
+inline fn floatunsisf(arg: u32) f32 {
@setRuntimeSafety(builtin.is_test);
if (arg == 0) return 0.0;
@@ -38,9 +38,12 @@ pub fn __floatunsisf(arg: u32) callconv(.C) f32 {
return @bitCast(f32, result);
}
+pub fn __floatunsisf(arg: u32) callconv(.C) f32 {
+ return floatunsisf(arg);
+}
+
pub fn __aeabi_ui2f(arg: u32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __floatunsisf, .{arg});
+ return floatunsisf(arg);
}
fn test_one_floatunsisf(a: u32, expected: u32) !void {
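A standalone sketch (not taken from this patch; the rounded case assumes round-to-nearest-even): f32 keeps only 24 significand bits, so large u32 inputs round while small ones convert exactly:

const testing = @import("std").testing;

test "__floatunsisf (sketch)" {
    // Exact: 2^16 fits in 24 bits.
    try testing.expectEqual(@as(f32, 65536.0), __floatunsisf(1 << 16));
    // Rounded: 2^32 - 1 is nearest to 2^32.
    try testing.expectEqual(@as(f32, 4294967296.0), __floatunsisf(0xFFFF_FFFF));
}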
diff --git a/lib/std/special/compiler_rt/mulXf3.zig b/lib/std/special/compiler_rt/mulXf3.zig
index 1e9171c8cb..48c31f47b1 100644
--- a/lib/std/special/compiler_rt/mulXf3.zig
+++ b/lib/std/special/compiler_rt/mulXf3.zig
@@ -56,7 +56,7 @@ fn mulXf3(comptime T: type, a: T, b: T) T {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
- if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
+ if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;
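The change above keeps the wrapping `-%` only on the runtime exponent fields, which may legitimately be zero, while maxExponent is a comptime constant that never wraps. The predicate works because subtracting 1 with wraparound sends exponent 0 past every normal value and leaves maxExponent exactly on the threshold. A standalone check for the f32 field width (a sketch, not taken from this patch):

const testing = @import("std").testing;

test "zero/denormal/inf/NaN exponent predicate (sketch)" {
    const maxExponent: u32 = 0xFF; // all-ones biased exponent for f32
    try testing.expect(@as(u32, 0) -% 1 >= maxExponent - 1); // zero or denormal
    try testing.expect(maxExponent -% 1 >= maxExponent - 1); // inf or NaN
    try testing.expect(!(@as(u32, 1) -% 1 >= maxExponent - 1)); // smallest normal
    try testing.expect(!(@as(u32, 0xFE) -% 1 >= maxExponent - 1)); // largest normal
}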
diff --git a/lib/std/special/compiler_rt/negXi2.zig b/lib/std/special/compiler_rt/negXi2.zig
index de89c8deb7..15102b5df7 100644
--- a/lib/std/special/compiler_rt/negXi2.zig
+++ b/lib/std/special/compiler_rt/negXi2.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
// neg - negate (the number)
-// - negXi2_generic for unoptimized little and big endian
+// - negXi2 for unoptimized little and big endian
// sfffffff = 2^31-1
// two's complement inverting bits and add 1 would result in -INT_MIN == 0
@@ -11,20 +11,22 @@ const builtin = @import("builtin");
// * size optimized builds
// * machines that don't support carry operations
-fn negXi2_generic(comptime T: type) fn (a: T) callconv(.C) T {
- return struct {
- fn f(a: T) callconv(.C) T {
- @setRuntimeSafety(builtin.is_test);
- return -a;
- }
- }.f;
+inline fn negXi2(comptime T: type, a: T) T {
+ @setRuntimeSafety(builtin.is_test);
+ return -a;
}
-pub const __negsi2 = negXi2_generic(i32);
+pub fn __negsi2(a: i32) callconv(.C) i32 {
+ return negXi2(i32, a);
+}
-pub const __negdi2 = negXi2_generic(i64);
+pub fn __negdi2(a: i64) callconv(.C) i64 {
+ return negXi2(i64, a);
+}
-pub const __negti2 = negXi2_generic(i128);
+pub fn __negti2(a: i128) callconv(.C) i128 {
+ return negXi2(i128, a);
+}
test {
_ = @import("negsi2_test.zig");
diff --git a/lib/std/special/compiler_rt/negv.zig b/lib/std/special/compiler_rt/negv.zig
index 99525a6e5b..09abb040d5 100644
--- a/lib/std/special/compiler_rt/negv.zig
+++ b/lib/std/special/compiler_rt/negv.zig
@@ -3,26 +3,31 @@
// - negvXi_generic for unoptimized version
// assume -0 == 0 is gracefully handled by the hardware
-fn negvXi_generic(comptime ST: type) fn (a: ST) callconv(.C) ST {
- return struct {
- fn f(a: ST) callconv(.C) ST {
- const UT = switch (ST) {
- i32 => u32,
- i64 => u64,
- i128 => u128,
- else => unreachable,
- };
- const N: UT = @bitSizeOf(ST);
- const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
- if (a == min)
- @panic("compiler_rt negv: overflow");
- return -a;
- }
- }.f;
+inline fn negvXi(comptime ST: type, a: ST) ST {
+ const UT = switch (ST) {
+ i32 => u32,
+ i64 => u64,
+ i128 => u128,
+ else => unreachable,
+ };
+ const N: UT = @bitSizeOf(ST);
+ const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
+ if (a == min)
+ @panic("compiler_rt negv: overflow");
+ return -a;
+}
+
+pub fn __negvsi2(a: i32) callconv(.C) i32 {
+ return negvXi(i32, a);
+}
+
+pub fn __negvdi2(a: i64) callconv(.C) i64 {
+ return negvXi(i64, a);
+}
+
+pub fn __negvti2(a: i128) callconv(.C) i128 {
+ return negvXi(i128, a);
}
-pub const __negvsi2 = negvXi_generic(i32);
-pub const __negvdi2 = negvXi_generic(i64);
-pub const __negvti2 = negvXi_generic(i128);
test {
_ = @import("negvsi2_test.zig");
diff --git a/lib/std/special/compiler_rt/parity.zig b/lib/std/special/compiler_rt/parity.zig
index 1c47aa3c73..ae634b0790 100644
--- a/lib/std/special/compiler_rt/parity.zig
+++ b/lib/std/special/compiler_rt/parity.zig
@@ -4,34 +4,36 @@ const builtin = @import("builtin");
// parity - if number of bits set is even => 0, else => 1
// - parityXi2_generic for big and little endian
-fn parityXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
- return struct {
- fn f(a: T) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
+inline fn parityXi2(comptime T: type, a: T) i32 {
+ @setRuntimeSafety(builtin.is_test);
- var x = switch (@bitSizeOf(T)) {
- 32 => @bitCast(u32, a),
- 64 => @bitCast(u64, a),
- 128 => @bitCast(u128, a),
- else => unreachable,
- };
- // Bit Twiddling Hacks: Compute parity in parallel
- comptime var shift: u8 = @bitSizeOf(T) / 2;
- inline while (shift > 2) {
- x ^= x >> shift;
- shift = shift >> 1;
- }
- x &= 0xf;
- return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1
- }
- }.f;
+ var x = switch (@bitSizeOf(T)) {
+ 32 => @bitCast(u32, a),
+ 64 => @bitCast(u64, a),
+ 128 => @bitCast(u128, a),
+ else => unreachable,
+ };
+ // Bit Twiddling Hacks: Compute parity in parallel
+ comptime var shift: u8 = @bitSizeOf(T) / 2;
+ inline while (shift > 2) {
+ x ^= x >> shift;
+ shift = shift >> 1;
+ }
+ x &= 0xf;
+ return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1
}
-pub const __paritysi2 = parityXi2_generic(i32);
+pub fn __paritysi2(a: i32) callconv(.C) i32 {
+ return parityXi2(i32, a);
+}
-pub const __paritydi2 = parityXi2_generic(i64);
+pub fn __paritydi2(a: i64) callconv(.C) i32 {
+ return parityXi2(i64, a);
+}
-pub const __parityti2 = parityXi2_generic(i128);
+pub fn __parityti2(a: i128) callconv(.C) i32 {
+ return parityXi2(i128, a);
+}
test {
_ = @import("paritysi2_test.zig");
diff --git a/lib/std/special/compiler_rt/popcount.zig b/lib/std/special/compiler_rt/popcount.zig
index 72513895db..362b232fb8 100644
--- a/lib/std/special/compiler_rt/popcount.zig
+++ b/lib/std/special/compiler_rt/popcount.zig
@@ -10,35 +10,37 @@ const std = @import("std");
// TAOCP: Combinatorial Algorithms, Bitwise Tricks And Techniques,
// subsubsection "Working with the rightmost bits" and "Sideways addition".
-fn popcountXi2_generic(comptime ST: type) fn (a: ST) callconv(.C) i32 {
- return struct {
- fn f(a: ST) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const UT = switch (ST) {
- i32 => u32,
- i64 => u64,
- i128 => u128,
- else => unreachable,
- };
- var x = @bitCast(UT, a);
- x -= (x >> 1) & (~@as(UT, 0) / 3); // 0x55...55, aggregate duos
- x = ((x >> 2) & (~@as(UT, 0) / 5)) // 0x33...33, aggregate nibbles
- + (x & (~@as(UT, 0) / 5));
- x += x >> 4;
- x &= ~@as(UT, 0) / 17; // 0x0F...0F, aggregate bytes
- // 8 most significant bits of x + (x<<8) + (x<<16) + ..
- x *%= ~@as(UT, 0) / 255; // 0x01...01
- x >>= (@bitSizeOf(ST) - 8);
- return @intCast(i32, x);
- }
- }.f;
+inline fn popcountXi2(comptime ST: type, a: ST) i32 {
+ @setRuntimeSafety(builtin.is_test);
+ const UT = switch (ST) {
+ i32 => u32,
+ i64 => u64,
+ i128 => u128,
+ else => unreachable,
+ };
+ var x = @bitCast(UT, a);
+ x -= (x >> 1) & (~@as(UT, 0) / 3); // 0x55...55, aggregate duos
+ x = ((x >> 2) & (~@as(UT, 0) / 5)) // 0x33...33, aggregate nibbles
+ + (x & (~@as(UT, 0) / 5));
+ x += x >> 4;
+ x &= ~@as(UT, 0) / 17; // 0x0F...0F, aggregate bytes
+ // 8 most significant bits of x + (x<<8) + (x<<16) + ..
+ x *%= ~@as(UT, 0) / 255; // 0x01...01
+ x >>= (@bitSizeOf(ST) - 8);
+ return @intCast(i32, x);
}
-pub const __popcountsi2 = popcountXi2_generic(i32);
+pub fn __popcountsi2(a: i32) callconv(.C) i32 {
+ return popcountXi2(i32, a);
+}
-pub const __popcountdi2 = popcountXi2_generic(i64);
+pub fn __popcountdi2(a: i64) callconv(.C) i32 {
+ return popcountXi2(i64, a);
+}
-pub const __popcountti2 = popcountXi2_generic(i128);
+pub fn __popcountti2(a: i128) callconv(.C) i32 {
+ return popcountXi2(i128, a);
+}
test {
_ = @import("popcountsi2_test.zig");
diff --git a/lib/std/special/compiler_rt/shift.zig b/lib/std/special/compiler_rt/shift.zig
index b20516e46c..edcf246daf 100644
--- a/lib/std/special/compiler_rt/shift.zig
+++ b/lib/std/special/compiler_rt/shift.zig
@@ -19,7 +19,7 @@ fn Dwords(comptime T: type, comptime signed_half: bool) type {
// Arithmetic shift left
// Precondition: 0 <= b < bits_in_dword
-pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
+pub inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@@ -42,7 +42,7 @@ pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
// Arithmetic shift right
// Precondition: 0 <= b < T.bit_count
-pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
+pub inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, true);
const S = Log2Int(dwords.HalfT);
@@ -69,7 +69,7 @@ pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
// Logical shift right
// Precondition: 0 <= b < T.bit_count
-pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
+pub inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@@ -91,32 +91,32 @@ pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
}
pub fn __ashldi3(a: i64, b: i32) callconv(.C) i64 {
- return @call(.{ .modifier = .always_inline }, ashlXi3, .{ i64, a, b });
+ return ashlXi3(i64, a, b);
}
pub fn __ashlti3(a: i128, b: i32) callconv(.C) i128 {
- return @call(.{ .modifier = .always_inline }, ashlXi3, .{ i128, a, b });
+ return ashlXi3(i128, a, b);
}
pub fn __ashrdi3(a: i64, b: i32) callconv(.C) i64 {
- return @call(.{ .modifier = .always_inline }, ashrXi3, .{ i64, a, b });
+ return ashrXi3(i64, a, b);
}
pub fn __ashrti3(a: i128, b: i32) callconv(.C) i128 {
- return @call(.{ .modifier = .always_inline }, ashrXi3, .{ i128, a, b });
+ return ashrXi3(i128, a, b);
}
pub fn __lshrdi3(a: i64, b: i32) callconv(.C) i64 {
- return @call(.{ .modifier = .always_inline }, lshrXi3, .{ i64, a, b });
+ return lshrXi3(i64, a, b);
}
pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
- return @call(.{ .modifier = .always_inline }, lshrXi3, .{ i128, a, b });
+ return lshrXi3(i128, a, b);
}
pub fn __aeabi_llsl(a: i64, b: i32) callconv(.AAPCS) i64 {
- return __ashldi3(a, b);
+ return ashlXi3(i64, a, b);
}
pub fn __aeabi_lasr(a: i64, b: i32) callconv(.AAPCS) i64 {
- return __ashrdi3(a, b);
+ return ashrXi3(i64, a, b);
}
pub fn __aeabi_llsr(a: i64, b: i32) callconv(.AAPCS) i64 {
- return __lshrdi3(a, b);
+ return lshrXi3(i64, a, b);
}
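A standalone usage sketch (not taken from this patch) of the double-word shift helpers; shifting by 32 or more moves bits wholesale between the two 32-bit halves:

const testing = @import("std").testing;

test "shift wrappers (sketch)" {
    try testing.expectEqual(@as(i64, 1) << 40, __ashldi3(1, 40));
    try testing.expectEqual(@as(i64, -1), __ashrdi3(-1, 63)); // arithmetic: sign is kept
    try testing.expectEqual(@as(i64, 1), __lshrdi3(@bitCast(i64, @as(u64, 1) << 63), 63));
}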
test {