aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build.zig19
-rw-r--r--deps/SoftFloat-3e-prebuilt/platform.h16
-rw-r--r--lib/compiler_rt.zig24
-rw-r--r--lib/compiler_rt/addhf3.zig12
-rw-r--r--lib/compiler_rt/cmphf2.zig50
-rw-r--r--lib/compiler_rt/divhf3.zig11
-rw-r--r--lib/compiler_rt/extendhfdf2.zig12
-rw-r--r--lib/compiler_rt/extendhfsf2.zig9
-rw-r--r--lib/compiler_rt/gehf2.zig31
-rw-r--r--lib/compiler_rt/mulf3.zig5
-rw-r--r--lib/compiler_rt/mulhf3.zig12
-rw-r--r--lib/compiler_rt/neghf2.zig11
-rw-r--r--lib/compiler_rt/subhf3.zig12
-rw-r--r--lib/compiler_rt/tan.zig6
-rw-r--r--lib/compiler_rt/truncsfhf2.zig9
-rw-r--r--lib/compiler_rt/unordhf2.zig12
-rw-r--r--lib/compiler_rt/unordxf2.zig12
-rw-r--r--lib/std/Progress.zig282
-rw-r--r--lib/std/atomic/Atomic.zig4
-rw-r--r--lib/std/c/darwin.zig4
-rw-r--r--lib/std/event/loop.zig25
-rw-r--r--lib/std/fs.zig10
-rw-r--r--lib/std/fs/test.zig16
-rw-r--r--lib/std/math.zig13
-rw-r--r--lib/std/math/big/int.zig29
-rw-r--r--lib/std/math/big/int_test.zig13
-rw-r--r--lib/std/os.zig7
-rw-r--r--lib/std/os/linux.zig2
-rw-r--r--lib/std/os/linux/io_uring.zig6
-rw-r--r--lib/std/os/posix_spawn.zig16
-rw-r--r--lib/std/os/windows.zig5
-rw-r--r--lib/std/segmented_list.zig35
-rw-r--r--lib/std/target.zig4
-rw-r--r--src/Sema.zig66
-rw-r--r--src/clang_options_data.zig23
-rw-r--r--src/codegen/llvm.zig414
-rw-r--r--src/main.zig4
-rw-r--r--src/stage1/analyze.cpp6
-rw-r--r--src/stage1/codegen.cpp178
-rw-r--r--src/stage1/config.h.in2
-rw-r--r--src/stage1/softfloat.hpp14
-rw-r--r--src/stage1/target.cpp6
-rw-r--r--src/type.zig54
-rw-r--r--src/value.zig17
-rw-r--r--test/behavior.zig5
-rw-r--r--test/behavior/align.zig2
-rw-r--r--test/behavior/bugs/11816.zig1
-rw-r--r--test/behavior/bugs/12723.zig11
-rw-r--r--test/behavior/bugs/12801-1.zig1
-rw-r--r--test/behavior/bugs/12801-2.zig1
-rw-r--r--test/behavior/bugs/12891.zig20
-rw-r--r--test/behavior/bugs/12972.zig17
-rw-r--r--test/behavior/enum.zig7
-rw-r--r--test/behavior/eval.zig2
-rw-r--r--test/behavior/muladd.zig11
-rw-r--r--test/behavior/packed-struct.zig1
-rw-r--r--test/behavior/vector.zig10
-rw-r--r--test/standalone.zig4
-rw-r--r--test/standalone/emit_asm_and_bin/build.zig11
-rw-r--r--test/standalone/emit_asm_and_bin/main.zig1
-rw-r--r--test/tests.zig24
-rw-r--r--tools/update_clang_options.zig10
62 files changed, 1229 insertions, 428 deletions
diff --git a/build.zig b/build.zig
index 3c106bd314..aec623f642 100644
--- a/build.zig
+++ b/build.zig
@@ -339,7 +339,10 @@ pub fn build(b: *Builder) !void {
// That means we also have to rely on stage1 compiled c++ files. We parse config.h to find
// the information passed on to us from cmake.
if (cfg.cmake_prefix_path.len > 0) {
- b.addSearchPrefix(cfg.cmake_prefix_path);
+ var it = mem.tokenize(u8, cfg.cmake_prefix_path, ";");
+ while (it.next()) |path| {
+ b.addSearchPrefix(path);
+ }
}
try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
@@ -554,6 +557,8 @@ fn addCmakeCfgOptionsToExe(
}) catch unreachable);
assert(cfg.lld_include_dir.len != 0);
exe.addIncludePath(cfg.lld_include_dir);
+ exe.addIncludePath(cfg.llvm_include_dir);
+ exe.addLibraryPath(cfg.llvm_lib_dir);
addCMakeLibraryList(exe, cfg.clang_libraries);
addCMakeLibraryList(exe, cfg.lld_libraries);
addCMakeLibraryList(exe, cfg.llvm_libraries);
@@ -684,6 +689,8 @@ const CMakeConfig = struct {
lld_include_dir: []const u8,
lld_libraries: []const u8,
clang_libraries: []const u8,
+ llvm_lib_dir: []const u8,
+ llvm_include_dir: []const u8,
llvm_libraries: []const u8,
dia_guids_lib: []const u8,
};
@@ -745,6 +752,8 @@ fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
.lld_include_dir = undefined,
.lld_libraries = undefined,
.clang_libraries = undefined,
+ .llvm_lib_dir = undefined,
+ .llvm_include_dir = undefined,
.llvm_libraries = undefined,
.dia_guids_lib = undefined,
};
@@ -782,6 +791,14 @@ fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
.prefix = "#define ZIG_DIA_GUIDS_LIB ",
.field = "dia_guids_lib",
},
+ .{
+ .prefix = "#define ZIG_LLVM_INCLUDE_PATH ",
+ .field = "llvm_include_dir",
+ },
+ .{
+ .prefix = "#define ZIG_LLVM_LIB_PATH ",
+ .field = "llvm_lib_dir",
+ },
// .prefix = ZIG_LLVM_LINK_MODE parsed manually below
};
diff --git a/deps/SoftFloat-3e-prebuilt/platform.h b/deps/SoftFloat-3e-prebuilt/platform.h
index 588c548c60..2c4a0ec88e 100644
--- a/deps/SoftFloat-3e-prebuilt/platform.h
+++ b/deps/SoftFloat-3e-prebuilt/platform.h
@@ -3,6 +3,10 @@
#if defined(__BIG_ENDIAN__)
#define BIGENDIAN 1
+#elif defined(_BIG_ENDIAN) && (_BIG_ENDIAN == 1)
+#define BIGENDIAN 1
+#elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#define BIGENDIAN 1
#elif defined(__ARMEB__)
#define BIGENDIAN 1
#elif defined(__THUMBEB__)
@@ -15,18 +19,12 @@
#define BIGENDIAN 1
#elif defined(__MIPSEB__)
#define BIGENDIAN 1
-#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define BIGENDIAN 1
#elif defined(__sparc)
#define BIGENDIAN 1
#elif defined(__sparc__)
#define BIGENDIAN 1
#elif defined(_POWER)
#define BIGENDIAN 1
-#elif defined(__powerpc__)
-#define BIGENDIAN 1
-#elif defined(__ppc__)
-#define BIGENDIAN 1
#elif defined(__hpux)
#define BIGENDIAN 1
#elif defined(__hppa)
@@ -39,6 +37,10 @@
#if defined(__LITTLE_ENDIAN__)
#define LITTLEENDIAN 1
+#elif defined(_LITTLE_ENDIAN) && (_LITTLE_ENDIAN == 1)
+#define LITTLEENDIAN 1
+#elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define LITTLEENDIAN 1
#elif defined(__ARMEL__)
#define LITTLEENDIAN 1
#elif defined(__THUMBEL__)
@@ -51,8 +53,6 @@
#define LITTLEENDIAN 1
#elif defined(__MIPSEL__)
#define LITTLEENDIAN 1
-#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define LITTLEENDIAN 1
#elif defined(__i386__)
#define LITTLEENDIAN 1
#elif defined(__alpha__)
diff --git a/lib/compiler_rt.zig b/lib/compiler_rt.zig
index d261c49ff1..1597b7e135 100644
--- a/lib/compiler_rt.zig
+++ b/lib/compiler_rt.zig
@@ -4,17 +4,20 @@ comptime {
_ = @import("compiler_rt/atomics.zig");
_ = @import("compiler_rt/addf3.zig");
+ _ = @import("compiler_rt/addhf3.zig");
_ = @import("compiler_rt/addsf3.zig");
_ = @import("compiler_rt/adddf3.zig");
_ = @import("compiler_rt/addtf3.zig");
_ = @import("compiler_rt/addxf3.zig");
+ _ = @import("compiler_rt/subhf3.zig");
_ = @import("compiler_rt/subsf3.zig");
_ = @import("compiler_rt/subdf3.zig");
_ = @import("compiler_rt/subtf3.zig");
_ = @import("compiler_rt/subxf3.zig");
_ = @import("compiler_rt/mulf3.zig");
+ _ = @import("compiler_rt/mulhf3.zig");
_ = @import("compiler_rt/mulsf3.zig");
_ = @import("compiler_rt/muldf3.zig");
_ = @import("compiler_rt/multf3.zig");
@@ -34,51 +37,58 @@ comptime {
_ = @import("compiler_rt/divxc3.zig");
_ = @import("compiler_rt/divtc3.zig");
+ _ = @import("compiler_rt/neghf2.zig");
_ = @import("compiler_rt/negsf2.zig");
_ = @import("compiler_rt/negdf2.zig");
_ = @import("compiler_rt/negtf2.zig");
_ = @import("compiler_rt/negxf2.zig");
_ = @import("compiler_rt/comparef.zig");
+ _ = @import("compiler_rt/cmphf2.zig");
_ = @import("compiler_rt/cmpsf2.zig");
_ = @import("compiler_rt/cmpdf2.zig");
_ = @import("compiler_rt/cmptf2.zig");
_ = @import("compiler_rt/cmpxf2.zig");
+ _ = @import("compiler_rt/gehf2.zig");
_ = @import("compiler_rt/gesf2.zig");
_ = @import("compiler_rt/gedf2.zig");
- _ = @import("compiler_rt/getf2.zig");
_ = @import("compiler_rt/gexf2.zig");
+ _ = @import("compiler_rt/getf2.zig");
+ _ = @import("compiler_rt/unordhf2.zig");
_ = @import("compiler_rt/unordsf2.zig");
_ = @import("compiler_rt/unorddf2.zig");
+ _ = @import("compiler_rt/unordxf2.zig");
_ = @import("compiler_rt/unordtf2.zig");
_ = @import("compiler_rt/extendf.zig");
- _ = @import("compiler_rt/extenddftf2.zig");
- _ = @import("compiler_rt/extenddfxf2.zig");
_ = @import("compiler_rt/extendhfsf2.zig");
+ _ = @import("compiler_rt/extendhfdf2.zig");
_ = @import("compiler_rt/extendhftf2.zig");
_ = @import("compiler_rt/extendhfxf2.zig");
_ = @import("compiler_rt/extendsfdf2.zig");
_ = @import("compiler_rt/extendsftf2.zig");
_ = @import("compiler_rt/extendsfxf2.zig");
+ _ = @import("compiler_rt/extenddftf2.zig");
+ _ = @import("compiler_rt/extenddfxf2.zig");
_ = @import("compiler_rt/extendxftf2.zig");
_ = @import("compiler_rt/truncf.zig");
_ = @import("compiler_rt/truncsfhf2.zig");
_ = @import("compiler_rt/truncdfhf2.zig");
_ = @import("compiler_rt/truncdfsf2.zig");
+ _ = @import("compiler_rt/truncxfhf2.zig");
+ _ = @import("compiler_rt/truncxfsf2.zig");
+ _ = @import("compiler_rt/truncxfdf2.zig");
_ = @import("compiler_rt/trunctfhf2.zig");
_ = @import("compiler_rt/trunctfsf2.zig");
_ = @import("compiler_rt/trunctfdf2.zig");
_ = @import("compiler_rt/trunctfxf2.zig");
- _ = @import("compiler_rt/truncxfhf2.zig");
- _ = @import("compiler_rt/truncxfsf2.zig");
- _ = @import("compiler_rt/truncxfdf2.zig");
- _ = @import("compiler_rt/divtf3.zig");
+ _ = @import("compiler_rt/divhf3.zig");
_ = @import("compiler_rt/divsf3.zig");
_ = @import("compiler_rt/divdf3.zig");
_ = @import("compiler_rt/divxf3.zig");
+ _ = @import("compiler_rt/divtf3.zig");
_ = @import("compiler_rt/sin.zig");
_ = @import("compiler_rt/cos.zig");
_ = @import("compiler_rt/sincos.zig");
diff --git a/lib/compiler_rt/addhf3.zig b/lib/compiler_rt/addhf3.zig
new file mode 100644
index 0000000000..12086aef38
--- /dev/null
+++ b/lib/compiler_rt/addhf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__addhf3, .{ .name = "__addhf3", .linkage = common.linkage });
+}
+
+fn __addhf3(a: f16, b: f16) callconv(.C) f16 {
+ return addf3(f16, a, b);
+}
diff --git a/lib/compiler_rt/cmphf2.zig b/lib/compiler_rt/cmphf2.zig
new file mode 100644
index 0000000000..d5ee3f1daa
--- /dev/null
+++ b/lib/compiler_rt/cmphf2.zig
@@ -0,0 +1,50 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__eqhf2, .{ .name = "__eqhf2", .linkage = common.linkage });
+ @export(__nehf2, .{ .name = "__nehf2", .linkage = common.linkage });
+ @export(__lehf2, .{ .name = "__lehf2", .linkage = common.linkage });
+ @export(__cmphf2, .{ .name = "__cmphf2", .linkage = common.linkage });
+ @export(__lthf2, .{ .name = "__lthf2", .linkage = common.linkage });
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__lehf2`, `__eqhf2`, `__nehf2`, `__cmphf2`,
+/// and `__lthf2`.
+fn __cmphf2(a: f16, b: f16) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f16, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+pub fn __lehf2(a: f16, b: f16) callconv(.C) i32 {
+ return __cmphf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqhf2 and __nehf2 are defined
+/// to have the same return value.
+pub fn __eqhf2(a: f16, b: f16) callconv(.C) i32 {
+ return __cmphf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqhf2 and __nehf2 are defined
+/// to have the same return value.
+pub fn __nehf2(a: f16, b: f16) callconv(.C) i32 {
+ return __cmphf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+pub fn __lthf2(a: f16, b: f16) callconv(.C) i32 {
+ return __cmphf2(a, b);
+}
diff --git a/lib/compiler_rt/divhf3.zig b/lib/compiler_rt/divhf3.zig
new file mode 100644
index 0000000000..ad73a5ddb2
--- /dev/null
+++ b/lib/compiler_rt/divhf3.zig
@@ -0,0 +1,11 @@
+const common = @import("common.zig");
+const divsf3 = @import("./divsf3.zig");
+
+comptime {
+ @export(__divhf3, .{ .name = "__divhf3", .linkage = common.linkage });
+}
+
+pub fn __divhf3(a: f16, b: f16) callconv(.C) f16 {
+ // TODO: more efficient implementation
+ return @floatCast(f16, divsf3.__divsf3(a, b));
+}
diff --git a/lib/compiler_rt/extendhfdf2.zig b/lib/compiler_rt/extendhfdf2.zig
new file mode 100644
index 0000000000..f7a94f58ef
--- /dev/null
+++ b/lib/compiler_rt/extendhfdf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendhfdf2, .{ .name = "__extendhfdf2", .linkage = common.linkage });
+}
+
+pub fn __extendhfdf2(a: common.F16T) callconv(.C) f64 {
+ return extendf(f64, f16, @bitCast(u16, a));
+}
diff --git a/lib/compiler_rt/extendhfsf2.zig b/lib/compiler_rt/extendhfsf2.zig
index a6bf5f5be5..0c204ec188 100644
--- a/lib/compiler_rt/extendhfsf2.zig
+++ b/lib/compiler_rt/extendhfsf2.zig
@@ -5,22 +5,17 @@ pub const panic = common.panic;
comptime {
if (common.gnu_f16_abi) {
- @export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage });
+ @export(__extendhfsf2, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage });
} else if (common.want_aeabi) {
@export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = common.linkage });
- } else {
- @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage });
}
+ @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage });
}
pub fn __extendhfsf2(a: common.F16T) callconv(.C) f32 {
return extendf(f32, f16, @bitCast(u16, a));
}
-fn __gnu_h2f_ieee(a: common.F16T) callconv(.C) f32 {
- return extendf(f32, f16, @bitCast(u16, a));
-}
-
fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
return extendf(f32, f16, @bitCast(u16, a));
}
diff --git a/lib/compiler_rt/gehf2.zig b/lib/compiler_rt/gehf2.zig
new file mode 100644
index 0000000000..651cbf943f
--- /dev/null
+++ b/lib/compiler_rt/gehf2.zig
@@ -0,0 +1,31 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__gehf2, .{ .name = "__gehf2", .linkage = common.linkage });
+ @export(__gthf2, .{ .name = "__gthf2", .linkage = common.linkage });
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+pub fn __gehf2(a: f16, b: f16) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f16, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+pub fn __gthf2(a: f16, b: f16) callconv(.C) i32 {
+ return __gehf2(a, b);
+}
+
+fn __aeabi_fcmpge(a: f16, b: f16) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f16, comparef.GE, a, b) != .Less);
+}
+
+fn __aeabi_fcmpgt(a: f16, b: f16) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f16, comparef.LE, a, b) == .Greater);
+}
diff --git a/lib/compiler_rt/mulf3.zig b/lib/compiler_rt/mulf3.zig
index 770721cb80..b02bd81671 100644
--- a/lib/compiler_rt/mulf3.zig
+++ b/lib/compiler_rt/mulf3.zig
@@ -32,8 +32,9 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
const infRep = @bitCast(Z, math.inf(T));
const minNormalRep = @bitCast(Z, math.floatMin(T));
- const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
- const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
+ const ZExp = if (typeWidth >= 32) u32 else Z;
+ const aExponent = @truncate(ZExp, (@bitCast(Z, a) >> significandBits) & maxExponent);
+ const bExponent = @truncate(ZExp, (@bitCast(Z, b) >> significandBits) & maxExponent);
const productSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
var aSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, a) & significandMask);
diff --git a/lib/compiler_rt/mulhf3.zig b/lib/compiler_rt/mulhf3.zig
new file mode 100644
index 0000000000..45251548be
--- /dev/null
+++ b/lib/compiler_rt/mulhf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__mulhf3, .{ .name = "__mulhf3", .linkage = common.linkage });
+}
+
+pub fn __mulhf3(a: f16, b: f16) callconv(.C) f16 {
+ return mulf3(f16, a, b);
+}
diff --git a/lib/compiler_rt/neghf2.zig b/lib/compiler_rt/neghf2.zig
new file mode 100644
index 0000000000..fe55a751d8
--- /dev/null
+++ b/lib/compiler_rt/neghf2.zig
@@ -0,0 +1,11 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__neghf2, .{ .name = "__neghf2", .linkage = common.linkage });
+}
+
+fn __neghf2(a: f16) callconv(.C) f16 {
+ return common.fneg(a);
+}
diff --git a/lib/compiler_rt/subhf3.zig b/lib/compiler_rt/subhf3.zig
new file mode 100644
index 0000000000..b14da2d794
--- /dev/null
+++ b/lib/compiler_rt/subhf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__subhf3, .{ .name = "__subhf3", .linkage = common.linkage });
+}
+
+fn __subhf3(a: f16, b: f16) callconv(.C) f16 {
+ const neg_b = @bitCast(f16, @bitCast(u16, b) ^ (@as(u16, 1) << 15));
+ return a + neg_b;
+}
diff --git a/lib/compiler_rt/tan.zig b/lib/compiler_rt/tan.zig
index 9c44e4c682..8b8f8287a3 100644
--- a/lib/compiler_rt/tan.zig
+++ b/lib/compiler_rt/tan.zig
@@ -24,8 +24,10 @@ comptime {
@export(tanf, .{ .name = "tanf", .linkage = common.linkage });
@export(tan, .{ .name = "tan", .linkage = common.linkage });
@export(__tanx, .{ .name = "__tanx", .linkage = common.linkage });
- const tanq_sym_name = if (common.want_ppc_abi) "tanf128" else "tanq";
- @export(tanq, .{ .name = tanq_sym_name, .linkage = common.linkage });
+ if (common.want_ppc_abi) {
+ @export(tanq, .{ .name = "tanf128", .linkage = common.linkage });
+ }
+ @export(tanq, .{ .name = "tanq", .linkage = common.linkage });
@export(tanl, .{ .name = "tanl", .linkage = common.linkage });
}
diff --git a/lib/compiler_rt/truncsfhf2.zig b/lib/compiler_rt/truncsfhf2.zig
index 489fb8658d..010e257923 100644
--- a/lib/compiler_rt/truncsfhf2.zig
+++ b/lib/compiler_rt/truncsfhf2.zig
@@ -5,22 +5,17 @@ pub const panic = common.panic;
comptime {
if (common.gnu_f16_abi) {
- @export(__gnu_f2h_ieee, .{ .name = "__gnu_f2h_ieee", .linkage = common.linkage });
+ @export(__truncsfhf2, .{ .name = "__gnu_f2h_ieee", .linkage = common.linkage });
} else if (common.want_aeabi) {
@export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = common.linkage });
- } else {
- @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = common.linkage });
}
+ @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = common.linkage });
}
pub fn __truncsfhf2(a: f32) callconv(.C) common.F16T {
return @bitCast(common.F16T, truncf(f16, f32, a));
}
-fn __gnu_f2h_ieee(a: f32) callconv(.C) common.F16T {
- return @bitCast(common.F16T, truncf(f16, f32, a));
-}
-
fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
return @bitCast(common.F16T, truncf(f16, f32, a));
}
diff --git a/lib/compiler_rt/unordhf2.zig b/lib/compiler_rt/unordhf2.zig
new file mode 100644
index 0000000000..0c2aea629a
--- /dev/null
+++ b/lib/compiler_rt/unordhf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__unordhf2, .{ .name = "__unordhf2", .linkage = common.linkage });
+}
+
+pub fn __unordhf2(a: f16, b: f16) callconv(.C) i32 {
+ return comparef.unordcmp(f16, a, b);
+}
diff --git a/lib/compiler_rt/unordxf2.zig b/lib/compiler_rt/unordxf2.zig
new file mode 100644
index 0000000000..e456096370
--- /dev/null
+++ b/lib/compiler_rt/unordxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__unordxf2, .{ .name = "__unordxf2", .linkage = common.linkage });
+}
+
+pub fn __unordxf2(a: f80, b: f80) callconv(.C) i32 {
+ return comparef.unordcmp(f80, a, b);
+}
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index 61c1ee2a32..6097fde41b 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -1,23 +1,30 @@
-//! This API non-allocating, non-fallible, and thread-safe.
+//! This is a non-allocating, non-fallible, and thread-safe API for printing
+//! progress indicators to the terminal.
//! The tradeoff is that users of this API must provide the storage
//! for each `Progress.Node`.
//!
+//! This library purposefully keeps its output simple and is ASCII-compatible.
+//!
//! Initialize the struct directly, overriding these fields as desired:
//! * `refresh_rate_ms`
//! * `initial_delay_ms`
+//! * `dont_print_on_dumb`
+//! * `max_width`
const std = @import("std");
const builtin = @import("builtin");
const windows = std.os.windows;
const testing = std.testing;
const assert = std.debug.assert;
+const os = std.os;
+const time = std.time;
const Progress = @This();
/// `null` if the current node (and its children) should
/// not print on update()
terminal: ?std.fs.File = undefined,
-/// Is this a windows API terminal (note: this is not the same as being run on windows
+/// Is this a Windows API terminal (note: this is not the same as being run on Windows
/// because other terminals exist like MSYS/git-bash)
is_windows_terminal: bool = false,
@@ -35,21 +42,31 @@ root: Node = undefined,
/// Keeps track of how much time has passed since the beginning.
/// Used to compare with `initial_delay_ms` and `refresh_rate_ms`.
-timer: ?std.time.Timer = null,
+timer: ?time.Timer = null,
/// When the previous refresh was written to the terminal.
/// Used to compare with `refresh_rate_ms`.
prev_refresh_timestamp: u64 = undefined,
-/// This buffer represents the maximum number of bytes written to the terminal
-/// with each refresh.
-output_buffer: [100]u8 = undefined,
+/// This is the maximum number of bytes that can be written to the terminal each refresh.
+/// Anything larger than this is truncated.
+// we can bump this up if we need to
+output_buffer: [256]u8 = undefined,
+output_buffer_slice: []u8 = undefined,
+
+/// This is the maximum number of bytes written to the terminal with each refresh.
+///
+/// It is recommended to leave this as `null` so that `start` can automatically decide an
+/// optimal width for the terminal.
+///
+/// Note that this will be clamped to at least 4 and output will appear malformed if it is < 4.
+max_width: ?usize = null,
/// How many nanoseconds between writing updates to the terminal.
-refresh_rate_ns: u64 = 50 * std.time.ns_per_ms,
+refresh_rate_ns: u64 = 50 * time.ns_per_ms,
-/// How many nanoseconds to keep the output hidden
-initial_delay_ns: u64 = 500 * std.time.ns_per_ms,
+/// How many nanoseconds to keep the output hidden.
+initial_delay_ns: u64 = 500 * time.ns_per_ms,
done: bool = true,
@@ -62,11 +79,14 @@ update_mutex: std.Thread.Mutex = .{},
/// we can move the cursor back later.
columns_written: usize = undefined,
+const truncation_suffix = "... ";
+
/// Represents one unit of progress. Each node can have children nodes, or
/// one can use integers with `update`.
pub const Node = struct {
context: *Progress,
parent: ?*Node,
+ /// The name that will be displayed for this node.
name: []const u8,
/// Must be handled atomically to be thread-safe.
recently_updated_child: ?*Node = null,
@@ -139,9 +159,13 @@ pub const Node = struct {
/// Create a new progress node.
/// Call `Node.end` when done.
-/// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this
-/// API to return Progress rather than accept it as a parameter.
/// `estimated_total_items` value of 0 means unknown.
+///
+/// Note that as soon as work is started and progress output is printed,
+/// `std.Progress` expects you to lean back and wait and not resize the terminal.
+/// Resizing the terminal during progress output may result in malformed output.
+// TODO: solve https://github.com/ziglang/zig/issues/2765 and then change this
+// API to return Progress rather than accept it as a parameter.
pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) *Node {
const stderr = std.io.getStdErr();
self.terminal = null;
@@ -155,6 +179,7 @@ pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) *N
// we are in a "dumb" terminal like in acme or writing to a file
self.terminal = stderr;
}
+ self.calculateMaxWidth();
self.root = Node{
.context = self,
.parent = null,
@@ -164,11 +189,83 @@ pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) *N
};
self.columns_written = 0;
self.prev_refresh_timestamp = 0;
- self.timer = std.time.Timer.start() catch null;
+ self.timer = time.Timer.start() catch null;
self.done = false;
return &self.root;
}
+fn calculateMaxWidth(self: *Progress) void {
+ if (self.max_width == null) {
+ if (self.terminal) |terminal| {
+ // choose an optimal width and account for progress output that could have been printed
+ // before us by another `std.Progress` instance
+ const terminal_width = self.getTerminalWidth(terminal.handle) catch 100;
+ const chars_already_printed = self.getTerminalCursorColumn(terminal) catch 0;
+ self.max_width = terminal_width - chars_already_printed;
+ } else {
+ self.max_width = 100;
+ }
+ }
+ self.max_width = std.math.clamp(
+ self.max_width.?,
+ truncation_suffix.len, // make sure we can at least truncate
+ self.output_buffer.len - 1,
+ );
+}
+
+fn getTerminalWidth(self: Progress, file_handle: os.fd_t) !u16 {
+ if (builtin.os.tag == .linux) {
+ // TODO: figure out how to get this working on FreeBSD, macOS etc. too.
+        // they too should have capabilities to figure out the terminal width.
+ var winsize: os.linux.winsize = undefined;
+ switch (os.errno(os.linux.ioctl(file_handle, os.linux.T.IOCGWINSZ, @ptrToInt(&winsize)))) {
+ .SUCCESS => return winsize.ws_col,
+ else => return error.Unexpected,
+ }
+ } else if (builtin.os.tag == .windows) {
+ std.debug.assert(self.is_windows_terminal);
+ var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
+ if (windows.kernel32.GetConsoleScreenBufferInfo(file_handle, &info) != windows.TRUE)
+ return error.Unexpected;
+ return @intCast(u16, info.dwSize.X);
+ } else {
+ return error.Unsupported;
+ }
+}
+
+fn getTerminalCursorColumn(self: Progress, file: std.fs.File) !u16 {
+ // TODO: figure out how to get this working on FreeBSD, macOS etc. too.
+    // they too should have termios or capabilities to figure out the cursor column.
+ if (builtin.os.tag == .linux and self.supports_ansi_escape_codes) {
+ // First, disable echo and enable non-canonical mode
+ // (so that no enter press required for us to read the output of the escape sequence below)
+ const original_termios = try os.tcgetattr(file.handle);
+ var new_termios = original_termios;
+ new_termios.lflag &= ~(os.linux.ECHO | os.linux.ICANON);
+ try os.tcsetattr(file.handle, .NOW, new_termios);
+ defer os.tcsetattr(file.handle, .NOW, original_termios) catch {
+ // Sorry for ruining your terminal
+ };
+
+ try file.writeAll("\x1b[6n");
+ var buf: ["\x1b[65536;65536R".len]u8 = undefined;
+ const output = try file.reader().readUntilDelimiter(&buf, 'R');
+ var splitter = std.mem.split(u8, output, ";");
+ _ = splitter.next().?; // skip first half
+ const column_half = splitter.next() orelse return error.UnexpectedEnd;
+ const column = try std.fmt.parseUnsigned(u16, column_half, 10);
+ return column - 1; // it's one-based
+ } else if (builtin.os.tag == .windows) {
+ std.debug.assert(self.is_windows_terminal);
+ var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
+ if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE)
+ return error.Unexpected;
+ return @intCast(u16, info.dwCursorPosition.X);
+ } else {
+ return error.Unsupported;
+ }
+}
+
/// Updates the terminal if enough time has passed since last update. Thread-safe.
pub fn maybeRefresh(self: *Progress) void {
if (self.timer) |*timer| {
@@ -198,14 +295,16 @@ fn refreshWithHeldLock(self: *Progress) void {
const file = self.terminal orelse return;
+ // prepare for printing unprintable characters
+ self.output_buffer_slice = &self.output_buffer;
+
var end: usize = 0;
if (self.columns_written > 0) {
// restore the cursor position by moving the cursor
- // `columns_written` cells to the left, then clear the rest of the
- // line
+ // `columns_written` cells to the left, then clear the rest of the line
if (self.supports_ansi_escape_codes) {
- end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[{d}D", .{self.columns_written}) catch unreachable).len;
- end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len;
+ end += (std.fmt.bufPrint(self.output_buffer_slice[end..], "\x1b[{d}D", .{self.columns_written}) catch unreachable).len;
+ end += (std.fmt.bufPrint(self.output_buffer_slice[end..], "\x1b[0K", .{}) catch unreachable).len;
} else if (builtin.os.tag == .windows) winapi: {
std.debug.assert(self.is_windows_terminal);
@@ -247,47 +346,53 @@ fn refreshWithHeldLock(self: *Progress) void {
unreachable;
} else {
// we are in a "dumb" terminal like in acme or writing to a file
- self.output_buffer[end] = '\n';
+ self.output_buffer_slice[end] = '\n';
end += 1;
}
self.columns_written = 0;
}
+ // from here on we will write printable characters. we also make sure the unprintable characters
+ // we possibly wrote previously don't affect whether we truncate the line in `bufWrite`.
+ const unprintables = end;
+ end = 0;
+ self.output_buffer_slice = self.output_buffer[unprintables..@minimum(self.output_buffer.len, unprintables + self.max_width.?)];
+
if (!self.done) {
- var need_ellipse = false;
+ var need_ellipsis = false;
var maybe_node: ?*Node = &self.root;
while (maybe_node) |node| {
- if (need_ellipse) {
+ if (need_ellipsis) {
self.bufWrite(&end, "... ", .{});
}
- need_ellipse = false;
- const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
+ need_ellipsis = false;
+ const estimated_total_items = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
const current_item = completed_items + 1;
- if (node.name.len != 0 or eti > 0) {
+ if (node.name.len != 0 or estimated_total_items > 0) {
if (node.name.len != 0) {
self.bufWrite(&end, "{s}", .{node.name});
- need_ellipse = true;
+ need_ellipsis = true;
}
- if (eti > 0) {
- if (need_ellipse) self.bufWrite(&end, " ", .{});
- self.bufWrite(&end, "[{d}/{d}] ", .{ current_item, eti });
- need_ellipse = false;
+ if (estimated_total_items > 0) {
+ if (need_ellipsis) self.bufWrite(&end, " ", .{});
+ self.bufWrite(&end, "[{d}/{d}] ", .{ current_item, estimated_total_items });
+ need_ellipsis = false;
} else if (completed_items != 0) {
- if (need_ellipse) self.bufWrite(&end, " ", .{});
+ if (need_ellipsis) self.bufWrite(&end, " ", .{});
self.bufWrite(&end, "[{d}] ", .{current_item});
- need_ellipse = false;
+ need_ellipsis = false;
}
}
maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .Acquire);
}
- if (need_ellipse) {
+ if (need_ellipsis) {
self.bufWrite(&end, "... ", .{});
}
}
- _ = file.write(self.output_buffer[0..end]) catch {
+ _ = file.write(self.output_buffer[0 .. end + unprintables]) catch {
// Stop trying to write to this file once it errors.
self.terminal = null;
};
@@ -310,32 +415,113 @@ pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void {
}
fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void {
- if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| {
+ if (std.fmt.bufPrint(self.output_buffer_slice[end.*..], format, args)) |written| {
const amt = written.len;
end.* += amt;
self.columns_written += amt;
} else |err| switch (err) {
error.NoSpaceLeft => {
- self.columns_written += self.output_buffer.len - end.*;
- end.* = self.output_buffer.len;
- const suffix = "... ";
- std.mem.copy(u8, self.output_buffer[self.output_buffer.len - suffix.len ..], suffix);
+ // truncate the line with a suffix.
+ // for example if we have "hello world" (len=11) and 10 is the limit,
+ // it would become "hello w... "
+ self.columns_written += self.output_buffer_slice.len - end.*;
+ end.* = self.output_buffer_slice.len;
+ std.mem.copy(
+ u8,
+ self.output_buffer_slice[self.output_buffer_slice.len - truncation_suffix.len ..],
+ truncation_suffix,
+ );
},
}
}
-test "basic functionality" {
- var disable = true;
- if (disable) {
- // This test is disabled because it uses time.sleep() and is therefore slow. It also
- // prints bogus progress data to stderr.
+// By default these tests are disabled because they use time.sleep()
+// and are therefore slow. They also prints bogus progress data to stderr.
+const skip_tests = true;
+
+test "behavior on buffer overflow" {
+ if (skip_tests)
return error.SkipZigTest;
+
+ // uncomment this to move the cursor
+ //std.debug.print("{s}", .{"A" ** 300});
+
+ var progress = Progress{};
+
+ const long_string = "A" ** 300;
+ var node = progress.start(long_string, 0);
+
+ const speed_factor = time.ns_per_s / 4;
+
+ time.sleep(speed_factor);
+ node.activate();
+ time.sleep(speed_factor);
+ node.end();
+}
+
+test "multiple tasks with long names" {
+ if (skip_tests)
+ return error.SkipZigTest;
+
+ var progress = Progress{};
+
+ const tasks = [_][]const u8{
+ "A" ** 99,
+ "A" ** 100,
+ "A" ** 101,
+ "A" ** 102,
+ "A" ** 103,
+ };
+
+ const speed_factor = time.ns_per_s / 6;
+
+ for (tasks) |task| {
+ var node = progress.start(task, 3);
+ time.sleep(speed_factor);
+ node.activate();
+
+ time.sleep(speed_factor);
+ node.completeOne();
+ time.sleep(speed_factor);
+ node.completeOne();
+ time.sleep(speed_factor);
+ node.completeOne();
+
+ node.end();
}
+}
+
+test "very short max width" {
+ if (skip_tests)
+ return error.SkipZigTest;
+
+ var progress = Progress{ .max_width = 4 };
+
+ const task = "A" ** 300;
+
+ const speed_factor = time.ns_per_s / 2;
+
+ var node = progress.start(task, 3);
+ time.sleep(speed_factor);
+ node.activate();
+
+ time.sleep(speed_factor);
+ node.completeOne();
+ time.sleep(speed_factor);
+ node.completeOne();
+
+ node.end();
+}
+
+test "basic functionality" {
+ if (skip_tests)
+ return error.SkipZigTest;
+
var progress = Progress{};
const root_node = progress.start("", 100);
defer root_node.end();
- const speed_factor = std.time.ns_per_ms;
+ const speed_factor = time.ns_per_ms;
const sub_task_names = [_][]const u8{
"reticulating splines",
@@ -352,24 +538,24 @@ test "basic functionality" {
next_sub_task = (next_sub_task + 1) % sub_task_names.len;
node.completeOne();
- std.time.sleep(5 * speed_factor);
+ time.sleep(5 * speed_factor);
node.completeOne();
node.completeOne();
- std.time.sleep(5 * speed_factor);
+ time.sleep(5 * speed_factor);
node.completeOne();
node.completeOne();
- std.time.sleep(5 * speed_factor);
+ time.sleep(5 * speed_factor);
node.end();
- std.time.sleep(5 * speed_factor);
+ time.sleep(5 * speed_factor);
}
{
var node = root_node.start("this is a really long name designed to activate the truncation code. let's find out if it works", 0);
node.activate();
- std.time.sleep(10 * speed_factor);
+ time.sleep(10 * speed_factor);
progress.refresh();
- std.time.sleep(10 * speed_factor);
+ time.sleep(10 * speed_factor);
node.end();
}
}
diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig
index 57866d21d6..6d5a0fe4fc 100644
--- a/lib/std/atomic/Atomic.zig
+++ b/lib/std/atomic/Atomic.zig
@@ -374,6 +374,10 @@ const atomic_rmw_orderings = [_]Ordering{
};
test "Atomic.swap" {
+ // TODO: Re-enable when LLVM is released with a bugfix for isel of
+ // atomic load (currently fixed on trunk, broken on 15.0.2)
+ if (builtin.cpu.arch == .powerpc64le) return error.SkipZigTest;
+
inline for (atomic_rmw_orderings) |ordering| {
var x = Atomic(usize).init(5);
try testing.expectEqual(x.swap(10, ordering), 5);
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 79b500b63d..0dfe7a2500 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -80,11 +80,11 @@ pub extern "c" fn posix_memalign(memptr: *?*anyopaque, alignment: usize, size: u
pub const posix_spawnattr_t = *opaque {};
pub const posix_spawn_file_actions_t = *opaque {};
pub extern "c" fn posix_spawnattr_init(attr: *posix_spawnattr_t) c_int;
-pub extern "c" fn posix_spawnattr_destroy(attr: *posix_spawnattr_t) void;
+pub extern "c" fn posix_spawnattr_destroy(attr: *posix_spawnattr_t) c_int;
pub extern "c" fn posix_spawnattr_setflags(attr: *posix_spawnattr_t, flags: c_short) c_int;
pub extern "c" fn posix_spawnattr_getflags(attr: *const posix_spawnattr_t, flags: *c_short) c_int;
pub extern "c" fn posix_spawn_file_actions_init(actions: *posix_spawn_file_actions_t) c_int;
-pub extern "c" fn posix_spawn_file_actions_destroy(actions: *posix_spawn_file_actions_t) void;
+pub extern "c" fn posix_spawn_file_actions_destroy(actions: *posix_spawn_file_actions_t) c_int;
pub extern "c" fn posix_spawn_file_actions_addclose(actions: *posix_spawn_file_actions_t, filedes: fd_t) c_int;
pub extern "c" fn posix_spawn_file_actions_addopen(
actions: *posix_spawn_file_actions_t,
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 038ead12b5..7350a06607 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -69,7 +69,7 @@ pub const Loop = struct {
};
pub const EventFd = switch (builtin.os.tag) {
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventFd,
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventFd,
.linux => struct {
base: ResumeNode,
epoll_op: u32,
@@ -88,7 +88,7 @@ pub const Loop = struct {
};
pub const Basic = switch (builtin.os.tag) {
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventBasic,
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventBasic,
.linux => struct {
base: ResumeNode,
},
@@ -269,7 +269,7 @@ pub const Loop = struct {
self.extra_threads[extra_thread_index] = try Thread.spawn(.{}, workerRun, .{self});
}
},
- .macos, .freebsd, .netbsd, .dragonfly => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly => {
self.os_data.kqfd = try os.kqueue();
errdefer os.close(self.os_data.kqfd);
@@ -457,7 +457,7 @@ pub const Loop = struct {
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
os.close(self.os_data.epollfd);
},
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
os.close(self.os_data.kqfd);
},
.windows => {
@@ -552,7 +552,7 @@ pub const Loop = struct {
.linux => {
self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.IN);
},
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT);
},
else => @compileError("Unsupported OS"),
@@ -564,7 +564,7 @@ pub const Loop = struct {
.linux => {
self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT);
},
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
},
else => @compileError("Unsupported OS"),
@@ -576,7 +576,7 @@ pub const Loop = struct {
.linux => {
self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT | os.linux.EPOLL.IN);
},
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT);
self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
},
@@ -645,7 +645,7 @@ pub const Loop = struct {
const eventfd_node = &resume_stack_node.data;
eventfd_node.base.handle = next_tick_node.data;
switch (builtin.os.tag) {
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
const kevent_array = @as(*const [1]os.Kevent, &eventfd_node.kevent);
const empty_kevs = &[0]os.Kevent{};
_ = os.kevent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch {
@@ -708,6 +708,9 @@ pub const Loop = struct {
switch (builtin.os.tag) {
.linux,
.macos,
+ .ios,
+ .tvos,
+ .watchos,
.freebsd,
.netbsd,
.dragonfly,
@@ -802,7 +805,7 @@ pub const Loop = struct {
}
return;
},
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
const final_kevent = @as(*const [1]os.Kevent, &self.os_data.final_kevent);
const empty_kevs = &[0]os.Kevent{};
// cannot fail because we already added it and this just enables it
@@ -1428,7 +1431,7 @@ pub const Loop = struct {
}
}
},
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
var eventlist: [1]os.Kevent = undefined;
const empty_kevs = &[0]os.Kevent{};
const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
@@ -1554,7 +1557,7 @@ pub const Loop = struct {
const OsData = switch (builtin.os.tag) {
.linux => LinuxOsData,
- .macos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventData,
+ .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => KEventData,
.windows => struct {
io_port: windows.HANDLE,
extra_thread_count: usize,
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 80e5997482..f7027a70c9 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -1602,6 +1602,14 @@ pub const Dir = struct {
if (builtin.os.tag == .wasi) {
@compileError("changing cwd is not currently possible in WASI");
}
+ if (builtin.os.tag == .windows) {
+ var dir_path_buffer: [os.windows.PATH_MAX_WIDE]u16 = undefined;
+ var dir_path = try os.windows.GetFinalPathNameByHandle(self.fd, .{}, &dir_path_buffer);
+ if (builtin.link_libc) {
+ return os.chdirW(dir_path);
+ }
+ return os.windows.SetCurrentDirectory(dir_path);
+ }
try os.fchdir(self.fd);
}
@@ -2261,7 +2269,7 @@ pub const Dir = struct {
/// Like `deleteTree`, but only keeps one `Iterator` active at a time to minimize the function's stack size.
/// This is slower than `deleteTree` but uses less stack space.
pub fn deleteTreeMinStackSize(self: Dir, sub_path: []const u8) DeleteTreeError!void {
- return self.deleteTreeMinStackWithKindHint(sub_path, .File);
+ return self.deleteTreeMinStackSizeWithKindHint(sub_path, .File);
}
fn deleteTreeMinStackSizeWithKindHint(self: Dir, sub_path: []const u8, kind_hint: File.Kind) DeleteTreeError!void {
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index f0fb3e01cc..028110ff9c 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -677,6 +677,22 @@ test "makePath, put some files in it, deleteTree" {
}
}
+test "makePath, put some files in it, deleteTreeMinStackSize" {
+ var tmp = tmpDir(.{});
+ defer tmp.cleanup();
+
+ try tmp.dir.makePath("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c");
+ try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "c" ++ fs.path.sep_str ++ "file.txt", "nonsense");
+ try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "file2.txt", "blah");
+ try tmp.dir.deleteTreeMinStackSize("os_test_tmp");
+ if (tmp.dir.openDir("os_test_tmp", .{})) |dir| {
+ _ = dir;
+ @panic("expected error");
+ } else |err| {
+ try testing.expect(err == error.FileNotFound);
+ }
+}
+
test "makePath in a directory that no longer exists" {
if (builtin.os.tag == .windows) return error.SkipZigTest; // Windows returns FileBusy if attempting to remove an open dir
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 4845808055..1077b4d23a 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -1063,10 +1063,11 @@ test "negateCast" {
/// return null.
pub fn cast(comptime T: type, x: anytype) ?T {
comptime assert(@typeInfo(T) == .Int); // must pass an integer
- comptime assert(@typeInfo(@TypeOf(x)) == .Int); // must pass an integer
- if (maxInt(@TypeOf(x)) > maxInt(T) and x > maxInt(T)) {
+ const is_comptime = @TypeOf(x) == comptime_int;
+ comptime assert(is_comptime or @typeInfo(@TypeOf(x)) == .Int); // must pass an integer
+ if ((is_comptime or maxInt(@TypeOf(x)) > maxInt(T)) and x > maxInt(T)) {
return null;
- } else if (minInt(@TypeOf(x)) < minInt(T) and x < minInt(T)) {
+ } else if ((is_comptime or minInt(@TypeOf(x)) < minInt(T)) and x < minInt(T)) {
return null;
} else {
return @intCast(T, x);
@@ -1074,12 +1075,18 @@ pub fn cast(comptime T: type, x: anytype) ?T {
}
test "cast" {
+ try testing.expect(cast(u8, 300) == null);
try testing.expect(cast(u8, @as(u32, 300)) == null);
+ try testing.expect(cast(i8, -200) == null);
try testing.expect(cast(i8, @as(i32, -200)) == null);
+ try testing.expect(cast(u8, -1) == null);
try testing.expect(cast(u8, @as(i8, -1)) == null);
+ try testing.expect(cast(u64, -1) == null);
try testing.expect(cast(u64, @as(i8, -1)) == null);
+ try testing.expect(cast(u8, 255).? == @as(u8, 255));
try testing.expect(cast(u8, @as(u32, 255)).? == @as(u8, 255));
+ try testing.expect(@TypeOf(cast(u8, 255).?) == u8);
try testing.expect(@TypeOf(cast(u8, @as(u32, 255)).?) == u8);
}
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 889f0928e0..b875f73b2e 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -21,6 +21,9 @@ const debug_safety = false;
/// Returns the number of limbs needed to store `scalar`, which must be a
/// primitive integer value.
+/// Note: A comptime-known upper bound of this value that may be used
+/// instead if `scalar` is not already comptime-known is
+/// `calcTwosCompLimbCount(@typeInfo(@TypeOf(scalar)).Int.bits)`
pub fn calcLimbLen(scalar: anytype) usize {
if (scalar == 0) {
return 1;
@@ -391,7 +394,18 @@ pub const Mutable = struct {
/// Asserts the result fits in `r`. An upper bound on the number of limbs needed by
/// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`.
pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void {
- var limbs: [calcLimbLen(scalar)]Limb = undefined;
+ // Normally we could just determine the number of limbs needed with calcLimbLen,
+ // but that is not comptime-known when scalar is not a comptime_int. Instead, we
+ // use calcTwosCompLimbCount for a non-comptime_int scalar, which can be pessimistic
+ // in the case that scalar happens to be small in magnitude within its type, but it
+ // is well worth being able to use the stack and not needing an allocator passed in.
+ // Note that Mutable.init still sets len to calcLimbLen(scalar) in any case.
+ const limb_len = comptime switch (@typeInfo(@TypeOf(scalar))) {
+ .ComptimeInt => calcLimbLen(scalar),
+ .Int => |info| calcTwosCompLimbCount(info.bits),
+ else => @compileError("expected scalar to be an int"),
+ };
+ var limbs: [limb_len]Limb = undefined;
const operand = init(&limbs, scalar).toConst();
return add(r, a, operand);
}
@@ -2303,7 +2317,18 @@ pub const Const = struct {
/// Same as `order` but the right-hand operand is a primitive integer.
pub fn orderAgainstScalar(lhs: Const, scalar: anytype) math.Order {
- var limbs: [calcLimbLen(scalar)]Limb = undefined;
+ // Normally we could just determine the number of limbs needed with calcLimbLen,
+ // but that is not comptime-known when scalar is not a comptime_int. Instead, we
+ // use calcTwosCompLimbCount for a non-comptime_int scalar, which can be pessimistic
+ // in the case that scalar happens to be small in magnitude within its type, but it
+ // is well worth being able to use the stack and not needing an allocator passed in.
+ // Note that Mutable.init still sets len to calcLimbLen(scalar) in any case.
+ const limb_len = comptime switch (@typeInfo(@TypeOf(scalar))) {
+ .ComptimeInt => calcLimbLen(scalar),
+ .Int => |info| calcTwosCompLimbCount(info.bits),
+ else => @compileError("expected scalar to be an int"),
+ };
+ var limbs: [limb_len]Limb = undefined;
const rhs = Mutable.init(&limbs, scalar);
return order(lhs, rhs.toConst());
}
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index 5b51106ca4..5685a38d41 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -573,7 +573,7 @@ test "big.int add sign" {
try testing.expect((try a.to(i32)) == -3);
}
-test "big.int add scalar" {
+test "big.int add comptime scalar" {
var a = try Managed.initSet(testing.allocator, 50);
defer a.deinit();
@@ -584,6 +584,17 @@ test "big.int add scalar" {
try testing.expect((try b.to(u32)) == 55);
}
+test "big.int add scalar" {
+ var a = try Managed.initSet(testing.allocator, 123);
+ defer a.deinit();
+
+ var b = try Managed.init(testing.allocator);
+ defer b.deinit();
+ try b.addScalar(&a, @as(u32, 31));
+
+ try testing.expect((try b.to(u32)) == 154);
+}
+
test "big.int addWrap single-single, unsigned" {
var a = try Managed.initSet(testing.allocator, maxInt(u17));
defer a.deinit();
diff --git a/lib/std/os.zig b/lib/std/os.zig
index f0bd3dad95..9270a52adb 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -5781,7 +5781,10 @@ pub fn sendmsg(
}
}
-pub const SendToError = SendMsgError;
+pub const SendToError = SendMsgError || error{
+ /// The destination address is not reachable by the bound address.
+ UnreachableAddress,
+};
/// Transmit a message to another socket.
///
@@ -5858,7 +5861,7 @@ pub fn sendto(
.DESTADDRREQ => unreachable, // The socket is not connection-mode, and no peer address is set.
.FAULT => unreachable, // An invalid user space address was specified for an argument.
.INTR => continue,
- .INVAL => unreachable, // Invalid argument passed.
+ .INVAL => return error.UnreachableAddress,
.ISCONN => unreachable, // connection-mode socket was connected already but a recipient was specified
.MSGSIZE => return error.MessageTooBig,
.NOBUFS => return error.SystemResources,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 1ed177c86b..9d7980320b 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -3761,6 +3761,8 @@ pub const IORING_CQE_F_BUFFER = 1 << 0;
pub const IORING_CQE_F_MORE = 1 << 1;
/// If set, more data to read after socket recv
pub const IORING_CQE_F_SOCK_NONEMPTY = 1 << 2;
+/// Set for notification CQEs. Can be used to distinct them from sends.
+pub const IORING_CQE_F_NOTIF = 1 << 3;
/// Magic offsets for the application to mmap the data it needs
pub const IORING_OFF_SQ_RING = 0;
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index 01fd7fa84b..3bc3fbaf7b 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -2007,7 +2007,8 @@ test "accept/connect/send/recv" {
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xffffffff,
.res = buffer_recv.len,
- .flags = 0,
+ // ignore IORING_CQE_F_SOCK_NONEMPTY since it is only set on some systems
+ .flags = cqe_recv.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
}, cqe_recv);
try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
@@ -2089,7 +2090,8 @@ test "sendmsg/recvmsg" {
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x22222222,
.res = buffer_recv.len,
- .flags = 0,
+ // ignore IORING_CQE_F_SOCK_NONEMPTY since it is set non-deterministically
+ .flags = cqe_recvmsg.flags & linux.IORING_CQE_F_SOCK_NONEMPTY,
}, cqe_recvmsg);
try testing.expectEqualSlices(u8, buffer_send[0..buffer_recv.len], buffer_recv[0..]);
diff --git a/lib/std/os/posix_spawn.zig b/lib/std/os/posix_spawn.zig
index d36475df7f..32904a9423 100644
--- a/lib/std/os/posix_spawn.zig
+++ b/lib/std/os/posix_spawn.zig
@@ -47,8 +47,12 @@ const posix_spawn = if (builtin.target.isDarwin()) struct {
}
pub fn deinit(self: *Attr) void {
- system.posix_spawnattr_destroy(&self.attr);
- self.* = undefined;
+ defer self.* = undefined;
+ switch (errno(system.posix_spawnattr_destroy(&self.attr))) {
+ .SUCCESS => return,
+ .INVAL => unreachable, // Invalid parameters.
+ else => unreachable,
+ }
}
pub fn get(self: Attr) Error!u16 {
@@ -83,8 +87,12 @@ const posix_spawn = if (builtin.target.isDarwin()) struct {
}
pub fn deinit(self: *Actions) void {
- system.posix_spawn_file_actions_destroy(&self.actions);
- self.* = undefined;
+ defer self.* = undefined;
+ switch (errno(system.posix_spawn_file_actions_destroy(&self.actions))) {
+ .SUCCESS => return,
+ .INVAL => unreachable, // Invalid parameters.
+ else => unreachable,
+ }
}
pub fn open(self: *Actions, fd: fd_t, path: []const u8, flags: u32, mode: mode_t) Error!void {
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index f6d148a317..081bff845c 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -2868,10 +2868,7 @@ pub const PROV_RSA_FULL = 1;
pub const REGSAM = ACCESS_MASK;
pub const ACCESS_MASK = DWORD;
-pub const HKEY = *HKEY__;
-pub const HKEY__ = extern struct {
- unused: c_int,
-};
+pub const HKEY = *opaque {};
pub const LSTATUS = LONG;
pub const FILE_NOTIFY_INFORMATION = extern struct {
diff --git a/lib/std/segmented_list.zig b/lib/std/segmented_list.zig
index 81ad6f6211..5b227b8c50 100644
--- a/lib/std/segmented_list.zig
+++ b/lib/std/segmented_list.zig
@@ -157,13 +157,13 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
/// Invalidates all element pointers.
pub fn clearRetainingCapacity(self: *Self) void {
- self.items.len = 0;
+ self.len = 0;
}
/// Invalidates all element pointers.
pub fn clearAndFree(self: *Self, allocator: Allocator) void {
self.setCapacity(allocator, 0) catch unreachable;
- self.items.len = 0;
+ self.len = 0;
}
/// Grows or shrinks capacity to match usage.
@@ -403,15 +403,13 @@ test "SegmentedList basic usage" {
}
fn testSegmentedList(comptime prealloc: usize) !void {
- const gpa = std.testing.allocator;
-
- var list: SegmentedList(i32, prealloc) = .{};
- defer list.deinit(gpa);
+ var list = SegmentedList(i32, prealloc){};
+ defer list.deinit(testing.allocator);
{
var i: usize = 0;
while (i < 100) : (i += 1) {
- try list.append(gpa, @intCast(i32, i + 1));
+ try list.append(testing.allocator, @intCast(i32, i + 1));
try testing.expect(list.len == i + 1);
}
}
@@ -454,21 +452,21 @@ fn testSegmentedList(comptime prealloc: usize) !void {
try testing.expect(list.pop().? == 100);
try testing.expect(list.len == 99);
- try list.appendSlice(gpa, &[_]i32{ 1, 2, 3 });
+ try list.appendSlice(testing.allocator, &[_]i32{ 1, 2, 3 });
try testing.expect(list.len == 102);
try testing.expect(list.pop().? == 3);
try testing.expect(list.pop().? == 2);
try testing.expect(list.pop().? == 1);
try testing.expect(list.len == 99);
- try list.appendSlice(gpa, &[_]i32{});
+ try list.appendSlice(testing.allocator, &[_]i32{});
try testing.expect(list.len == 99);
{
var i: i32 = 99;
while (list.pop()) |item| : (i -= 1) {
try testing.expect(item == i);
- list.shrinkCapacity(gpa, list.len);
+ list.shrinkCapacity(testing.allocator, list.len);
}
}
@@ -478,7 +476,7 @@ fn testSegmentedList(comptime prealloc: usize) !void {
var i: i32 = 0;
while (i < 100) : (i += 1) {
- try list.append(gpa, i + 1);
+ try list.append(testing.allocator, i + 1);
control[@intCast(usize, i)] = i + 1;
}
@@ -491,7 +489,20 @@ fn testSegmentedList(comptime prealloc: usize) !void {
try testing.expect(std.mem.eql(i32, control[50..], dest[50..]));
}
- try list.setCapacity(gpa, 0);
+ try list.setCapacity(testing.allocator, 0);
+}
+
+test "std.segmented_list clearRetainingCapacity" {
+ var list = SegmentedList(i32, 1){};
+ defer list.deinit(testing.allocator);
+
+ try list.appendSlice(testing.allocator, &[_]i32{ 4, 5 });
+ list.clearRetainingCapacity();
+ try list.append(testing.allocator, 6);
+ try testing.expect(list.at(0).* == 6);
+ try testing.expect(list.len == 1);
+ list.clearRetainingCapacity();
+ try testing.expect(list.len == 0);
}
/// TODO look into why this std.math function was changed in
diff --git a/lib/std/target.zig b/lib/std/target.zig
index c1f367b3d5..201fac222c 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -1772,7 +1772,7 @@ pub const Target = struct {
}
pub inline fn longDoubleIs(target: Target, comptime F: type) bool {
- if (target.abi == .msvc) {
+ if (target.abi == .msvc or (target.abi == .android and target.cpu.arch == .i386)) {
return F == f64;
}
return switch (F) {
@@ -1800,6 +1800,8 @@ pub const Target = struct {
.powerpcle,
.powerpc64,
.powerpc64le,
+ .wasm32,
+ .wasm64,
=> true,
else => false,
diff --git a/src/Sema.zig b/src/Sema.zig
index c81d267bcd..1b2bf84885 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -27730,6 +27730,14 @@ fn cmpNumeric(
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
if (lhs_val.isUndef())
return sema.addConstUndef(Type.bool);
+ if (lhs_val.isNan()) switch (op) {
+ .neq => return Air.Inst.Ref.bool_true,
+ else => return Air.Inst.Ref.bool_false,
+ };
+ if (lhs_val.isInf()) switch (op) {
+ .gt, .neq => return Air.Inst.Ref.bool_true,
+ .lt, .lte, .eq, .gte => return Air.Inst.Ref.bool_false,
+ };
if (!rhs_is_signed) {
switch (lhs_val.orderAgainstZero()) {
.gt => {},
@@ -27745,8 +27753,7 @@ fn cmpNumeric(
}
}
if (lhs_is_float) {
- var bigint_space: Value.BigIntSpace = undefined;
- var bigint = try lhs_val.toBigInt(&bigint_space, target).toManaged(sema.gpa);
+ var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128));
defer bigint.deinit();
if (lhs_val.floatHasFraction()) {
switch (op) {
@@ -27776,6 +27783,14 @@ fn cmpNumeric(
if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
if (rhs_val.isUndef())
return sema.addConstUndef(Type.bool);
+ if (rhs_val.isNan()) switch (op) {
+ .neq => return Air.Inst.Ref.bool_true,
+ else => return Air.Inst.Ref.bool_false,
+ };
+ if (rhs_val.isInf()) switch (op) {
+ .lt, .neq => return Air.Inst.Ref.bool_true,
+ .gt, .lte, .eq, .gte => return Air.Inst.Ref.bool_false,
+ };
if (!lhs_is_signed) {
switch (rhs_val.orderAgainstZero()) {
.gt => {},
@@ -27791,8 +27806,7 @@ fn cmpNumeric(
}
}
if (rhs_is_float) {
- var bigint_space: Value.BigIntSpace = undefined;
- var bigint = try rhs_val.toBigInt(&bigint_space, target).toManaged(sema.gpa);
+ var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128));
defer bigint.deinit();
if (rhs_val.floatHasFraction()) {
switch (op) {
@@ -31169,6 +31183,31 @@ fn floatToInt(
return sema.floatToIntScalar(block, src, val, float_ty, int_ty);
}
+// float is expected to be finite and non-NaN
+fn float128IntPartToBigInt(
+ arena: Allocator,
+ float: f128,
+) !std.math.big.int.Managed {
+ const is_negative = std.math.signbit(float);
+ const floored = @floor(@fabs(float));
+
+ var rational = try std.math.big.Rational.init(arena);
+ defer rational.q.deinit();
+ rational.setFloat(f128, floored) catch |err| switch (err) {
+ error.NonFiniteFloat => unreachable,
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+
+ // The float is reduced in rational.setFloat, so we assert that denominator is equal to one
+ const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
+ assert(rational.q.toConst().eqAbs(big_one));
+
+ if (is_negative) {
+ rational.negate();
+ }
+ return rational.p;
+}
+
fn floatToIntScalar(
sema: *Sema,
block: *Block,
@@ -31191,22 +31230,11 @@ fn floatToIntScalar(
});
}
- const is_negative = std.math.signbit(float);
- const floored = @floor(@fabs(float));
-
- var rational = try std.math.big.Rational.init(sema.arena);
- defer rational.deinit();
- rational.setFloat(f128, floored) catch |err| switch (err) {
- error.NonFiniteFloat => unreachable,
- error.OutOfMemory => return error.OutOfMemory,
- };
-
- // The float is reduced in rational.setFloat, so we assert that denominator is equal to one
- const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true };
- assert(rational.q.toConst().eqAbs(big_one));
+ var big_int = try float128IntPartToBigInt(sema.arena, float);
+ defer big_int.deinit();
- const result_limbs = try sema.arena.dupe(Limb, rational.p.toConst().limbs);
- const result = if (is_negative)
+ const result_limbs = try sema.arena.dupe(Limb, big_int.toConst().limbs);
+ const result = if (!big_int.isPositive())
try Value.Tag.int_big_negative.create(sema.arena, result_limbs)
else
try Value.Tag.int_big_positive.create(sema.arena, result_limbs);
diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig
index edabac2da6..469b8fbded 100644
--- a/src/clang_options_data.zig
+++ b/src/clang_options_data.zig
@@ -2480,7 +2480,14 @@ flagpd1("dwarf-ext-refs"),
},
sepd1("dylib_file"),
flagpd1("dylinker"),
-flagpd1("dynamic"),
+.{
+ .name = "dynamic",
+ .syntax = .flag,
+ .zig_equivalent = .dynamic,
+ .pd1 = true,
+ .pd2 = false,
+ .psl = false,
+},
.{
.name = "dynamiclib",
.syntax = .flag,
@@ -4028,7 +4035,7 @@ flagpd1("menable-unsafe-fp-math"),
m("menqcmd"),
m("mexception-handling"),
m("mexecute-only"),
-flagpd1("mextended-const"),
+m("mextended-const"),
flagpd1("mextern-sdata"),
m("mf16c"),
flagpd1("mfancy-math-387"),
@@ -4037,7 +4044,7 @@ flagpd1("mfix4300"),
flagpd1("mfix-and-continue"),
m("mfix-cmse-cve-2021-35465"),
m("mfix-cortex-a53-835769"),
-flagpd1("mfix-cortex-a57-aes-1742098"),
+m("mfix-cortex-a57-aes-1742098"),
flagpd1("mfix-cortex-a72-aes-1655431"),
m("mfloat128"),
sepd1("mfloat-abi"),
@@ -4188,12 +4195,12 @@ m("mno-enqcmd"),
m("mno-exception-handling"),
flagpd1("mnoexecstack"),
m("mno-execute-only"),
-flagpd1("mno-extended-const"),
+m("mno-extended-const"),
flagpd1("mno-extern-sdata"),
m("mno-f16c"),
m("mno-fix-cmse-cve-2021-35465"),
m("mno-fix-cortex-a53-835769"),
-flagpd1("mno-fix-cortex-a57-aes-1742098"),
+m("mno-fix-cortex-a57-aes-1742098"),
flagpd1("mno-fix-cortex-a72-aes-1655431"),
m("mno-float128"),
m("mno-fma"),
@@ -4272,7 +4279,7 @@ m("mno-prfchw"),
m("mno-ptwrite"),
flagpd1("mno-pure-code"),
m("mno-rdpid"),
-flagpd1("mno-rdpru"),
+m("mno-rdpru"),
m("mno-rdrnd"),
m("mno-rdseed"),
.{
@@ -4382,7 +4389,7 @@ m("mptwrite"),
flagpd1("mpure-code"),
flagpd1("mqdsp6-compat"),
m("mrdpid"),
-flagpd1("mrdpru"),
+m("mrdpru"),
m("mrdrnd"),
m("mrdseed"),
flagpd1("mreassociate"),
@@ -5034,7 +5041,7 @@ sepd1("stack-usage-file"),
.{
.name = "static",
.syntax = .flag,
- .zig_equivalent = .other,
+ .zig_equivalent = .static,
.pd1 = true,
.pd2 = true,
.psl = false,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index aff7656bd3..4a0978af5b 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -780,7 +780,7 @@ pub const Object = struct {
null;
const emit_asm_path = try locPath(arena, comp.emit_asm, cache_dir);
- const emit_llvm_ir_path = try locPath(arena, comp.emit_llvm_ir, cache_dir);
+ var emit_llvm_ir_path = try locPath(arena, comp.emit_llvm_ir, cache_dir);
const emit_llvm_bc_path = try locPath(arena, comp.emit_llvm_bc, cache_dir);
const emit_asm_msg = emit_asm_path orelse "(none)";
@@ -791,7 +791,34 @@ pub const Object = struct {
emit_asm_msg, emit_bin_msg, emit_llvm_ir_msg, emit_llvm_bc_msg,
});
+ // Unfortunately, LLVM shits the bed when we ask for both binary and assembly.
+ // So we call the entire pipeline multiple times if this is requested.
var error_message: [*:0]const u8 = undefined;
+ if (emit_asm_path != null and emit_bin_path != null) {
+ if (self.target_machine.emitToFile(
+ self.llvm_module,
+ &error_message,
+ comp.bin_file.options.optimize_mode == .Debug,
+ comp.bin_file.options.optimize_mode == .ReleaseSmall,
+ comp.time_report,
+ comp.bin_file.options.tsan,
+ comp.bin_file.options.lto,
+ null,
+ emit_bin_path,
+ emit_llvm_ir_path,
+ null,
+ )) {
+ defer llvm.disposeMessage(error_message);
+
+ log.err("LLVM failed to emit bin={s} ir={s}: {s}", .{
+ emit_bin_msg, emit_llvm_ir_msg, error_message,
+ });
+ return error.FailedToEmit;
+ }
+ emit_bin_path = null;
+ emit_llvm_ir_path = null;
+ }
+
if (self.target_machine.emitToFile(
self.llvm_module,
&error_message,
@@ -2689,7 +2716,7 @@ pub const DeclGen = struct {
return dg.context.intType(bit_count);
},
.Float => switch (t.floatBits(target)) {
- 16 => return dg.context.halfType(),
+ 16 => return if (backendSupportsF16(target)) dg.context.halfType() else dg.context.intType(16),
32 => return dg.context.floatType(),
64 => return dg.context.doubleType(),
80 => return if (backendSupportsF80(target)) dg.context.x86FP80Type() else dg.context.intType(80),
@@ -3204,7 +3231,15 @@ pub const DeclGen = struct {
.Float => {
const llvm_ty = try dg.lowerType(tv.ty);
switch (tv.ty.floatBits(target)) {
- 16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)),
+ 16 => if (intrinsicsAllowed(tv.ty, target)) {
+ return llvm_ty.constReal(tv.val.toFloat(f16));
+ } else {
+ const repr = @bitCast(u16, tv.val.toFloat(f16));
+ const llvm_i16 = dg.context.intType(16);
+ const int = llvm_i16.constInt(repr, .False);
+ return int.constBitCast(llvm_ty);
+ },
+ 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)),
80 => {
const float = tv.val.toFloat(f80);
const repr = std.math.break_f80(float);
@@ -4316,13 +4351,17 @@ pub const FuncGen = struct {
const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
if (gop.found_existing) return gop.value_ptr.*;
- const val = self.air.value(inst).?;
- const ty = self.air.typeOf(inst);
- const llvm_val = try self.dg.lowerValue(.{ .ty = ty, .val = val });
- if (!isByRef(ty)) {
- gop.value_ptr.* = llvm_val;
- return llvm_val;
- }
+ const llvm_val = try self.resolveValue(.{
+ .ty = self.air.typeOf(inst),
+ .val = self.air.value(inst).?,
+ });
+ gop.value_ptr.* = llvm_val;
+ return llvm_val;
+ }
+
+ fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value {
+ const llvm_val = try self.dg.lowerValue(tv);
+ if (!isByRef(tv.ty)) return llvm_val;
// We have an LLVM value but we need to create a global constant and
// set the value as its initializer, and then return a pointer to the global.
@@ -4334,11 +4373,11 @@ pub const FuncGen = struct {
global.setLinkage(.Private);
global.setGlobalConstant(.True);
global.setUnnamedAddr(.True);
- global.setAlignment(ty.abiAlignment(target));
+ global.setAlignment(tv.ty.abiAlignment(target));
// Because of LLVM limitations for lowering certain types such as unions,
// the type of global constants might not match the type it is supposed to
// be, and so we must bitcast the pointer at the usage sites.
- const wanted_llvm_ty = try self.dg.lowerType(ty);
+ const wanted_llvm_ty = try self.dg.lowerType(tv.ty);
const wanted_bitcasted_llvm_ptr_ty = wanted_llvm_ty.pointerType(llvm_actual_addrspace);
const bitcasted_ptr = global.constBitCast(wanted_bitcasted_llvm_ptr_ty);
const wanted_llvm_ptr_ty = wanted_llvm_ty.pointerType(llvm_wanted_addrspace);
@@ -4346,7 +4385,6 @@ pub const FuncGen = struct {
bitcasted_ptr.constAddrSpaceCast(wanted_llvm_ptr_ty)
else
bitcasted_ptr;
- gop.value_ptr.* = casted_ptr;
return casted_ptr;
}
@@ -7576,13 +7614,25 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
- if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) {
- return softF80TruncOrExt(self, operand, src_bits, dest_bits);
- } else if (!backendSupportsF128(target) and (src_bits == 128 or dest_bits == 128)) {
- return softF128TruncOrExt(self, operand, src_bits, dest_bits);
+
+ if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
+ return self.builder.buildFPTrunc(operand, dest_llvm_ty, "");
+ } else {
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
+
+ var fn_name_buf: [64]u8 = undefined;
+ const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__trunc{s}f{s}f2", .{
+ compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
+ }) catch unreachable;
+
+ const params = [1]*llvm.Value{operand};
+ const param_types = [1]*llvm.Type{operand_llvm_ty};
+ const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
+
+ return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, "");
}
- const dest_llvm_ty = try self.dg.lowerType(dest_ty);
- return self.builder.buildFPTrunc(operand, dest_llvm_ty, "");
}
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
@@ -7596,13 +7646,25 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
- if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) {
- return softF80TruncOrExt(self, operand, src_bits, dest_bits);
- } else if (!backendSupportsF128(target) and (src_bits == 128 or dest_bits == 128)) {
- return softF128TruncOrExt(self, operand, src_bits, dest_bits);
+
+ if (intrinsicsAllowed(dest_ty, target) and intrinsicsAllowed(operand_ty, target)) {
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
+ return self.builder.buildFPExt(operand, dest_llvm_ty, "");
+ } else {
+ const operand_llvm_ty = try self.dg.lowerType(operand_ty);
+ const dest_llvm_ty = try self.dg.lowerType(dest_ty);
+
+ var fn_name_buf: [64]u8 = undefined;
+ const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__extend{s}f{s}f2", .{
+ compilerRtFloatAbbrev(src_bits), compilerRtFloatAbbrev(dest_bits),
+ }) catch unreachable;
+
+ const params = [1]*llvm.Value{operand};
+ const param_types = [1]*llvm.Type{operand_llvm_ty};
+ const llvm_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
+
+ return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, "");
}
- const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
- return self.builder.buildFPExt(operand, dest_llvm_ty, "");
}
fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
@@ -8680,12 +8742,78 @@ pub const FuncGen = struct {
return self.builder.buildShuffleVector(a, b, llvm_mask_value, "");
}
+ /// Reduce a vector by repeatedly applying `llvm_fn` to produce an accumulated result.
+ ///
+ /// Equivalent to:
+ /// reduce: {
+ /// var i: usize = 0;
+ /// var accum: T = init;
+ /// while (i < vec.len) : (i += 1) {
+ /// accum = llvm_fn(accum, vec[i]);
+ /// }
+ /// break :reduce accum;
+ /// }
+ ///
+ fn buildReducedCall(
+ self: *FuncGen,
+ llvm_fn: *llvm.Value,
+ operand_vector: *llvm.Value,
+ vector_len: usize,
+ accum_init: *llvm.Value,
+ ) !*llvm.Value {
+ const llvm_usize_ty = try self.dg.lowerType(Type.usize);
+ const llvm_vector_len = llvm_usize_ty.constInt(vector_len, .False);
+ const llvm_result_ty = accum_init.typeOf();
+
+ // Allocate and initialize our mutable variables
+ const i_ptr = self.buildAlloca(llvm_usize_ty, null);
+ _ = self.builder.buildStore(llvm_usize_ty.constInt(0, .False), i_ptr);
+ const accum_ptr = self.buildAlloca(llvm_result_ty, null);
+ _ = self.builder.buildStore(accum_init, accum_ptr);
+
+ // Setup the loop
+ const loop = self.context.appendBasicBlock(self.llvm_func, "ReduceLoop");
+ const loop_exit = self.context.appendBasicBlock(self.llvm_func, "AfterReduce");
+ _ = self.builder.buildBr(loop);
+ {
+ self.builder.positionBuilderAtEnd(loop);
+
+ // while (i < vec.len)
+ const i = self.builder.buildLoad(llvm_usize_ty, i_ptr, "");
+ const cond = self.builder.buildICmp(.ULT, i, llvm_vector_len, "");
+ const loop_then = self.context.appendBasicBlock(self.llvm_func, "ReduceLoopThen");
+
+ _ = self.builder.buildCondBr(cond, loop_then, loop_exit);
+
+ {
+ self.builder.positionBuilderAtEnd(loop_then);
+
+ // accum = f(accum, vec[i]);
+ const accum = self.builder.buildLoad(llvm_result_ty, accum_ptr, "");
+ const element = self.builder.buildExtractElement(operand_vector, i, "");
+ const params = [2]*llvm.Value{ accum, element };
+ const new_accum = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .C, .Auto, "");
+ _ = self.builder.buildStore(new_accum, accum_ptr);
+
+ // i += 1
+ const new_i = self.builder.buildAdd(i, llvm_usize_ty.constInt(1, .False), "");
+ _ = self.builder.buildStore(new_i, i_ptr);
+ _ = self.builder.buildBr(loop);
+ }
+ }
+
+ self.builder.positionBuilderAtEnd(loop_exit);
+ return self.builder.buildLoad(llvm_result_ty, accum_ptr, "");
+ }
+
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
+ const target = self.dg.module.getTarget();
const reduce = self.air.instructions.items(.data)[inst].reduce;
- const operand = try self.resolveInst(reduce.operand);
+ var operand = try self.resolveInst(reduce.operand);
+ const operand_ty = self.air.typeOf(reduce.operand);
const scalar_ty = self.air.typeOfIndex(inst);
// TODO handle the fast math setting
@@ -8696,17 +8824,21 @@ pub const FuncGen = struct {
.Xor => return self.builder.buildXorReduce(operand),
.Min => switch (scalar_ty.zigTypeTag()) {
.Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt()),
- .Float => return self.builder.buildFPMinReduce(operand),
+ .Float => if (intrinsicsAllowed(scalar_ty, target)) {
+ return self.builder.buildFPMinReduce(operand);
+ },
else => unreachable,
},
.Max => switch (scalar_ty.zigTypeTag()) {
.Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt()),
- .Float => return self.builder.buildFPMaxReduce(operand),
+ .Float => if (intrinsicsAllowed(scalar_ty, target)) {
+ return self.builder.buildFPMaxReduce(operand);
+ },
else => unreachable,
},
.Add => switch (scalar_ty.zigTypeTag()) {
.Int => return self.builder.buildAddReduce(operand),
- .Float => {
+ .Float => if (intrinsicsAllowed(scalar_ty, target)) {
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const neutral_value = scalar_llvm_ty.constReal(-0.0);
return self.builder.buildFPAddReduce(neutral_value, operand);
@@ -8715,7 +8847,7 @@ pub const FuncGen = struct {
},
.Mul => switch (scalar_ty.zigTypeTag()) {
.Int => return self.builder.buildMulReduce(operand),
- .Float => {
+ .Float => if (intrinsicsAllowed(scalar_ty, target)) {
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const neutral_value = scalar_llvm_ty.constReal(1.0);
return self.builder.buildFPMulReduce(neutral_value, operand);
@@ -8723,6 +8855,44 @@ pub const FuncGen = struct {
else => unreachable,
},
}
+
+ // Reduction could not be performed with intrinsics.
+ // Use a manual loop over a softfloat call instead.
+ var fn_name_buf: [64]u8 = undefined;
+ const float_bits = scalar_ty.floatBits(target);
+ const fn_name = switch (reduce.operation) {
+ .Min => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{
+ libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
+ }) catch unreachable,
+ .Max => std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{
+ libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
+ }) catch unreachable,
+ .Add => std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{
+ compilerRtFloatAbbrev(float_bits),
+ }) catch unreachable,
+ .Mul => std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{
+ compilerRtFloatAbbrev(float_bits),
+ }) catch unreachable,
+ else => unreachable,
+ };
+ var init_value_payload = Value.Payload.Float_32{
+ .data = switch (reduce.operation) {
+ .Min => std.math.nan(f32),
+ .Max => std.math.nan(f32),
+ .Add => -0.0,
+ .Mul => 1.0,
+ else => unreachable,
+ },
+ };
+
+ const param_llvm_ty = try self.dg.lowerType(scalar_ty);
+ const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty };
+ const libc_fn = self.getLibcFunction(fn_name, &param_types, param_llvm_ty);
+ const init_value = try self.dg.lowerValue(.{
+ .ty = scalar_ty,
+ .val = Value.initPayload(&init_value_payload.base),
+ });
+ return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(), init_value);
}
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
@@ -8849,7 +9019,7 @@ pub const FuncGen = struct {
llvm_usize.constInt(@intCast(c_uint, array_info.len), .False),
};
const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
- const llvm_elem = try self.dg.lowerValue(.{
+ const llvm_elem = try self.resolveValue(.{
.ty = array_info.elem_type,
.val = sent_val,
});
@@ -9012,7 +9182,13 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
switch (prefetch.cache) {
.instruction => switch (target.cpu.arch) {
- .x86_64, .i386 => return null,
+ .x86_64,
+ .i386,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ => return null,
.arm, .armeb, .thumb, .thumbeb => {
switch (prefetch.rw) {
.write => return null,
@@ -9063,169 +9239,6 @@ pub const FuncGen = struct {
return self.builder.buildAddrSpaceCast(operand, llvm_dest_ty, "");
}
- fn softF80TruncOrExt(
- self: *FuncGen,
- operand: *llvm.Value,
- src_bits: u16,
- dest_bits: u16,
- ) !?*llvm.Value {
- const target = self.dg.module.getTarget();
-
- var param_llvm_ty: *llvm.Type = self.context.intType(80);
- var ret_llvm_ty: *llvm.Type = param_llvm_ty;
- var fn_name: [*:0]const u8 = undefined;
- var arg = operand;
- var final_cast: ?*llvm.Type = null;
-
- assert(src_bits == 80 or dest_bits == 80);
-
- if (src_bits == 80) switch (dest_bits) {
- 16 => {
- // See corresponding condition at definition of
- // __truncxfhf2 in compiler-rt.
- if (target.cpu.arch.isAARCH64()) {
- ret_llvm_ty = self.context.halfType();
- } else {
- ret_llvm_ty = self.context.intType(16);
- final_cast = self.context.halfType();
- }
- fn_name = "__truncxfhf2";
- },
- 32 => {
- ret_llvm_ty = self.context.floatType();
- fn_name = "__truncxfsf2";
- },
- 64 => {
- ret_llvm_ty = self.context.doubleType();
- fn_name = "__truncxfdf2";
- },
- 80 => return operand,
- 128 => {
- ret_llvm_ty = self.context.fp128Type();
- fn_name = "__extendxftf2";
- },
- else => unreachable,
- } else switch (src_bits) {
- 16 => {
- // See corresponding condition at definition of
- // __extendhfxf2 in compiler-rt.
- param_llvm_ty = if (target.cpu.arch.isAARCH64())
- self.context.halfType()
- else
- self.context.intType(16);
- arg = self.builder.buildBitCast(arg, param_llvm_ty, "");
- fn_name = "__extendhfxf2";
- },
- 32 => {
- param_llvm_ty = self.context.floatType();
- fn_name = "__extendsfxf2";
- },
- 64 => {
- param_llvm_ty = self.context.doubleType();
- fn_name = "__extenddfxf2";
- },
- 80 => return operand,
- 128 => {
- param_llvm_ty = self.context.fp128Type();
- fn_name = "__trunctfxf2";
- },
- else => unreachable,
- }
-
- const llvm_fn = self.dg.object.llvm_module.getNamedFunction(fn_name) orelse f: {
- const param_types = [_]*llvm.Type{param_llvm_ty};
- const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False);
- break :f self.dg.object.llvm_module.addFunction(fn_name, fn_type);
- };
-
- var args: [1]*llvm.Value = .{arg};
- const result = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .C, .Auto, "");
- const final_cast_llvm_ty = final_cast orelse return result;
- return self.builder.buildBitCast(result, final_cast_llvm_ty, "");
- }
-
- fn softF128TruncOrExt(
- self: *FuncGen,
- operand: *llvm.Value,
- src_bits: u16,
- dest_bits: u16,
- ) !?*llvm.Value {
- const target = self.dg.module.getTarget();
-
- var param_llvm_ty: *llvm.Type = self.context.fp128Type();
- var ret_llvm_ty: *llvm.Type = param_llvm_ty;
- var fn_name: [*:0]const u8 = undefined;
- var arg = operand;
- var final_cast: ?*llvm.Type = null;
-
- assert(src_bits == 128 or dest_bits == 128);
-
- // TODO: Implement proper names and compiler-rt functions for this!!
- if (src_bits == 128) switch (dest_bits) {
- 16 => {
- // See corresponding condition at definition of
- // __truncxfhf2 in compiler-rt.
- if (target.cpu.arch.isAARCH64()) {
- ret_llvm_ty = self.context.halfType();
- } else {
- ret_llvm_ty = self.context.intType(16);
- final_cast = self.context.halfType();
- }
- fn_name = "__trunctfhf2";
- },
- 32 => {
- ret_llvm_ty = self.context.floatType();
- fn_name = "__trunctfsf2";
- },
- 64 => {
- ret_llvm_ty = self.context.doubleType();
- fn_name = "__trunctfdf2";
- },
- 80 => {
- ret_llvm_ty = self.context.intType(80);
- fn_name = "__trunctfxf2";
- },
- 128 => return operand,
- else => unreachable,
- } else switch (src_bits) {
- 16 => {
- // See corresponding condition at definition of
- // __extendhftf2 in compiler-rt.
- param_llvm_ty = if (target.cpu.arch.isAARCH64())
- self.context.halfType()
- else
- self.context.intType(16);
- arg = self.builder.buildBitCast(arg, param_llvm_ty, "");
- fn_name = "__extendhftf2";
- },
- 32 => {
- param_llvm_ty = self.context.floatType();
- fn_name = "__extendsftf2";
- },
- 64 => {
- param_llvm_ty = self.context.doubleType();
- fn_name = "__extenddftf2";
- },
- 80 => {
- param_llvm_ty = self.context.intType(80);
- fn_name = "__extendxftf2";
- },
- 128 => return operand,
- else => unreachable,
- }
-
- const llvm_fn = self.dg.object.llvm_module.getNamedFunction(fn_name) orelse f: {
- const param_types = [_]*llvm.Type{param_llvm_ty};
- const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False);
- break :f self.dg.object.llvm_module.addFunction(fn_name, fn_type);
- };
-
- var args: [1]*llvm.Value = .{arg};
- const result = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .C, .Auto, "");
- const final_cast_llvm_ty = final_cast orelse return result;
- return self.builder.buildBitCast(result, final_cast_llvm_ty, "");
- }
-
fn getErrorNameTable(self: *FuncGen) !*llvm.Value {
if (self.dg.object.error_name_table) |table| {
return table;
@@ -10573,6 +10586,17 @@ fn backendSupportsF80(target: std.Target) bool {
/// if it produces miscompilations.
fn backendSupportsF16(target: std.Target) bool {
return switch (target.cpu.arch) {
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .wasm32,
+ .wasm64,
+ .mips,
+ .mipsel,
+ .mips64,
+ .mips64el,
+ => false,
else => true,
};
}
diff --git a/src/main.zig b/src/main.zig
index 00fe9a3df6..056b5826e3 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -1649,6 +1649,8 @@ fn buildOutputType(
};
}
},
+ .dynamic => link_mode = .Dynamic,
+ .static => link_mode = .Static,
}
}
// Parse linker args.
@@ -4678,6 +4680,8 @@ pub const ClangArgIterator = struct {
weak_framework,
headerpad_max_install_names,
compress_debug_sections,
+ dynamic,
+ static,
};
const Args = struct {
diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp
index a102f2e340..59ca43644a 100644
--- a/src/stage1/analyze.cpp
+++ b/src/stage1/analyze.cpp
@@ -6369,9 +6369,11 @@ void init_const_float(ZigValue *const_val, ZigType *type, double value) {
const_val->data.x_f64 = value;
break;
case 80:
+ zig_double_to_extF80M(value, &const_val->data.x_f80);
+ break;
case 128:
- // if we need this, we should add a function that accepts a float128_t param
- zig_unreachable();
+ zig_double_to_f128M(value, &const_val->data.x_f128);
+ break;
default:
zig_unreachable();
}
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 2a05ba44d1..039b088c1e 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -80,6 +80,7 @@ void codegen_set_strip(CodeGen *g, bool strip) {
}
}
+static LLVMValueRef get_soft_float_fn(CodeGen *g, const char *name, int param_count, LLVMTypeRef param_type, LLVMTypeRef return_type);
static void render_const_val(CodeGen *g, ZigValue *const_val, const char *name);
static void render_const_val_global(CodeGen *g, ZigValue *const_val, const char *name);
static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *name);
@@ -1740,12 +1741,7 @@ static LLVMValueRef gen_soft_float_widen_or_shorten(CodeGen *g, ZigType *actual_
}
}
- LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, fn_name);
- if (func_ref == nullptr) {
- LLVMTypeRef fn_type = LLVMFunctionType(return_type, &param_type, 1, false);
- func_ref = LLVMAddFunction(g->module, fn_name, fn_type);
- }
-
+ LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, 1, param_type, return_type);
result = LLVMBuildCall2(g->builder, LLVMGlobalGetValueType(func_ref), func_ref, &expr_val, 1, "");
// On non-Arm platforms we need to bitcast __trunc<>fhf2 result back to f16
@@ -1770,9 +1766,12 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, Z
uint64_t wanted_bits;
if (scalar_actual_type->id == ZigTypeIdFloat) {
- if ((scalar_actual_type == g->builtin_types.entry_f80
+ if (((scalar_actual_type == g->builtin_types.entry_f80
|| scalar_wanted_type == g->builtin_types.entry_f80)
- && !target_has_f80(g->zig_target))
+ && !target_has_f80(g->zig_target)) ||
+ ((scalar_actual_type == g->builtin_types.entry_f16
+ || scalar_wanted_type == g->builtin_types.entry_f16)
+ && !target_is_arm(g->zig_target)))
{
return gen_soft_float_widen_or_shorten(g, actual_type, wanted_type, expr_val);
}
@@ -3104,6 +3103,7 @@ static LLVMValueRef gen_float_un_op(CodeGen *g, LLVMValueRef operand, ZigType *o
ZigType *elem_type = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.elem_type : operand_type;
if ((elem_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) ||
(elem_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target)) ||
+ (elem_type == g->builtin_types.entry_f16 && !target_is_arm(g->zig_target)) ||
op == BuiltinFnIdTan)
{
return gen_soft_float_un_op(g, operand, operand_type, op);
@@ -3694,7 +3694,8 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable,
ZigType *operand_type = op1->value->type;
ZigType *scalar_type = (operand_type->id == ZigTypeIdVector) ? operand_type->data.vector.elem_type : operand_type;
if ((scalar_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) ||
- (scalar_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target))) {
+ (scalar_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target)) ||
+ (scalar_type == g->builtin_types.entry_f16 && !target_is_arm(g->zig_target))) {
// LLVM incorrectly lowers the soft float calls for f128 as if they operated on `long double`.
// On some targets this will be incorrect, so we manually lower the call ourselves.
LLVMValueRef op1_value = ir_llvm_value(g, op1);
@@ -4028,7 +4029,8 @@ static LLVMValueRef ir_render_cast(CodeGen *g, Stage1Air *executable,
assert(actual_type->id == ZigTypeIdInt);
{
if ((wanted_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) ||
- (wanted_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target))) {
+ (wanted_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target)) ||
+ (wanted_type == g->builtin_types.entry_f16 && !target_is_arm(g->zig_target))) {
return gen_soft_int_to_float_op(g, expr_val, actual_type, wanted_type);
} else {
if (actual_type->data.integral.is_signed) {
@@ -4046,7 +4048,8 @@ static LLVMValueRef ir_render_cast(CodeGen *g, Stage1Air *executable,
LLVMValueRef result;
if ((actual_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) ||
- (actual_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target))) {
+ (actual_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target)) ||
+ (actual_type == g->builtin_types.entry_f16 && !target_is_arm(g->zig_target))) {
result = gen_soft_float_to_int_op(g, expr_val, actual_type, wanted_type);
} else {
if (wanted_type->data.integral.is_signed) {
@@ -4400,7 +4403,8 @@ static LLVMValueRef gen_negation(CodeGen *g, Stage1AirInst *inst, Stage1AirInst
operand_type->data.vector.elem_type : operand_type;
if ((scalar_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) ||
- (scalar_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target))) {
+ (scalar_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target)) ||
+ (scalar_type == g->builtin_types.entry_f16 && !target_is_arm(g->zig_target))) {
return gen_soft_float_neg(g, operand_type, llvm_operand);
}
@@ -6481,6 +6485,55 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, Stage1Air *executable, Stage1A
return result_loc;
}
+static LLVMValueRef ir_render_reduced_call(CodeGen *g, LLVMValueRef llvm_fn, LLVMValueRef operand_vector, size_t vector_len, LLVMValueRef accum_init, ZigType *accum_ty) {
+ LLVMTypeRef llvm_usize_ty = g->builtin_types.entry_usize->llvm_type;
+ LLVMValueRef llvm_vector_len = LLVMConstInt(llvm_usize_ty, vector_len, false);
+ LLVMTypeRef llvm_result_ty = LLVMTypeOf(accum_init);
+
+ // Allocate and initialize our mutable variables
+ LLVMValueRef i_ptr = build_alloca(g, g->builtin_types.entry_usize, "i", 0);
+ LLVMBuildStore(g->builder, LLVMConstInt(llvm_usize_ty, 0, false), i_ptr);
+ LLVMValueRef accum_ptr = build_alloca(g, accum_ty, "accum", 0);
+ LLVMBuildStore(g->builder, accum_init, accum_ptr);
+
+ // Setup the loop
+ LLVMBasicBlockRef loop = LLVMAppendBasicBlock(g->cur_fn_val, "ReduceLoop");
+ LLVMBasicBlockRef loop_exit = LLVMAppendBasicBlock(g->cur_fn_val, "AfterReduce");
+ LLVMBuildBr(g->builder, loop);
+ {
+ LLVMPositionBuilderAtEnd(g->builder, loop);
+
+ // while (i < vec.len)
+ LLVMValueRef i = LLVMBuildLoad2(g->builder, llvm_usize_ty, i_ptr, "");
+ LLVMValueRef cond = LLVMBuildICmp(g->builder, LLVMIntULT, i, llvm_vector_len, "");
+ LLVMBasicBlockRef loop_then = LLVMAppendBasicBlock(g->cur_fn_val, "ReduceLoopThen");
+
+ LLVMBuildCondBr(g->builder, cond, loop_then, loop_exit);
+
+ {
+ LLVMPositionBuilderAtEnd(g->builder, loop_then);
+
+ // accum = f(accum, vec[i]);
+ LLVMValueRef accum = LLVMBuildLoad2(g->builder, llvm_result_ty, accum_ptr, "");
+ LLVMValueRef element = LLVMBuildExtractElement(g->builder, operand_vector, i, "");
+ LLVMValueRef params[] {
+ accum,
+ element
+ };
+ LLVMValueRef new_accum = LLVMBuildCall2(g->builder, LLVMGlobalGetValueType(llvm_fn), llvm_fn, params, 2, "");
+ LLVMBuildStore(g->builder, new_accum, accum_ptr);
+
+ // i += 1
+ LLVMValueRef new_i = LLVMBuildAdd(g->builder, i, LLVMConstInt(llvm_usize_ty, 1, false), "");
+ LLVMBuildStore(g->builder, new_i, i_ptr);
+ LLVMBuildBr(g->builder, loop);
+ }
+ }
+
+ LLVMPositionBuilderAtEnd(g->builder, loop_exit);
+ return LLVMBuildLoad2(g->builder, llvm_result_ty, accum_ptr, "");
+}
+
static LLVMValueRef ir_render_reduce(CodeGen *g, Stage1Air *executable, Stage1AirInstReduce *instruction) {
LLVMValueRef value = ir_llvm_value(g, instruction->value);
@@ -6488,61 +6541,100 @@ static LLVMValueRef ir_render_reduce(CodeGen *g, Stage1Air *executable, Stage1Ai
assert(value_type->id == ZigTypeIdVector);
ZigType *scalar_type = value_type->data.vector.elem_type;
+ bool float_intrinsics_allowed = true;
+ const char *compiler_rt_type_abbrev = nullptr;
+ const char *math_float_prefix = nullptr;
+ const char *math_float_suffix = nullptr;
+ if ((scalar_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) ||
+ (scalar_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target)) ||
+ (scalar_type == g->builtin_types.entry_f16 && !target_is_arm(g->zig_target))) {
+ float_intrinsics_allowed = false;
+ compiler_rt_type_abbrev = get_compiler_rt_type_abbrev(scalar_type);
+ math_float_prefix = libc_float_prefix(g, scalar_type);
+ math_float_suffix = libc_float_suffix(g, scalar_type);
+ }
+
ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &instruction->base));
- LLVMValueRef result_val;
+ char fn_name[64];
+ ZigValue *init_value = nullptr;
switch (instruction->op) {
case ReduceOp_and:
assert(scalar_type->id == ZigTypeIdInt || scalar_type->id == ZigTypeIdBool);
- result_val = ZigLLVMBuildAndReduce(g->builder, value);
+ return ZigLLVMBuildAndReduce(g->builder, value);
break;
case ReduceOp_or:
assert(scalar_type->id == ZigTypeIdInt || scalar_type->id == ZigTypeIdBool);
- result_val = ZigLLVMBuildOrReduce(g->builder, value);
+ return ZigLLVMBuildOrReduce(g->builder, value);
break;
case ReduceOp_xor:
assert(scalar_type->id == ZigTypeIdInt || scalar_type->id == ZigTypeIdBool);
- result_val = ZigLLVMBuildXorReduce(g->builder, value);
+ return ZigLLVMBuildXorReduce(g->builder, value);
break;
case ReduceOp_min: {
if (scalar_type->id == ZigTypeIdInt) {
const bool is_signed = scalar_type->data.integral.is_signed;
- result_val = ZigLLVMBuildIntMinReduce(g->builder, value, is_signed);
+ return ZigLLVMBuildIntMinReduce(g->builder, value, is_signed);
} else if (scalar_type->id == ZigTypeIdFloat) {
- result_val = ZigLLVMBuildFPMinReduce(g->builder, value);
+ if (float_intrinsics_allowed) {
+ return ZigLLVMBuildFPMinReduce(g->builder, value);
+ } else {
+ snprintf(fn_name, sizeof(fn_name), "%sfmin%s", math_float_prefix, math_float_suffix);
+ init_value = create_const_float(g, scalar_type, NAN);
+ }
} else zig_unreachable();
} break;
case ReduceOp_max: {
if (scalar_type->id == ZigTypeIdInt) {
const bool is_signed = scalar_type->data.integral.is_signed;
- result_val = ZigLLVMBuildIntMaxReduce(g->builder, value, is_signed);
+ return ZigLLVMBuildIntMaxReduce(g->builder, value, is_signed);
} else if (scalar_type->id == ZigTypeIdFloat) {
- result_val = ZigLLVMBuildFPMaxReduce(g->builder, value);
+ if (float_intrinsics_allowed) {
+ return ZigLLVMBuildFPMaxReduce(g->builder, value);
+ } else {
+ snprintf(fn_name, sizeof(fn_name), "%sfmax%s", math_float_prefix, math_float_suffix);
+ init_value = create_const_float(g, scalar_type, NAN);
+ }
} else zig_unreachable();
} break;
case ReduceOp_add: {
if (scalar_type->id == ZigTypeIdInt) {
- result_val = ZigLLVMBuildAddReduce(g->builder, value);
+ return ZigLLVMBuildAddReduce(g->builder, value);
} else if (scalar_type->id == ZigTypeIdFloat) {
- LLVMValueRef neutral_value = LLVMConstReal(
- get_llvm_type(g, scalar_type), -0.0);
- result_val = ZigLLVMBuildFPAddReduce(g->builder, neutral_value, value);
+ if (float_intrinsics_allowed) {
+ LLVMValueRef neutral_value = LLVMConstReal(
+ get_llvm_type(g, scalar_type), -0.0);
+ return ZigLLVMBuildFPAddReduce(g->builder, neutral_value, value);
+ } else {
+ snprintf(fn_name, sizeof(fn_name), "__add%sf3", compiler_rt_type_abbrev);
+ init_value = create_const_float(g, scalar_type, 0.0);
+ }
} else zig_unreachable();
} break;
case ReduceOp_mul: {
if (scalar_type->id == ZigTypeIdInt) {
- result_val = ZigLLVMBuildMulReduce(g->builder, value);
+ return ZigLLVMBuildMulReduce(g->builder, value);
} else if (scalar_type->id == ZigTypeIdFloat) {
- LLVMValueRef neutral_value = LLVMConstReal(
- get_llvm_type(g, scalar_type), 1.0);
- result_val = ZigLLVMBuildFPMulReduce(g->builder, neutral_value, value);
+ if (float_intrinsics_allowed) {
+ LLVMValueRef neutral_value = LLVMConstReal(
+ get_llvm_type(g, scalar_type), 1.0);
+ return ZigLLVMBuildFPMulReduce(g->builder, neutral_value, value);
+ } else {
+ snprintf(fn_name, sizeof(fn_name), "__mul%sf3", compiler_rt_type_abbrev);
+ init_value = create_const_float(g, scalar_type, 1.0);
+ }
} else zig_unreachable();
} break;
default:
zig_unreachable();
}
- return result_val;
+
+ LLVMValueRef llvm_init_value = gen_const_val(g, init_value, "");
+ uint32_t vector_len = value_type->data.vector.len;
+ LLVMTypeRef llvm_scalar_type = get_llvm_type(g, scalar_type);
+ const LLVMValueRef llvm_fn = get_soft_float_fn(g, fn_name, 2, llvm_scalar_type, llvm_scalar_type);
+ return ir_render_reduced_call(g, llvm_fn, value, vector_len, llvm_init_value, scalar_type);
}
static LLVMValueRef ir_render_fence(CodeGen *g, Stage1Air *executable, Stage1AirInstFence *instruction) {
@@ -6654,6 +6746,10 @@ static LLVMValueRef ir_render_prefetch(CodeGen *g, Stage1Air *executable, Stage1
switch (g->zig_target->arch) {
case ZigLLVM_x86:
case ZigLLVM_x86_64:
+ case ZigLLVM_ppc:
+ case ZigLLVM_ppcle:
+ case ZigLLVM_ppc64:
+ case ZigLLVM_ppc64le:
return nullptr;
default:
break;
@@ -7378,7 +7474,9 @@ static LLVMValueRef ir_render_soft_mul_add(CodeGen *g, Stage1Air *executable, St
uint32_t vector_len = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.len : 0;
const char *fn_name;
- if (float_type == g->builtin_types.entry_f32)
+ if (float_type == g->builtin_types.entry_f16)
+ fn_name = "__fmah";
+ else if (float_type == g->builtin_types.entry_f32)
fn_name = "fmaf";
else if (float_type == g->builtin_types.entry_f64)
fn_name = "fma";
@@ -7389,13 +7487,8 @@ static LLVMValueRef ir_render_soft_mul_add(CodeGen *g, Stage1Air *executable, St
else
zig_unreachable();
- LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, fn_name);
- if (func_ref == nullptr) {
- LLVMTypeRef float_type_ref = float_type->llvm_type;
- LLVMTypeRef params[3] = { float_type_ref, float_type_ref, float_type_ref };
- LLVMTypeRef fn_type = LLVMFunctionType(float_type_ref, params, 3, false);
- func_ref = LLVMAddFunction(g->module, fn_name, fn_type);
- }
+ LLVMTypeRef float_type_ref = float_type->llvm_type;
+ LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, 3, float_type_ref, float_type_ref);
LLVMValueRef op1 = ir_llvm_value(g, instruction->op1);
LLVMValueRef op2 = ir_llvm_value(g, instruction->op2);
@@ -7425,7 +7518,8 @@ static LLVMValueRef ir_render_mul_add(CodeGen *g, Stage1Air *executable, Stage1A
ZigType *operand_type = instruction->op1->value->type;
operand_type = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.elem_type : operand_type;
if ((operand_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) ||
- (operand_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target))) {
+ (operand_type == g->builtin_types.entry_f128 && !target_long_double_is_f128(g->zig_target)) ||
+ (operand_type == g->builtin_types.entry_f16 && !target_is_arm(g->zig_target))) {
return ir_render_soft_mul_add(g, executable, instruction, operand_type);
}
LLVMValueRef op1 = ir_llvm_value(g, instruction->op1);
@@ -9744,7 +9838,12 @@ static void define_builtin_types(CodeGen *g) {
}
}
- add_fp_entry(g, "f16", 16, LLVMHalfType(), &g->builtin_types.entry_f16);
+ if (target_is_arm(g->zig_target)) {
+ add_fp_entry(g, "f16", 16, LLVMHalfType(), &g->builtin_types.entry_f16);
+ } else {
+ ZigType *u16_ty = get_int_type(g, false, 16);
+ add_fp_entry(g, "f16", 16, get_llvm_type(g, u16_ty), &g->builtin_types.entry_f16);
+ }
add_fp_entry(g, "f32", 32, LLVMFloatType(), &g->builtin_types.entry_f32);
add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64);
add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128);
@@ -9841,6 +9940,7 @@ static void define_builtin_types(CodeGen *g) {
add_fp_entry(g, "c_longdouble", 128, LLVMFP128Type(), &g->builtin_types.entry_c_longdouble);
break;
case ZigLLVM_ppc:
+ case ZigLLVM_ppcle:
case ZigLLVM_ppc64:
case ZigLLVM_ppc64le:
add_fp_entry(g, "c_longdouble", 128, LLVMFP128Type(), &g->builtin_types.entry_c_longdouble);
diff --git a/src/stage1/config.h.in b/src/stage1/config.h.in
index 2be0839996..8d1e688cbe 100644
--- a/src/stage1/config.h.in
+++ b/src/stage1/config.h.in
@@ -22,6 +22,8 @@
#define ZIG_LLD_INCLUDE_PATH "@LLD_INCLUDE_DIRS@"
#define ZIG_LLD_LIBRARIES "@LLD_LIBRARIES@"
#define ZIG_CLANG_LIBRARIES "@CLANG_LIBRARIES@"
+#define ZIG_LLVM_INCLUDE_PATH "@LLVM_INCLUDE_DIRS@"
+#define ZIG_LLVM_LIB_PATH "@LLVM_LIBDIRS@"
#define ZIG_LLVM_LIBRARIES "@LLVM_LIBRARIES@"
#define ZIG_DIA_GUIDS_LIB "@ZIG_DIA_GUIDS_LIB_ESCAPED@"
diff --git a/src/stage1/softfloat.hpp b/src/stage1/softfloat.hpp
index a0d270d55f..b9d886d311 100644
--- a/src/stage1/softfloat.hpp
+++ b/src/stage1/softfloat.hpp
@@ -21,6 +21,20 @@ static inline float16_t zig_double_to_f16(double x) {
return f64_to_f16(y);
}
+static inline void zig_double_to_extF80M(double x, extFloat80_t *result) {
+ float64_t y;
+ static_assert(sizeof(x) == sizeof(y), "");
+ memcpy(&y, &x, sizeof(x));
+ f64_to_extF80M(y, result);
+}
+
+static inline void zig_double_to_f128M(double x, float128_t *result) {
+ float64_t y;
+ static_assert(sizeof(x) == sizeof(y), "");
+ memcpy(&y, &x, sizeof(x));
+ f64_to_f128M(y, result);
+}
+
// Return value is safe to coerce to float even when |x| is NaN or Infinity.
static inline double zig_f16_to_double(float16_t x) {
diff --git a/src/stage1/target.cpp b/src/stage1/target.cpp
index 3031b7e588..dfd91bed8a 100644
--- a/src/stage1/target.cpp
+++ b/src/stage1/target.cpp
@@ -950,7 +950,6 @@ bool target_is_arm(const ZigTarget *target) {
case ZigLLVM_msp430:
case ZigLLVM_nvptx:
case ZigLLVM_nvptx64:
- case ZigLLVM_ppc64le:
case ZigLLVM_r600:
case ZigLLVM_renderscript32:
case ZigLLVM_renderscript64:
@@ -971,6 +970,7 @@ bool target_is_arm(const ZigTarget *target) {
case ZigLLVM_ppc:
case ZigLLVM_ppcle:
case ZigLLVM_ppc64:
+ case ZigLLVM_ppc64le:
case ZigLLVM_ve:
case ZigLLVM_spirv32:
case ZigLLVM_spirv64:
@@ -1125,8 +1125,8 @@ bool target_is_mips(const ZigTarget *target) {
}
bool target_is_ppc(const ZigTarget *target) {
- return target->arch == ZigLLVM_ppc || target->arch == ZigLLVM_ppc64 ||
- target->arch == ZigLLVM_ppc64le;
+ return target->arch == ZigLLVM_ppc || target->arch == ZigLLVM_ppcle ||
+ target->arch == ZigLLVM_ppc64 || target->arch == ZigLLVM_ppc64le;
}
// Returns the minimum alignment for every function pointer on the given
diff --git a/src/type.zig b/src/type.zig
index c1c8054e26..3b46546df0 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2898,12 +2898,30 @@ pub const Type = extern union {
.c_uint => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) },
.c_long => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) },
.c_ulong => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) },
- .c_longlong => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
- .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
+ .c_longlong => switch (target.cpu.arch) {
+ .i386 => switch (target.os.tag) {
+ .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 },
+ else => return AbiAlignmentAdvanced{ .scalar = 4 },
+ },
+ else => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
+ },
+ .c_ulonglong => switch (target.cpu.arch) {
+ .i386 => switch (target.os.tag) {
+ .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 },
+ else => return AbiAlignmentAdvanced{ .scalar = 4 },
+ },
+ else => return AbiAlignmentAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
+ },
.f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
.f32 => return AbiAlignmentAdvanced{ .scalar = 4 },
- .f64 => return AbiAlignmentAdvanced{ .scalar = 8 },
+ .f64 => switch (target.cpu.arch) {
+ .i386 => switch (target.os.tag) {
+ .windows, .uefi => return AbiAlignmentAdvanced{ .scalar = 8 },
+ else => return AbiAlignmentAdvanced{ .scalar = 4 },
+ },
+ else => return AbiAlignmentAdvanced{ .scalar = 8 },
+ },
.f128 => return AbiAlignmentAdvanced{ .scalar = 16 },
.f80 => switch (target.cpu.arch) {
@@ -2922,7 +2940,10 @@ pub const Type = extern union {
16 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f16, target) },
32 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f32, target) },
64 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f64, target) },
- 80 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f80, target) },
+ 80 => if (target.cpu.arch == .i386 and target.isMinGW())
+ return AbiAlignmentAdvanced{ .scalar = 4 }
+ else
+ return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f80, target) },
128 => return AbiAlignmentAdvanced{ .scalar = abiAlignment(Type.f128, target) },
else => unreachable,
},
@@ -6643,7 +6664,11 @@ pub const CType = enum {
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
.longlong, .ulonglong => return 64,
.longdouble => switch (target.cpu.arch) {
- .i386, .x86_64 => return 80,
+ .i386 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+ .x86_64 => return 80,
.riscv64,
.aarch64,
@@ -6693,7 +6718,11 @@ pub const CType = enum {
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
.longlong, .ulonglong => return 64,
.longdouble => switch (target.cpu.arch) {
- .i386, .x86_64 => return 80,
+ .i386 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+ .x86_64 => return 80,
.riscv64,
.aarch64,
@@ -6721,7 +6750,18 @@ pub const CType = enum {
.windows, .uefi => switch (self) {
.short, .ushort => return 16,
.int, .uint, .long, .ulong => return 32,
- .longlong, .ulonglong, .longdouble => return 64,
+ .longlong, .ulonglong => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .i386 => switch (target.abi) {
+ .gnu => return 80,
+ else => return 64,
+ },
+ .x86_64 => switch (target.abi) {
+ .gnu => return 80,
+ else => return 64,
+ },
+ else => return 64,
+ },
},
.macos, .ios, .tvos, .watchos => switch (self) {
diff --git a/src/value.zig b/src/value.zig
index 01df65a715..7a0636dda0 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1999,6 +1999,11 @@ pub const Value = extern union {
}
return true;
},
+ .float_16 => if (std.math.isNan(lhs.castTag(.float_16).?.data)) return op != .neq,
+ .float_32 => if (std.math.isNan(lhs.castTag(.float_32).?.data)) return op != .neq,
+ .float_64 => if (std.math.isNan(lhs.castTag(.float_64).?.data)) return op != .neq,
+ .float_80 => if (std.math.isNan(lhs.castTag(.float_80).?.data)) return op != .neq,
+ .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op != .neq,
else => {},
}
return (try orderAgainstZeroAdvanced(lhs, sema_kit)).compare(op);
@@ -3596,6 +3601,18 @@ pub const Value = extern union {
};
}
+ /// Returns true if the value is a floating point type and is infinite. Returns false otherwise.
+ pub fn isInf(val: Value) bool {
+ return switch (val.tag()) {
+ .float_16 => std.math.isInf(val.castTag(.float_16).?.data),
+ .float_32 => std.math.isInf(val.castTag(.float_32).?.data),
+ .float_64 => std.math.isInf(val.castTag(.float_64).?.data),
+ .float_80 => std.math.isInf(val.castTag(.float_80).?.data),
+ .float_128 => std.math.isInf(val.castTag(.float_128).?.data),
+ else => false,
+ };
+ }
+
pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
diff --git a/test/behavior.zig b/test/behavior.zig
index 468ecd034c..45b607692b 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -89,16 +89,17 @@ test {
_ = @import("behavior/bugs/12551.zig");
_ = @import("behavior/bugs/12644.zig");
_ = @import("behavior/bugs/12680.zig");
- _ = @import("behavior/bugs/12776.zig");
_ = @import("behavior/bugs/12786.zig");
_ = @import("behavior/bugs/12794.zig");
_ = @import("behavior/bugs/12801-1.zig");
_ = @import("behavior/bugs/12801-2.zig");
_ = @import("behavior/bugs/12885.zig");
_ = @import("behavior/bugs/12890.zig");
+ _ = @import("behavior/bugs/12891.zig");
_ = @import("behavior/bugs/12911.zig");
_ = @import("behavior/bugs/12928.zig");
_ = @import("behavior/bugs/12945.zig");
+ _ = @import("behavior/bugs/12972.zig");
_ = @import("behavior/bugs/12984.zig");
_ = @import("behavior/bugs/13068.zig");
_ = @import("behavior/bugs/13128.zig");
@@ -186,6 +187,8 @@ test {
_ = @import("behavior/packed_struct_explicit_backing_int.zig");
_ = @import("behavior/empty_union.zig");
_ = @import("behavior/inline_switch.zig");
+ _ = @import("behavior/bugs/12723.zig");
+ _ = @import("behavior/bugs/12776.zig");
}
if (builtin.os.tag != .wasi) {
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index a131cc8df7..2ebdda341a 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -566,6 +566,8 @@ test "@alignCast null" {
}
test "alignment of slice element" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
const a: []align(1024) const u8 = undefined;
try expect(@TypeOf(&a[0]) == *align(1024) const u8);
}
diff --git a/test/behavior/bugs/11816.zig b/test/behavior/bugs/11816.zig
index 639212e098..5b6c9bd319 100644
--- a/test/behavior/bugs/11816.zig
+++ b/test/behavior/bugs/11816.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var x: u32 = 3;
const val: usize = while (true) switch (x) {
diff --git a/test/behavior/bugs/12723.zig b/test/behavior/bugs/12723.zig
new file mode 100644
index 0000000000..6768444545
--- /dev/null
+++ b/test/behavior/bugs/12723.zig
@@ -0,0 +1,11 @@
+const expect = @import("std").testing.expect;
+
+// This test causes a compile error on stage1 regardless of whether
+// the body of the test is comptime-gated or not. To workaround this,
+// we gate the inclusion of the test file.
+test "Non-exhaustive enum backed by comptime_int" {
+ const E = enum(comptime_int) { a, b, c, _ };
+ comptime var e: E = .a;
+ e = @intToEnum(E, 378089457309184723749);
+ try expect(@enumToInt(e) == 378089457309184723749);
+}
diff --git a/test/behavior/bugs/12801-1.zig b/test/behavior/bugs/12801-1.zig
index ff94382d1f..b1f565e47f 100644
--- a/test/behavior/bugs/12801-1.zig
+++ b/test/behavior/bugs/12801-1.zig
@@ -8,6 +8,7 @@ fn capacity_() u64 {
test {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try std.testing.expect((@This(){}).capacity() == 64);
}
diff --git a/test/behavior/bugs/12801-2.zig b/test/behavior/bugs/12801-2.zig
index f98fcfbcff..298e4f96c1 100644
--- a/test/behavior/bugs/12801-2.zig
+++ b/test/behavior/bugs/12801-2.zig
@@ -14,6 +14,7 @@ const Auto = struct {
}
};
test {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/bugs/12891.zig b/test/behavior/bugs/12891.zig
new file mode 100644
index 0000000000..97126bde4a
--- /dev/null
+++ b/test/behavior/bugs/12891.zig
@@ -0,0 +1,20 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+test "issue12891" {
+ const f = 10.0;
+ var i: usize = 0;
+ try std.testing.expect(i < f);
+}
+test "nan" {
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+ const f = comptime std.math.nan(f64);
+ var i: usize = 0;
+ try std.testing.expect(!(f < i));
+}
+test "inf" {
+ const f = comptime std.math.inf(f64);
+ var i: usize = 0;
+ try std.testing.expect(f > i);
+}
diff --git a/test/behavior/bugs/12972.zig b/test/behavior/bugs/12972.zig
new file mode 100644
index 0000000000..0e01782705
--- /dev/null
+++ b/test/behavior/bugs/12972.zig
@@ -0,0 +1,17 @@
+const builtin = @import("builtin");
+
+pub fn f(_: [:null]const ?u8) void {}
+
+test {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+ const c: u8 = 42;
+ f(&[_:null]?u8{c});
+ f(&.{c});
+
+ var v: u8 = 42;
+ f(&[_:null]?u8{v});
+ f(&.{v});
+}
diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig
index 114090c78e..5cb89ffa32 100644
--- a/test/behavior/enum.zig
+++ b/test/behavior/enum.zig
@@ -1169,10 +1169,3 @@ test "Non-exhaustive enum with nonstandard int size behaves correctly" {
const E = enum(u15) { _ };
try expect(@sizeOf(E) == @sizeOf(u15));
}
-
-test "Non-exhaustive enum backed by comptime_int" {
- const E = enum(comptime_int) { a, b, c, _ };
- comptime var e: E = .a;
- e = @intToEnum(E, 378089457309184723749);
- try expect(@enumToInt(e) == 378089457309184723749);
-}
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index da93ebc831..c2d3162919 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -1339,6 +1339,8 @@ test "lazy value is resolved as slice operand" {
}
test "break from inline loop depends on runtime condition" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
const S = struct {
fn foo(a: u8) bool {
return a == 4;
diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig
index 861b786a56..1ce5ffb1e7 100644
--- a/test/behavior/muladd.zig
+++ b/test/behavior/muladd.zig
@@ -71,17 +71,6 @@ test "@mulAdd f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.os.tag == .macos and builtin.cpu.arch == .aarch64) {
- // https://github.com/ziglang/zig/issues/9900
- return error.SkipZigTest;
- }
-
- if (builtin.zig_backend == .stage1 and
- builtin.cpu.arch == .i386 and builtin.os.tag == .linux)
- {
- return error.SkipZigTest;
- }
-
comptime try testMulAdd128();
try testMulAdd128();
}
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index 46adee083d..5a878112b5 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -585,6 +585,7 @@ test "runtime init of unnamed packed struct type" {
}
test "packed struct passed to callconv(.C) function" {
+ if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 7205c53c46..80fa2021d8 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -506,18 +506,12 @@ test "vector division operators" {
}
fn doTheTest() !void {
- // https://github.com/ziglang/zig/issues/4952
- if (builtin.target.os.tag != .windows) {
- try doTheTestDiv(f16, [4]f16{ 4.0, -4.0, 4.0, -4.0 }, [4]f16{ 1.0, 2.0, -1.0, -2.0 });
- }
+ try doTheTestDiv(f16, [4]f16{ 4.0, -4.0, 4.0, -4.0 }, [4]f16{ 1.0, 2.0, -1.0, -2.0 });
try doTheTestDiv(f32, [4]f32{ 4.0, -4.0, 4.0, -4.0 }, [4]f32{ 1.0, 2.0, -1.0, -2.0 });
try doTheTestDiv(f64, [4]f64{ 4.0, -4.0, 4.0, -4.0 }, [4]f64{ 1.0, 2.0, -1.0, -2.0 });
- // https://github.com/ziglang/zig/issues/4952
- if (builtin.target.os.tag != .windows) {
- try doTheTestMod(f16, [4]f16{ 4.0, -4.0, 4.0, -4.0 }, [4]f16{ 1.0, 2.0, 0.5, 3.0 });
- }
+ try doTheTestMod(f16, [4]f16{ 4.0, -4.0, 4.0, -4.0 }, [4]f16{ 1.0, 2.0, 0.5, 3.0 });
try doTheTestMod(f32, [4]f32{ 4.0, -4.0, 4.0, -4.0 }, [4]f32{ 1.0, 2.0, 0.5, 3.0 });
try doTheTestMod(f64, [4]f64{ 4.0, -4.0, 4.0, -4.0 }, [4]f64{ 1.0, 2.0, 0.5, 3.0 });
diff --git a/test/standalone.zig b/test/standalone.zig
index d3dedb59e6..9c600dfee5 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -53,6 +53,9 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
if (builtin.cpu.arch.isAARCH64() and builtin.zig_backend == .stage2_llvm) {
cases.addBuildFile("test/c_abi/build.zig", .{});
}
+ if (builtin.cpu.arch == .i386 and builtin.zig_backend == .stage2_llvm) {
+ cases.addBuildFile("test/c_abi/build.zig", .{});
+ }
// C ABI tests only pass for the Wasm target when using stage2
cases.addBuildFile("test/c_abi/build_wasm.zig", .{
.requires_stage2 = true,
@@ -99,4 +102,5 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
//cases.add("tools/update_spirv_features.zig");
cases.addBuildFile("test/standalone/issue_13030/build.zig", .{ .build_modes = true });
+ cases.addBuildFile("test/standalone/emit_asm_and_bin/build.zig", .{});
}
diff --git a/test/standalone/emit_asm_and_bin/build.zig b/test/standalone/emit_asm_and_bin/build.zig
new file mode 100644
index 0000000000..43b7bb791d
--- /dev/null
+++ b/test/standalone/emit_asm_and_bin/build.zig
@@ -0,0 +1,11 @@
+const Builder = @import("std").build.Builder;
+
+pub fn build(b: *Builder) void {
+ const main = b.addTest("main.zig");
+ main.setBuildMode(b.standardReleaseOptions());
+ main.emit_asm = .{ .emit_to = b.pathFromRoot("main.s") };
+ main.emit_bin = .{ .emit_to = b.pathFromRoot("main") };
+
+ const test_step = b.step("test", "Run test");
+ test_step.dependOn(&main.step);
+}
diff --git a/test/standalone/emit_asm_and_bin/main.zig b/test/standalone/emit_asm_and_bin/main.zig
new file mode 100644
index 0000000000..902b554db0
--- /dev/null
+++ b/test/standalone/emit_asm_and_bin/main.zig
@@ -0,0 +1 @@
+pub fn main() void {}
diff --git a/test/tests.zig b/test/tests.zig
index 53e58156a4..aef549d4f9 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -317,6 +317,30 @@ const test_targets = blk: {
.{
.target = .{
+ .cpu_arch = .powerpc64le,
+ .os_tag = .linux,
+ .abi = .none,
+ },
+ },
+ .{
+ .target = .{
+ .cpu_arch = .powerpc64le,
+ .os_tag = .linux,
+ .abi = .musl,
+ },
+ .link_libc = true,
+ },
+ .{
+ .target = .{
+ .cpu_arch = .powerpc64le,
+ .os_tag = .linux,
+ .abi = .gnu,
+ },
+ .link_libc = true,
+ },
+
+ .{
+ .target = .{
.cpu_arch = .riscv64,
.os_tag = .linux,
.abi = .none,
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index 92e0757ac7..4f76453967 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -492,6 +492,14 @@ const known_options = [_]KnownOpt{
.name = "compress-debug-sections=",
.ident = "compress_debug_sections",
},
+ .{
+ .name = "dynamic",
+ .ident = "dynamic",
+ },
+ .{
+ .name = "static",
+ .ident = "static",
+ },
};
const blacklisted_options = [_][]const u8{};
@@ -798,7 +806,7 @@ fn objSyntax(obj: *json.ObjectMap) ?Syntax {
} else if (std.mem.eql(u8, superclass, "CLRemainingArgsJoined")) {
return .remaining_args_joined;
} else if (std.mem.eql(u8, superclass, "MultiArg")) {
- return .{ .multi_arg = num_args };
+ return Syntax{ .multi_arg = num_args };
}
}
const name = obj.get("Name").?.String;