aboutsummaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2019-12-06 15:49:47 -0500
committerGitHub <noreply@github.com>2019-12-06 15:49:47 -0500
commit525b1e8fb4abc38143a6ae47272fd5d016ba7eeb (patch)
treeee16b05de3828936971df6d977e75d5825b10547 /lib
parentd28aa38db71a861fd2036efbb22e57c1d34a5615 (diff)
parent656cc33f8d49cb5e79cd3f9f8f56963747d43ed6 (diff)
downloadzig-525b1e8fb4abc38143a6ae47272fd5d016ba7eeb.tar.gz
zig-525b1e8fb4abc38143a6ae47272fd5d016ba7eeb.zip
Merge pull request #3856 from ziglang/builtin-call
introduce `@call` and remove other builtin calls
Diffstat (limited to 'lib')
-rw-r--r--lib/std/builtin.zig38
-rw-r--r--lib/std/hash/auto_hash.zig4
-rw-r--r--lib/std/hash/cityhash.zig15
-rw-r--r--lib/std/hash/murmur.zig18
-rw-r--r--lib/std/hash/siphash.zig19
-rw-r--r--lib/std/hash/wyhash.zig6
-rw-r--r--lib/std/math/big/int.zig14
-rw-r--r--lib/std/os/linux.zig2
-rw-r--r--lib/std/special/compiler_rt/arm/aeabi_dcmp.zig10
-rw-r--r--lib/std/special/compiler_rt/arm/aeabi_fcmp.zig10
-rw-r--r--lib/std/special/compiler_rt/divti3.zig5
-rw-r--r--lib/std/special/compiler_rt/extendXfYf2.zig8
-rw-r--r--lib/std/special/compiler_rt/floatsiXf.zig6
-rw-r--r--lib/std/special/compiler_rt/modti3.zig5
-rw-r--r--lib/std/special/compiler_rt/multi3.zig5
-rw-r--r--lib/std/special/compiler_rt/stack_probe.zig12
-rw-r--r--lib/std/special/compiler_rt/umodti3.zig5
-rw-r--r--lib/std/special/start.zig14
18 files changed, 133 insertions, 63 deletions
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 89acb0df60..35188b61e3 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -372,6 +372,44 @@ pub const Version = struct {
patch: u32,
};
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const CallOptions = struct {
+ modifier: Modifier = .auto,
+ stack: ?[]align(std.Target.stack_align) u8 = null,
+
+ pub const Modifier = enum {
+ /// Equivalent to function call syntax.
+ auto,
+
+ /// Prevents tail call optimization. This guarantees that the return
+ /// address will point to the callsite, as opposed to the callsite's
+ /// callsite. If the call is otherwise required to be tail-called
+ /// or inlined, a compile error is emitted instead.
+ never_tail,
+
+ /// Guarantees that the call will not be inlined. If the call is
+ /// otherwise required to be inlined, a compile error is emitted instead.
+ never_inline,
+
+ /// Asserts that the function call will not suspend. This allows a
+ /// non-async function to call an async function.
+ no_async,
+
+ /// Guarantees that the call will be generated with tail call optimization.
+ /// If this is not possible, a compile error is emitted instead.
+ always_tail,
+
+ /// Guarantees that the call will be inlined at the callsite.
+ /// If this is not possible, a compile error is emitted instead.
+ always_inline,
+
+ /// Evaluates the call at compile-time. If the call cannot be completed at
+ /// compile-time, a compile error is emitted instead.
+ compile_time,
+ };
+};
+
/// This function type is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const PanicFn = fn ([]const u8, ?*StackTrace) noreturn;
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index bd5479d093..07ee1d3d00 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -92,7 +92,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
// Help the optimizer see that hashing an int is easy by inlining!
// TODO Check if the situation is better after #561 is resolved.
- .Int => @inlineCall(hasher.update, std.mem.asBytes(&key)),
+ .Int => @call(.{ .modifier = .always_inline }, hasher.update, .{std.mem.asBytes(&key)}),
.Float => |info| hash(hasher, @bitCast(@IntType(false, info.bits), key), strat),
@@ -101,7 +101,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
.ErrorSet => hash(hasher, @errorToInt(key), strat),
.AnyFrame, .Fn => hash(hasher, @ptrToInt(key), strat),
- .Pointer => @inlineCall(hashPointer, hasher, key, strat),
+ .Pointer => @call(.{ .modifier = .always_inline }, hashPointer, .{ hasher, key, strat }),
.Optional => if (key) |k| hash(hasher, k, strat),
diff --git a/lib/std/hash/cityhash.zig b/lib/std/hash/cityhash.zig
index 5038c3758e..0f78140c9d 100644
--- a/lib/std/hash/cityhash.zig
+++ b/lib/std/hash/cityhash.zig
@@ -197,7 +197,7 @@ pub const CityHash64 = struct {
}
fn hashLen16(u: u64, v: u64) u64 {
- return @inlineCall(hash128To64, u, v);
+ return @call(.{ .modifier = .always_inline }, hash128To64, .{ u, v });
}
fn hashLen16Mul(low: u64, high: u64, mul: u64) u64 {
@@ -210,7 +210,7 @@ pub const CityHash64 = struct {
}
fn hash128To64(low: u64, high: u64) u64 {
- return @inlineCall(hashLen16Mul, low, high, 0x9ddfea08eb382d69);
+ return @call(.{ .modifier = .always_inline }, hashLen16Mul, .{ low, high, 0x9ddfea08eb382d69 });
}
fn hashLen0To16(str: []const u8) u64 {
@@ -291,7 +291,14 @@ pub const CityHash64 = struct {
}
fn weakHashLen32WithSeeds(ptr: [*]const u8, a: u64, b: u64) WeakPair {
- return @inlineCall(weakHashLen32WithSeedsHelper, fetch64(ptr), fetch64(ptr + 8), fetch64(ptr + 16), fetch64(ptr + 24), a, b);
+ return @call(.{ .modifier = .always_inline }, weakHashLen32WithSeedsHelper, .{
+ fetch64(ptr),
+ fetch64(ptr + 8),
+ fetch64(ptr + 16),
+ fetch64(ptr + 24),
+ a,
+ b,
+ });
}
pub fn hash(str: []const u8) u64 {
@@ -339,7 +346,7 @@ pub const CityHash64 = struct {
}
pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
- return @inlineCall(Self.hashWithSeeds, str, k2, seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeeds, .{ str, k2, seed });
}
pub fn hashWithSeeds(str: []const u8, seed0: u64, seed1: u64) u64 {
diff --git a/lib/std/hash/murmur.zig b/lib/std/hash/murmur.zig
index d3379a81f7..0163927010 100644
--- a/lib/std/hash/murmur.zig
+++ b/lib/std/hash/murmur.zig
@@ -8,7 +8,7 @@ pub const Murmur2_32 = struct {
const Self = @This();
pub fn hash(str: []const u8) u32 {
- return @inlineCall(Self.hashWithSeed, str, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
}
pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
@@ -44,7 +44,7 @@ pub const Murmur2_32 = struct {
}
pub fn hashUint32(v: u32) u32 {
- return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
}
pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {
@@ -64,7 +64,7 @@ pub const Murmur2_32 = struct {
}
pub fn hashUint64(v: u64) u32 {
- return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
}
pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {
@@ -93,7 +93,7 @@ pub const Murmur2_64 = struct {
const Self = @This();
pub fn hash(str: []const u8) u64 {
- return @inlineCall(Self.hashWithSeed, str, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
}
pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
@@ -127,7 +127,7 @@ pub const Murmur2_64 = struct {
}
pub fn hashUint32(v: u32) u64 {
- return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
}
pub fn hashUint32WithSeed(v: u32, seed: u32) u64 {
@@ -144,7 +144,7 @@ pub const Murmur2_64 = struct {
}
pub fn hashUint64(v: u64) u64 {
- return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
}
pub fn hashUint64WithSeed(v: u64, seed: u32) u64 {
@@ -172,7 +172,7 @@ pub const Murmur3_32 = struct {
}
pub fn hash(str: []const u8) u32 {
- return @inlineCall(Self.hashWithSeed, str, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
}
pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
@@ -220,7 +220,7 @@ pub const Murmur3_32 = struct {
}
pub fn hashUint32(v: u32) u32 {
- return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
}
pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {
@@ -246,7 +246,7 @@ pub const Murmur3_32 = struct {
}
pub fn hashUint64(v: u64) u32 {
- return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+ return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
}
pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {
diff --git a/lib/std/hash/siphash.zig b/lib/std/hash/siphash.zig
index 6b4cc2b16b..ccef47c4b2 100644
--- a/lib/std/hash/siphash.zig
+++ b/lib/std/hash/siphash.zig
@@ -11,7 +11,7 @@ const testing = std.testing;
const math = std.math;
const mem = std.mem;
-const Endian = @import("builtin").Endian;
+const Endian = std.builtin.Endian;
pub fn SipHash64(comptime c_rounds: usize, comptime d_rounds: usize) type {
return SipHash(u64, c_rounds, d_rounds);
@@ -62,7 +62,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
var off: usize = 0;
while (off < b.len) : (off += 8) {
- @inlineCall(self.round, b[off .. off + 8]);
+ @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 8]});
}
self.msg_len +%= @truncate(u8, b.len);
@@ -84,9 +84,12 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
self.v2 ^= 0xff;
}
+ // TODO this is a workaround, should be able to supply the value without a separate variable
+ const inl = std.builtin.CallOptions{ .modifier = .always_inline };
+
comptime var i: usize = 0;
inline while (i < d_rounds) : (i += 1) {
- @inlineCall(sipRound, self);
+ @call(inl, sipRound, .{self});
}
const b1 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
@@ -98,7 +101,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
comptime var j: usize = 0;
inline while (j < d_rounds) : (j += 1) {
- @inlineCall(sipRound, self);
+ @call(inl, sipRound, .{self});
}
const b2 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
@@ -111,9 +114,11 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
const m = mem.readIntSliceLittle(u64, b[0..]);
self.v3 ^= m;
+ // TODO this is a workaround, should be able to supply the value without a separate variable
+ const inl = std.builtin.CallOptions{ .modifier = .always_inline };
comptime var i: usize = 0;
inline while (i < c_rounds) : (i += 1) {
- @inlineCall(sipRound, self);
+ @call(inl, sipRound, .{self});
}
self.v0 ^= m;
@@ -140,8 +145,8 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
const aligned_len = input.len - (input.len % 8);
var c = Self.init(key);
- @inlineCall(c.update, input[0..aligned_len]);
- return @inlineCall(c.final, input[aligned_len..]);
+ @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+ return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
}
};
}
diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig
index 7e35ccc6d2..8d11c700cf 100644
--- a/lib/std/hash/wyhash.zig
+++ b/lib/std/hash/wyhash.zig
@@ -65,7 +65,7 @@ const WyhashStateless = struct {
var off: usize = 0;
while (off < b.len) : (off += 32) {
- @inlineCall(self.round, b[off .. off + 32]);
+ @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 32]});
}
self.msg_len += b.len;
@@ -121,8 +121,8 @@ const WyhashStateless = struct {
const aligned_len = input.len - (input.len % 32);
var c = WyhashStateless.init(seed);
- @inlineCall(c.update, input[0..aligned_len]);
- return @inlineCall(c.final, input[aligned_len..]);
+ @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+ return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
}
};
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 0459b0b158..5c84dc462b 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -811,7 +811,7 @@ pub const Int = struct {
var j: usize = 0;
while (j < a_lo.len) : (j += 1) {
- a_lo[j] = @inlineCall(addMulLimbWithCarry, a_lo[j], y[j], xi, &carry);
+ a_lo[j] = @call(.{ .modifier = .always_inline }, addMulLimbWithCarry, .{ a_lo[j], y[j], xi, &carry });
}
j = 0;
@@ -1214,7 +1214,11 @@ pub const Int = struct {
const dst_i = src_i + limb_shift;
const src_digit = a[src_i];
- r[dst_i] = carry | @inlineCall(math.shr, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+ r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
+ Limb,
+ src_digit,
+ Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ });
carry = (src_digit << interior_limb_shift);
}
@@ -1254,7 +1258,11 @@ pub const Int = struct {
const src_digit = a[src_i];
r[dst_i] = carry | (src_digit >> interior_limb_shift);
- carry = @inlineCall(math.shl, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+ carry = @call(.{ .modifier = .always_inline }, math.shl, .{
+ Limb,
+ src_digit,
+ Limb.bit_count - @intCast(Limb, interior_limb_shift),
+ });
}
}
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index e3d84e1e63..907fd24db1 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -94,7 +94,7 @@ pub fn fork() usize {
/// the compiler is not aware of how vfork affects control flow and you may
/// see different results in optimized builds.
pub inline fn vfork() usize {
- return @inlineCall(syscall0, SYS_vfork);
+ return @call(.{ .modifier = .always_inline }, syscall0, .{SYS_vfork});
}
pub fn futimens(fd: i32, times: *const [2]timespec) usize {
diff --git a/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig b/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig
index 33bfdabcfb..7463c49931 100644
--- a/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig
+++ b/lib/std/special/compiler_rt/arm/aeabi_dcmp.zig
@@ -14,31 +14,31 @@ const ConditionalOperator = enum {
pub nakedcc fn __aeabi_dcmpeq() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Eq);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Eq});
unreachable;
}
pub nakedcc fn __aeabi_dcmplt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Lt);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Lt});
unreachable;
}
pub nakedcc fn __aeabi_dcmple() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Le);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Le});
unreachable;
}
pub nakedcc fn __aeabi_dcmpge() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Ge);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Ge});
unreachable;
}
pub nakedcc fn __aeabi_dcmpgt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_dcmp, .Gt);
+ @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Gt});
unreachable;
}
diff --git a/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig b/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig
index cc5efc64fc..9a24641d9a 100644
--- a/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig
+++ b/lib/std/special/compiler_rt/arm/aeabi_fcmp.zig
@@ -14,31 +14,31 @@ const ConditionalOperator = enum {
pub nakedcc fn __aeabi_fcmpeq() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Eq);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Eq});
unreachable;
}
pub nakedcc fn __aeabi_fcmplt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Lt);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Lt});
unreachable;
}
pub nakedcc fn __aeabi_fcmple() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Le);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Le});
unreachable;
}
pub nakedcc fn __aeabi_fcmpge() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Ge);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Ge});
unreachable;
}
pub nakedcc fn __aeabi_fcmpgt() noreturn {
@setRuntimeSafety(false);
- @inlineCall(aeabi_fcmp, .Gt);
+ @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Gt});
unreachable;
}
diff --git a/lib/std/special/compiler_rt/divti3.zig b/lib/std/special/compiler_rt/divti3.zig
index 477ce2cb98..fcb23a50d9 100644
--- a/lib/std/special/compiler_rt/divti3.zig
+++ b/lib/std/special/compiler_rt/divti3.zig
@@ -17,7 +17,10 @@ pub extern fn __divti3(a: i128, b: i128) i128 {
const v128 = @Vector(2, u64);
pub extern fn __divti3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__divti3, @bitCast(i128, a), @bitCast(i128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __divti3, .{
+ @bitCast(i128, a),
+ @bitCast(i128, b),
+ }));
}
test "import divti3" {
diff --git a/lib/std/special/compiler_rt/extendXfYf2.zig b/lib/std/special/compiler_rt/extendXfYf2.zig
index 3bdc5164e2..427bd4ec24 100644
--- a/lib/std/special/compiler_rt/extendXfYf2.zig
+++ b/lib/std/special/compiler_rt/extendXfYf2.zig
@@ -3,19 +3,19 @@ const builtin = @import("builtin");
const is_test = builtin.is_test;
pub extern fn __extendsfdf2(a: f32) f64 {
- return @inlineCall(extendXfYf2, f64, f32, @bitCast(u32, a));
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, a) });
}
pub extern fn __extenddftf2(a: f64) f128 {
- return @inlineCall(extendXfYf2, f128, f64, @bitCast(u64, a));
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f64, @bitCast(u64, a) });
}
pub extern fn __extendsftf2(a: f32) f128 {
- return @inlineCall(extendXfYf2, f128, f32, @bitCast(u32, a));
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f32, @bitCast(u32, a) });
}
pub extern fn __extendhfsf2(a: u16) f32 {
- return @inlineCall(extendXfYf2, f32, f16, a);
+ return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, a });
}
const CHAR_BIT = 8;
diff --git a/lib/std/special/compiler_rt/floatsiXf.zig b/lib/std/special/compiler_rt/floatsiXf.zig
index 714681834d..917dfb47fc 100644
--- a/lib/std/special/compiler_rt/floatsiXf.zig
+++ b/lib/std/special/compiler_rt/floatsiXf.zig
@@ -55,17 +55,17 @@ fn floatsiXf(comptime T: type, a: i32) T {
pub extern fn __floatsisf(arg: i32) f32 {
@setRuntimeSafety(builtin.is_test);
- return @inlineCall(floatsiXf, f32, arg);
+ return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f32, arg });
}
pub extern fn __floatsidf(arg: i32) f64 {
@setRuntimeSafety(builtin.is_test);
- return @inlineCall(floatsiXf, f64, arg);
+ return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f64, arg });
}
pub extern fn __floatsitf(arg: i32) f128 {
@setRuntimeSafety(builtin.is_test);
- return @inlineCall(floatsiXf, f128, arg);
+ return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f128, arg });
}
fn test_one_floatsitf(a: i32, expected: u128) void {
diff --git a/lib/std/special/compiler_rt/modti3.zig b/lib/std/special/compiler_rt/modti3.zig
index 16f2f38ba3..d983ecba5f 100644
--- a/lib/std/special/compiler_rt/modti3.zig
+++ b/lib/std/special/compiler_rt/modti3.zig
@@ -22,7 +22,10 @@ pub extern fn __modti3(a: i128, b: i128) i128 {
const v128 = @Vector(2, u64);
pub extern fn __modti3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__modti3, @bitCast(i128, a), @bitCast(i128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __modti3, .{
+ @bitCast(i128, a),
+ @bitCast(i128, b),
+ }));
}
test "import modti3" {
diff --git a/lib/std/special/compiler_rt/multi3.zig b/lib/std/special/compiler_rt/multi3.zig
index f3b74b85d9..56ff56cbb2 100644
--- a/lib/std/special/compiler_rt/multi3.zig
+++ b/lib/std/special/compiler_rt/multi3.zig
@@ -16,7 +16,10 @@ pub extern fn __multi3(a: i128, b: i128) i128 {
const v128 = @Vector(2, u64);
pub extern fn __multi3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__multi3, @bitCast(i128, a), @bitCast(i128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __multi3, .{
+ @bitCast(i128, a),
+ @bitCast(i128, b),
+ }));
}
fn __mulddi3(a: u64, b: u64) i128 {
diff --git a/lib/std/special/compiler_rt/stack_probe.zig b/lib/std/special/compiler_rt/stack_probe.zig
index c3e534c8ec..6406f3977a 100644
--- a/lib/std/special/compiler_rt/stack_probe.zig
+++ b/lib/std/special/compiler_rt/stack_probe.zig
@@ -182,25 +182,25 @@ fn win_probe_stack_adjust_sp() void {
pub nakedcc fn _chkstk() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_adjust_sp);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
}
pub nakedcc fn __chkstk() void {
@setRuntimeSafety(false);
switch (builtin.arch) {
- .i386 => @inlineCall(win_probe_stack_adjust_sp),
- .x86_64 => @inlineCall(win_probe_stack_only),
+ .i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
+ .x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
else => unreachable,
}
}
pub nakedcc fn ___chkstk() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_adjust_sp);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
}
pub nakedcc fn __chkstk_ms() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_only);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
}
pub nakedcc fn ___chkstk_ms() void {
@setRuntimeSafety(false);
- @inlineCall(win_probe_stack_only);
+ @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
}
diff --git a/lib/std/special/compiler_rt/umodti3.zig b/lib/std/special/compiler_rt/umodti3.zig
index 7add0b2ffe..9d4a42147c 100644
--- a/lib/std/special/compiler_rt/umodti3.zig
+++ b/lib/std/special/compiler_rt/umodti3.zig
@@ -11,5 +11,8 @@ pub extern fn __umodti3(a: u128, b: u128) u128 {
const v128 = @Vector(2, u64);
pub extern fn __umodti3_windows_x86_64(a: v128, b: v128) v128 {
- return @bitCast(v128, @inlineCall(__umodti3, @bitCast(u128, a), @bitCast(u128, b)));
+ return @bitCast(v128, @call(.{ .modifier = .always_inline }, __umodti3, .{
+ @bitCast(u128, a),
+ @bitCast(u128, b),
+ }));
}
diff --git a/lib/std/special/start.zig b/lib/std/special/start.zig
index 9c24e38137..d6c6350ff4 100644
--- a/lib/std/special/start.zig
+++ b/lib/std/special/start.zig
@@ -61,7 +61,7 @@ stdcallcc fn _DllMainCRTStartup(
extern fn wasm_freestanding_start() void {
// This is marked inline because for some reason LLVM in release mode fails to inline it,
// and we want fewer call frames in stack traces.
- _ = @inlineCall(callMain);
+ _ = @call(.{ .modifier = .always_inline }, callMain, .{});
}
extern fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) usize {
@@ -91,7 +91,7 @@ nakedcc fn _start() noreturn {
if (builtin.os == builtin.Os.wasi) {
// This is marked inline because for some reason LLVM in release mode fails to inline it,
// and we want fewer call frames in stack traces.
- std.os.wasi.proc_exit(@inlineCall(callMain));
+ std.os.wasi.proc_exit(@call(.{ .modifier = .always_inline }, callMain, .{}));
}
switch (builtin.arch) {
@@ -127,7 +127,7 @@ nakedcc fn _start() noreturn {
}
// If LLVM inlines stack variables into _start, they will overwrite
// the command line argument data.
- @noInlineCall(posixCallMainAndExit);
+ @call(.{ .modifier = .never_inline }, posixCallMainAndExit, .{});
}
stdcallcc fn WinMainCRTStartup() noreturn {
@@ -186,10 +186,10 @@ fn posixCallMainAndExit() noreturn {
// 0,
//) catch @panic("out of memory");
//std.os.mprotect(new_stack[0..std.mem.page_size], std.os.PROT_NONE) catch {};
- //std.os.exit(@newStackCall(new_stack, callMainWithArgs, argc, argv, envp));
+ //std.os.exit(@call(.{.stack = new_stack}, callMainWithArgs, .{argc, argv, envp}));
}
- std.os.exit(@inlineCall(callMainWithArgs, argc, argv, envp));
+ std.os.exit(@call(.{ .modifier = .always_inline }, callMainWithArgs, .{ argc, argv, envp }));
}
fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
@@ -205,7 +205,7 @@ extern fn main(c_argc: i32, c_argv: [*][*:0]u8, c_envp: [*:null]?[*:0]u8) i32 {
var env_count: usize = 0;
while (c_envp[env_count] != null) : (env_count += 1) {}
const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count];
- return @inlineCall(callMainWithArgs, @intCast(usize, c_argc), c_argv, envp);
+ return @call(.{ .modifier = .always_inline }, callMainWithArgs, .{ @intCast(usize, c_argc), c_argv, envp });
}
// General error message for a malformed return type
@@ -235,7 +235,7 @@ inline fn initEventLoopAndCallMain() u8 {
// This is marked inline because for some reason LLVM in release mode fails to inline it,
// and we want fewer call frames in stack traces.
- return @inlineCall(callMain);
+ return @call(.{ .modifier = .always_inline }, callMain, .{});
}
async fn callMainAsync(loop: *std.event.Loop) u8 {