author    Andrew Kelley <andrew@ziglang.org>  2023-06-24 16:58:19 -0700
committer GitHub <noreply@github.com>         2023-06-24 16:58:19 -0700
commit    146b79af153bbd5dafda0ba12a040385c7fc58f8 (patch)
tree      67e3db8b444d65c667e314770fc983a7fc8ba293 /lib/std/os/linux
parent    13853bef0df3c90633021850cc6d6abaeea03282 (diff)
parent    21ac0beb436f49fe49c6982a872f2dc48e4bea5e (diff)
Merge pull request #16163 from mlugg/feat/builtins-infer-dest-ty
Infer destination type of cast builtins using result type
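The change applied throughout the diff below is mechanical: the old two-argument cast builtins such as @intCast(T, x) become single-argument builtins that take their destination type from the expression's result type, so call sites without an obvious result type are rewritten as @as(T, @intCast(x)). A minimal sketch of that pattern (assuming a Zig 0.11-era compiler; the variable names are illustrative, not taken from the diff):

    const std = @import("std");

    test "cast builtins infer their destination type" {
        const wide: u64 = 300;

        // Before this change the destination type was the first argument:
        //     const narrow = @intCast(u32, wide);
        // Now the builtin takes only the operand and gets its type from the
        // surrounding result type, here supplied explicitly via @as:
        const narrow = @as(u32, @intCast(wide));
        try std.testing.expectEqual(@as(u32, 300), narrow);

        // Where a result type already exists (e.g. a typed declaration),
        // the @as wrapper is unnecessary:
        const direct: u32 = @intCast(wide);
        try std.testing.expectEqual(narrow, direct);
    }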
Diffstat (limited to 'lib/std/os/linux')
-rw-r--r--  lib/std/os/linux/bpf.zig          30
-rw-r--r--  lib/std/os/linux/bpf/helpers.zig  282
-rw-r--r--  lib/std/os/linux/io_uring.zig     101
-rw-r--r--  lib/std/os/linux/ioctl.zig        2
-rw-r--r--  lib/std/os/linux/start_pie.zig    8
-rw-r--r--  lib/std/os/linux/test.zig         16
-rw-r--r--  lib/std/os/linux/tls.zig          6
-rw-r--r--  lib/std/os/linux/vdso.zig         30
8 files changed, 234 insertions, 241 deletions
diff --git a/lib/std/os/linux/bpf.zig b/lib/std/os/linux/bpf.zig
index 87b92587f9..751e5dc95a 100644
--- a/lib/std/os/linux/bpf.zig
+++ b/lib/std/os/linux/bpf.zig
@@ -643,7 +643,7 @@ pub const Insn = packed struct {
.dst = @intFromEnum(dst),
.src = @intFromEnum(src),
.off = 0,
- .imm = @intCast(i32, @truncate(u32, imm)),
+ .imm = @as(i32, @intCast(@as(u32, @truncate(imm)))),
};
}
@@ -653,7 +653,7 @@ pub const Insn = packed struct {
.dst = 0,
.src = 0,
.off = 0,
- .imm = @intCast(i32, @truncate(u32, imm >> 32)),
+ .imm = @as(i32, @intCast(@as(u32, @truncate(imm >> 32)))),
};
}
@@ -666,11 +666,11 @@ pub const Insn = packed struct {
}
pub fn ld_map_fd1(dst: Reg, map_fd: fd_t) Insn {
- return ld_imm_impl1(dst, @enumFromInt(Reg, PSEUDO_MAP_FD), @intCast(u64, map_fd));
+ return ld_imm_impl1(dst, @as(Reg, @enumFromInt(PSEUDO_MAP_FD)), @as(u64, @intCast(map_fd)));
}
pub fn ld_map_fd2(map_fd: fd_t) Insn {
- return ld_imm_impl2(@intCast(u64, map_fd));
+ return ld_imm_impl2(@as(u64, @intCast(map_fd)));
}
pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn {
@@ -786,17 +786,17 @@ test "opcodes" {
// TODO: byteswap instructions
try expect_opcode(0xd4, Insn.le(.half_word, .r1));
- try expectEqual(@intCast(i32, 16), Insn.le(.half_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(16)), Insn.le(.half_word, .r1).imm);
try expect_opcode(0xd4, Insn.le(.word, .r1));
- try expectEqual(@intCast(i32, 32), Insn.le(.word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(32)), Insn.le(.word, .r1).imm);
try expect_opcode(0xd4, Insn.le(.double_word, .r1));
- try expectEqual(@intCast(i32, 64), Insn.le(.double_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(64)), Insn.le(.double_word, .r1).imm);
try expect_opcode(0xdc, Insn.be(.half_word, .r1));
- try expectEqual(@intCast(i32, 16), Insn.be(.half_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(16)), Insn.be(.half_word, .r1).imm);
try expect_opcode(0xdc, Insn.be(.word, .r1));
- try expectEqual(@intCast(i32, 32), Insn.be(.word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(32)), Insn.be(.word, .r1).imm);
try expect_opcode(0xdc, Insn.be(.double_word, .r1));
- try expectEqual(@intCast(i32, 64), Insn.be(.double_word, .r1).imm);
+ try expectEqual(@as(i32, @intCast(64)), Insn.be(.double_word, .r1).imm);
// memory instructions
try expect_opcode(0x18, Insn.ld_dw1(.r1, 0));
@@ -804,7 +804,7 @@ test "opcodes" {
// loading a map fd
try expect_opcode(0x18, Insn.ld_map_fd1(.r1, 0));
- try expectEqual(@intCast(u4, PSEUDO_MAP_FD), Insn.ld_map_fd1(.r1, 0).src);
+ try expectEqual(@as(u4, @intCast(PSEUDO_MAP_FD)), Insn.ld_map_fd1(.r1, 0).src);
try expect_opcode(0x00, Insn.ld_map_fd2(0));
try expect_opcode(0x38, Insn.ld_abs(.double_word, .r1, .r2, 0));
@@ -1518,7 +1518,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries
const rc = linux.bpf(.map_create, &attr, @sizeOf(MapCreateAttr));
switch (errno(rc)) {
- .SUCCESS => return @intCast(fd_t, rc),
+ .SUCCESS => return @as(fd_t, @intCast(rc)),
.INVAL => return error.MapTypeOrAttrInvalid,
.NOMEM => return error.SystemResources,
.PERM => return error.AccessDenied,
@@ -1668,20 +1668,20 @@ pub fn prog_load(
attr.prog_load.prog_type = @intFromEnum(prog_type);
attr.prog_load.insns = @intFromPtr(insns.ptr);
- attr.prog_load.insn_cnt = @intCast(u32, insns.len);
+ attr.prog_load.insn_cnt = @as(u32, @intCast(insns.len));
attr.prog_load.license = @intFromPtr(license.ptr);
attr.prog_load.kern_version = kern_version;
attr.prog_load.prog_flags = flags;
if (log) |l| {
attr.prog_load.log_buf = @intFromPtr(l.buf.ptr);
- attr.prog_load.log_size = @intCast(u32, l.buf.len);
+ attr.prog_load.log_size = @as(u32, @intCast(l.buf.len));
attr.prog_load.log_level = l.level;
}
const rc = linux.bpf(.prog_load, &attr, @sizeOf(ProgLoadAttr));
return switch (errno(rc)) {
- .SUCCESS => @intCast(fd_t, rc),
+ .SUCCESS => @as(fd_t, @intCast(rc)),
.ACCES => error.UnsafeProgram,
.FAULT => unreachable,
.INVAL => error.InvalidProgram,
diff --git a/lib/std/os/linux/bpf/helpers.zig b/lib/std/os/linux/bpf/helpers.zig
index b26e7eda29..027220088e 100644
--- a/lib/std/os/linux/bpf/helpers.zig
+++ b/lib/std/os/linux/bpf/helpers.zig
@@ -11,147 +11,147 @@ const SkFullSock = @compileError("TODO missing os bits: SkFullSock");
//
// Note, these function signatures were created from documentation found in
// '/usr/include/linux/bpf.h'
-pub const map_lookup_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, 1);
-pub const map_update_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, 2);
-pub const map_delete_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, 3);
-pub const probe_read = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 4);
-pub const ktime_get_ns = @ptrFromInt(*const fn () u64, 5);
-pub const trace_printk = @ptrFromInt(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, 6);
-pub const get_prandom_u32 = @ptrFromInt(*const fn () u32, 7);
-pub const get_smp_processor_id = @ptrFromInt(*const fn () u32, 8);
-pub const skb_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, 9);
-pub const l3_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, 10);
-pub const l4_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, 11);
-pub const tail_call = @ptrFromInt(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, 12);
-pub const clone_redirect = @ptrFromInt(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, 13);
-pub const get_current_pid_tgid = @ptrFromInt(*const fn () u64, 14);
-pub const get_current_uid_gid = @ptrFromInt(*const fn () u64, 15);
-pub const get_current_comm = @ptrFromInt(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, 16);
-pub const get_cgroup_classid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 17);
+pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1));
+pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2));
+pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3));
+pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4));
+pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5));
+pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6));
+pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7));
+pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8));
+pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9));
+pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10));
+pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11));
+pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12));
+pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13));
+pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14));
+pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15));
+pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16));
+pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17));
// Note vlan_proto is big endian
-pub const skb_vlan_push = @ptrFromInt(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, 18);
-pub const skb_vlan_pop = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 19);
-pub const skb_get_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 20);
-pub const skb_set_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 21);
-pub const perf_event_read = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64) u64, 22);
-pub const redirect = @ptrFromInt(*const fn (ifindex: u32, flags: u64) c_long, 23);
-pub const get_route_realm = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 24);
-pub const perf_event_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 25);
-pub const skb_load_bytes = @ptrFromInt(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, 26);
-pub const get_stackid = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, 27);
+pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18));
+pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19));
+pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20));
+pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21));
+pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22));
+pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23));
+pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24));
+pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25));
+pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26));
+pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27));
// from and to point to __be32
-pub const csum_diff = @ptrFromInt(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, 28);
-pub const skb_get_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 29);
-pub const skb_set_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 30);
+pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28));
+pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29));
+pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30));
// proto is __be16
-pub const skb_change_proto = @ptrFromInt(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, 31);
-pub const skb_change_type = @ptrFromInt(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, 32);
-pub const skb_under_cgroup = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, 33);
-pub const get_hash_recalc = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 34);
-pub const get_current_task = @ptrFromInt(*const fn () u64, 35);
-pub const probe_write_user = @ptrFromInt(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, 36);
-pub const current_task_under_cgroup = @ptrFromInt(*const fn (map: *const kern.MapDef, index: u32) c_long, 37);
-pub const skb_change_tail = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 38);
-pub const skb_pull_data = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32) c_long, 39);
-pub const csum_update = @ptrFromInt(*const fn (skb: *kern.SkBuff, csum: u32) i64, 40);
-pub const set_hash_invalid = @ptrFromInt(*const fn (skb: *kern.SkBuff) void, 41);
-pub const get_numa_node_id = @ptrFromInt(*const fn () c_long, 42);
-pub const skb_change_head = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 43);
-pub const xdp_adjust_head = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 44);
-pub const probe_read_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 45);
-pub const get_socket_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 46);
-pub const get_socket_uid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 47);
-pub const set_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, hash: u32) c_long, 48);
-pub const setsockopt = @ptrFromInt(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 49);
-pub const skb_adjust_room = @ptrFromInt(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, 50);
-pub const redirect_map = @ptrFromInt(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, 51);
-pub const sk_redirect_map = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, 52);
-pub const sock_map_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 53);
-pub const xdp_adjust_meta = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 54);
-pub const perf_event_read_value = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, 55);
-pub const perf_prog_read_value = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, 56);
-pub const getsockopt = @ptrFromInt(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 57);
-pub const override_return = @ptrFromInt(*const fn (regs: *PtRegs, rc: u64) c_long, 58);
-pub const sock_ops_cb_flags_set = @ptrFromInt(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, 59);
-pub const msg_redirect_map = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, 60);
-pub const msg_apply_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 61);
-pub const msg_cork_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 62);
-pub const msg_pull_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, 63);
-pub const bind = @ptrFromInt(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, 64);
-pub const xdp_adjust_tail = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 65);
-pub const skb_get_xfrm_state = @ptrFromInt(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, 66);
-pub const get_stack = @ptrFromInt(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 67);
-pub const skb_load_bytes_relative = @ptrFromInt(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, 68);
-pub const fib_lookup = @ptrFromInt(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, 69);
-pub const sock_hash_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 70);
-pub const msg_redirect_hash = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 71);
-pub const sk_redirect_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 72);
-pub const lwt_push_encap = @ptrFromInt(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, 73);
-pub const lwt_seg6_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, 74);
-pub const lwt_seg6_adjust_srh = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, 75);
-pub const lwt_seg6_action = @ptrFromInt(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, 76);
-pub const rc_repeat = @ptrFromInt(*const fn (ctx: ?*anyopaque) c_long, 77);
-pub const rc_keydown = @ptrFromInt(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, 78);
-pub const skb_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff) u64, 79);
-pub const get_current_cgroup_id = @ptrFromInt(*const fn () u64, 80);
-pub const get_local_storage = @ptrFromInt(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, 81);
-pub const sk_select_reuseport = @ptrFromInt(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 82);
-pub const skb_ancestor_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, 83);
-pub const sk_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 84);
-pub const sk_lookup_udp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 85);
-pub const sk_release = @ptrFromInt(*const fn (sock: *kern.Sock) c_long, 86);
-pub const map_push_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, 87);
-pub const map_pop_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 88);
-pub const map_peek_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 89);
-pub const msg_push_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 90);
-pub const msg_pop_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 91);
-pub const rc_pointer_rel = @ptrFromInt(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, 92);
-pub const spin_lock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 93);
-pub const spin_unlock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 94);
-pub const sk_fullsock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*SkFullSock, 95);
-pub const tcp_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.TcpSock, 96);
-pub const skb_ecn_set_ce = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 97);
-pub const get_listener_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.Sock, 98);
-pub const skc_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 99);
-pub const tcp_check_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, 100);
-pub const sysctl_get_name = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, 101);
-pub const sysctl_get_current_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 102);
-pub const sysctl_get_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 103);
-pub const sysctl_set_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, 104);
-pub const strtol = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, 105);
-pub const strtoul = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, 106);
-pub const sk_storage_get = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, 107);
-pub const sk_storage_delete = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, 108);
-pub const send_signal = @ptrFromInt(*const fn (sig: u32) c_long, 109);
-pub const tcp_gen_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, 110);
-pub const skb_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 111);
-pub const probe_read_user = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 112);
-pub const probe_read_kernel = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 113);
-pub const probe_read_user_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 114);
-pub const probe_read_kernel_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 115);
-pub const tcp_send_ack = @ptrFromInt(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, 116);
-pub const send_signal_thread = @ptrFromInt(*const fn (sig: u32) c_long, 117);
-pub const jiffies64 = @ptrFromInt(*const fn () u64, 118);
-pub const read_branch_records = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, 119);
-pub const get_ns_current_pid_tgid = @ptrFromInt(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, 120);
-pub const xdp_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 121);
-pub const get_netns_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 122);
-pub const get_current_ancestor_cgroup_id = @ptrFromInt(*const fn (ancestor_level: c_int) u64, 123);
-pub const sk_assign = @ptrFromInt(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, 124);
-pub const ktime_get_boot_ns = @ptrFromInt(*const fn () u64, 125);
-pub const seq_printf = @ptrFromInt(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, 126);
-pub const seq_write = @ptrFromInt(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, 127);
-pub const sk_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock) u64, 128);
-pub const sk_ancestor_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, 129);
-pub const ringbuf_output = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, 130);
-pub const ringbuf_reserve = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, 131);
-pub const ringbuf_submit = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 132);
-pub const ringbuf_discard = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 133);
-pub const ringbuf_query = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, 134);
-pub const csum_level = @ptrFromInt(*const fn (skb: *kern.SkBuff, level: u64) c_long, 135);
-pub const skc_to_tcp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, 136);
-pub const skc_to_tcp_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, 137);
-pub const skc_to_tcp_timewait_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, 138);
-pub const skc_to_tcp_request_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, 139);
-pub const skc_to_udp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, 140);
-pub const get_task_stack = @ptrFromInt(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 141);
+pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31));
+pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32));
+pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33));
+pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34));
+pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35));
+pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36));
+pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37));
+pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38));
+pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39));
+pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40));
+pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41));
+pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42));
+pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43));
+pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44));
+pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45));
+pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46));
+pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47));
+pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48));
+pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49));
+pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50));
+pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51));
+pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52));
+pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53));
+pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54));
+pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55));
+pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56));
+pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57));
+pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58));
+pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59));
+pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60));
+pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61));
+pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62));
+pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63));
+pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64));
+pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65));
+pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66));
+pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67));
+pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68));
+pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69));
+pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70));
+pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71));
+pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72));
+pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73));
+pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74));
+pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75));
+pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76));
+pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77));
+pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78));
+pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79));
+pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80));
+pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81));
+pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82));
+pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83));
+pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84));
+pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85));
+pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86));
+pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87));
+pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88));
+pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89));
+pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90));
+pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91));
+pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92));
+pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93));
+pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94));
+pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95));
+pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96));
+pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97));
+pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98));
+pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99));
+pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100));
+pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101));
+pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102));
+pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103));
+pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104));
+pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105));
+pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106));
+pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107));
+pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108));
+pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109));
+pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110));
+pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111));
+pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112));
+pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113));
+pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114));
+pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115));
+pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116));
+pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117));
+pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118));
+pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119));
+pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120));
+pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121));
+pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122));
+pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123));
+pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124));
+pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125));
+pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126));
+pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127));
+pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128));
+pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129));
+pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130));
+pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131));
+pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132));
+pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133));
+pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134));
+pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135));
+pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136));
+pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137));
+pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138));
+pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139));
+pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140));
+pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141));
diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig
index 875138cf4f..df8cd20773 100644
--- a/lib/std/os/linux/io_uring.zig
+++ b/lib/std/os/linux/io_uring.zig
@@ -60,7 +60,7 @@ pub const IO_Uring = struct {
.NOSYS => return error.SystemOutdated,
else => |errno| return os.unexpectedErrno(errno),
}
- const fd = @intCast(os.fd_t, res);
+ const fd = @as(os.fd_t, @intCast(res));
assert(fd >= 0);
errdefer os.close(fd);
@@ -198,7 +198,7 @@ pub const IO_Uring = struct {
.INTR => return error.SignalInterrupt,
else => |errno| return os.unexpectedErrno(errno),
}
- return @intCast(u32, res);
+ return @as(u32, @intCast(res));
}
/// Sync internal state with kernel ring state on the SQ side.
@@ -937,8 +937,8 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_FILES,
- @ptrCast(*const anyopaque, fds.ptr),
- @intCast(u32, fds.len),
+ @as(*const anyopaque, @ptrCast(fds.ptr)),
+ @as(u32, @intCast(fds.len)),
);
try handle_registration_result(res);
}
@@ -968,8 +968,8 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_FILES_UPDATE,
- @ptrCast(*const anyopaque, &update),
- @intCast(u32, fds.len),
+ @as(*const anyopaque, @ptrCast(&update)),
+ @as(u32, @intCast(fds.len)),
);
try handle_registration_result(res);
}
@@ -982,7 +982,7 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_EVENTFD,
- @ptrCast(*const anyopaque, &fd),
+ @as(*const anyopaque, @ptrCast(&fd)),
1,
);
try handle_registration_result(res);
@@ -997,7 +997,7 @@ pub const IO_Uring = struct {
const res = linux.io_uring_register(
self.fd,
.REGISTER_EVENTFD_ASYNC,
- @ptrCast(*const anyopaque, &fd),
+ @as(*const anyopaque, @ptrCast(&fd)),
1,
);
try handle_registration_result(res);
@@ -1022,7 +1022,7 @@ pub const IO_Uring = struct {
self.fd,
.REGISTER_BUFFERS,
buffers.ptr,
- @intCast(u32, buffers.len),
+ @as(u32, @intCast(buffers.len)),
);
try handle_registration_result(res);
}
@@ -1122,20 +1122,17 @@ pub const SubmissionQueue = struct {
errdefer os.munmap(mmap_sqes);
assert(mmap_sqes.len == size_sqes);
- const array = @ptrCast([*]u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.array]));
- const sqes = @ptrCast([*]linux.io_uring_sqe, @alignCast(@alignOf(linux.io_uring_sqe), &mmap_sqes[0]));
+ const array: [*]u32 = @ptrCast(@alignCast(&mmap[p.sq_off.array]));
+ const sqes: [*]linux.io_uring_sqe = @ptrCast(@alignCast(&mmap_sqes[0]));
// We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries,
// see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844.
- assert(
- p.sq_entries ==
- @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*,
- );
+ assert(p.sq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_entries]))).*);
return SubmissionQueue{
- .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.head])),
- .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.tail])),
- .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_mask])).*,
- .flags = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.flags])),
- .dropped = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.dropped])),
+ .head = @ptrCast(@alignCast(&mmap[p.sq_off.head])),
+ .tail = @ptrCast(@alignCast(&mmap[p.sq_off.tail])),
+ .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_mask]))).*,
+ .flags = @ptrCast(@alignCast(&mmap[p.sq_off.flags])),
+ .dropped = @ptrCast(@alignCast(&mmap[p.sq_off.dropped])),
.array = array[0..p.sq_entries],
.sqes = sqes[0..p.sq_entries],
.mmap = mmap,
@@ -1160,17 +1157,13 @@ pub const CompletionQueue = struct {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
const mmap = sq.mmap;
- const cqes = @ptrCast(
- [*]linux.io_uring_cqe,
- @alignCast(@alignOf(linux.io_uring_cqe), &mmap[p.cq_off.cqes]),
- );
- assert(p.cq_entries ==
- @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*);
+ const cqes: [*]linux.io_uring_cqe = @ptrCast(@alignCast(&mmap[p.cq_off.cqes]));
+ assert(p.cq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_entries]))).*);
return CompletionQueue{
- .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.head])),
- .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.tail])),
- .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_mask])).*,
- .overflow = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.overflow])),
+ .head = @ptrCast(@alignCast(&mmap[p.cq_off.head])),
+ .tail = @ptrCast(@alignCast(&mmap[p.cq_off.tail])),
+ .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_mask]))).*,
+ .overflow = @ptrCast(@alignCast(&mmap[p.cq_off.overflow])),
.cqes = cqes[0..p.cq_entries],
};
}
@@ -1233,7 +1226,7 @@ pub fn io_uring_prep_rw(
.fd = fd,
.off = offset,
.addr = addr,
- .len = @intCast(u32, len),
+ .len = @as(u32, @intCast(len)),
.rw_flags = 0,
.user_data = 0,
.buf_index = 0,
@@ -1319,7 +1312,7 @@ pub fn io_uring_prep_epoll_ctl(
op: u32,
ev: ?*linux.epoll_event,
) void {
- io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @intCast(u64, fd));
+ io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @as(u64, @intCast(fd)));
}
pub fn io_uring_prep_recv(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void {
@@ -1459,7 +1452,7 @@ pub fn io_uring_prep_fallocate(
.fd = fd,
.off = offset,
.addr = len,
- .len = @intCast(u32, mode),
+ .len = @as(u32, @intCast(mode)),
.rw_flags = 0,
.user_data = 0,
.buf_index = 0,
@@ -1514,7 +1507,7 @@ pub fn io_uring_prep_renameat(
0,
@intFromPtr(new_path),
);
- sqe.len = @bitCast(u32, new_dir_fd);
+ sqe.len = @as(u32, @bitCast(new_dir_fd));
sqe.rw_flags = flags;
}
@@ -1569,7 +1562,7 @@ pub fn io_uring_prep_linkat(
0,
@intFromPtr(new_path),
);
- sqe.len = @bitCast(u32, new_dir_fd);
+ sqe.len = @as(u32, @bitCast(new_dir_fd));
sqe.rw_flags = flags;
}
@@ -1582,8 +1575,8 @@ pub fn io_uring_prep_provide_buffers(
buffer_id: usize,
) void {
const ptr = @intFromPtr(buffers);
- io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @intCast(i32, num), ptr, buffer_len, buffer_id);
- sqe.buf_index = @intCast(u16, group_id);
+ io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @as(i32, @intCast(num)), ptr, buffer_len, buffer_id);
+ sqe.buf_index = @as(u16, @intCast(group_id));
}
pub fn io_uring_prep_remove_buffers(
@@ -1591,8 +1584,8 @@ pub fn io_uring_prep_remove_buffers(
num: usize,
group_id: usize,
) void {
- io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @intCast(i32, num), 0, 0, 0);
- sqe.buf_index = @intCast(u16, group_id);
+ io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @as(i32, @intCast(num)), 0, 0, 0);
+ sqe.buf_index = @as(u16, @intCast(group_id));
}
test "structs/offsets/entries" {
@@ -1886,12 +1879,12 @@ test "write_fixed/read_fixed" {
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x45454545,
- .res = @intCast(i32, buffers[0].iov_len),
+ .res = @as(i32, @intCast(buffers[0].iov_len)),
.flags = 0,
}, cqe_write);
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x12121212,
- .res = @intCast(i32, buffers[1].iov_len),
+ .res = @as(i32, @intCast(buffers[1].iov_len)),
.flags = 0,
}, cqe_read);
@@ -2145,7 +2138,7 @@ test "timeout (after a relative time)" {
}, cqe);
// Tests should not depend on timings: skip test if outside margin.
- if (!std.math.approxEqAbs(f64, ms, @floatFromInt(f64, stopped - started), margin)) return error.SkipZigTest;
+ if (!std.math.approxEqAbs(f64, ms, @as(f64, @floatFromInt(stopped - started)), margin)) return error.SkipZigTest;
}
test "timeout (after a number of completions)" {
@@ -2637,7 +2630,7 @@ test "renameat" {
);
try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode);
try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
+ try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2850,7 +2843,7 @@ test "linkat" {
);
try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode);
try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
- try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
+ try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -2898,7 +2891,7 @@ test "provide_buffers: read" {
// Provide 4 buffers
{
- const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+ const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
try testing.expectEqual(@as(u32, buffers[0].len), sqe.len);
@@ -2939,7 +2932,7 @@ test "provide_buffers: read" {
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
}
// This read should fail
@@ -2971,7 +2964,7 @@ test "provide_buffers: read" {
const reprovided_buffer_id = 2;
{
- _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id);
+ _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -3003,7 +2996,7 @@ test "provide_buffers: read" {
try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
}
}
@@ -3030,7 +3023,7 @@ test "remove_buffers" {
// Provide 4 buffers
{
- _ = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+ _ = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -3076,7 +3069,7 @@ test "remove_buffers" {
try testing.expect(used_buffer_id >= 0 and used_buffer_id < 4);
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+ try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
}
// Final read should _not_ work
@@ -3119,7 +3112,7 @@ test "provide_buffers: accept/connect/send/recv" {
// Provide 4 buffers
{
- const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+ const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
try testing.expectEqual(@as(u32, buffer_len), sqe.len);
@@ -3181,7 +3174,7 @@ test "provide_buffers: accept/connect/send/recv" {
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
- const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)];
+ const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
try testing.expectEqualSlices(u8, &([_]u8{'z'} ** buffer_len), buffer);
}
@@ -3213,7 +3206,7 @@ test "provide_buffers: accept/connect/send/recv" {
const reprovided_buffer_id = 2;
{
- _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id);
+ _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
@@ -3259,7 +3252,7 @@ test "provide_buffers: accept/connect/send/recv" {
try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
try testing.expectEqual(@as(i32, buffer_len), cqe.res);
try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
- const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)];
+ const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
try testing.expectEqualSlices(u8, &([_]u8{'w'} ** buffer_len), buffer);
}
}
diff --git a/lib/std/os/linux/ioctl.zig b/lib/std/os/linux/ioctl.zig
index 96ec96c306..7f5d36b72d 100644
--- a/lib/std/os/linux/ioctl.zig
+++ b/lib/std/os/linux/ioctl.zig
@@ -32,7 +32,7 @@ fn io_impl(dir: Direction, io_type: u8, nr: u8, comptime T: type) u32 {
.io_type = io_type,
.nr = nr,
};
- return @bitCast(u32, request);
+ return @as(u32, @bitCast(request));
}
pub fn IO(io_type: u8, nr: u8) u32 {
diff --git a/lib/std/os/linux/start_pie.zig b/lib/std/os/linux/start_pie.zig
index c9b1cb1e92..cf557f9d66 100644
--- a/lib/std/os/linux/start_pie.zig
+++ b/lib/std/os/linux/start_pie.zig
@@ -103,17 +103,17 @@ pub fn relocate(phdrs: []elf.Phdr) void {
// Apply the relocations.
if (rel_addr != 0) {
- const rel = std.mem.bytesAsSlice(elf.Rel, @ptrFromInt([*]u8, rel_addr)[0..rel_size]);
+ const rel = std.mem.bytesAsSlice(elf.Rel, @as([*]u8, @ptrFromInt(rel_addr))[0..rel_size]);
for (rel) |r| {
if (r.r_type() != R_RELATIVE) continue;
- @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr;
+ @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr;
}
}
if (rela_addr != 0) {
- const rela = std.mem.bytesAsSlice(elf.Rela, @ptrFromInt([*]u8, rela_addr)[0..rela_size]);
+ const rela = std.mem.bytesAsSlice(elf.Rela, @as([*]u8, @ptrFromInt(rela_addr))[0..rela_size]);
for (rela) |r| {
if (r.r_type() != R_RELATIVE) continue;
- @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr + @bitCast(usize, r.r_addend);
+ @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr + @as(usize, @bitCast(r.r_addend));
}
}
}
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
index e1ad36b2e5..170bde6334 100644
--- a/lib/std/os/linux/test.zig
+++ b/lib/std/os/linux/test.zig
@@ -50,7 +50,7 @@ test "timer" {
.it_value = time_interval,
};
- err = linux.getErrno(linux.timerfd_settime(@intCast(i32, timer_fd), 0, &new_time, null));
+ err = linux.getErrno(linux.timerfd_settime(@as(i32, @intCast(timer_fd)), 0, &new_time, null));
try expect(err == .SUCCESS);
var event = linux.epoll_event{
@@ -58,13 +58,13 @@ test "timer" {
.data = linux.epoll_data{ .ptr = 0 },
};
- err = linux.getErrno(linux.epoll_ctl(@intCast(i32, epoll_fd), linux.EPOLL.CTL_ADD, @intCast(i32, timer_fd), &event));
+ err = linux.getErrno(linux.epoll_ctl(@as(i32, @intCast(epoll_fd)), linux.EPOLL.CTL_ADD, @as(i32, @intCast(timer_fd)), &event));
try expect(err == .SUCCESS);
const events_one: linux.epoll_event = undefined;
var events = [_]linux.epoll_event{events_one} ** 8;
- err = linux.getErrno(linux.epoll_wait(@intCast(i32, epoll_fd), &events, 8, -1));
+ err = linux.getErrno(linux.epoll_wait(@as(i32, @intCast(epoll_fd)), &events, 8, -1));
try expect(err == .SUCCESS);
}
@@ -91,11 +91,11 @@ test "statx" {
}
try expect(stat_buf.mode == statx_buf.mode);
- try expect(@bitCast(u32, stat_buf.uid) == statx_buf.uid);
- try expect(@bitCast(u32, stat_buf.gid) == statx_buf.gid);
- try expect(@bitCast(u64, @as(i64, stat_buf.size)) == statx_buf.size);
- try expect(@bitCast(u64, @as(i64, stat_buf.blksize)) == statx_buf.blksize);
- try expect(@bitCast(u64, @as(i64, stat_buf.blocks)) == statx_buf.blocks);
+ try expect(@as(u32, @bitCast(stat_buf.uid)) == statx_buf.uid);
+ try expect(@as(u32, @bitCast(stat_buf.gid)) == statx_buf.gid);
+ try expect(@as(u64, @bitCast(@as(i64, stat_buf.size))) == statx_buf.size);
+ try expect(@as(u64, @bitCast(@as(i64, stat_buf.blksize))) == statx_buf.blksize);
+ try expect(@as(u64, @bitCast(@as(i64, stat_buf.blocks))) == statx_buf.blocks);
}
test "user and group ids" {
diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig
index b60a2ed388..94fa0d1a09 100644
--- a/lib/std/os/linux/tls.zig
+++ b/lib/std/os/linux/tls.zig
@@ -205,7 +205,7 @@ fn initTLS(phdrs: []elf.Phdr) void {
// the data stored in the PT_TLS segment is p_filesz and may be less
// than the former
tls_align_factor = phdr.p_align;
- tls_data = @ptrFromInt([*]u8, img_base + phdr.p_vaddr)[0..phdr.p_filesz];
+ tls_data = @as([*]u8, @ptrFromInt(img_base + phdr.p_vaddr))[0..phdr.p_filesz];
tls_data_alloc_size = phdr.p_memsz;
} else {
tls_align_factor = @alignOf(usize);
@@ -263,12 +263,12 @@ fn initTLS(phdrs: []elf.Phdr) void {
.dtv_offset = dtv_offset,
.data_offset = data_offset,
.data_size = tls_data_alloc_size,
- .gdt_entry_number = @bitCast(usize, @as(isize, -1)),
+ .gdt_entry_number = @as(usize, @bitCast(@as(isize, -1))),
};
}
inline fn alignPtrCast(comptime T: type, ptr: [*]u8) *T {
- return @ptrCast(*T, @alignCast(@alignOf(T), ptr));
+ return @ptrCast(@alignCast(ptr));
}
/// Initializes all the fields of the static TLS area and returns the computed
diff --git a/lib/std/os/linux/vdso.zig b/lib/std/os/linux/vdso.zig
index c7dc7ae599..50e7ce1dfd 100644
--- a/lib/std/os/linux/vdso.zig
+++ b/lib/std/os/linux/vdso.zig
@@ -8,7 +8,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const vdso_addr = std.os.system.getauxval(std.elf.AT_SYSINFO_EHDR);
if (vdso_addr == 0) return 0;
- const eh = @ptrFromInt(*elf.Ehdr, vdso_addr);
+ const eh = @as(*elf.Ehdr, @ptrFromInt(vdso_addr));
var ph_addr: usize = vdso_addr + eh.e_phoff;
var maybe_dynv: ?[*]usize = null;
@@ -19,14 +19,14 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
i += 1;
ph_addr += eh.e_phentsize;
}) {
- const this_ph = @ptrFromInt(*elf.Phdr, ph_addr);
+ const this_ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
switch (this_ph.p_type) {
// On WSL1 as well as older kernels, the VDSO ELF image is pre-linked in the upper half
// of the memory space (e.g. p_vaddr = 0xffffffffff700000 on WSL1).
// Wrapping operations are used on this line as well as subsequent calculations relative to base
// (lines 47, 78) to ensure no overflow check is tripped.
elf.PT_LOAD => base = vdso_addr +% this_ph.p_offset -% this_ph.p_vaddr,
- elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, vdso_addr + this_ph.p_offset),
+ elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(vdso_addr + this_ph.p_offset)),
else => {},
}
}
@@ -45,11 +45,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
while (dynv[i] != 0) : (i += 2) {
const p = base +% dynv[i + 1];
switch (dynv[i]) {
- elf.DT_STRTAB => maybe_strings = @ptrFromInt([*]u8, p),
- elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p),
- elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]linux.Elf_Symndx, p),
- elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p),
- elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p),
+ elf.DT_STRTAB => maybe_strings = @as([*]u8, @ptrFromInt(p)),
+ elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)),
+ elf.DT_HASH => maybe_hashtab = @as([*]linux.Elf_Symndx, @ptrFromInt(p)),
+ elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)),
+ elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)),
else => {},
}
}
@@ -65,10 +65,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
var i: usize = 0;
while (i < hashtab[1]) : (i += 1) {
- if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info & 0xf) & OK_TYPES)) continue;
- if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info >> 4) & OK_BINDS)) continue;
+ if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info & 0xf)) & OK_TYPES)) continue;
+ if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info >> 4)) & OK_BINDS)) continue;
if (0 == syms[i].st_shndx) continue;
- const sym_name = @ptrCast([*:0]u8, strings + syms[i].st_name);
+ const sym_name = @as([*:0]u8, @ptrCast(strings + syms[i].st_name));
if (!mem.eql(u8, name, mem.sliceTo(sym_name, 0))) continue;
if (maybe_versym) |versym| {
if (!checkver(maybe_verdef.?, versym[i], vername, strings))
@@ -82,15 +82,15 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool {
var def = def_arg;
- const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+ const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff;
while (true) {
if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
break;
if (def.vd_next == 0)
return false;
- def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next);
+ def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next));
}
- const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux);
- const vda_name = @ptrCast([*:0]u8, strings + aux.vda_name);
+ const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux));
+ const vda_name = @as([*:0]u8, @ptrCast(strings + aux.vda_name));
return mem.eql(u8, vername, mem.sliceTo(vda_name, 0));
}