about summary refs log tree commit diff
path: root/lib/std/os/linux
diff options
context:
space:
mode:
Diffstat (limited to 'lib/std/os/linux')
-rw-r--r--  lib/std/os/linux/arm-eabi.zig   96
-rw-r--r--  lib/std/os/linux/arm64.zig      87
-rw-r--r--  lib/std/os/linux/riscv64.zig    86
-rw-r--r--  lib/std/os/linux/test.zig       46
-rw-r--r--  lib/std/os/linux/tls.zig       285
-rw-r--r--  lib/std/os/linux/vdso.zig       91
-rw-r--r--  lib/std/os/linux/x86_64.zig     97
7 files changed, 788 insertions, 0 deletions
diff --git a/lib/std/os/linux/arm-eabi.zig b/lib/std/os/linux/arm-eabi.zig
new file mode 100644
index 0000000000..a15234d742
--- /dev/null
+++ b/lib/std/os/linux/arm-eabi.zig
@@ -0,0 +1,96 @@
+// 32-bit ARM (EABI) syscall wrappers.  As the register constraints below
+// show, the syscall number is passed in r7 and arguments in r0-r5; the
+// kernel is entered with "svc #0" and the result comes back in r0.  The
+// "memory" clobber keeps the compiler from caching memory across the call.
+pub fn syscall0(number: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={r0}" (-> usize)
+ : [number] "{r7}" (number)
+ : "memory"
+ );
+}
+
+pub fn syscall1(number: usize, arg1: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={r0}" (-> usize)
+ : [number] "{r7}" (number),
+ [arg1] "{r0}" (arg1)
+ : "memory"
+ );
+}
+
+pub fn syscall2(number: usize, arg1: usize, arg2: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={r0}" (-> usize)
+ : [number] "{r7}" (number),
+ [arg1] "{r0}" (arg1),
+ [arg2] "{r1}" (arg2)
+ : "memory"
+ );
+}
+
+pub fn syscall3(number: usize, arg1: usize, arg2: usize, arg3: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={r0}" (-> usize)
+ : [number] "{r7}" (number),
+ [arg1] "{r0}" (arg1),
+ [arg2] "{r1}" (arg2),
+ [arg3] "{r2}" (arg3)
+ : "memory"
+ );
+}
+
+pub fn syscall4(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={r0}" (-> usize)
+ : [number] "{r7}" (number),
+ [arg1] "{r0}" (arg1),
+ [arg2] "{r1}" (arg2),
+ [arg3] "{r2}" (arg3),
+ [arg4] "{r3}" (arg4)
+ : "memory"
+ );
+}
+
+pub fn syscall5(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={r0}" (-> usize)
+ : [number] "{r7}" (number),
+ [arg1] "{r0}" (arg1),
+ [arg2] "{r1}" (arg2),
+ [arg3] "{r2}" (arg3),
+ [arg4] "{r3}" (arg4),
+ [arg5] "{r4}" (arg5)
+ : "memory"
+ );
+}
+
+pub fn syscall6(
+ number: usize,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={r0}" (-> usize)
+ : [number] "{r7}" (number),
+ [arg1] "{r0}" (arg1),
+ [arg2] "{r1}" (arg2),
+ [arg3] "{r2}" (arg3),
+ [arg4] "{r3}" (arg4),
+ [arg5] "{r4}" (arg5),
+ [arg6] "{r5}" (arg6)
+ : "memory"
+ );
+}
+
+/// This matches the libc clone function.
+pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: u32, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
+
+// LLVM calls this when the read-tp-hard feature is set to false. Currently, there is no way to pass
+// that to llvm via zig, see https://github.com/ziglang/zig/issues/2883.
+// LLVM expects libc to provide this function as __aeabi_read_tp, so it is exported if needed from special/c.zig.
+// NOTE(review): `pub extern fn` with a body is unusual (`extern` normally
+// only declares); confirm this definition form is accepted by the targeted
+// Zig version.  The inline asm reads the thread pointer from the CP15 c13
+// register selected by the "mrc p15, 0, ..., c13, c0, 3" encoding.
+pub extern fn getThreadPointer() usize {
+ return asm volatile ("mrc p15, 0, %[ret], c13, c0, 3"
+ : [ret] "=r" (-> usize)
+ );
+}
diff --git a/lib/std/os/linux/arm64.zig b/lib/std/os/linux/arm64.zig
new file mode 100644
index 0000000000..28da9af1c6
--- /dev/null
+++ b/lib/std/os/linux/arm64.zig
@@ -0,0 +1,87 @@
+// AArch64 syscall wrappers.  The syscall number is passed in x8 and
+// arguments in x0-x5; the kernel is entered with "svc #0" and the result
+// comes back in x0.
+// NOTE(review): these wrappers clobber "cc" in addition to "memory",
+// unlike the arm-eabi wrappers in this change - confirm the difference is
+// intentional.
+pub fn syscall0(number: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={x0}" (-> usize)
+ : [number] "{x8}" (number)
+ : "memory", "cc"
+ );
+}
+
+pub fn syscall1(number: usize, arg1: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={x0}" (-> usize)
+ : [number] "{x8}" (number),
+ [arg1] "{x0}" (arg1)
+ : "memory", "cc"
+ );
+}
+
+pub fn syscall2(number: usize, arg1: usize, arg2: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={x0}" (-> usize)
+ : [number] "{x8}" (number),
+ [arg1] "{x0}" (arg1),
+ [arg2] "{x1}" (arg2)
+ : "memory", "cc"
+ );
+}
+
+pub fn syscall3(number: usize, arg1: usize, arg2: usize, arg3: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={x0}" (-> usize)
+ : [number] "{x8}" (number),
+ [arg1] "{x0}" (arg1),
+ [arg2] "{x1}" (arg2),
+ [arg3] "{x2}" (arg3)
+ : "memory", "cc"
+ );
+}
+
+pub fn syscall4(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={x0}" (-> usize)
+ : [number] "{x8}" (number),
+ [arg1] "{x0}" (arg1),
+ [arg2] "{x1}" (arg2),
+ [arg3] "{x2}" (arg3),
+ [arg4] "{x3}" (arg4)
+ : "memory", "cc"
+ );
+}
+
+pub fn syscall5(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={x0}" (-> usize)
+ : [number] "{x8}" (number),
+ [arg1] "{x0}" (arg1),
+ [arg2] "{x1}" (arg2),
+ [arg3] "{x2}" (arg3),
+ [arg4] "{x3}" (arg4),
+ [arg5] "{x4}" (arg5)
+ : "memory", "cc"
+ );
+}
+
+pub fn syscall6(
+ number: usize,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+) usize {
+ return asm volatile ("svc #0"
+ : [ret] "={x0}" (-> usize)
+ : [number] "{x8}" (number),
+ [arg1] "{x0}" (arg1),
+ [arg2] "{x1}" (arg2),
+ [arg3] "{x2}" (arg3),
+ [arg4] "{x3}" (arg4),
+ [arg5] "{x4}" (arg5),
+ [arg6] "{x5}" (arg6)
+ : "memory", "cc"
+ );
+}
+
+/// This matches the libc clone function.
+pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: u32, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
diff --git a/lib/std/os/linux/riscv64.zig b/lib/std/os/linux/riscv64.zig
new file mode 100644
index 0000000000..7bfe0295d3
--- /dev/null
+++ b/lib/std/os/linux/riscv64.zig
@@ -0,0 +1,86 @@
+// RISC-V 64 syscall wrappers.  The syscall number is passed in x17 and
+// arguments in x10-x15; the kernel is entered with "ecall" and the result
+// comes back in x10.
+pub fn syscall0(number: usize) usize {
+ return asm volatile ("ecall"
+ : [ret] "={x10}" (-> usize)
+ : [number] "{x17}" (number)
+ : "memory"
+ );
+}
+
+pub fn syscall1(number: usize, arg1: usize) usize {
+ return asm volatile ("ecall"
+ : [ret] "={x10}" (-> usize)
+ : [number] "{x17}" (number),
+ [arg1] "{x10}" (arg1)
+ : "memory"
+ );
+}
+
+pub fn syscall2(number: usize, arg1: usize, arg2: usize) usize {
+ return asm volatile ("ecall"
+ : [ret] "={x10}" (-> usize)
+ : [number] "{x17}" (number),
+ [arg1] "{x10}" (arg1),
+ [arg2] "{x11}" (arg2)
+ : "memory"
+ );
+}
+
+pub fn syscall3(number: usize, arg1: usize, arg2: usize, arg3: usize) usize {
+ return asm volatile ("ecall"
+ : [ret] "={x10}" (-> usize)
+ : [number] "{x17}" (number),
+ [arg1] "{x10}" (arg1),
+ [arg2] "{x11}" (arg2),
+ [arg3] "{x12}" (arg3)
+ : "memory"
+ );
+}
+
+pub fn syscall4(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
+ return asm volatile ("ecall"
+ : [ret] "={x10}" (-> usize)
+ : [number] "{x17}" (number),
+ [arg1] "{x10}" (arg1),
+ [arg2] "{x11}" (arg2),
+ [arg3] "{x12}" (arg3),
+ [arg4] "{x13}" (arg4)
+ : "memory"
+ );
+}
+
+pub fn syscall5(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
+ return asm volatile ("ecall"
+ : [ret] "={x10}" (-> usize)
+ : [number] "{x17}" (number),
+ [arg1] "{x10}" (arg1),
+ [arg2] "{x11}" (arg2),
+ [arg3] "{x12}" (arg3),
+ [arg4] "{x13}" (arg4),
+ [arg5] "{x14}" (arg5)
+ : "memory"
+ );
+}
+
+pub fn syscall6(
+ number: usize,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+) usize {
+ return asm volatile ("ecall"
+ : [ret] "={x10}" (-> usize)
+ : [number] "{x17}" (number),
+ [arg1] "{x10}" (arg1),
+ [arg2] "{x11}" (arg2),
+ [arg3] "{x12}" (arg3),
+ [arg4] "{x13}" (arg4),
+ [arg5] "{x14}" (arg5),
+ [arg6] "{x15}" (arg6)
+ : "memory"
+ );
+}
+
+/// This matches the libc clone function.
+pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: u32, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
diff --git a/lib/std/os/linux/test.zig b/lib/std/os/linux/test.zig
new file mode 100644
index 0000000000..97bbcc402d
--- /dev/null
+++ b/lib/std/os/linux/test.zig
@@ -0,0 +1,46 @@
+const std = @import("../../std.zig");
+const builtin = @import("builtin");
+const linux = std.os.linux;
+const mem = std.mem;
+const elf = std.elf;
+const expect = std.testing.expect;
+
+// Smoke test: the wrapped getpid syscall must return a nonzero pid.
+test "getpid" {
+ expect(linux.getpid() != 0);
+}
+
+// Creates an epoll instance and a 2 ms periodic timerfd, registers the
+// timer with epoll, and waits for it to fire; each wrapper's errno is
+// checked along the way.
+test "timer" {
+ const epoll_fd = linux.epoll_create();
+ var err: usize = linux.getErrno(epoll_fd);
+ expect(err == 0);
+
+ const timer_fd = linux.timerfd_create(linux.CLOCK_MONOTONIC, 0);
+ expect(linux.getErrno(timer_fd) == 0);
+
+ // 2 ms, used both as the first expiration and as the repeat interval.
+ const time_interval = linux.timespec{
+ .tv_sec = 0,
+ .tv_nsec = 2000000,
+ };
+
+ const new_time = linux.itimerspec{
+ .it_interval = time_interval,
+ .it_value = time_interval,
+ };
+
+ err = linux.timerfd_settime(@intCast(i32, timer_fd), 0, &new_time, null);
+ expect(err == 0);
+
+ var event = linux.epoll_event{
+ .events = linux.EPOLLIN | linux.EPOLLOUT | linux.EPOLLET,
+ .data = linux.epoll_data{ .ptr = 0 },
+ };
+
+ err = linux.epoll_ctl(@intCast(i32, epoll_fd), linux.EPOLL_CTL_ADD, @intCast(i32, timer_fd), &event);
+ expect(err == 0);
+
+ const events_one: linux.epoll_event = undefined;
+ var events = [_]linux.epoll_event{events_one} ** 8;
+
+ // TODO implicit cast from *[N]T to [*]T
+ // NOTE(review): the result of epoll_wait below is stored in err but never
+ // checked - presumably a final expect on err is missing.
+ err = linux.epoll_wait(@intCast(i32, epoll_fd), @ptrCast([*]linux.epoll_event, &events), 8, -1);
+}
diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig
new file mode 100644
index 0000000000..167839570e
--- /dev/null
+++ b/lib/std/os/linux/tls.zig
@@ -0,0 +1,285 @@
+const std = @import("std");
+const os = std.os;
+const mem = std.mem;
+const elf = std.elf;
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+
+// This file implements the two TLS variants [1] used by ELF-based systems.
+//
+// The variant I has the following layout in memory:
+// -------------------------------------------------------
+// | DTV | Zig | DTV | Alignment | TLS |
+// | storage | thread data | pointer | | block |
+// ------------------------^------------------------------
+// `-- The thread pointer register points here
+//
+// In this case we allocate additional space for our control structure that's
+// placed _before_ the DTV pointer together with the DTV.
+//
+// NOTE: Some systems such as power64 or mips use this variant with a twist: the
+// alignment is not present and the tp and DTV addresses are offset by a
+// constant.
+//
+// On the other hand the variant II has the following layout in memory:
+// ---------------------------------------
+// | TLS | TCB | Zig | DTV |
+// | block | | thread data | storage |
+// --------^------------------------------
+// `-- The thread pointer register points here
+//
+// The structure of the TCB is not defined by the ABI so we reserve enough space
+// for a single pointer as some architectures such as i386 and x86_64 need a
+// pointer to the TCB block itself at the address pointed by the tp.
+//
+// In this case the control structure and DTV are placed one after another right
+// after the TLS block data.
+//
+// At the moment the DTV is very simple since we only support static TLS, all we
+// need is a two word vector to hold the number of entries (1) and the address
+// of the first TLS block.
+//
+// [1] https://www.akkadia.org/drepper/tls.pdf
+
+const TLSVariant = enum {
+ VariantI,
+ VariantII,
+};
+
+// Which of the two layouts described above the target architecture uses.
+const tls_variant = switch (builtin.arch) {
+ .arm, .armeb, .aarch64, .aarch64_be, .riscv32, .riscv64, .mipsel => TLSVariant.VariantI,
+ .x86_64, .i386 => TLSVariant.VariantII,
+ else => @compileError("undefined tls_variant for this architecture"),
+};
+
+// Controls how many bytes are reserved for the Thread Control Block
+const tls_tcb_size = switch (builtin.arch) {
+ // ARM EABI mandates enough space for two pointers: the first one points to
+ // the DTV while the second one is unspecified but reserved
+ .arm, .armeb, .aarch64, .aarch64_be => 2 * @sizeOf(usize),
+ else => @sizeOf(usize),
+};
+
+// Controls if the TCB should be aligned according to the TLS segment p_align
+const tls_tcb_align_size = switch (builtin.arch) {
+ .arm, .armeb, .aarch64, .aarch64_be => true,
+ else => false,
+};
+
+// Controls if the TP points to the end of the TCB instead of its beginning
+const tls_tp_points_past_tcb = switch (builtin.arch) {
+ .riscv32, .riscv64, .mipsel, .powerpc64, .powerpc64le => true,
+ else => false,
+};
+
+// Check if the architecture-specific parameters look correct
+comptime {
+ if (tls_tcb_align_size and tls_variant != TLSVariant.VariantI) {
+ @compileError("tls_tcb_align_size is only meaningful for variant I TLS");
+ }
+}
+
+// Some architectures add some offset to the tp and dtv addresses in order to
+// make the generated code more efficient
+
+const tls_tp_offset = switch (builtin.arch) {
+ .mipsel => 0x7000,
+ else => 0,
+};
+
+const tls_dtv_offset = switch (builtin.arch) {
+ .mipsel => 0x8000,
+ .riscv32, .riscv64 => 0x800,
+ else => 0,
+};
+
+// Per-thread storage for Zig's use
+const CustomData = packed struct {};
+
+// Dynamic Thread Vector
+const DTV = packed struct {
+ entries: usize,
+ tls_block: [1]usize,
+};
+
+// Holds all the information about the process TLS image
+const TLSImage = struct {
+ // Initialization data to copy into each new thread's TLS block.
+ data_src: []u8,
+ // Total bytes needed for one thread's TLS area (DTV + TCB + block).
+ alloc_size: usize,
+ // Byte offsets of the TCB, DTV and TLS block within that area.
+ tcb_offset: usize,
+ dtv_offset: usize,
+ data_offset: usize,
+};
+
+// Filled in by initTLS when the executable has a PT_TLS segment.
+pub var tls_image: ?TLSImage = null;
+
+/// Installs `addr` in the architecture's thread-pointer register, going
+/// through the kernel (arch_prctl / set_tls) where userspace cannot write
+/// the register directly, and asserting that the syscall succeeded.
+pub fn setThreadPointer(addr: usize) void {
+ switch (builtin.arch) {
+ .x86_64 => {
+ const rc = std.os.linux.syscall2(std.os.linux.SYS_arch_prctl, std.os.linux.ARCH_SET_FS, addr);
+ assert(rc == 0);
+ },
+ .aarch64 => {
+ asm volatile (
+ \\ msr tpidr_el0, %[addr]
+ :
+ : [addr] "r" (addr)
+ );
+ },
+ // NOTE(review): the |arm| capture is unused; presumably it can be dropped.
+ .arm => |arm| {
+ const rc = std.os.linux.syscall1(std.os.linux.SYS_set_tls, addr);
+ assert(rc == 0);
+ },
+ .riscv64 => {
+ asm volatile (
+ \\ mv tp, %[addr]
+ :
+ : [addr] "r" (addr)
+ );
+ },
+ else => @compileError("Unsupported architecture"),
+ }
+}
+
+/// Walks the ELF auxiliary vector and program headers looking for the
+/// PT_TLS segment; when one is found, computes the process TLS layout for
+/// the current architecture and stores it in `tls_image`.  Returns the
+/// PT_GNU_STACK header if one was seen, null otherwise.
+pub fn initTLS() ?*elf.Phdr {
+ var tls_phdr: ?*elf.Phdr = null;
+ var img_base: usize = 0;
+
+ const auxv = std.os.linux.elf_aux_maybe.?;
+ var at_phent: usize = undefined;
+ var at_phnum: usize = undefined;
+ var at_phdr: usize = undefined;
+ var at_hwcap: usize = undefined;
+
+ // NOTE(review): the four locals above stay `undefined` unless the
+ // corresponding auxv entry is present; the code below assumes the kernel
+ // always provides AT_PHENT/AT_PHNUM/AT_PHDR/AT_HWCAP - confirm.
+ var i: usize = 0;
+ while (auxv[i].a_type != std.elf.AT_NULL) : (i += 1) {
+ switch (auxv[i].a_type) {
+ elf.AT_PHENT => at_phent = auxv[i].a_un.a_val,
+ elf.AT_PHNUM => at_phnum = auxv[i].a_un.a_val,
+ elf.AT_PHDR => at_phdr = auxv[i].a_un.a_val,
+ elf.AT_HWCAP => at_hwcap = auxv[i].a_un.a_val,
+ else => continue,
+ }
+ }
+
+ // Sanity check
+ assert(at_phent == @sizeOf(elf.Phdr));
+
+ // Search the TLS section
+ const phdrs = (@intToPtr([*]elf.Phdr, at_phdr))[0..at_phnum];
+
+ var gnu_stack: ?*elf.Phdr = null;
+
+ for (phdrs) |*phdr| {
+ switch (phdr.p_type) {
+ // PT_PHDR gives the load bias: runtime phdr address minus its vaddr.
+ elf.PT_PHDR => img_base = at_phdr - phdr.p_vaddr,
+ elf.PT_TLS => tls_phdr = phdr,
+ elf.PT_GNU_STACK => gnu_stack = phdr,
+ else => continue,
+ }
+ }
+
+ if (tls_phdr) |phdr| {
+ // If the cpu is arm-based, check if it supports the TLS register
+ if (builtin.arch == builtin.Arch.arm and at_hwcap & std.os.linux.HWCAP_TLS == 0) {
+ // If the CPU does not support TLS via a coprocessor register,
+ // a kernel helper function can be used instead on certain linux kernels.
+ // See linux/arch/arm/include/asm/tls.h and musl/src/thread/arm/__set_thread_area.c.
+ @panic("TODO: Implement ARM fallback TLS functionality");
+ }
+
+ // Offsets into the allocated TLS area
+ var tcb_offset: usize = undefined;
+ var dtv_offset: usize = undefined;
+ var data_offset: usize = undefined;
+ var thread_data_offset: usize = undefined;
+ // Compute the total size of the ABI-specific data plus our own control
+ // structures
+ const alloc_size = switch (tls_variant) {
+ // Variant I: DTV and custom data first, then the (aligned) TCB,
+ // then the TLS block - matching the diagram at the top of the file.
+ .VariantI => blk: {
+ var l: usize = 0;
+ dtv_offset = l;
+ l += @sizeOf(DTV);
+ thread_data_offset = l;
+ l += @sizeOf(CustomData);
+ l = mem.alignForward(l, phdr.p_align);
+ tcb_offset = l;
+ if (tls_tcb_align_size) {
+ l += mem.alignForward(tls_tcb_size, phdr.p_align);
+ } else {
+ l += tls_tcb_size;
+ }
+ data_offset = l;
+ l += phdr.p_memsz;
+ break :blk l;
+ },
+ // Variant II: TLS block first (aligned), then TCB, custom data, DTV.
+ .VariantII => blk: {
+ var l: usize = 0;
+ data_offset = l;
+ l += phdr.p_memsz;
+ l = mem.alignForward(l, phdr.p_align);
+ tcb_offset = l;
+ l += tls_tcb_size;
+ thread_data_offset = l;
+ l += @sizeOf(CustomData);
+ dtv_offset = l;
+ l += @sizeOf(DTV);
+ break :blk l;
+ },
+ };
+
+ tls_image = TLSImage{
+ .data_src = @intToPtr([*]u8, phdr.p_vaddr + img_base)[0..phdr.p_filesz],
+ .alloc_size = alloc_size,
+ .tcb_offset = tcb_offset,
+ .dtv_offset = dtv_offset,
+ .data_offset = data_offset,
+ };
+ }
+
+ return gnu_stack;
+}
+
+/// Lays out a fresh TLS area at `addr` (which must hold at least
+/// `tls_image.alloc_size` bytes): zeroes it, writes the DTV and TCB,
+/// copies the initialization image, and returns the value to load into the
+/// thread-pointer register (including any fixed per-arch ABI offset).
+pub fn copyTLS(addr: usize) usize {
+ const tls_img = tls_image.?;
+
+ // Be paranoid, clear the area we're going to use
+ @memset(@intToPtr([*]u8, addr), 0, tls_img.alloc_size);
+ // Prepare the DTV
+ const dtv = @intToPtr(*DTV, addr + tls_img.dtv_offset);
+ dtv.entries = 1;
+ dtv.tls_block[0] = addr + tls_img.data_offset + tls_dtv_offset;
+ // Set-up the TCB
+ const tcb_ptr = @intToPtr(*usize, addr + tls_img.tcb_offset);
+ if (tls_variant == TLSVariant.VariantI) {
+ tcb_ptr.* = addr + tls_img.dtv_offset;
+ } else {
+ tcb_ptr.* = addr + tls_img.tcb_offset;
+ }
+ // Copy the data
+ @memcpy(@intToPtr([*]u8, addr + tls_img.data_offset), tls_img.data_src.ptr, tls_img.data_src.len);
+
+ // Return the corrected (if needed) value for the tp register
+ return addr + tls_tp_offset +
+ if (tls_tp_points_past_tcb) tls_img.data_offset else tls_img.tcb_offset;
+}
+
+var main_thread_tls_buffer: [256]u8 align(32) = undefined;
+
+/// Returns the address of a TLS area of at least `size` bytes: the static
+/// buffer above for small requests, otherwise fresh anonymous pages.
+/// NOTE(review): the mmap'd pages are never unmapped here, and the static
+/// buffer is a single shared allocation - presumably this is only called
+/// once, for the main thread; confirm at the call site.
+pub fn allocateTLS(size: usize) usize {
+ // Small TLS allocation, use our local buffer
+ if (size < main_thread_tls_buffer.len) {
+ return @ptrToInt(&main_thread_tls_buffer);
+ }
+
+ const slice = os.mmap(
+ null,
+ size,
+ os.PROT_READ | os.PROT_WRITE,
+ os.MAP_PRIVATE | os.MAP_ANONYMOUS,
+ -1,
+ 0,
+ ) catch @panic("out of memory");
+
+ return @ptrToInt(slice.ptr);
+}
diff --git a/lib/std/os/linux/vdso.zig b/lib/std/os/linux/vdso.zig
new file mode 100644
index 0000000000..86d54bfbf8
--- /dev/null
+++ b/lib/std/os/linux/vdso.zig
@@ -0,0 +1,91 @@
+const std = @import("../../std.zig");
+const elf = std.elf;
+const linux = std.os.linux;
+const mem = std.mem;
+const maxInt = std.math.maxInt;
+
+/// Resolves `name` (with symbol version `vername`) in the vDSO mapped by
+/// the kernel.  Locates the vDSO ELF header via AT_SYSINFO_EHDR, walks its
+/// program headers for the load bias and dynamic section, then linearly
+/// scans the DT_HASH symbol table for a defined global/weak symbol of an
+/// acceptable type with the requested name (and, when version information
+/// is present, the requested version).  Returns the symbol's address, or 0
+/// on any failure.
+pub fn lookup(vername: []const u8, name: []const u8) usize {
+ const vdso_addr = std.os.system.getauxval(std.elf.AT_SYSINFO_EHDR);
+ if (vdso_addr == 0) return 0;
+
+ const eh = @intToPtr(*elf.Ehdr, vdso_addr);
+ var ph_addr: usize = vdso_addr + eh.e_phoff;
+ const ph = @intToPtr(*elf.Phdr, ph_addr);
+
+ var maybe_dynv: ?[*]usize = null;
+ // maxInt(usize) doubles as the "no PT_LOAD seen" sentinel.
+ var base: usize = maxInt(usize);
+ {
+ var i: usize = 0;
+ while (i < eh.e_phnum) : ({
+ i += 1;
+ ph_addr += eh.e_phentsize;
+ }) {
+ const this_ph = @intToPtr(*elf.Phdr, ph_addr);
+ switch (this_ph.p_type) {
+ elf.PT_LOAD => base = vdso_addr + this_ph.p_offset - this_ph.p_vaddr,
+ elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, vdso_addr + this_ph.p_offset),
+ else => {},
+ }
+ }
+ }
+ const dynv = maybe_dynv orelse return 0;
+ if (base == maxInt(usize)) return 0;
+
+ var maybe_strings: ?[*]u8 = null;
+ var maybe_syms: ?[*]elf.Sym = null;
+ var maybe_hashtab: ?[*]linux.Elf_Symndx = null;
+ var maybe_versym: ?[*]u16 = null;
+ var maybe_verdef: ?*elf.Verdef = null;
+
+ // The dynamic section is (tag, value) pairs terminated by a zero tag.
+ {
+ var i: usize = 0;
+ while (dynv[i] != 0) : (i += 2) {
+ const p = base + dynv[i + 1];
+ switch (dynv[i]) {
+ elf.DT_STRTAB => maybe_strings = @intToPtr([*]u8, p),
+ elf.DT_SYMTAB => maybe_syms = @intToPtr([*]elf.Sym, p),
+ elf.DT_HASH => maybe_hashtab = @intToPtr([*]linux.Elf_Symndx, p),
+ elf.DT_VERSYM => maybe_versym = @intToPtr([*]u16, p),
+ elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p),
+ else => {},
+ }
+ }
+ }
+
+ const strings = maybe_strings orelse return 0;
+ const syms = maybe_syms orelse return 0;
+ const hashtab = maybe_hashtab orelse return 0;
+ // Version symbols are meaningless without the version definitions.
+ if (maybe_verdef == null) maybe_versym = null;
+
+ const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON);
+ const OK_BINDS = (1 << elf.STB_GLOBAL | 1 << elf.STB_WEAK | 1 << elf.STB_GNU_UNIQUE);
+
+ // Per the ELF hash-table layout, hashtab[1] (nchain) equals the number of
+ // symbol table entries, so this visits every symbol.
+ var i: usize = 0;
+ while (i < hashtab[1]) : (i += 1) {
+ if (0 == (u32(1) << @intCast(u5, syms[i].st_info & 0xf) & OK_TYPES)) continue;
+ if (0 == (u32(1) << @intCast(u5, syms[i].st_info >> 4) & OK_BINDS)) continue;
+ if (0 == syms[i].st_shndx) continue;
+ if (!mem.eql(u8, name, mem.toSliceConst(u8, strings + syms[i].st_name))) continue;
+ if (maybe_versym) |versym| {
+ if (!checkver(maybe_verdef.?, versym[i], vername, strings))
+ continue;
+ }
+ return base + syms[i].st_value;
+ }
+
+ return 0;
+}
+
+/// Walks the verdef chain starting at `def_arg` for the entry whose index
+/// matches the low 15 bits of `vsym_arg` (skipping VER_FLG_BASE entries),
+/// then reports whether that definition's name equals `vername`.  Returns
+/// false when the chain ends without a match.
+fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool {
+ var def = def_arg;
+ const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+ while (true) {
+ if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
+ break;
+ if (def.vd_next == 0)
+ return false;
+ def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next);
+ }
+ const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux);
+ return mem.eql(u8, vername, mem.toSliceConst(u8, strings + aux.vda_name));
+}
diff --git a/lib/std/os/linux/x86_64.zig b/lib/std/os/linux/x86_64.zig
new file mode 100644
index 0000000000..0f3a36636a
--- /dev/null
+++ b/lib/std/os/linux/x86_64.zig
@@ -0,0 +1,97 @@
+usingnamespace @import("../bits.zig");
+
+// x86_64 syscall wrappers.  The syscall number is passed in rax and
+// arguments in rdi, rsi, rdx, r10, r8, r9; the result comes back in rax.
+// The `syscall` instruction clobbers rcx and r11, hence the clobber lists.
+pub fn syscall0(number: usize) usize {
+ return asm volatile ("syscall"
+ : [ret] "={rax}" (-> usize)
+ : [number] "{rax}" (number)
+ : "rcx", "r11", "memory"
+ );
+}
+
+pub fn syscall1(number: usize, arg1: usize) usize {
+ return asm volatile ("syscall"
+ : [ret] "={rax}" (-> usize)
+ : [number] "{rax}" (number),
+ [arg1] "{rdi}" (arg1)
+ : "rcx", "r11", "memory"
+ );
+}
+
+pub fn syscall2(number: usize, arg1: usize, arg2: usize) usize {
+ return asm volatile ("syscall"
+ : [ret] "={rax}" (-> usize)
+ : [number] "{rax}" (number),
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2)
+ : "rcx", "r11", "memory"
+ );
+}
+
+pub fn syscall3(number: usize, arg1: usize, arg2: usize, arg3: usize) usize {
+ return asm volatile ("syscall"
+ : [ret] "={rax}" (-> usize)
+ : [number] "{rax}" (number),
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3)
+ : "rcx", "r11", "memory"
+ );
+}
+
+pub fn syscall4(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
+ return asm volatile ("syscall"
+ : [ret] "={rax}" (-> usize)
+ : [number] "{rax}" (number),
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3),
+ [arg4] "{r10}" (arg4)
+ : "rcx", "r11", "memory"
+ );
+}
+
+pub fn syscall5(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
+ return asm volatile ("syscall"
+ : [ret] "={rax}" (-> usize)
+ : [number] "{rax}" (number),
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3),
+ [arg4] "{r10}" (arg4),
+ [arg5] "{r8}" (arg5)
+ : "rcx", "r11", "memory"
+ );
+}
+
+pub fn syscall6(
+ number: usize,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+) usize {
+ return asm volatile ("syscall"
+ : [ret] "={rax}" (-> usize)
+ : [number] "{rax}" (number),
+ [arg1] "{rdi}" (arg1),
+ [arg2] "{rsi}" (arg2),
+ [arg3] "{rdx}" (arg3),
+ [arg4] "{r10}" (arg4),
+ [arg5] "{r8}" (arg5),
+ [arg6] "{r9}" (arg6)
+ : "rcx", "r11", "memory"
+ );
+}
+
+/// This matches the libc clone function.
+/// NOTE(review): `flags` is `usize` here but `u32` in the other
+/// architecture files in this change - confirm which is intended.
+pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
+
+/// Signal-return trampoline: performs the rt_sigreturn syscall.  Declared
+/// nakedcc so the compiler emits no prologue/epilogue around the asm.
+pub nakedcc fn restore_rt() void {
+ return asm volatile ("syscall"
+ :
+ : [number] "{rax}" (usize(SYS_rt_sigreturn))
+ : "rcx", "r11"
+ );
+}