aboutsummaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/tsan/interception/interception.h304
-rw-r--r--lib/tsan/interception/interception_linux.h53
-rw-r--r--lib/tsan/interception/interception_mac.h27
-rw-r--r--lib/tsan/interception/interception_win.h83
-rw-r--r--lib/tsan/sanitizer_common/sancov_flags.h39
-rw-r--r--lib/tsan/sanitizer_common/sancov_flags.inc20
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_addrhashmap.h353
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator.cpp267
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator.h81
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_bytemap.h107
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_checks.h76
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_combined.h201
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_interface.h47
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_internal.h55
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_local_cache.h264
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_primary32.h380
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_primary64.h863
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_report.h39
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_secondary.h326
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_size_class_map.h241
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_allocator_stats.h106
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_asm.h68
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_atomic.h86
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_atomic_clang.h105
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_atomic_clang_mips.h117
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_atomic_clang_other.h97
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_atomic_clang_x86.h113
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_atomic_msvc.h256
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_bitvector.h350
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_bvgraph.h164
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common.cpp348
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common.h1002
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common_interceptors.inc10175
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common_interceptors_format.inc562
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common_interceptors_ioctl.inc609
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc128
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common_interface.inc41
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common_interface_posix.inc13
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_common_syscalls.inc2914
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_coverage_interface.inc33
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_dbghelp.h41
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_deadlock_detector.h410
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_deadlock_detector1.cpp194
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_deadlock_detector2.cpp423
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_deadlock_detector_interface.h92
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_errno.cpp34
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_errno.h39
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_errno_codes.h33
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_file.cpp215
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_file.h106
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_flag_parser.cpp191
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_flag_parser.h204
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_flags.cpp129
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_flags.h67
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_flags.inc250
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_freebsd.h137
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_fuchsia.cpp531
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_fuchsia.h36
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_getauxval.h59
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_glibc_version.h26
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_hash.h43
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc1535
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_interface_internal.h118
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_internal_defs.h459
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_lfstack.h72
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_libc.cpp280
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_libc.h85
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_libignore.cpp129
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_libignore.h115
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_linux.cpp2269
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_linux.h162
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_linux_s390.cpp222
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_list.h166
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_local_address_space_view.h76
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_mac.cpp1228
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_mac.h84
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_malloc_mac.inc420
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_mutex.h223
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_netbsd.cpp343
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_openbsd.cpp115
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_persistent_allocator.cpp18
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_persistent_allocator.h71
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_placement_new.h24
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform.h364
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_interceptors.h609
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.cpp531
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.h656
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_linux.cpp108
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.cpp2586
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.h2422
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.cpp279
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.h382
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.cpp1275
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.h1453
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.cpp366
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.h495
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_posix.cpp398
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_posix.h125
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_printf.cpp358
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_procmaps.h100
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_procmaps_bsd.cpp139
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_procmaps_common.cpp174
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_procmaps_fuchsia.cpp80
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_procmaps_linux.cpp81
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_procmaps_mac.cpp379
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_procmaps_solaris.cpp67
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_ptrauth.h21
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_quarantine.h317
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_report_decorator.h48
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_ring_buffer.h161
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_rtems.cpp283
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_rtems.h20
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_signal_interceptors.inc86
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_solaris.cpp230
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_stackdepot.h71
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_stackdepotbase.h177
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_stacktrace.h176
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_stacktrace_printer.h71
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_stoptheworld.h64
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp42
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_stoptheworld_mac.cpp182
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_suppressions.cpp181
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_suppressions.h56
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_symbolizer.h223
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_symbolizer_fuchsia.h42
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_symbolizer_internal.h165
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_symbolizer_libbacktrace.h49
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_symbolizer_mac.h48
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_symbolizer_rtems.h40
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_syscall_generic.inc38
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_syscall_linux_aarch64.inc137
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_syscall_linux_arm.inc137
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_syscall_linux_x86_64.inc90
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_syscalls_netbsd.inc3837
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_termination.cpp94
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_thread_registry.cpp351
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_thread_registry.h160
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_tls_get_addr.cpp154
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_tls_get_addr.h62
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_type_traits.cpp20
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_type_traits.h62
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_vector.h124
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_win.cpp1129
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_win.h25
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_win_defs.h174
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_win_dll_thunk.h181
-rw-r--r--lib/tsan/sanitizer_common/sanitizer_win_weak_interception.h32
-rw-r--r--lib/tsan/tsan_clock.cpp655
-rw-r--r--lib/tsan/tsan_clock.h283
-rw-r--r--lib/tsan/tsan_debugging.cpp262
-rw-r--r--lib/tsan/tsan_defs.h195
-rw-r--r--lib/tsan/tsan_dense_alloc.h141
-rw-r--r--lib/tsan/tsan_external.cpp124
-rw-r--r--lib/tsan/tsan_fd.cpp316
-rw-r--r--lib/tsan/tsan_fd.h64
-rw-r--r--lib/tsan/tsan_flags.cpp125
-rw-r--r--lib/tsan/tsan_flags.h34
-rw-r--r--lib/tsan/tsan_flags.inc85
-rw-r--r--lib/tsan/tsan_ignoreset.cpp46
-rw-r--r--lib/tsan/tsan_ignoreset.h37
-rw-r--r--lib/tsan/tsan_interceptors.h76
-rw-r--r--lib/tsan/tsan_interceptors_posix.cpp2854
-rw-r--r--lib/tsan/tsan_interface.cpp160
-rw-r--r--lib/tsan/tsan_interface.h422
-rw-r--r--lib/tsan/tsan_interface_ann.cpp552
-rw-r--r--lib/tsan/tsan_interface_ann.h32
-rw-r--r--lib/tsan/tsan_interface_atomic.cpp955
-rw-r--r--lib/tsan/tsan_interface_inl.h132
-rw-r--r--lib/tsan/tsan_interface_java.cpp267
-rw-r--r--lib/tsan/tsan_interface_java.h99
-rw-r--r--lib/tsan/tsan_malloc_mac.cpp71
-rw-r--r--lib/tsan/tsan_md5.cpp250
-rw-r--r--lib/tsan/tsan_mman.cpp405
-rw-r--r--lib/tsan/tsan_mman.h89
-rw-r--r--lib/tsan/tsan_mutex.cpp289
-rw-r--r--lib/tsan/tsan_mutex.h90
-rw-r--r--lib/tsan/tsan_mutexset.cpp88
-rw-r--r--lib/tsan/tsan_mutexset.h69
-rw-r--r--lib/tsan/tsan_platform.h1028
-rw-r--r--lib/tsan/tsan_ppc_regs.h96
-rw-r--r--lib/tsan/tsan_preinit.cpp26
-rw-r--r--lib/tsan/tsan_report.cpp486
-rw-r--r--lib/tsan/tsan_report.h135
-rw-r--r--lib/tsan/tsan_rtl.cpp1127
-rw-r--r--lib/tsan/tsan_rtl.h893
-rw-r--r--lib/tsan/tsan_rtl_mutex.cpp562
-rw-r--r--lib/tsan/tsan_rtl_proc.cpp60
-rw-r--r--lib/tsan/tsan_rtl_report.cpp754
-rw-r--r--lib/tsan/tsan_rtl_thread.cpp462
-rw-r--r--lib/tsan/tsan_stack_trace.cpp63
-rw-r--r--lib/tsan/tsan_stack_trace.h42
-rw-r--r--lib/tsan/tsan_stat.cpp186
-rw-r--r--lib/tsan/tsan_stat.h191
-rw-r--r--lib/tsan/tsan_suppressions.cpp161
-rw-r--r--lib/tsan/tsan_suppressions.h37
-rw-r--r--lib/tsan/tsan_symbolize.cpp122
-rw-r--r--lib/tsan/tsan_symbolize.h30
-rw-r--r--lib/tsan/tsan_sync.cpp296
-rw-r--r--lib/tsan/tsan_sync.h145
-rw-r--r--lib/tsan/tsan_trace.h75
-rw-r--r--lib/tsan/tsan_update_shadow_word_inl.h69
-rw-r--r--lib/tsan/ubsan/ubsan_flags.h48
-rw-r--r--lib/tsan/ubsan/ubsan_flags.inc28
-rw-r--r--lib/tsan/ubsan/ubsan_init.h33
-rw-r--r--lib/tsan/ubsan/ubsan_platform.h25
205 files changed, 72021 insertions, 0 deletions
diff --git a/lib/tsan/interception/interception.h b/lib/tsan/interception/interception.h
new file mode 100644
index 0000000000..d27a8ccf92
--- /dev/null
+++ b/lib/tsan/interception/interception.h
@@ -0,0 +1,304 @@
+//===-- interception.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Machinery for providing replacements/wrappers for system functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef INTERCEPTION_H
+#define INTERCEPTION_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
+ !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_WINDOWS && \
+ !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS
+# error "Interception doesn't work on this operating system."
+#endif
+
+// These typedefs should be used only in the interceptor definitions to replace
+// the standard system types (e.g. SSIZE_T instead of ssize_t)
+typedef __sanitizer::uptr SIZE_T;
+typedef __sanitizer::sptr SSIZE_T;
+typedef __sanitizer::sptr PTRDIFF_T;
+typedef __sanitizer::s64 INTMAX_T;
+typedef __sanitizer::u64 UINTMAX_T;
+typedef __sanitizer::OFF_T OFF_T;
+typedef __sanitizer::OFF64_T OFF64_T;
+
+// How to add an interceptor:
+// Suppose you need to wrap/replace system function (generally, from libc):
+// int foo(const char *bar, double baz);
+// You'll need to:
+// 1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in
+// your source file. See the notes below for cases when
+// INTERCEPTOR_WITH_SUFFIX(...) should be used instead.
+// 2) Call "INTERCEPT_FUNCTION(foo)" prior to the first call of "foo".
+// INTERCEPT_FUNCTION(foo) evaluates to "true" iff the function was
+// intercepted successfully.
+// You can access original function by calling REAL(foo)(bar, baz).
+// By default, REAL(foo) will be visible only inside your interceptor, and if
+// you want to use it in other parts of RTL, you'll need to:
+// 3a) add DECLARE_REAL(int, foo, const char*, double) to a
+// header file.
+// However, if the call "INTERCEPT_FUNCTION(foo)" and definition for
+// INTERCEPTOR(..., foo, ...) are in different files, you'll instead need to:
+// 3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double)
+// to a header file.
+
+// Notes: 1. Things may not work properly if macro INTERCEPTOR(...) {...} or
+// DECLARE_REAL(...) are located inside namespaces.
+// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo)" to
+// effectively redirect calls from "foo" to "zoo". In this case
+// you aren't required to implement
+// INTERCEPTOR(int, foo, const char *bar, double baz) {...}
+// but instead you'll have to add
+// DECLARE_REAL(int, foo, const char *bar, double baz) in your
+// source file (to define a pointer to overriden function).
+// 3. Some Mac functions have symbol variants discriminated by
+// additional suffixes, e.g. _$UNIX2003 (see
+// https://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/index.html
+// for more details). To intercept such functions you need to use the
+// INTERCEPTOR_WITH_SUFFIX(...) macro.
+
+// How it works:
+// To replace system functions on Linux we just need to declare functions
+// with same names in our library and then obtain the real function pointers
+// using dlsym().
+// There is one complication. A user may also intercept some of the functions
+// we intercept. To resolve this we declare our interceptors with __interceptor_
+// prefix, and then make actual interceptors weak aliases to __interceptor_
+// functions.
+//
+// This is not so on Mac OS, where the two-level namespace makes
+// our replacement functions invisible to other libraries. This may be overcomed
+// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
+// libraries in Chromium were noticed when doing so.
+// Instead we create a dylib containing a __DATA,__interpose section that
+// associates library functions with their wrappers. When this dylib is
+// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
+// the calls to interposed functions done through stubs to the wrapper
+// functions.
+// As it's decided at compile time which functions are to be intercepted on Mac,
+// INTERCEPT_FUNCTION() is effectively a no-op on this system.
+
+#if SANITIZER_MAC
+#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
+
+// Just a pair of pointers.
+struct interpose_substitution {
+ const __sanitizer::uptr replacement;
+ const __sanitizer::uptr original;
+};
+
+// For a function foo() create a global pair of pointers { wrap_foo, foo } in
+// the __DATA,__interpose section.
+// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
+#define INTERPOSER(func_name) __attribute__((used)) \
+const interpose_substitution substitution_##func_name[] \
+ __attribute__((section("__DATA, __interpose"))) = { \
+ { reinterpret_cast<const uptr>(WRAP(func_name)), \
+ reinterpret_cast<const uptr>(func_name) } \
+}
+
+// For a function foo() and a wrapper function bar() create a global pair
+// of pointers { bar, foo } in the __DATA,__interpose section.
+// As a result all the calls to foo() will be routed to bar() at runtime.
+#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
+const interpose_substitution substitution_##func_name[] \
+ __attribute__((section("__DATA, __interpose"))) = { \
+ { reinterpret_cast<const uptr>(wrapper_name), \
+ reinterpret_cast<const uptr>(func_name) } \
+}
+
+# define WRAP(x) wrap_##x
+# define WRAPPER_NAME(x) "wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE
+# define DECLARE_WRAPPER(ret_type, func, ...)
+
+#elif SANITIZER_WINDOWS
+# define WRAP(x) __asan_wrap_##x
+# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__);
+# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
+ extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
+#elif SANITIZER_RTEMS
+# define WRAP(x) x
+# define WRAPPER_NAME(x) #x
+# define INTERCEPTOR_ATTRIBUTE
+# define DECLARE_WRAPPER(ret_type, func, ...)
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
+// priority than weak ones so weak aliases won't work for indirect calls
+// in position-independent (-fPIC / -fPIE) mode.
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((alias("__interceptor_" #func), visibility("default")));
+#elif !SANITIZER_FUCHSIA
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((weak, alias("__interceptor_" #func), visibility("default")));
+#endif
+
+#if SANITIZER_FUCHSIA
+// There is no general interception at all on Fuchsia.
+// Sanitizer runtimes just define functions directly to preempt them,
+// and have bespoke ways to access the underlying libc functions.
+# include <zircon/sanitizer.h>
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# define REAL(x) __unsanitized_##x
+# define DECLARE_REAL(ret_type, func, ...)
+#elif SANITIZER_RTEMS
+# define REAL(x) __real_ ## x
+# define DECLARE_REAL(ret_type, func, ...) \
+ extern "C" ret_type REAL(func)(__VA_ARGS__);
+#elif !SANITIZER_MAC
+# define PTR_TO_REAL(x) real_##x
+# define REAL(x) __interception::PTR_TO_REAL(x)
+# define FUNC_TYPE(x) x##_type
+
+# define DECLARE_REAL(ret_type, func, ...) \
+ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ extern FUNC_TYPE(func) PTR_TO_REAL(func); \
+ }
+# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
+#else // SANITIZER_MAC
+# define REAL(x) x
+# define DECLARE_REAL(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__);
+# define ASSIGN_REAL(x, y)
+#endif // SANITIZER_MAC
+
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
+ DECLARE_REAL(ret_type, func, __VA_ARGS__) \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__);
+// Declare an interceptor and its wrapper defined in a different translation
+// unit (ex. asm).
+# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ extern "C" ret_type func(__VA_ARGS__);
+#else
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
+# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
+#endif
+
+// Generally, you don't need to use DEFINE_REAL by itself, as INTERCEPTOR
+// macros does its job. In exceptional cases you may need to call REAL(foo)
+// without defining INTERCEPTOR(..., foo, ...). For example, if you override
+// foo with an interceptor for other function.
+#if !SANITIZER_MAC && !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+# define DEFINE_REAL(ret_type, func, ...) \
+ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ }
+#else
+# define DEFINE_REAL(ret_type, func, ...)
+#endif
+
+#if SANITIZER_FUCHSIA
+
+// We need to define the __interceptor_func name just to get
+// sanitizer_common/scripts/gen_dynamic_list.py to export func.
+// But we don't need to export __interceptor_func to get that.
+#define INTERCEPTOR(ret_type, func, ...) \
+ extern "C"[[ gnu::alias(#func), gnu::visibility("hidden") ]] ret_type \
+ __interceptor_##func(__VA_ARGS__); \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
+
+#elif !SANITIZER_MAC
+
+#define INTERCEPTOR(ret_type, func, ...) \
+ DEFINE_REAL(ret_type, func, __VA_ARGS__) \
+ DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
+ extern "C" \
+ INTERCEPTOR_ATTRIBUTE \
+ ret_type WRAP(func)(__VA_ARGS__)
+
+// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
+#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
+ INTERCEPTOR(ret_type, func, __VA_ARGS__)
+
+#else // SANITIZER_MAC
+
+#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) suffix; \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ INTERPOSER(func); \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
+
+#define INTERCEPTOR(ret_type, func, ...) \
+ INTERCEPTOR_ZZZ(/*no symbol variants*/, ret_type, func, __VA_ARGS__)
+
+#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
+ INTERCEPTOR_ZZZ(__DARWIN_ALIAS_C(func), ret_type, func, __VA_ARGS__)
+
+// Override |overridee| with |overrider|.
+#define OVERRIDE_FUNCTION(overridee, overrider) \
+ INTERPOSER_2(overridee, WRAP(overrider))
+#endif
+
+#if SANITIZER_WINDOWS
+# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
+ typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ } \
+ extern "C" \
+ INTERCEPTOR_ATTRIBUTE \
+ ret_type __stdcall WRAP(func)(__VA_ARGS__)
+#endif
+
+// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
+// so we use casting via an integral type __interception::uptr,
+// assuming that system is POSIX-compliant. Using other hacks seem
+// challenging, as we don't even pass function type to
+// INTERCEPT_FUNCTION macro, only its name.
+namespace __interception {
+#if defined(_WIN64)
+typedef unsigned long long uptr;
+#else
+typedef unsigned long uptr;
+#endif // _WIN64
+} // namespace __interception
+
+#define INCLUDED_FROM_INTERCEPTION_LIB
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+# include "interception_linux.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) \
+ INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
+#elif SANITIZER_MAC
+# include "interception_mac.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) \
+ INTERCEPT_FUNCTION_VER_MAC(func, symver)
+#elif SANITIZER_WINDOWS
+# include "interception_win.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) \
+ INTERCEPT_FUNCTION_VER_WIN(func, symver)
+#endif
+
+#undef INCLUDED_FROM_INTERCEPTION_LIB
+
+#endif // INTERCEPTION_H
diff --git a/lib/tsan/interception/interception_linux.h b/lib/tsan/interception/interception_linux.h
new file mode 100644
index 0000000000..e578da0cf6
--- /dev/null
+++ b/lib/tsan/interception/interception_linux.h
@@ -0,0 +1,53 @@
+//===-- interception_linux.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_linux.h should be included from interception library only"
+#endif
+
+#ifndef INTERCEPTION_LINUX_H
+#define INTERCEPTION_LINUX_H
+
+namespace __interception {
+bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
+ uptr wrapper);
+bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
+ uptr func, uptr wrapper);
+} // namespace __interception
+
+#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
+ ::__interception::InterceptFunction( \
+ #func, \
+ (::__interception::uptr *) & REAL(func), \
+ (::__interception::uptr) & (func), \
+ (::__interception::uptr) & WRAP(func))
+
+// Android, Solaris and OpenBSD do not have dlvsym
+#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
+#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ ::__interception::InterceptFunction( \
+ #func, symver, \
+ (::__interception::uptr *) & REAL(func), \
+ (::__interception::uptr) & (func), \
+ (::__interception::uptr) & WRAP(func))
+#else
+#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
+#endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS
+
+#endif // INTERCEPTION_LINUX_H
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+ // SANITIZER_OPENBSD || SANITIZER_SOLARIS
diff --git a/lib/tsan/interception/interception_mac.h b/lib/tsan/interception/interception_mac.h
new file mode 100644
index 0000000000..eddedb8959
--- /dev/null
+++ b/lib/tsan/interception/interception_mac.h
@@ -0,0 +1,27 @@
+//===-- interception_mac.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Mac-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_MAC
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_mac.h should be included from interception.h only"
+#endif
+
+#ifndef INTERCEPTION_MAC_H
+#define INTERCEPTION_MAC_H
+
+#define INTERCEPT_FUNCTION_MAC(func)
+#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
+
+#endif // INTERCEPTION_MAC_H
+#endif // SANITIZER_MAC
diff --git a/lib/tsan/interception/interception_win.h b/lib/tsan/interception/interception_win.h
new file mode 100644
index 0000000000..4590013019
--- /dev/null
+++ b/lib/tsan/interception/interception_win.h
@@ -0,0 +1,83 @@
+//===-- interception_linux.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_WINDOWS
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_win.h should be included from interception library only"
+#endif
+
+#ifndef INTERCEPTION_WIN_H
+#define INTERCEPTION_WIN_H
+
+namespace __interception {
+// All the functions in the OverrideFunction() family return true on success,
+// false on failure (including "couldn't find the function").
+
+// Overrides a function by its address.
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);
+
+// Overrides a function in a system DLL or DLL CRT by its exported name.
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
+
+// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
+uptr InternalGetProcAddress(void *module, const char *func_name);
+
+// Overrides a function only when it is called from a specific DLL. For example,
+// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without
+// affecting other third party libraries.
+bool OverrideImportedFunction(const char *module_to_patch,
+ const char *imported_module,
+ const char *function_name, uptr new_function,
+ uptr *orig_old_func);
+
+#if !SANITIZER_WINDOWS64
+// Exposed for unittests
+bool OverrideFunctionWithDetour(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+#endif
+
+// Exposed for unittests
+bool OverrideFunctionWithRedirectJump(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithHotPatch(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithTrampoline(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+
+// Exposed for unittests
+void TestOnlyReleaseTrampolineRegions();
+
+} // namespace __interception
+
+#if defined(INTERCEPTION_DYNAMIC_CRT)
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction(#func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
+#else
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction((::__interception::uptr)func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
+#endif
+
+#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
+
+#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func) \
+ ::__interception::OverrideImportedFunction( \
+ user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
+
+#endif // INTERCEPTION_WIN_H
+#endif // SANITIZER_WINDOWS
diff --git a/lib/tsan/sanitizer_common/sancov_flags.h b/lib/tsan/sanitizer_common/sancov_flags.h
new file mode 100644
index 0000000000..95d4ee5ca4
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sancov_flags.h
@@ -0,0 +1,39 @@
+//===-- sancov_flags.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANCOV_FLAGS_H
+#define SANCOV_FLAGS_H
+
+#include "sanitizer_flag_parser.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sancov {
+
+struct SancovFlags {
+#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sancov_flags.inc"
+#undef SANCOV_FLAG
+
+ void SetDefaults();
+};
+
+extern SancovFlags sancov_flags_dont_use_directly;
+
+inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }
+
+void InitializeSancovFlags();
+
+} // namespace __sancov
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
+__sancov_default_options();
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sancov_flags.inc b/lib/tsan/sanitizer_common/sancov_flags.inc
new file mode 100644
index 0000000000..cca33fc359
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sancov_flags.inc
@@ -0,0 +1,20 @@
+//===-- sancov_flags.inc ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANCOV_FLAG
+#error "Defnine SANCOV_FLAG prior to including this file!"
+#endif
+
+SANCOV_FLAG(bool, symbolize, true,
+ "If set, converage information will be symbolized by sancov tool "
+ "after dumping.")
+
+SANCOV_FLAG(bool, help, false, "Print flags help.")
diff --git a/lib/tsan/sanitizer_common/sanitizer_addrhashmap.h b/lib/tsan/sanitizer_common/sanitizer_addrhashmap.h
new file mode 100644
index 0000000000..a033e788cb
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_addrhashmap.h
@@ -0,0 +1,353 @@
+//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Concurrent uptr->T hashmap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ADDRHASHMAP_H
+#define SANITIZER_ADDRHASHMAP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+// Concurrent uptr->T hashmap.
+// T must be a POD type, kSize is preferably a prime but can be any number.
+// Usage example:
+//
+// typedef AddrHashMap<uptr, 11> Map;
+// Map m;
+// {
+// Map::Handle h(&m, addr);
+// use h.operator->() to access the data
+// if h.created() then the element was just created, and the current thread
+// has exclusive access to it
+// otherwise the current thread has only read access to the data
+// }
+// {
+// Map::Handle h(&m, addr, true);
+// this will remove the data from the map in Handle dtor
+// the current thread has exclusive access to the data
+// if !h.exists() then the element never existed
+// }
+template<typename T, uptr kSize>
+class AddrHashMap {
+ private:
+ struct Cell {
+ atomic_uintptr_t addr;
+ T val;
+ };
+
+ struct AddBucket {
+ uptr cap;
+ uptr size;
+ Cell cells[1]; // variable len
+ };
+
+ static const uptr kBucketSize = 3;
+
+ struct Bucket {
+ RWMutex mtx;
+ atomic_uintptr_t add;
+ Cell cells[kBucketSize];
+ };
+
+ public:
+ AddrHashMap();
+
+ class Handle {
+ public:
+ Handle(AddrHashMap<T, kSize> *map, uptr addr);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
+
+ ~Handle();
+ T *operator->();
+ T &operator*();
+ const T &operator*() const;
+ bool created() const;
+ bool exists() const;
+
+ private:
+ friend AddrHashMap<T, kSize>;
+ AddrHashMap<T, kSize> *map_;
+ Bucket *bucket_;
+ Cell *cell_;
+ uptr addr_;
+ uptr addidx_;
+ bool created_;
+ bool remove_;
+ bool create_;
+ };
+
+ private:
+ friend class Handle;
+ Bucket *table_;
+
+ void acquire(Handle *h);
+ void release(Handle *h);
+ uptr calcHash(uptr addr);
+};
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = false;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove, bool create) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = create;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::~Handle() {
+ map_->release(this);
+}
+
+template <typename T, uptr kSize>
+T *AddrHashMap<T, kSize>::Handle::operator->() {
+ return &cell_->val;
+}
+
+template <typename T, uptr kSize>
+const T &AddrHashMap<T, kSize>::Handle::operator*() const {
+ return cell_->val;
+}
+
+template <typename T, uptr kSize>
+T &AddrHashMap<T, kSize>::Handle::operator*() {
+ return cell_->val;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::created() const {
+ return created_;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::exists() const {
+ return cell_ != nullptr;
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::AddrHashMap() {
+ table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) {
+ uptr addr = h->addr_;
+ uptr hash = calcHash(addr);
+ Bucket *b = &table_[hash];
+
+ h->created_ = false;
+ h->addidx_ = -1U;
+ h->bucket_ = b;
+ h->cell_ = nullptr;
+
+ // If we want to remove the element, we need exclusive access to the bucket,
+ // so skip the lock-free phase.
+ if (h->remove_)
+ goto locked;
+
+ retry:
+ // First try to find an existing element w/o read mutex.
+ CHECK(!h->remove_);
+ // Check the embed cells.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 == addr) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Check the add cells with read lock.
+ if (atomic_load(&b->add, memory_order_relaxed)) {
+ b->mtx.ReadLock();
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ }
+ b->mtx.ReadUnlock();
+ }
+
+ locked:
+ // Re-check existence under write lock.
+ // Embed cells.
+ b->mtx.Lock();
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+
+ // Add cells.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (add) {
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+ }
+
+ // The element does not exist, no need to create it if we want to remove.
+ if (h->remove_ || !h->create_) {
+ b->mtx.Unlock();
+ return;
+ }
+
+ // Now try to create it under the mutex.
+ h->created_ = true;
+ // See if we have a free embed cell.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == 0) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Store in the add cells.
+ if (!add) {
+ // Allocate a new add array.
+ const uptr kInitSize = 64;
+ add = (AddBucket*)InternalAlloc(kInitSize);
+ internal_memset(add, 0, kInitSize);
+ add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add->size = 0;
+ atomic_store(&b->add, (uptr)add, memory_order_relaxed);
+ }
+ if (add->size == add->cap) {
+ // Grow existing add array.
+ uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
+ uptr newsize = oldsize * 2;
+ AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
+ internal_memset(add1, 0, newsize);
+ add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add1->size = add->size;
+ internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
+ InternalFree(add);
+ atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
+ add = add1;
+ }
+ // Store.
+ uptr i = add->size++;
+ Cell *c = &add->cells[i];
+ CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
+ h->addidx_ = i;
+ h->cell_ = c;
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::release(Handle *h) {
+ if (!h->cell_)
+ return;
+ Bucket *b = h->bucket_;
+ Cell *c = h->cell_;
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (h->created_) {
+ // Denote completion of insertion.
+ CHECK_EQ(addr1, 0);
+ // After the following store, the element becomes available
+ // for lock-free reads.
+ atomic_store(&c->addr, h->addr_, memory_order_release);
+ b->mtx.Unlock();
+ } else if (h->remove_) {
+ // Denote that the cell is empty now.
+ CHECK_EQ(addr1, h->addr_);
+ atomic_store(&c->addr, 0, memory_order_release);
+ // See if we need to compact the bucket.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (h->addidx_ == -1U) {
+ // Removed from embed array, move an add element into the freed cell.
+ if (add && add->size != 0) {
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ c->val = c1->val;
+ uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+ atomic_store(&c->addr, addr1, memory_order_release);
+ atomic_store(&c1->addr, 0, memory_order_release);
+ }
+ } else {
+ // Removed from add array, compact it.
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ if (c != c1) {
+ *c = *c1;
+ atomic_store(&c1->addr, 0, memory_order_relaxed);
+ }
+ }
+ if (add && add->size == 0) {
+ // FIXME(dvyukov): free add?
+ }
+ b->mtx.Unlock();
+ } else {
+ CHECK_EQ(addr1, h->addr_);
+ if (h->addidx_ != -1U)
+ b->mtx.ReadUnlock();
+ }
+}
+
+template<typename T, uptr kSize>
+uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
+ addr += addr << 10;
+ addr ^= addr >> 6;
+ return addr % kSize;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ADDRHASHMAP_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator.cpp b/lib/tsan/sanitizer_common/sanitizer_allocator.cpp
new file mode 100644
index 0000000000..ec77b9cbfe
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator.cpp
@@ -0,0 +1,267 @@
+//===-- sanitizer_allocator.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// This allocator is used inside run-times.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator.h"
+
+#include "sanitizer_allocator_checks.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Default allocator names.
+const char *PrimaryAllocatorName = "SizeClassAllocator";
+const char *SecondaryAllocatorName = "LargeMmapAllocator";
+
+// ThreadSanitizer for Go uses libc malloc/free.
+#if defined(SANITIZER_USE_MALLOC)
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern "C" void *__libc_malloc(uptr size);
+# if !SANITIZER_GO
+extern "C" void *__libc_memalign(uptr alignment, uptr size);
+# endif
+extern "C" void *__libc_realloc(void *ptr, uptr size);
+extern "C" void __libc_free(void *ptr);
+# else
+# include <stdlib.h>
+# define __libc_malloc malloc
+# if !SANITIZER_GO
+static void *__libc_memalign(uptr alignment, uptr size) {
+ void *p;
+ uptr error = posix_memalign(&p, alignment, size);
+ if (error) return nullptr;
+ return p;
+}
+# endif
+# define __libc_realloc realloc
+# define __libc_free free
+# endif
+
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ (void)cache;
+#if !SANITIZER_GO
+ if (alignment == 0)
+ return __libc_malloc(size);
+ else
+ return __libc_memalign(alignment, size);
+#else
+ // Windows does not provide __libc_memalign/posix_memalign. It provides
+ // __aligned_malloc, but the allocated blocks can't be passed to free,
+ // they need to be passed to __aligned_free. InternalAlloc interface does
+ // not account for such requirement. Alignemnt does not seem to be used
+ // anywhere in runtime, so just call __libc_malloc for now.
+ DCHECK_EQ(alignment, 0);
+ return __libc_malloc(size);
+#endif
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
+ (void)cache;
+ return __libc_realloc(ptr, size);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+ (void)cache;
+ __libc_free(ptr);
+}
+
+InternalAllocator *internal_allocator() {
+ return 0;
+}
+
+#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+
+static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
+static atomic_uint8_t internal_allocator_initialized;
+static StaticSpinMutex internal_alloc_init_mu;
+
+static InternalAllocatorCache internal_allocator_cache;
+static StaticSpinMutex internal_allocator_cache_mu;
+
+InternalAllocator *internal_allocator() {
+ InternalAllocator *internal_allocator_instance =
+ reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
+ if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
+ SpinMutexLock l(&internal_alloc_init_mu);
+ if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
+ 0) {
+ internal_allocator_instance->Init(kReleaseToOSIntervalNever);
+ atomic_store(&internal_allocator_initialized, 1, memory_order_release);
+ }
+ }
+ return internal_allocator_instance;
+}
+
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ if (alignment == 0) alignment = 8;
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Allocate(&internal_allocator_cache, size,
+ alignment);
+ }
+ return internal_allocator()->Allocate(cache, size, alignment);
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
+ uptr alignment = 8;
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
+ size, alignment);
+ }
+ return internal_allocator()->Reallocate(cache, ptr, size, alignment);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+ if (!cache) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
+ }
+ internal_allocator()->Deallocate(cache, ptr);
+}
+
+#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+
+const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+
+static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
+ SetAllocatorOutOfMemory();
+ Report("FATAL: %s: internal allocator is out of memory trying to allocate "
+ "0x%zx bytes\n", SanitizerToolName, requested_size);
+ Die();
+}
+
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
+ if (size + sizeof(u64) < size)
+ return nullptr;
+ void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
+ if (UNLIKELY(!p))
+ ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
+ ((u64*)p)[0] = kBlockMagic;
+ return (char*)p + sizeof(u64);
+}
+
+void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
+ if (!addr)
+ return InternalAlloc(size, cache);
+ if (size + sizeof(u64) < size)
+ return nullptr;
+ addr = (char*)addr - sizeof(u64);
+ size = size + sizeof(u64);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
+ void *p = RawInternalRealloc(addr, size, cache);
+ if (UNLIKELY(!p))
+ ReportInternalAllocatorOutOfMemory(size);
+ return (char*)p + sizeof(u64);
+}
+
+void *InternalReallocArray(void *addr, uptr count, uptr size,
+ InternalAllocatorCache *cache) {
+ if (UNLIKELY(CheckForCallocOverflow(count, size))) {
+ Report(
+ "FATAL: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
+ "cannot be represented in type size_t\n",
+ SanitizerToolName, count, size);
+ Die();
+ }
+ return InternalRealloc(addr, count * size, cache);
+}
+
+void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
+ if (UNLIKELY(CheckForCallocOverflow(count, size))) {
+ Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
+ "cannot be represented in type size_t\n", SanitizerToolName, count,
+ size);
+ Die();
+ }
+ void *p = InternalAlloc(count * size, cache);
+ if (LIKELY(p))
+ internal_memset(p, 0, count * size);
+ return p;
+}
+
+void InternalFree(void *addr, InternalAllocatorCache *cache) {
+ if (!addr)
+ return;
+ addr = (char*)addr - sizeof(u64);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
+ ((u64*)addr)[0] = 0;
+ RawInternalFree(addr, cache);
+}
+
+// LowLevelAllocator
+constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
+static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
+static LowLevelAllocateCallback low_level_alloc_callback;
+
+void *LowLevelAllocator::Allocate(uptr size) {
+ // Align allocation size.
+ size = RoundUpTo(size, low_level_alloc_min_alignment);
+ if (allocated_end_ - allocated_current_ < (sptr)size) {
+ uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
+ allocated_current_ =
+ (char*)MmapOrDie(size_to_allocate, __func__);
+ allocated_end_ = allocated_current_ + size_to_allocate;
+ if (low_level_alloc_callback) {
+ low_level_alloc_callback((uptr)allocated_current_,
+ size_to_allocate);
+ }
+ }
+ CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
+ void *res = allocated_current_;
+ allocated_current_ += size;
+ return res;
+}
+
+void SetLowLevelAllocateMinAlignment(uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
+}
+
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
+ low_level_alloc_callback = callback;
+}
+
+// Allocator's OOM and other errors handling support.
+
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
+
+bool IsAllocatorOutOfMemory() {
+ return atomic_load_relaxed(&allocator_out_of_memory);
+}
+
+void SetAllocatorOutOfMemory() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+}
+
+bool AllocatorMayReturnNull() {
+ return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+ atomic_store(&allocator_may_return_null, may_return_null,
+ memory_order_relaxed);
+}
+
+void PrintHintAllocatorCannotReturnNull() {
+ Report("HINT: if you don't care about these errors you may set "
+ "allocator_may_return_null=1\n");
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator.h b/lib/tsan/sanitizer_common/sanitizer_allocator.h
new file mode 100644
index 0000000000..23d589888d
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator.h
@@ -0,0 +1,81 @@
+//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_H
+#define SANITIZER_ALLOCATOR_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_lfstack.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_local_address_space_view.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+// Allows the tools to name their allocations appropriately.
+extern const char *PrimaryAllocatorName;
+extern const char *SecondaryAllocatorName;
+
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), allocator_may_return_null
+// flag value is cached here and can be altered later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
+
+// Returns true if allocator detected OOM condition. Can be used to avoid memory
+// hungry operations.
+bool IsAllocatorOutOfMemory();
+// Should be called by a particular allocator when OOM is detected.
+void SetAllocatorOutOfMemory();
+
+void PrintHintAllocatorCannotReturnNull();
+
+// Allocators call these callbacks on mmap/munmap.
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const { }
+};
+
+// Callback type for iterating over chunks.
+typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
+
+INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+ return (*state = *state * 1103515245 + 12345) >> 16;
+}
+
+INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+
+template<typename T>
+INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
+ if (n <= 1) return;
+ u32 state = *rand_state;
+ for (u32 i = n - 1; i > 0; i--)
+ Swap(a[i], a[RandN(&state, i + 1)]);
+ *rand_state = state;
+}
+
+#include "sanitizer_allocator_size_class_map.h"
+#include "sanitizer_allocator_stats.h"
+#include "sanitizer_allocator_primary64.h"
+#include "sanitizer_allocator_bytemap.h"
+#include "sanitizer_allocator_primary32.h"
+#include "sanitizer_allocator_local_cache.h"
+#include "sanitizer_allocator_secondary.h"
+#include "sanitizer_allocator_combined.h"
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_bytemap.h b/lib/tsan/sanitizer_common/sanitizer_allocator_bytemap.h
new file mode 100644
index 0000000000..0084bb62c8
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -0,0 +1,107 @@
+//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Maps integers in rage [0, kSize) to u8 values.
+template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
+class FlatByteMap {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
+
+// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
+// It is implemented as a two-dimensional array: array of kSize1 pointers
+// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <u64 kSize1, u64 kSize2,
+ typename AddressSpaceViewTy = LocalAddressSpaceView,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class TwoLevelByteMap {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() {
+ internal_memset(map1_, 0, sizeof(map1_));
+ mu_.Init();
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ u8 *p = Get(i);
+ if (!p) continue;
+ MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
+ UnmapOrDie(p, kSize2);
+ }
+ }
+
+ uptr size() const { return kSize1 * kSize2; }
+ uptr size1() const { return kSize1; }
+ uptr size2() const { return kSize2; }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = GetOrCreate(idx / kSize2);
+ CHECK_EQ(0U, map2[idx % kSize2]);
+ map2[idx % kSize2] = val;
+ }
+
+ u8 operator[] (uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = Get(idx / kSize2);
+ if (!map2) return 0;
+ auto value_ptr = AddressSpaceView::Load(&map2[idx % kSize2]);
+ return *value_ptr;
+ }
+
+ private:
+ u8 *Get(uptr idx) const {
+ CHECK_LT(idx, kSize1);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ u8 *GetOrCreate(uptr idx) {
+ u8 *res = Get(idx);
+ if (!res) {
+ SpinMutexLock l(&mu_);
+ if (!(res = Get(idx))) {
+ res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
+ MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ }
+ return res;
+ }
+
+ atomic_uintptr_t map1_[kSize1];
+ StaticSpinMutex mu_;
+};
+
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_checks.h b/lib/tsan/sanitizer_common/sanitizer_allocator_checks.h
new file mode 100644
index 0000000000..fc426f0e74
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_checks.h
@@ -0,0 +1,76 @@
+//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
+// allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_CHECKS_H
+#define SANITIZER_ALLOCATOR_CHECKS_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+// The following is defined in a separate compilation unit to avoid pulling in
+// sanitizer_errno.h in this header, which leads to conflicts when other system
+// headers include errno.h. This is usually the result of an unlikely event,
+// and as such we do not care as much about having it inlined.
+void SetErrnoToENOMEM();
+
+// A common errno setting logic shared by almost all sanitizer allocator APIs.
+INLINE void *SetErrnoOnNull(void *ptr) {
+ if (UNLIKELY(!ptr))
+ SetErrnoToENOMEM();
+ return ptr;
+}
+
+// In case of the check failure, the caller of the following Check... functions
+// should "return POLICY::OnBadRequest();" where POLICY is the current allocator
+// failure handling policy.
+
+// Checks aligned_alloc() parameters, verifies that the alignment is a power of
+// two and that the size is a multiple of alignment for POSIX implementation,
+// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
+// of alignment.
+INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
+#if SANITIZER_POSIX
+ return alignment != 0 && IsPowerOfTwo(alignment) &&
+ (size & (alignment - 1)) == 0;
+#else
+ return alignment != 0 && size % alignment == 0;
+#endif
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
+ return alignment != 0 && IsPowerOfTwo(alignment) &&
+ (alignment % sizeof(void *)) == 0;
+}
+
+// Returns true if calloc(size, n) call overflows on size*n calculation.
+INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
+ if (!size)
+ return false;
+ uptr max = (uptr)-1L;
+ return (max / size) < n;
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of page_size.
+INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
+ return RoundUpTo(size, page_size) < size;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_CHECKS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_combined.h b/lib/tsan/sanitizer_common/sanitizer_allocator_combined.h
new file mode 100644
index 0000000000..33f89d6d49
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_combined.h
@@ -0,0 +1,201 @@
+//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return 2^x aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator,
+ class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
+class CombinedAllocator {
+ public:
+ using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
+ using SecondaryAllocator =
+ LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
+ LargeMmapAllocatorPtrArray,
+ typename PrimaryAllocator::AddressSpaceView>;
+
+ void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+ stats_.InitLinkerInitialized();
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.InitLinkerInitialized();
+ }
+
+ void Init(s32 release_to_os_interval_ms) {
+ stats_.Init();
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.Init();
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size) {
+ Report("WARNING: %s: CombinedAllocator allocation overflow: "
+ "0x%zx bytes with 0x%zx alignment requested\n",
+ SanitizerToolName, size, alignment);
+ return nullptr;
+ }
+ uptr original_size = size;
+ // If alignment requirements are to be fulfilled by the frontend allocator
+ // rather than by the primary or secondary, passing an alignment lower than
+ // or equal to 8 will prevent any further rounding up, as well as the later
+ // alignment check.
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ // The primary allocator should return a 2^x aligned allocation when
+ // requested 2^x bytes, hence using the rounded up 'size' when being
+ // serviced by the primary (this is no longer true when the primary is
+ // using a non-fixed base address). The secondary takes care of the
+ // alignment without such requirement, and allocating 'size' would use
+ // extraneous memory, so we employ 'original_size'.
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ return res;
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return primary_.ReleaseToOSIntervalMs();
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ }
+
+ void ForceReleaseToOS() {
+ primary_.ForceReleaseToOS();
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(&stats_, p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return nullptr;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ primary_.ForEachChunk(callback, arg);
+ secondary_.ForEachChunk(callback, arg);
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
+};
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_interface.h b/lib/tsan/sanitizer_common/sanitizer_allocator_interface.h
new file mode 100644
index 0000000000..c1b27563e2
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_interface.h
@@ -0,0 +1,47 @@
+//===-- sanitizer_allocator_interface.h ------------------------- C++ -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Re-declaration of functions from public sanitizer allocator interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include "sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();
+
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
+ void (*malloc_hook)(const void *, uptr),
+ void (*free_hook)(const void *));
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_malloc_hook(void *ptr, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_free_hook(void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_purge_allocator();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
+} // extern "C"
+
+#endif // SANITIZER_ALLOCATOR_INTERFACE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_internal.h b/lib/tsan/sanitizer_common/sanitizer_allocator_internal.h
new file mode 100644
index 0000000000..32849036fd
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_internal.h
@@ -0,0 +1,55 @@
+//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This allocator is used inside run-times.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_INTERNAL_H
+#define SANITIZER_ALLOCATOR_INTERNAL_H
+
+#include "sanitizer_allocator.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// FIXME: Check if we may use even more compact size class map for internal
+// purposes.
+typedef CompactSizeClassMap InternalSizeClassMap;
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef InternalSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
+
+typedef CombinedAllocator<PrimaryInternalAllocator,
+ LargeMmapAllocatorPtrArrayStatic>
+ InternalAllocator;
+typedef InternalAllocator::AllocatorCache InternalAllocatorCache;
+
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
+ uptr alignment = 0);
+void *InternalRealloc(void *p, uptr size,
+ InternalAllocatorCache *cache = nullptr);
+void *InternalReallocArray(void *p, uptr count, uptr size,
+ InternalAllocatorCache *cache = nullptr);
+void *InternalCalloc(uptr count, uptr size,
+ InternalAllocatorCache *cache = nullptr);
+void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
+InternalAllocator *internal_allocator();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_INTERNAL_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/tsan/sanitizer_common/sanitizer_allocator_local_cache.h
new file mode 100644
index 0000000000..108dfc231a
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -0,0 +1,264 @@
+//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Cache used by SizeClassAllocator64.
+template <class SizeClassAllocator>
+struct SizeClassAllocator64LocalCache {
+ typedef SizeClassAllocator Allocator;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0)) {
+ if (UNLIKELY(!Refill(c, allocator, class_id)))
+ return nullptr;
+ DCHECK_GT(c->count, 0);
+ }
+ CompactPtrT chunk = c->chunks[--c->count];
+ stats_.Add(AllocatorStatAllocated, c->class_size);
+ return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+ allocator->GetRegionBeginBySizeClass(class_id), chunk));
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ PerClass *c = &per_class_[class_id];
+ InitCache(c);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id, c->max_count / 2);
+ CompactPtrT chunk = allocator->PointerToCompactPtr(
+ allocator->GetRegionBeginBySizeClass(class_id),
+ reinterpret_cast<uptr>(p));
+ c->chunks[c->count++] = chunk;
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ while (c->count > 0)
+ Drain(c, allocator, i, c->count);
+ }
+ }
+
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ struct PerClass {
+ u32 count;
+ u32 max_count;
+ uptr class_size;
+ CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache(PerClass *c) {
+ if (LIKELY(c->max_count))
+ return;
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ const uptr size = Allocator::ClassIdToSize(i);
+ c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
+ c->class_size = size;
+ }
+ DCHECK_NE(c->max_count, 0UL);
+ }
+
+ NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache(c);
+ const uptr num_requested_chunks = c->max_count / 2;
+ if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+ num_requested_chunks)))
+ return false;
+ c->count = num_requested_chunks;
+ return true;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+ uptr count) {
+ CHECK_GE(c->count, count);
+ const uptr first_idx_to_drain = c->count - count;
+ c->count -= count;
+ allocator->ReturnToAllocator(&stats_, class_id,
+ &c->chunks[first_idx_to_drain], count);
+ }
+};
+
+// Cache used by SizeClassAllocator32.
+template <class SizeClassAllocator>
+struct SizeClassAllocator32LocalCache {
+ typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ // Returns a TransferBatch suitable for class_id.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ Deallocate(allocator, batch_class_id, b);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0)) {
+ if (UNLIKELY(!Refill(c, allocator, class_id)))
+ return nullptr;
+ DCHECK_GT(c->count, 0);
+ }
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ stats_.Add(AllocatorStatAllocated, c->class_size);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ PerClass *c = &per_class_[class_id];
+ InitCache(c);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id);
+ c->batch[c->count++] = p;
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ while (c->count > 0)
+ Drain(c, allocator, i);
+ }
+ }
+
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
+ // allocated from kBatchClassID size class (except for those that are needed
+ // for kBatchClassID itself). The goal is to have TransferBatches in a totally
+ // different region of RAM to improve security.
+ static const bool kUseSeparateSizeClassForBatch =
+ Allocator::kUseSeparateSizeClassForBatch;
+
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ uptr class_size;
+ uptr batch_class_id;
+ void *batch[2 * TransferBatch::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache(PerClass *c) {
+ if (LIKELY(c->max_count))
+ return;
+ const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ const uptr size = Allocator::ClassIdToSize(i);
+ const uptr max_cached = TransferBatch::MaxCached(size);
+ c->max_count = 2 * max_cached;
+ c->class_size = size;
+ // Precompute the class id to use to store batches for the current class
+ // id. 0 means the class size is large enough to store a batch within one
+ // of the chunks. If using a separate size class, it will always be
+ // kBatchClassID, except for kBatchClassID itself.
+ if (kUseSeparateSizeClassForBatch) {
+ c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
+ } else {
+ c->batch_class_id = (size <
+ TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
+ batch_class_id : 0;
+ }
+ }
+ DCHECK_NE(c->max_count, 0UL);
+ }
+
+ NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache(c);
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ if (UNLIKELY(!b))
+ return false;
+ CHECK_GT(b->Count(), 0);
+ b->CopyToArray(c->batch);
+ c->count = b->Count();
+ DestroyBatch(class_id, allocator, b);
+ return true;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ const uptr count = Min(c->max_count / 2, c->count);
+ const uptr first_idx_to_drain = c->count - count;
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+ // Failure to allocate a batch while releasing memory is non recoverable.
+ // TODO(alekseys): Figure out how to do it without allocating a new batch.
+ if (UNLIKELY(!b)) {
+ Report("FATAL: Internal error: %s's allocator failed to allocate a "
+ "transfer batch.\n", SanitizerToolName);
+ Die();
+ }
+ b->SetFromArray(&c->batch[first_idx_to_drain], count);
+ c->count -= count;
+ allocator->DeallocateBatch(&stats_, class_id, b);
+ }
+};
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_primary32.h b/lib/tsan/sanitizer_common/sanitizer_allocator_primary32.h
new file mode 100644
index 0000000000..3b1838b398
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_primary32.h
@@ -0,0 +1,380 @@
+//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+// kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a ByteMap possible_regions to store the size classes of each Region.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing the objects of this class should be
+// chache-line aligned.
+
+struct SizeClassAllocator32FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ kUseSeparateSizeClassForBatch = 2,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator32 {
+ private:
+ static const u64 kTwoLevelByteMapSize1 =
+ (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
+ static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;
+
+ public:
+ using AddressSpaceView = typename Params::AddressSpaceView;
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const u64 kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = Params::kRegionSizeLog;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+ using ByteMap = typename conditional<
+ (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
+ FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
+ AddressSpaceView>,
+ TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
+
+ COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
+ (kSpaceSize & (kSpaceSize - 1)) == 0);
+
+ static const bool kRandomShuffleChunks = Params::kFlags &
+ SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
+ static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
+ SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
+
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
+ void SetFromArray(void *batch[], uptr count) {
+ DCHECK_LE(count, kMaxNumCached);
+ count_ = count;
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ uptr Count() const { return count_; }
+ void Clear() { count_ = 0; }
+ void Add(void *ptr) {
+ batch_[count_++] = ptr;
+ DCHECK_LE(count_, kMaxNumCached);
+ }
+ void CopyToArray(void *to_batch[]) const {
+ for (uptr i = 0, n = Count(); i < n; i++)
+ to_batch[i] = batch_[i];
+ }
+
+ // How much memory do we need for a batch containing n elements.
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(void *) * n;
+ }
+ static uptr MaxCached(uptr size) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
+ }
+
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
+
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return (class_id == SizeClassMap::kBatchClassID) ?
+ kBatchSize : SizeClassMap::Size(class_id);
+ }
+
+ typedef SizeClassAllocator32<Params> ThisT;
+ typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
+
+ void Init(s32 release_to_os_interval_ms) {
+ possible_regions.Init();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return kReleaseToOSIntervalNever;
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ // This is empty here. Currently only implemented in 64-bit allocator.
+ }
+
+ void ForceReleaseToOS() {
+ // Currently implemented in 64-bit allocator only.
+ }
+
+ void *MapWithCallback(uptr size) {
+ void *res = MmapOrDie(size, PrimaryAllocatorName);
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ DCHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty()) {
+ if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+ return nullptr;
+ DCHECK(!sci->free_list.empty());
+ }
+ TransferBatch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
+ DCHECK_LT(class_id, kNumClasses);
+ CHECK_GT(b->Count(), 0);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ sci->free_list.push_front(b);
+ }
+
+ bool PointerIsMine(const void *p) {
+ uptr mem = reinterpret_cast<uptr>(p);
+ if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
+ mem &= (kSpaceSize - 1);
+ if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
+ return false;
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr region = 0; region < kNumPossibleRegions; region++)
+ if (possible_regions[region]) {
+ uptr chunk_size = ClassIdToSize(possible_regions[region]);
+ uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
+ uptr region_beg = region * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + max_chunks_in_region * chunk_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ void PrintStats() {}
+
+ static uptr AdditionalSize() { return 0; }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
+ StaticSpinMutex mutex;
+ IntrusiveList<TransferBatch> free_list;
+ u32 rand_state;
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
+
+ uptr ComputeRegionId(uptr mem) const {
+ if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
+ mem &= (kSpaceSize - 1);
+ const uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
+ DCHECK_LT(class_id, kNumClasses);
+ const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+ kRegionSize, kRegionSize, PrimaryAllocatorName));
+ if (UNLIKELY(!res))
+ return 0;
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK(IsAligned(res, kRegionSize));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ DCHECK_LT(class_id, kNumClasses);
+ return &size_class_info_array[class_id];
+ }
+
+ bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
+ TransferBatch **current_batch, uptr max_count,
+ uptr *pointers_array, uptr count) {
+ // If using a separate class for batches, we do not need to shuffle it.
+ if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
+ class_id != SizeClassMap::kBatchClassID))
+ RandomShuffle(pointers_array, count, &sci->rand_state);
+ TransferBatch *b = *current_batch;
+ for (uptr i = 0; i < count; i++) {
+ if (!b) {
+ b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
+ if (UNLIKELY(!b))
+ return false;
+ b->Clear();
+ }
+ b->Add((void*)pointers_array[i]);
+ if (b->Count() == max_count) {
+ sci->free_list.push_back(b);
+ b = nullptr;
+ }
+ }
+ *current_batch = b;
+ return true;
+ }
+
+ bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
+ const uptr region = AllocateRegion(stat, class_id);
+ if (UNLIKELY(!region))
+ return false;
+ if (kRandomShuffleChunks)
+ if (UNLIKELY(sci->rand_state == 0))
+ // The random state is initialized from ASLR (PIE) and time.
+ sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
+ const uptr size = ClassIdToSize(class_id);
+ const uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ const uptr max_count = TransferBatch::MaxCached(size);
+ DCHECK_GT(max_count, 0);
+ TransferBatch *b = nullptr;
+ constexpr uptr kShuffleArraySize = 48;
+ uptr shuffle_array[kShuffleArraySize];
+ uptr count = 0;
+ for (uptr i = region; i < region + n_chunks * size; i += size) {
+ shuffle_array[count++] = i;
+ if (count == kShuffleArraySize) {
+ if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+ shuffle_array, count)))
+ return false;
+ count = 0;
+ }
+ }
+ if (count) {
+ if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+ shuffle_array, count)))
+ return false;
+ }
+ if (b) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ }
+ return true;
+ }
+
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
+};
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_primary64.h b/lib/tsan/sanitizer_common/sanitizer_allocator_primary64.h
new file mode 100644
index 0000000000..1d9a29c70f
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_primary64.h
@@ -0,0 +1,863 @@
+//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+// The template parameter Params is a class containing the actual parameters.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
+// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically my mmap.
+// Otherwise SpaceBeg=kSpaceBeg (fixed address).
+// kSpaceSize is a power of two.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+
+// FreeArray is an array free-d chunks (stored as 4-byte offsets)
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
+
+struct SizeClassAllocator64FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator64 {
+ public:
+ using AddressSpaceView = typename Params::AddressSpaceView;
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const uptr kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+
+ static const bool kRandomShuffleChunks =
+ Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+
+ typedef SizeClassAllocator64<Params> ThisT;
+ typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+
+ // When we know the size class (the region base) we can represent a pointer
+ // as a 4-byte integer (offset from the region start shifted right by 4).
+ typedef u32 CompactPtrT;
+ static const uptr kCompactPtrScale = 4;
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
+ return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
+ }
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
+ return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
+ }
+
+ void Init(s32 release_to_os_interval_ms) {
+ uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+ if (kUsingConstantSpaceBeg) {
+ CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
+ CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
+ PrimaryAllocatorName, kSpaceBeg));
+ } else {
+ // Combined allocator expects that an 2^N allocation is always aligned to
+ // 2^N. For this to work, the start of the space needs to be aligned as
+ // high as the largest size class (which also needs to be a power of 2).
+ NonConstSpaceBeg = address_range.InitAligned(
+ TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
+ "SizeClassAllocator: region info");
+ // Check that the RegionInfo array is aligned on the CacheLine size.
+ DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+ memory_order_relaxed);
+ }
+
+ void ForceReleaseToOS() {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+ MaybeReleaseToOS(class_id, true /*force*/);
+ }
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ const CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ uptr old_num_chunks = region->num_freed_chunks;
+ uptr new_num_freed_chunks = old_num_chunks + n_chunks;
+ // Failure to allocate free array space while releasing memory is non
+ // recoverable.
+ if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
+ new_num_freed_chunks))) {
+ Report("FATAL: Internal error: %s's allocator exhausted the free list "
+ "space for size class %zd (%zd bytes).\n", SanitizerToolName,
+ class_id, ClassIdToSize(class_id));
+ Die();
+ }
+ for (uptr i = 0; i < n_chunks; i++)
+ free_array[old_num_chunks + i] = chunks[i];
+ region->num_freed_chunks = new_num_freed_chunks;
+ region->stats.n_freed += n_chunks;
+
+ MaybeReleaseToOS(class_id, false /*force*/);
+ }
+
+ NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
+ CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
+ if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
+ n_chunks - region->num_freed_chunks)))
+ return false;
+ CHECK_GE(region->num_freed_chunks, n_chunks);
+ }
+ region->num_freed_chunks -= n_chunks;
+ uptr base_idx = region->num_freed_chunks;
+ for (uptr i = 0; i < n_chunks; i++)
+ chunks[i] = free_array[base_idx + i];
+ region->stats.n_allocated += n_chunks;
+ return true;
+ }
+
+ bool PointerIsMine(const void *p) const {
+ uptr P = reinterpret_cast<uptr>(p);
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return P / kSpaceSize == kSpaceBeg / kSpaceSize;
+ return P >= SpaceBeg() && P < SpaceEnd();
+ }
+
+ uptr GetRegionBegin(const void *p) {
+ if (kUsingConstantSpaceBeg)
+ return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
+ uptr space_beg = SpaceBeg();
+ return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
+ space_beg;
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) const {
+ return SpaceBeg() + kRegionSize * class_id;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
+ return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
+ kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ if (!size) return nullptr;
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = GetRegionBegin(p);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ if (class_id >= kNumClasses) return nullptr;
+ const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
+ }
+
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
+ uptr stats_size) {
+ for (uptr class_id = 0; class_id < stats_size; class_id++)
+ if (stats[class_id] == start)
+ stats[class_id] = rss;
+ }
+
+ void PrintStats(uptr class_id, uptr rss) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) return;
+ uptr in_use = region->stats.n_allocated - region->stats.n_freed;
+ uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
+ Printf(
+ "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
+ "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
+ "last released: %6zdK region: 0x%zx\n",
+ region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
+ region->mapped_user >> 10, region->stats.n_allocated,
+ region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
+ rss >> 10, region->rtoi.num_releases,
+ region->rtoi.last_released_bytes >> 10,
+ SpaceBeg() + kRegionSize * class_id);
+ }
+
+ void PrintStats() {
+ uptr rss_stats[kNumClasses];
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
+ rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
+ GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+
+ uptr total_mapped = 0;
+ uptr total_rss = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user != 0) {
+ total_mapped += region->mapped_user;
+ total_rss += rss_stats[class_id];
+ }
+ n_allocated += region->stats.n_allocated;
+ n_freed += region->stats.n_freed;
+ }
+
+ Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
+ "%zd allocations; remains %zd\n", total_mapped >> 20,
+ total_rss >> 20, n_allocated, n_allocated - n_freed);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++)
+ PrintStats(class_id, rss_stats[class_id]);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr chunk_size = ClassIdToSize(class_id);
+ uptr region_beg = SpaceBeg() + class_id * kRegionSize;
+ uptr region_allocated_user_size =
+ AddressSpaceView::Load(region)->allocated_user;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + region_allocated_user_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ // A packed array of counters. Each counter occupies 2^n bits, enough to store
+ // counter's max_value. Ctor will try to allocate the required buffer via
+ // mapper->MapPackedCounterArrayBuffer and the caller is expected to check
+ // whether the initialization was successful by checking IsAllocated() result.
+ // For the performance sake, none of the accessors check the validity of the
+ // arguments, it is assumed that index is always in [0, n) range and the value
+ // is not incremented past max_value.
+ template<class MemoryMapperT>
+ class PackedCounterArray {
+ public:
+ PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
+ : n(num_counters), memory_mapper(mapper) {
+ CHECK_GT(num_counters, 0);
+ CHECK_GT(max_value, 0);
+ constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
+ // Rounding counter storage size up to the power of two allows for using
+ // bit shifts calculating particular counter's index and offset.
+ uptr counter_size_bits =
+ RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
+ CHECK_LE(counter_size_bits, kMaxCounterBits);
+ counter_size_bits_log = Log2(counter_size_bits);
+ counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
+
+ uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
+ CHECK_GT(packing_ratio, 0);
+ packing_ratio_log = Log2(packing_ratio);
+ bit_offset_mask = packing_ratio - 1;
+
+ buffer_size =
+ (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
+ sizeof(*buffer);
+ buffer = reinterpret_cast<u64*>(
+ memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
+ }
+ ~PackedCounterArray() {
+ if (buffer) {
+ memory_mapper->UnmapPackedCounterArrayBuffer(
+ reinterpret_cast<uptr>(buffer), buffer_size);
+ }
+ }
+
+ bool IsAllocated() const {
+ return !!buffer;
+ }
+
+ u64 GetCount() const {
+ return n;
+ }
+
+ uptr Get(uptr i) const {
+ DCHECK_LT(i, n);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ return (buffer[index] >> bit_offset) & counter_mask;
+ }
+
+ void Inc(uptr i) const {
+ DCHECK_LT(Get(i), counter_mask);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ buffer[index] += 1ULL << bit_offset;
+ }
+
+ void IncRange(uptr from, uptr to) const {
+ DCHECK_LE(from, to);
+ for (uptr i = from; i <= to; i++)
+ Inc(i);
+ }
+
+ private:
+ const u64 n;
+ u64 counter_size_bits_log;
+ u64 counter_mask;
+ u64 packing_ratio_log;
+ u64 bit_offset_mask;
+
+ MemoryMapperT* const memory_mapper;
+ u64 buffer_size;
+ u64* buffer;
+ };
+
+ template<class MemoryMapperT>
+ class FreePagesRangeTracker {
+ public:
+ explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+ : memory_mapper(mapper),
+ page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
+ in_the_range(false), current_page(0), current_range_start_page(0) {}
+
+ void NextPage(bool freed) {
+ if (freed) {
+ if (!in_the_range) {
+ current_range_start_page = current_page;
+ in_the_range = true;
+ }
+ } else {
+ CloseOpenedRange();
+ }
+ current_page++;
+ }
+
+ void Done() {
+ CloseOpenedRange();
+ }
+
+ private:
+ void CloseOpenedRange() {
+ if (in_the_range) {
+ memory_mapper->ReleasePageRangeToOS(
+ current_range_start_page << page_size_scaled_log,
+ current_page << page_size_scaled_log);
+ in_the_range = false;
+ }
+ }
+
+ MemoryMapperT* const memory_mapper;
+ const uptr page_size_scaled_log;
+ bool in_the_range;
+ uptr current_page;
+ uptr current_range_start_page;
+ };
+
+ // Iterates over the free_array to identify memory pages containing freed
+ // chunks only and returns these pages back to OS.
+ // allocated_pages_count is the total number of pages allocated for the
+ // current bucket.
+ template<class MemoryMapperT>
+ static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
+ uptr free_array_count, uptr chunk_size,
+ uptr allocated_pages_count,
+ MemoryMapperT *memory_mapper) {
+ const uptr page_size = GetPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr full_pages_chunk_count_max;
+ bool same_chunk_count_per_page;
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+ // Same number of chunks per page, no cross overs.
+ full_pages_chunk_count_max = page_size / chunk_size;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
+ chunk_size % (page_size % chunk_size) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 2;
+ same_chunk_count_per_page = false;
+ } else if (chunk_size > page_size && chunk_size % page_size == 0) {
+ // One chunk covers multiple pages, no cross overs.
+ full_pages_chunk_count_max = 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size > page_size) {
+ // One chunk covers multiple pages, Some chunks are crossing page
+ // boundaries. Some pages contain one chunk, some contain two.
+ full_pages_chunk_count_max = 2;
+ same_chunk_count_per_page = false;
+ } else {
+ UNREACHABLE("All chunk_size/page_size ratios must be handled.");
+ }
+
+ PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
+ full_pages_chunk_count_max,
+ memory_mapper);
+ if (!counters.IsAllocated())
+ return;
+
+ const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
+ const uptr page_size_scaled = page_size >> kCompactPtrScale;
+ const uptr page_size_scaled_log = Log2(page_size_scaled);
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+ // Each chunk affects one page only.
+ for (uptr i = 0; i < free_array_count; i++)
+ counters.Inc(free_array[i] >> page_size_scaled_log);
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (uptr i = 0; i < free_array_count; i++) {
+ counters.IncRange(
+ free_array[i] >> page_size_scaled_log,
+ (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
+ }
+ }
+
+ // Iterate over pages detecting ranges of pages with chunk counters equal
+ // to the expected number of chunks for the particular page.
+ FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+ if (same_chunk_count_per_page) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr i = 0; i < counters.GetCount(); i++)
+ range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
+ } else {
+ // Show path, go through the pages keeping count how many chunks affect
+ // each page.
+ const uptr pn =
+ chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
+ const uptr pnc = pn * chunk_size_scaled;
+ // The idea is to increment the current page pointer by the first chunk
+ // size, middle portion size (the portion of the page covered by chunks
+ // except the first and the last one) and then the last chunk size, adding
+ // up the number of chunks on the current page and checking on every step
+ // whether the page boundary was crossed.
+ uptr prev_page_boundary = 0;
+ uptr current_boundary = 0;
+ for (uptr i = 0; i < counters.GetCount(); i++) {
+ uptr page_boundary = prev_page_boundary + page_size_scaled;
+ uptr chunks_per_page = pn;
+ if (current_boundary < page_boundary) {
+ if (current_boundary > prev_page_boundary)
+ chunks_per_page++;
+ current_boundary += pnc;
+ if (current_boundary < page_boundary) {
+ chunks_per_page++;
+ current_boundary += chunk_size_scaled;
+ }
+ }
+ prev_page_boundary = page_boundary;
+
+ range_tracker.NextPage(counters.Get(i) == chunks_per_page);
+ }
+ }
+ range_tracker.Done();
+ }
+
+ private:
+ friend class MemoryMapper;
+
+ ReservedAddressRange address_range;
+
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
+ // In the worst case it may reguire kRegionSize/SizeClassMap::kMinSize
+ // elements, but in reality this will not happen. For simplicity we
+ // dedicate 1/8 of the region's virtual space to FreeArray.
+ static const uptr kFreeArraySize = kRegionSize / 8;
+
+ static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
+ uptr NonConstSpaceBeg;
+ uptr SpaceBeg() const {
+ return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
+ }
+ uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize must be <= 2^36, see CompactPtrT.
+ COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 16;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+ // Call mmap for free array memory with at least this size.
+ static const uptr kFreeArrayMapSize = 1 << 16;
+
+ atomic_sint32_t release_to_os_interval_ms_;
+
+ struct Stats {
+ uptr n_allocated;
+ uptr n_freed;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr n_freed_at_last_release;
+ uptr num_releases;
+ u64 last_release_at_ns;
+ u64 last_released_bytes;
+ };
+
+ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
+ BlockingMutex mutex;
+ uptr num_freed_chunks; // Number of elements in the freearray.
+ uptr mapped_free_array; // Bytes mapped for freearray.
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
+ bool exhausted; // Whether region is out of space for new chunks.
+ Stats stats;
+ ReleaseToOsInfo rtoi;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
+
+ RegionInfo *GetRegionInfo(uptr class_id) const {
+ DCHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+ return &regions[class_id];
+ }
+
+ uptr GetMetadataEnd(uptr region_beg) const {
+ return region_beg + kRegionSize - kFreeArraySize;
+ }
+
+ uptr GetChunkIdx(uptr chunk, uptr size) const {
+ if (!kUsingConstantSpaceBeg)
+ chunk -= SpaceBeg();
+
+ uptr offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // size always fits into 32-bits. If the offset fits too, use 32-bit div.
+ if (offset >> (SANITIZER_WORDSIZE / 2))
+ return offset / size;
+ return (u32)offset / (u32)size;
+ }
+
+ CompactPtrT *GetFreeArray(uptr region_beg) const {
+ return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
+ }
+
+ bool MapWithCallback(uptr beg, uptr size, const char *name) {
+ uptr mapped = address_range.Map(beg, size, name);
+ if (UNLIKELY(!mapped))
+ return false;
+ CHECK_EQ(beg, mapped);
+ MapUnmapCallback().OnMap(beg, size);
+ return true;
+ }
+
+ void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+ CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ address_range.Unmap(beg, size);
+ }
+
+ bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+ uptr num_freed_chunks) {
+ uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
+ if (region->mapped_free_array < needed_space) {
+ uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
+ CHECK_LE(new_mapped_free_array, kFreeArraySize);
+ uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
+ region->mapped_free_array;
+ uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
+ if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
+ "SizeClassAllocator: freearray")))
+ return false;
+ region->mapped_free_array = new_mapped_free_array;
+ }
+ return true;
+ }
+
+ // Check whether this size class is exhausted.
+ bool IsRegionExhausted(RegionInfo *region, uptr class_id,
+ uptr additional_map_size) {
+ if (LIKELY(region->mapped_user + region->mapped_meta +
+ additional_map_size <= kRegionSize - kFreeArraySize))
+ return false;
+ if (!region->exhausted) {
+ region->exhausted = true;
+ Printf("%s: Out of memory. ", SanitizerToolName);
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize >> 20, ClassIdToSize(class_id));
+ }
+ return true;
+ }
+
+ NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+ RegionInfo *region, uptr requested_count) {
+ // region->mutex is held.
+ const uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ const uptr size = ClassIdToSize(class_id);
+
+ const uptr total_user_bytes =
+ region->allocated_user + requested_count * size;
+ // Map more space for chunks, if necessary.
+ if (LIKELY(total_user_bytes > region->mapped_user)) {
+ if (UNLIKELY(region->mapped_user == 0)) {
+ if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
+ // The random state is initialized from ASLR.
+ region->rand_state = static_cast<u32>(region_beg >> 12);
+ // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
+ // preventing just allocated memory from being released sooner than
+ // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
+ // for short lived processes.
+ // Do it only when the feature is turned on, to avoid a potentially
+ // extraneous syscall.
+ if (ReleaseToOSIntervalMs() >= 0)
+ region->rtoi.last_release_at_ns = MonotonicNanoTime();
+ }
+ // Do the mmap for the user memory.
+ const uptr user_map_size =
+ RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
+ if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
+ return false;
+ if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
+ user_map_size,
+ "SizeClassAllocator: region data")))
+ return false;
+ stat->Add(AllocatorStatMapped, user_map_size);
+ region->mapped_user += user_map_size;
+ }
+ const uptr new_chunks_count =
+ (region->mapped_user - region->allocated_user) / size;
+
+ if (kMetadataSize) {
+ // Calculate the required space for metadata.
+ const uptr total_meta_bytes =
+ region->allocated_meta + new_chunks_count * kMetadataSize;
+ const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
+ RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
+ // Map more space for metadata, if necessary.
+ if (meta_map_size) {
+ if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
+ return false;
+ if (UNLIKELY(!MapWithCallback(
+ GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
+ meta_map_size, "SizeClassAllocator: region metadata")))
+ return false;
+ region->mapped_meta += meta_map_size;
+ }
+ }
+
+ // If necessary, allocate more space for the free array and populate it with
+ // newly allocated chunks.
+ const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
+ if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
+ return false;
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
+ i++, chunk += size)
+ free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
+ if (kRandomShuffleChunks)
+ RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
+ &region->rand_state);
+
+ // All necessary memory is mapped and now it is safe to advance all
+ // 'allocated_*' counters.
+ region->num_freed_chunks += new_chunks_count;
+ region->allocated_user += new_chunks_count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+ region->allocated_meta += new_chunks_count * kMetadataSize;
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ region->exhausted = false;
+
+ // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
+ // MaybeReleaseToOS from releasing just allocated pages or protect these
+ // not yet used chunks some other way.
+
+ return true;
+ }
+
+ class MemoryMapper {
+ public:
+ MemoryMapper(const ThisT& base_allocator, uptr class_id)
+ : allocator(base_allocator),
+ region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
+ released_ranges_count(0),
+ released_bytes(0) {
+ }
+
+ uptr GetReleasedRangesCount() const {
+ return released_ranges_count;
+ }
+
+ uptr GetReleasedBytes() const {
+ return released_bytes;
+ }
+
+ uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ // TODO(alekseyshl): The idea to explore is to check if we have enough
+ // space between num_freed_chunks*sizeof(CompactPtrT) and
+ // mapped_free_array to fit buffer_size bytes and use that space instead
+ // of mapping a temporary one.
+ return reinterpret_cast<uptr>(
+ MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
+ }
+
+ void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
+ UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
+ }
+
+ // Releases [from, to) range of pages back to OS.
+ void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
+ const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
+ const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
+ ReleaseMemoryPagesToOS(from_page, to_page);
+ released_ranges_count++;
+ released_bytes += to_page - from_page;
+ }
+
+ private:
+ const ThisT& allocator;
+ const uptr region_base;
+ uptr released_ranges_count;
+ uptr released_bytes;
+ };
+
+ // Attempts to release RAM occupied by freed chunks back to OS. The region is
+ // expected to be locked.
+ void MaybeReleaseToOS(uptr class_id, bool force) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ const uptr chunk_size = ClassIdToSize(class_id);
+ const uptr page_size = GetPageSizeCached();
+
+ uptr n = region->num_freed_chunks;
+ if (n * chunk_size < page_size)
+ return; // No chance to release anything.
+ if ((region->stats.n_freed -
+ region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
+ return; // Nothing new to release.
+ }
+
+ if (!force) {
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
+
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
+ MonotonicNanoTime()) {
+ return; // Memory was returned recently.
+ }
+ }
+
+ MemoryMapper memory_mapper(*this, class_id);
+
+ ReleaseFreeMemoryToOS<MemoryMapper>(
+ GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
+ RoundUpTo(region->allocated_user, page_size) / page_size,
+ &memory_mapper);
+
+ if (memory_mapper.GetReleasedRangesCount() > 0) {
+ region->rtoi.n_freed_at_last_release = region->stats.n_freed;
+ region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
+ region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+ }
+ region->rtoi.last_release_at_ns = MonotonicNanoTime();
+ }
+};
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_report.h b/lib/tsan/sanitizer_common/sanitizer_allocator_report.h
new file mode 100644
index 0000000000..0653c365c1
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_report.h
@@ -0,0 +1,39 @@
+//===-- sanitizer_allocator_report.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_REPORT_H
+#define SANITIZER_ALLOCATOR_REPORT_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+void NORETURN ReportCallocOverflow(uptr count, uptr size,
+ const StackTrace *stack);
+void NORETURN ReportReallocArrayOverflow(uptr count, uptr size,
+ const StackTrace *stack);
+void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack);
+void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
+ const StackTrace *stack);
+void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
+ const StackTrace *stack);
+void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
+ const StackTrace *stack);
+void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
+ const StackTrace *stack);
+void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_REPORT_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_secondary.h b/lib/tsan/sanitizer_common/sanitizer_allocator_secondary.h
new file mode 100644
index 0000000000..1d128f55de
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_secondary.h
@@ -0,0 +1,326 @@
+//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
+// allocated chunks. To be used in memory constrained or not memory hungry cases
+// (currently, 32 bits and internal allocator).
+class LargeMmapAllocatorPtrArrayStatic {
+ public:
+ INLINE void *Init() { return &p_[0]; }
+ INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+ private:
+ static const int kMaxNumChunks = 1 << 15;
+ uptr p_[kMaxNumChunks];
+};
+
+// Much less restricted LargeMmapAllocator chunks list (comparing to
+// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
+// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the
+// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
+class LargeMmapAllocatorPtrArrayDynamic {
+ public:
+ INLINE void *Init() {
+ uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
+ SecondaryAllocatorName);
+ CHECK(p);
+ return reinterpret_cast<void*>(p);
+ }
+
+ INLINE void EnsureSpace(uptr n) {
+ CHECK_LT(n, kMaxNumChunks);
+ DCHECK(n <= n_reserved_);
+ if (UNLIKELY(n == n_reserved_)) {
+ address_range_.MapOrDie(
+ reinterpret_cast<uptr>(address_range_.base()) +
+ n_reserved_ * sizeof(uptr),
+ kChunksBlockCount * sizeof(uptr));
+ n_reserved_ += kChunksBlockCount;
+ }
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << 20;
+ static const int kChunksBlockCount = 1 << 14;
+ ReservedAddressRange address_range_;
+ uptr n_reserved_;
+};
+
+#if SANITIZER_WORDSIZE == 32
+typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
+#else
+typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
+#endif
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+ class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
+ class AddressSpaceViewTy = LocalAddressSpaceView>
+class LargeMmapAllocator {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void InitLinkerInitialized() {
+ page_size_ = GetPageSizeCached();
+ chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
+ }
+
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ // Overflow.
+ if (map_size < size) {
+ Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
+ "0x%zx bytes with 0x%zx alignment requested\n",
+ SanitizerToolName, map_size, alignment);
+ return nullptr;
+ }
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
+ if (!map_beg)
+ return nullptr;
+ CHECK(IsAligned(map_beg, page_size_));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ ptr_array_.EnsureSpace(n_chunks_);
+ uptr idx = n_chunks_++;
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ chunks_sorted_ = false;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ }
+ return reinterpret_cast<void*>(res);
+ }
+
+ void Deallocate(AllocatorStats *stat, void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[--n_chunks_];
+ chunks_[idx]->chunk_idx = idx;
+ chunks_sorted_ = false;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(const void *p) {
+ return GetBlockBegin(p) != nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+ // At least page_size_/2 metadata bytes is available.
+ void *GetMetaData(const void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
+ Printf("%s: bad pointer %p\n", SanitizerToolName, p);
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ }
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(const void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks[i]);
+ if (p < ch) continue; // p is at left to this chunk, skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return nullptr;
+ const Header *h =
+ AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
+ Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size <= p)
+ return nullptr;
+ return GetUser(h_ptr);
+ }
+
+ void EnsureSortedChunks() {
+ if (chunks_sorted_) return;
+ Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
+ Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++)
+ AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
+ chunks_sorted_ = true;
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.CheckLocked();
+ uptr p = reinterpret_cast<uptr>(ptr);
+ uptr n = n_chunks_;
+ if (!n) return nullptr;
+ EnsureSortedChunks();
+ Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+ auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
+ auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
+ AddressSpaceView::Load(chunks[n - 1])->map_size;
+ if (p < min_mmap_ || p >= max_mmap_)
+ return nullptr;
+ uptr beg = 0, end = n - 1;
+ // This loop is a log(n) lower_bound. It does not check for the exact match
+ // to avoid expensive cache-thrashing loads.
+ while (end - beg >= 2) {
+ uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
+ if (p < reinterpret_cast<uptr>(chunks[mid]))
+ end = mid - 1; // We are not interested in chunks[mid].
+ else
+ beg = mid; // chunks[mid] may still be what we want.
+ }
+
+ if (beg < end) {
+ CHECK_EQ(beg + 1, end);
+ // There are 2 chunks left, choose one.
+ if (p >= reinterpret_cast<uptr>(chunks[end]))
+ beg = end;
+ }
+
+ const Header *h = AddressSpaceView::Load(chunks[beg]);
+ Header *h_ptr = chunks[beg];
+ if (h->map_beg + h->map_size <= p || p < h->map_beg)
+ return nullptr;
+ return GetUser(h_ptr);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ EnsureSortedChunks(); // Avoid doing the sort while iterating.
+ const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++) {
+ const Header *t = chunks[i];
+ callback(reinterpret_cast<uptr>(GetUser(t)), arg);
+ // Consistency check: verify that the array did not change.
+ CHECK_EQ(chunks[i], t);
+ CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
+ }
+ }
+
+ private:
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK(IsAligned(p, page_size_));
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(const void *p) {
+ return GetHeader(reinterpret_cast<uptr>(p));
+ }
+
+ void *GetUser(const Header *h) {
+ CHECK(IsAligned((uptr)h, page_size_));
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
+
+ uptr page_size_;
+ Header **chunks_;
+ PtrArrayT ptr_array_;
+ uptr n_chunks_;
+ bool chunks_sorted_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ StaticSpinMutex mutex_;
+};
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_size_class_map.h b/lib/tsan/sanitizer_common/sanitizer_allocator_size_class_map.h
new file mode 100644
index 0000000000..12d8c89230
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -0,0 +1,241 @@
+//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 always corresponds to size 0.
+// The other sizes are controlled by the template parameters:
+// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
+// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
+// kMidSizeLog: the classes starting from 1 increase with step
+// 2^kMinSizeLog until 2^kMidSizeLog.
+// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
+// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
+// look like 0b1xx0..0, where x is either 0 or 1.
+//
+// Example: kNumBits=3, kMidSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+//
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 4 classes: 256 + i * 64 (i = 1 to 4).
+// Next 4 classes: 512 + i * 128 (i = 1 to 4).
+// ...
+// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - Difference between two consequent size classes is between 14% and 25%
+//
+// This class also gives a hint to a thread-caching allocator about the amount
+// of chunks that need to be cached per-thread:
+// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
+// The actual number is computed in TransferBatch.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
+//
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
+//
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
+// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
+// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
+//
+// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
+// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
+// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
+// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
+//
+// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
+// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
+// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
+// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
+//
+// ...
+//
+// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
+// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
+// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
+// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
+//
+// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
+//
+//
+// Another example (kNumBits=2):
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
+// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
+// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
+// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
+// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
+// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
+// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
+// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
+// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
+// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
+// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
+// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
+// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
+// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
+// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
+// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
+// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
+// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
+// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
+// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
+// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
+// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
+// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
+// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
+// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
+// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
+
+template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
+ uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = kNumBits - 1;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ // kMaxNumCachedHintT is a power of two. It serves as a hint
+ // for the size of TransferBatch, the actual size could be a bit smaller.
+ static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
+ COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
+
+ static const uptr kMaxSize = 1UL << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
+ static const uptr kLargestClassID = kNumClasses - 2;
+ static const uptr kBatchClassID = kNumClasses - 1;
+ COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses <= 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ // Estimate the result for kBatchClassID because this class does not know
+ // the exact size of TransferBatch. It's OK since we are using the actual
+ // sizeof(TransferBatch) where it matters.
+ if (UNLIKELY(class_id == kBatchClassID))
+ return kMaxNumCachedHint * sizeof(uptr);
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (UNLIKELY(size > kMaxSize))
+ return 0;
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ const uptr l = MostSignificantSetBitIndex(size);
+ const uptr hbits = (size >> (l - S)) & M;
+ const uptr lbits = size & ((1U << (l - S)) - 1);
+ const uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCachedHint(uptr size) {
+ DCHECK_LE(size, kMaxSize);
+ if (UNLIKELY(size == 0))
+ return 0;
+ uptr n;
+ // Force a 32-bit division if the template parameters allow for it.
+ if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
+ n = (1UL << kMaxBytesCachedLog) / size;
+ else
+ n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
+ return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = s ? MostSignificantSetBitIndex(s) : 0;
+ uptr cached = MaxCachedHint(s) * s;
+ if (i == kBatchClassID)
+ d = p = l = 0;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_NE(s, 0U);
+ if (c == kBatchClassID)
+ continue;
+ CHECK_EQ(ClassID(s), c);
+ if (c < kLargestClassID)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ CHECK_GT(Size(c), Size(c - 1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c - 1), s);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
+typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
+
+// The following SizeClassMap only holds a way small number of cached entries,
+// allowing for denser per-class arrays, smaller memory footprint and usually
+// better performances in threaded environments.
+typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
+// Similar to VeryCompact map above, this one has a small number of different
+// size classes, and also reduced thread-local caches.
+typedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap;
diff --git a/lib/tsan/sanitizer_common/sanitizer_allocator_stats.h b/lib/tsan/sanitizer_common/sanitizer_allocator_stats.h
new file mode 100644
index 0000000000..6f14e3863c
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_allocator_stats.h
@@ -0,0 +1,106 @@
+//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
+ AllocatorStatCount
+};
+
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void InitLinkerInitialized() {}
+
+ void Add(AllocatorStat i, uptr v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uintptr_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void InitLinkerInitialized() {
+ next_ = this;
+ prev_ = this;
+ }
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
+ }
+
+ private:
+ mutable StaticSpinMutex mu_;
+};
+
+
diff --git a/lib/tsan/sanitizer_common/sanitizer_asm.h b/lib/tsan/sanitizer_common/sanitizer_asm.h
new file mode 100644
index 0000000000..803af3285e
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_asm.h
@@ -0,0 +1,68 @@
+//===-- sanitizer_asm.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Various support for assemebler.
+//
+//===----------------------------------------------------------------------===//
+
+// Some toolchains do not support .cfi asm directives, so we have to hide
+// them inside macros.
+#if defined(__clang__) || \
+ (defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM))
+ // GCC defined __GCC_HAVE_DWARF2_CFI_ASM if it supports CFI.
+ // Clang seems to support CFI by default (or not?).
+ // We need two versions of macros: for inline asm and standalone asm files.
+# define CFI_INL_ADJUST_CFA_OFFSET(n) ".cfi_adjust_cfa_offset " #n ";"
+
+# define CFI_STARTPROC .cfi_startproc
+# define CFI_ENDPROC .cfi_endproc
+# define CFI_ADJUST_CFA_OFFSET(n) .cfi_adjust_cfa_offset n
+# define CFI_DEF_CFA_OFFSET(n) .cfi_def_cfa_offset n
+# define CFI_REL_OFFSET(reg, n) .cfi_rel_offset reg, n
+# define CFI_OFFSET(reg, n) .cfi_offset reg, n
+# define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
+# define CFI_DEF_CFA(reg, n) .cfi_def_cfa reg, n
+# define CFI_RESTORE(reg) .cfi_restore reg
+
+#else // No CFI
+# define CFI_INL_ADJUST_CFA_OFFSET(n)
+# define CFI_STARTPROC
+# define CFI_ENDPROC
+# define CFI_ADJUST_CFA_OFFSET(n)
+# define CFI_DEF_CFA_OFFSET(n)
+# define CFI_REL_OFFSET(reg, n)
+# define CFI_OFFSET(reg, n)
+# define CFI_DEF_CFA_REGISTER(reg)
+# define CFI_DEF_CFA(reg, n)
+# define CFI_RESTORE(reg)
+#endif
+
+#if !defined(__APPLE__)
+# define ASM_HIDDEN(symbol) .hidden symbol
+# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
+# define ASM_SIZE(symbol) .size symbol, .-symbol
+# define ASM_SYMBOL(symbol) symbol
+# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
+# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+#else
+# define ASM_HIDDEN(symbol)
+# define ASM_TYPE_FUNCTION(symbol)
+# define ASM_SIZE(symbol)
+# define ASM_SYMBOL(symbol) _##symbol
+# define ASM_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
+# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+#endif
+
+#if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \
+ defined(__Fuchsia__) || defined(__linux__))
+// clang-format off
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits // NOLINT
+// clang-format on
+#else
+#define NO_EXEC_STACK_DIRECTIVE
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_atomic.h b/lib/tsan/sanitizer_common/sanitizer_atomic.h
new file mode 100644
index 0000000000..a798a0cf25
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_atomic.h
@@ -0,0 +1,86 @@
+//===-- sanitizer_atomic.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_H
+#define SANITIZER_ATOMIC_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+enum memory_order {
+ memory_order_relaxed = 1 << 0,
+ memory_order_consume = 1 << 1,
+ memory_order_acquire = 1 << 2,
+ memory_order_release = 1 << 3,
+ memory_order_acq_rel = 1 << 4,
+ memory_order_seq_cst = 1 << 5
+};
+
+struct atomic_uint8_t {
+ typedef u8 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint16_t {
+ typedef u16 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_sint32_t {
+ typedef s32 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint32_t {
+ typedef u32 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint64_t {
+ typedef u64 Type;
+ // On 32-bit platforms u64 is not necessary aligned on 8 bytes.
+ volatile ALIGNED(8) Type val_dont_use;
+};
+
+struct atomic_uintptr_t {
+ typedef uptr Type;
+ volatile Type val_dont_use;
+};
+
+} // namespace __sanitizer
+
+#if defined(__clang__) || defined(__GNUC__)
+# include "sanitizer_atomic_clang.h"
+#elif defined(_MSC_VER)
+# include "sanitizer_atomic_msvc.h"
+#else
+# error "Unsupported compiler"
+#endif
+
+namespace __sanitizer {
+
+// Clutter-reducing helpers.
+
+template<typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+ return atomic_load(a, memory_order_relaxed);
+}
+
+template<typename T>
+INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+ atomic_store(a, v, memory_order_relaxed);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_atomic_clang.h b/lib/tsan/sanitizer_common/sanitizer_atomic_clang.h
new file mode 100644
index 0000000000..c40461ebc3
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_atomic_clang.h
@@ -0,0 +1,105 @@
+//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_H
+#define SANITIZER_ATOMIC_CLANG_H
+
+#if defined(__i386__) || defined(__x86_64__)
+# include "sanitizer_atomic_clang_x86.h"
+#else
+# include "sanitizer_atomic_clang_other.h"
+#endif
+
+namespace __sanitizer {
+
+// We would like to just use compiler builtin atomic operations
+// for loads and stores, but they are mostly broken in clang:
+// - they lead to vastly inefficient code generation
+// (http://llvm.org/bugs/show_bug.cgi?id=17281)
+// - 64-bit atomic operations are not implemented on x86_32
+// (http://llvm.org/bugs/show_bug.cgi?id=15034)
+// - they are not implemented on ARM
+// error: undefined reference to '__atomic_load_4'
+
+// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+// for mappings of the memory model to different processors.
+
+INLINE void atomic_signal_fence(memory_order) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ __sync_synchronize();
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, -v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_exchange(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ DCHECK(!((uptr)a % sizeof(*a)));
+ if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
+ __sync_synchronize();
+ v = __sync_lock_test_and_set(&a->val_dont_use, v);
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ return v;
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ typedef typename T::Type Type;
+ Type cmpv = *cmp;
+ Type prev;
+ prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+ if (prev == cmpv) return true;
+ *cmp = prev;
+ return false;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
+
+} // namespace __sanitizer
+
+// This include provides explicit template instantiations for atomic_uint64_t
+// on MIPS32, which does not directly support 8 byte atomics. It has to
+// proceed the template definitions above.
+#if defined(_MIPS_SIM) && defined(_ABIO32)
+ #include "sanitizer_atomic_clang_mips.h"
+#endif
+
+#undef ATOMIC_ORDER
+
+#endif // SANITIZER_ATOMIC_CLANG_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_atomic_clang_mips.h b/lib/tsan/sanitizer_common/sanitizer_atomic_clang_mips.h
new file mode 100644
index 0000000000..d369aeb993
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_atomic_clang_mips.h
@@ -0,0 +1,117 @@
+//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
+#define SANITIZER_ATOMIC_CLANG_MIPS_H
+
+namespace __sanitizer {
+
+// MIPS32 does not support atomics > 4 bytes. To address this lack of
+// functionality, the sanitizer library provides helper methods which use an
+// internal spin lock mechanism to emulate atomic oprations when the size is
+// 8 bytes.
+static void __spin_lock(volatile int *lock) {
+ while (__sync_lock_test_and_set(lock, 1))
+ while (*lock) {
+ }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+// Make sure the lock is on its own cache line to prevent false sharing.
+// Put it inside a struct that is aligned and padded to the typical MIPS
+// cacheline which is 32 bytes.
+static struct {
+ int lock;
+ char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0, {0}};
+
+template <>
+INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
+ atomic_uint64_t::Type val,
+ memory_order mo) {
+ DCHECK(mo &
+ (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ atomic_uint64_t::Type ret;
+
+ __spin_lock(&lock.lock);
+ ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
+ ptr->val_dont_use = ret + val;
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+
+template <>
+INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
+ atomic_uint64_t::Type val,
+ memory_order mo) {
+ return atomic_fetch_add(ptr, -val, mo);
+}
+
+template <>
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
+ atomic_uint64_t::Type *cmp,
+ atomic_uint64_t::Type xchg,
+ memory_order mo) {
+ DCHECK(mo &
+ (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ typedef atomic_uint64_t::Type Type;
+ Type cmpv = *cmp;
+ Type prev;
+ bool ret = false;
+
+ __spin_lock(&lock.lock);
+ prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
+ if (prev == cmpv) {
+ ret = true;
+ ptr->val_dont_use = xchg;
+ }
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+
+template <>
+INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
+ memory_order mo) {
+ DCHECK(mo &
+ (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ atomic_uint64_t::Type zero = 0;
+ volatile atomic_uint64_t *Newptr =
+ const_cast<volatile atomic_uint64_t *>(ptr);
+ return atomic_fetch_add(Newptr, zero, mo);
+}
+
+template <>
+INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
+ memory_order mo) {
+ DCHECK(mo &
+ (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ __spin_lock(&lock.lock);
+ ptr->val_dont_use = v;
+ __spin_unlock(&lock.lock);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_CLANG_MIPS_H
+
diff --git a/lib/tsan/sanitizer_common/sanitizer_atomic_clang_other.h b/lib/tsan/sanitizer_common/sanitizer_atomic_clang_other.h
new file mode 100644
index 0000000000..b8685a8542
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -0,0 +1,97 @@
+//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
+#define SANITIZER_ATOMIC_CLANG_OTHER_H
+
+namespace __sanitizer {
+
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __sync_synchronize();
+ } else { // seq_cst
+ // E.g. on POWER we need a hw fence even before the store.
+ __sync_synchronize();
+ v = a->val_dont_use;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ // Gross, but simple and reliable.
+ // Assume that it is not in read-only memory.
+ v = __sync_fetch_and_add(
+ const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ // Gross, but simple and reliable.
+ typename T::Type cmp = a->val_dont_use;
+ typename T::Type cur;
+ for (;;) {
+ cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+ if (cur == cmp || cur == v)
+ break;
+ cmp = cur;
+ }
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_atomic_clang_x86.h b/lib/tsan/sanitizer_common/sanitizer_atomic_clang_x86.h
new file mode 100644
index 0000000000..f2ce553baa
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_atomic_clang_x86.h
@@ -0,0 +1,113 @@
+//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_X86_H
+#define SANITIZER_ATOMIC_CLANG_X86_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+ for (int i = 0; i < cnt; i++)
+ __asm__ __volatile__("pause");
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ // On x86 loads are implicitly acquire.
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 plain MOV is enough for seq_cst store.
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;" // (ptr could be read-only)
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (v)
+ : "m" (a->val_dont_use)
+ : // mark the mmx registers as clobbered
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;"
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (a->val_dont_use)
+ : "m" (v)
+ : // mark the mmx registers as clobbered
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_atomic_msvc.h b/lib/tsan/sanitizer_common/sanitizer_atomic_msvc.h
new file mode 100644
index 0000000000..6a7c5465dc
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_atomic_msvc.h
@@ -0,0 +1,256 @@
+//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_MSVC_H
+#define SANITIZER_ATOMIC_MSVC_H
+
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+extern "C" void _mm_mfence();
+#pragma intrinsic(_mm_mfence)
+extern "C" void _mm_pause();
+#pragma intrinsic(_mm_pause)
+extern "C" char _InterlockedExchange8(char volatile *Addend, char Value);
+#pragma intrinsic(_InterlockedExchange8)
+extern "C" short _InterlockedExchange16(short volatile *Addend, short Value);
+#pragma intrinsic(_InterlockedExchange16)
+extern "C" long _InterlockedExchange(long volatile *Addend, long Value);
+#pragma intrinsic(_InterlockedExchange)
+extern "C" long _InterlockedExchangeAdd(long volatile *Addend, long Value);
+#pragma intrinsic(_InterlockedExchangeAdd)
+extern "C" char _InterlockedCompareExchange8(char volatile *Destination,
+ char Exchange, char Comparand);
+#pragma intrinsic(_InterlockedCompareExchange8)
+extern "C" short _InterlockedCompareExchange16(short volatile *Destination,
+ short Exchange, short Comparand);
+#pragma intrinsic(_InterlockedCompareExchange16)
+extern "C" long long _InterlockedCompareExchange64(
+ long long volatile *Destination, long long Exchange, long long Comparand);
+#pragma intrinsic(_InterlockedCompareExchange64)
+extern "C" void *_InterlockedCompareExchangePointer(
+ void *volatile *Destination,
+ void *Exchange, void *Comparand);
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+extern "C" long __cdecl _InterlockedCompareExchange(long volatile *Destination,
+ long Exchange,
+ long Comparand);
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#ifdef _WIN64
+extern "C" long long _InterlockedExchangeAdd64(long long volatile *Addend,
+ long long Value);
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#endif
+
+namespace __sanitizer {
+
+INLINE void atomic_signal_fence(memory_order) {
+ _ReadWriteBarrier();
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ _mm_mfence();
+}
+
+INLINE void proc_yield(int cnt) {
+ for (int i = 0; i < cnt; i++)
+ _mm_pause();
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ v = a->val_dont_use;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ a->val_dont_use = v;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ if (mo == memory_order_seq_cst)
+ atomic_thread_fence(memory_order_seq_cst);
+}
+
+INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
+ (long)v);
+}
+
+INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
+ (long long)v);
+#else
+ return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
+ (long)v);
+#endif
+}
+
+INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
+ -(long)v);
+}
+
+INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
+ -(long long)v);
+#else
+ return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
+ -(long)v);
+#endif
+}
+
+INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
+ u8 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
+}
+
+INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
+ u16 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
+}
+
+INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
+ u8 *cmp,
+ u8 xchgv,
+ memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ u8 cmpv = *cmp;
+#ifdef _WIN64
+ u8 prev = (u8)_InterlockedCompareExchange8(
+ (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);
+#else
+ u8 prev;
+ __asm {
+ mov al, cmpv
+ mov ecx, a
+ mov dl, xchgv
+ lock cmpxchg [ecx], dl
+ mov prev, al
+ }
+#endif
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
+ uptr *cmp,
+ uptr xchg,
+ memory_order mo) {
+ uptr cmpv = *cmp;
+ uptr prev = (uptr)_InterlockedCompareExchangePointer(
+ (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+ u16 *cmp,
+ u16 xchg,
+ memory_order mo) {
+ u16 cmpv = *cmp;
+ u16 prev = (u16)_InterlockedCompareExchange16(
+ (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+ u32 *cmp,
+ u32 xchg,
+ memory_order mo) {
+ u32 cmpv = *cmp;
+ u32 prev = (u32)_InterlockedCompareExchange(
+ (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+ u64 *cmp,
+ u64 xchg,
+ memory_order mo) {
+ u64 cmpv = *cmp;
+ u64 prev = (u64)_InterlockedCompareExchange64(
+ (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_CLANG_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_bitvector.h b/lib/tsan/sanitizer_common/sanitizer_bitvector.h
new file mode 100644
index 0000000000..07a59ab11c
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_bitvector.h
@@ -0,0 +1,350 @@
+//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Specializer BitVector implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BITVECTOR_H
+#define SANITIZER_BITVECTOR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Fixed size bit vector based on a single basic integer.
+template <class basic_int_t = uptr>
+class BasicBitVector {
+ public:
+ enum SizeEnum : uptr { kSize = sizeof(basic_int_t) * 8 };
+
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() { bits_ = 0; }
+ void setAll() { bits_ = ~(basic_int_t)0; }
+ bool empty() const { return bits_ == 0; }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ |= mask(idx);
+ return bits_ != old;
+ }
+
+ // Returns true if the bit has changed from 1 to 0.
+ bool clearBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ &= ~mask(idx);
+ return bits_ != old;
+ }
+
+ bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }
+
+ uptr getAndClearFirstOne() {
+ CHECK(!empty());
+ uptr idx = LeastSignificantSetBitIndex(bits_);
+ clearBit(idx);
+ return idx;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ |= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= ~v.bits_;
+ return bits_ != old;
+ }
+
+ void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const BasicBitVector &v) const {
+ return (bits_ & v.bits_) != 0;
+ }
+
+ // for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
+ bool hasNext() const { return !bv_.empty(); }
+ uptr next() { return bv_.getAndClearFirstOne(); }
+ void clear() { bv_.clear(); }
+ private:
+ BasicBitVector bv_;
+ };
+
+ private:
+ basic_int_t mask(uptr idx) const {
+ CHECK_LT(idx, size());
+ return (basic_int_t)1UL << idx;
+ }
+ basic_int_t bits_;
+};
+
+// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.
+// The implementation is optimized for better performance on
+// sparse bit vectors, i.e. the those with few set bits.
+template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
+class TwoLevelBitVector {
+ // This is essentially a 2-level bit vector.
+ // Set bit in the first level BV indicates that there are set bits
+ // in the corresponding BV of the second level.
+ // This structure allows O(kLevel1Size) time for clear() and empty(),
+ // as well fast handling of sparse BVs.
+ public:
+ enum SizeEnum : uptr { kSize = BV::kSize * BV::kSize * kLevel1Size };
+ // No CTOR.
+
+ uptr size() const { return kSize; }
+
+ void clear() {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ l1_[i].clear();
+ }
+
+ void setAll() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ l1_[i0].setAll();
+ for (uptr i1 = 0; i1 < BV::kSize; i1++)
+ l2_[i0][i1].setAll();
+ }
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ if (!l1_[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ if (!l1_[i0].getBit(i1)) {
+ l1_[i0].setBit(i1);
+ l2_[i0][i1].clear();
+ }
+ bool res = l2_[i0][i1].setBit(i2);
+ // Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
+ // idx, i0, i1, i2, res);
+ return res;
+ }
+
+ bool clearBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ bool res = false;
+ if (l1_[i0].getBit(i1)) {
+ res = l2_[i0][i1].clearBit(i2);
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ return res;
+ }
+
+ bool getBit(uptr idx) const {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ // Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
+ return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
+ }
+
+ uptr getAndClearFirstOne() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].empty()) continue;
+ uptr i1 = l1_[i0].getAndClearFirstOne();
+ uptr i2 = l2_[i0][i1].getAndClearFirstOne();
+ if (!l2_[i0][i1].empty())
+ l1_[i0].setBit(i1);
+ uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
+ // Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
+ return res;
+ }
+ CHECK(0);
+ return 0;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = v.l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l1_[i0].setBit(i1))
+ l2_[i0][i1].clear();
+ if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].setIntersection(v.l1_[i0]))
+ res = true;
+ if (!l1_[i0].empty()) {
+ BV t = l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ return res;
+ }
+
+ void copyFrom(const TwoLevelBitVector &v) {
+ clear();
+ setUnion(v);
+ }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const TwoLevelBitVector &v) const {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (!v.l1_[i0].getBit(i1)) continue;
+ if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
+ it1_.clear();
+ it2_.clear();
+ }
+
+ bool hasNext() const {
+ if (it1_.hasNext()) return true;
+ for (uptr i = i0_; i < kLevel1Size; i++)
+ if (!bv_.l1_[i].empty()) return true;
+ return false;
+ }
+
+ uptr next() {
+ // Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ if (!it1_.hasNext() && !it2_.hasNext()) {
+ for (; i0_ < kLevel1Size; i0_++) {
+ if (bv_.l1_[i0_].empty()) continue;
+ it1_ = typename BV::Iterator(bv_.l1_[i0_]);
+ // Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ break;
+ }
+ }
+ if (!it2_.hasNext()) {
+ CHECK(it1_.hasNext());
+ i1_ = it1_.next();
+ it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
+ // Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ }
+ CHECK(it2_.hasNext());
+ uptr i2 = it2_.next();
+ uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
+ // Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
+ // it1_.hasNext(), it2_.hasNext(), kSize, res);
+ if (!it1_.hasNext() && !it2_.hasNext())
+ i0_++;
+ return res;
+ }
+
+ private:
+ const TwoLevelBitVector &bv_;
+ uptr i0_, i1_;
+ typename BV::Iterator it1_, it2_;
+ };
+
+ private:
+ void check(uptr idx) const { CHECK_LE(idx, size()); }
+
+ uptr idx0(uptr idx) const {
+ uptr res = idx / (BV::kSize * BV::kSize);
+ CHECK_LE(res, kLevel1Size);
+ return res;
+ }
+
+ uptr idx1(uptr idx) const {
+ uptr res = (idx / BV::kSize) % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ uptr idx2(uptr idx) const {
+ uptr res = idx % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ BV l1_[kLevel1Size];
+ BV l2_[kLevel1Size][BV::kSize];
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BITVECTOR_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_bvgraph.h b/lib/tsan/sanitizer_common/sanitizer_bvgraph.h
new file mode 100644
index 0000000000..e7249055be
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_bvgraph.h
@@ -0,0 +1,164 @@
+//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// BVGraph -- a directed graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BVGRAPH_H
+#define SANITIZER_BVGRAPH_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bitvector.h"
+
+namespace __sanitizer {
+
+// Directed graph of fixed size implemented as an array of bit vectors.
+// Not thread-safe, all accesses should be protected by an external lock.
+template<class BV>
+class BVGraph {
+ public:
+ enum SizeEnum : uptr { kSize = BV::kSize };
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() {
+ for (uptr i = 0; i < size(); i++)
+ v[i].clear();
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < size(); i++)
+ if (!v[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if a new edge was added.
+ bool addEdge(uptr from, uptr to) {
+ check(from, to);
+ return v[from].setBit(to);
+ }
+
+ // Returns true if at least one new edge was added.
+ uptr addEdges(const BV &from, uptr to, uptr added_edges[],
+ uptr max_added_edges) {
+ uptr res = 0;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr node = t1.getAndClearFirstOne();
+ if (v[node].setBit(to))
+ if (res < max_added_edges)
+ added_edges[res++] = node;
+ }
+ return res;
+ }
+
+ // *EXPERIMENTAL*
+ // Returns true if an edge from=>to exist.
+ // This function does not use any global state except for 'this' itself,
+ // and thus can be called from different threads w/o locking.
+ // This would be racy.
+ // FIXME: investigate how much we can prove about this race being "benign".
+ bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }
+
+ // Returns true if the edge from=>to was removed.
+ bool removeEdge(uptr from, uptr to) {
+ return v[from].clearBit(to);
+ }
+
+ // Returns true if at least one edge *=>to was removed.
+ bool removeEdgesTo(const BV &to) {
+ bool res = 0;
+ for (uptr from = 0; from < size(); from++) {
+ if (v[from].setDifference(to))
+ res = true;
+ }
+ return res;
+ }
+
+ // Returns true if at least one edge from=>* was removed.
+ bool removeEdgesFrom(const BV &from) {
+ bool res = false;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr idx = t1.getAndClearFirstOne();
+ if (!v[idx].empty()) {
+ v[idx].clear();
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ void removeEdgesFrom(uptr from) {
+ return v[from].clear();
+ }
+
+ bool hasEdge(uptr from, uptr to) const {
+ check(from, to);
+ return v[from].getBit(to);
+ }
+
+ // Returns true if there is a path from the node 'from'
+ // to any of the nodes in 'targets'.
+ bool isReachable(uptr from, const BV &targets) {
+ BV &to_visit = t1,
+ &visited = t2;
+ to_visit.copyFrom(v[from]);
+ visited.clear();
+ visited.setBit(from);
+ while (!to_visit.empty()) {
+ uptr idx = to_visit.getAndClearFirstOne();
+ if (visited.setBit(idx))
+ to_visit.setUnion(v[idx]);
+ }
+ return targets.intersectsWith(visited);
+ }
+
+ // Finds a path from 'from' to one of the nodes in 'target',
+ // stores up to 'path_size' items of the path into 'path',
+ // returns the path length, or 0 if there is no path of size 'path_size'.
+ uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {
+ if (path_size == 0)
+ return 0;
+ path[0] = from;
+ if (targets.getBit(from))
+ return 1;
+ // The function is recursive, so we don't want to create BV on stack.
+ // Instead of a getAndClearFirstOne loop we use the slower iterator.
+ for (typename BV::Iterator it(v[from]); it.hasNext(); ) {
+ uptr idx = it.next();
+ if (uptr res = findPath(idx, targets, path + 1, path_size - 1))
+ return res + 1;
+ }
+ return 0;
+ }
+
+ // Same as findPath, but finds a shortest path.
+ uptr findShortestPath(uptr from, const BV &targets, uptr *path,
+ uptr path_size) {
+ for (uptr p = 1; p <= path_size; p++)
+ if (findPath(from, targets, path, p) == p)
+ return p;
+ return 0;
+ }
+
+ private:
+ void check(uptr idx1, uptr idx2) const {
+ CHECK_LT(idx1, size());
+ CHECK_LT(idx2, size());
+ }
+ BV v[kSize];
+ // Keep temporary vectors here since we can not create large objects on stack.
+ BV t1, t2;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BVGRAPH_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_common.cpp b/lib/tsan/sanitizer_common/sanitizer_common.cpp
new file mode 100644
index 0000000000..87efda5bd3
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common.cpp
@@ -0,0 +1,348 @@
+//===-- sanitizer_common.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_interface.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+const char *SanitizerToolName = "SanitizerTool";
+
+atomic_uint32_t current_verbosity;
+uptr PageSizeCached;
+u32 NumberOfCPUsCached;
+
+// PID of the tracer task in StopTheWorld. It shares the address space with the
+// main process, but has a different PID and thus requires special handling.
+uptr stoptheworld_tracer_pid = 0;
+// Cached pid of parent process - if the parent process dies, we want to keep
+// writing to the same log file.
+uptr stoptheworld_tracer_ppid = 0;
+
+void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
+ const char *mmap_type, error_t err,
+ bool raw_report) {
+ static int recursion_count;
+ if (SANITIZER_RTEMS || raw_report || recursion_count) {
+ // If we are on RTEMS or raw report is requested or we went into recursion,
+ // just die. The Report() and CHECK calls below may call mmap recursively
+ // and fail.
+ RawWrite("ERROR: Failed to mmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report("ERROR: %s failed to "
+ "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ SanitizerToolName, mmap_type, size, size, mem_type, err);
+#if !SANITIZER_GO
+ DumpProcessMap();
+#endif
+ UNREACHABLE("unable to mmap");
+}
+
+typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
+typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
+
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_path_prefix) {
+ if (!filepath) return nullptr;
+ if (!strip_path_prefix) return filepath;
+ const char *res = filepath;
+ if (const char *pos = internal_strstr(filepath, strip_path_prefix))
+ res = pos + internal_strlen(strip_path_prefix);
+ if (res[0] == '.' && res[1] == '/')
+ res += 2;
+ return res;
+}
+
+const char *StripModuleName(const char *module) {
+ if (!module)
+ return nullptr;
+ if (SANITIZER_WINDOWS) {
+ // On Windows, both slash and backslash are possible.
+ // Pick the one that goes last.
+ if (const char *bslash_pos = internal_strrchr(module, '\\'))
+ return StripModuleName(bslash_pos + 1);
+ }
+ if (const char *slash_pos = internal_strrchr(module, '/')) {
+ return slash_pos + 1;
+ }
+ return module;
+}
+
+void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
+ if (!common_flags()->print_summary)
+ return;
+ InternalScopedString buff(kMaxSummaryLength);
+ buff.append("SUMMARY: %s: %s",
+ alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
+ __sanitizer_report_error_summary(buff.data());
+}
+
+// Removes the ANSI escape sequences from the input string (in-place).
+void RemoveANSIEscapeSequencesFromString(char *str) {
+ if (!str)
+ return;
+
+ // We are going to remove the escape sequences in place.
+ char *s = str;
+ char *z = str;
+ while (*s != '\0') {
+ CHECK_GE(s, z);
+ // Skip over ANSI escape sequences with pointer 's'.
+ if (*s == '\033' && *(s + 1) == '[') {
+ s = internal_strchrnul(s, 'm');
+ if (*s == '\0') {
+ break;
+ }
+ s++;
+ continue;
+ }
+ // 's' now points at a character we want to keep. Copy over the buffer
+ // content if the escape sequence has been perviously skipped andadvance
+ // both pointers.
+ if (s != z)
+ *z = *s;
+
+ // If we have not seen an escape sequence, just advance both pointers.
+ z++;
+ s++;
+ }
+
+ // Null terminate the string.
+ *z = '\0';
+}
+
+void LoadedModule::set(const char *module_name, uptr base_address) {
+ clear();
+ full_name_ = internal_strdup(module_name);
+ base_address_ = base_address;
+}
+
+void LoadedModule::set(const char *module_name, uptr base_address,
+ ModuleArch arch, u8 uuid[kModuleUUIDSize],
+ bool instrumented) {
+ set(module_name, base_address);
+ arch_ = arch;
+ internal_memcpy(uuid_, uuid, sizeof(uuid_));
+ instrumented_ = instrumented;
+}
+
+void LoadedModule::clear() {
+ InternalFree(full_name_);
+ base_address_ = 0;
+ max_executable_address_ = 0;
+ full_name_ = nullptr;
+ arch_ = kModuleArchUnknown;
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ instrumented_ = false;
+ while (!ranges_.empty()) {
+ AddressRange *r = ranges_.front();
+ ranges_.pop_front();
+ InternalFree(r);
+ }
+}
+
+void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
+ bool writable, const char *name) {
+ void *mem = InternalAlloc(sizeof(AddressRange));
+ AddressRange *r =
+ new(mem) AddressRange(beg, end, executable, writable, name);
+ ranges_.push_back(r);
+ if (executable && end > max_executable_address_)
+ max_executable_address_ = end;
+}
+
+bool LoadedModule::containsAddress(uptr address) const {
+ for (const AddressRange &r : ranges()) {
+ if (r.beg <= address && address < r.end)
+ return true;
+ }
+ return false;
+}
+
+static atomic_uintptr_t g_total_mmaped;
+
+void IncreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ uptr total_mmaped =
+ atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
+ // Since for now mmap_limit_mb is not a user-facing flag, just kill
+ // a program. Use RAW_CHECK to avoid extra mmaps in reporting.
+ RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
+}
+
+void DecreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
+}
+
+bool TemplateMatch(const char *templ, const char *str) {
+ if ((!str) || str[0] == 0)
+ return false;
+ bool start = false;
+ if (templ && templ[0] == '^') {
+ start = true;
+ templ++;
+ }
+ bool asterisk = false;
+ while (templ && templ[0]) {
+ if (templ[0] == '*') {
+ templ++;
+ start = false;
+ asterisk = true;
+ continue;
+ }
+ if (templ[0] == '$')
+ return str[0] == 0 || asterisk;
+ if (str[0] == 0)
+ return false;
+ char *tpos = (char*)internal_strchr(templ, '*');
+ char *tpos1 = (char*)internal_strchr(templ, '$');
+ if ((!tpos) || (tpos1 && tpos1 < tpos))
+ tpos = tpos1;
+ if (tpos)
+ tpos[0] = 0;
+ const char *str0 = str;
+ const char *spos = internal_strstr(str, templ);
+ str = spos + internal_strlen(templ);
+ templ = tpos;
+ if (tpos)
+ tpos[0] = tpos == tpos1 ? '$' : '*';
+ if (!spos)
+ return false;
+ if (start && spos != str0)
+ return false;
+ start = false;
+ asterisk = false;
+ }
+ return true;
+}
+
+static char binary_name_cache_str[kMaxPathLength];
+static char process_name_cache_str[kMaxPathLength];
+
+const char *GetProcessName() {
+ return process_name_cache_str;
+}
+
+static uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) {
+ ReadLongProcessName(buf, buf_len);
+ char *s = const_cast<char *>(StripModuleName(buf));
+ uptr len = internal_strlen(s);
+ if (s != buf) {
+ internal_memmove(buf, s, len);
+ buf[len] = '\0';
+ }
+ return len;
+}
+
+void UpdateProcessName() {
+ ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
+}
+
+// Call once to make sure that binary_name_cache_str is initialized
+void CacheBinaryName() {
+ if (binary_name_cache_str[0] != '\0')
+ return;
+ ReadBinaryName(binary_name_cache_str, sizeof(binary_name_cache_str));
+ ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
+}
+
+uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
+ CacheBinaryName();
+ uptr name_len = internal_strlen(binary_name_cache_str);
+ name_len = (name_len < buf_len - 1) ? name_len : buf_len - 1;
+ if (buf_len == 0)
+ return 0;
+ internal_memcpy(buf, binary_name_cache_str, name_len);
+ buf[name_len] = '\0';
+ return name_len;
+}
+
+#if !SANITIZER_GO
+void PrintCmdline() {
+ char **argv = GetArgv();
+ if (!argv) return;
+ Printf("\nCommand: ");
+ for (uptr i = 0; argv[i]; ++i)
+ Printf("%s ", argv[i]);
+ Printf("\n\n");
+}
+#endif
+
+// Malloc hooks.
+static const int kMaxMallocFreeHooks = 5;
+struct MallocFreeHook {
+ void (*malloc_hook)(const void *, uptr);
+ void (*free_hook)(const void *);
+};
+
+static MallocFreeHook MFHooks[kMaxMallocFreeHooks];
+
+void RunMallocHooks(const void *ptr, uptr size) {
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ auto hook = MFHooks[i].malloc_hook;
+ if (!hook) return;
+ hook(ptr, size);
+ }
+}
+
+void RunFreeHooks(const void *ptr) {
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ auto hook = MFHooks[i].free_hook;
+ if (!hook) return;
+ hook(ptr);
+ }
+}
+
+static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
+ void (*free_hook)(const void *)) {
+ if (!malloc_hook || !free_hook) return 0;
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ if (MFHooks[i].malloc_hook == nullptr) {
+ MFHooks[i].malloc_hook = malloc_hook;
+ MFHooks[i].free_hook = free_hook;
+ return i + 1;
+ }
+ }
+ return 0;
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+extern "C" {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary,
+ const char *error_summary) {
+ Printf("%s\n", error_summary);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_acquire_crash_state() {
+ static atomic_uint8_t in_crash_state = {};
+ return !atomic_exchange(&in_crash_state, 1, memory_order_relaxed);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
+ uptr),
+ void (*free_hook)(const void *)) {
+ return InstallMallocFreeHooks(malloc_hook, free_hook);
+}
+} // extern "C"
diff --git a/lib/tsan/sanitizer_common/sanitizer_common.h b/lib/tsan/sanitizer_common/sanitizer_common.h
new file mode 100644
index 0000000000..07b307a602
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common.h
@@ -0,0 +1,1002 @@
+//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+//
+// It declares common functions and classes that are used in both runtimes.
+// Implementation of some functions are provided in sanitizer_common, while
+// others must be defined by run-time library itself.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_H
+#define SANITIZER_COMMON_H
+
+#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+#endif
+
+namespace __sanitizer {
+
+struct AddressInfo;
+struct BufferedStackTrace;
+struct SignalContext;
+struct StackTrace;
+
+// Constants.
+const uptr kWordSize = SANITIZER_WORDSIZE / 8;
+const uptr kWordSizeInBits = 8 * kWordSize;
+
+const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;
+
+const uptr kMaxPathLength = 4096;
+
+const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
+
+static const uptr kErrorMessageBufferSize = 1 << 16;
+
+// Denotes fake PC values that come from JIT/JAVA/etc.
+// For such PC values __tsan_symbolize_external_ex() will be called.
+const u64 kExternalPCBit = 1ULL << 60;
+
+extern const char *SanitizerToolName; // Can be changed by the tool.
+
+extern atomic_uint32_t current_verbosity;
+INLINE void SetVerbosity(int verbosity) {
+ atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
+}
+INLINE int Verbosity() {
+ return atomic_load(&current_verbosity, memory_order_relaxed);
+}
+
+#if SANITIZER_ANDROID
+INLINE uptr GetPageSize() {
+// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
+ return 4096;
+}
+INLINE uptr GetPageSizeCached() {
+ return 4096;
+}
+#else
+uptr GetPageSize();
+extern uptr PageSizeCached;
+INLINE uptr GetPageSizeCached() {
+ if (!PageSizeCached)
+ PageSizeCached = GetPageSize();
+ return PageSizeCached;
+}
+#endif
+uptr GetMmapGranularity();
+uptr GetMaxVirtualAddress();
+uptr GetMaxUserVirtualAddress();
+// Threads
+tid_t GetTid();
+int TgKill(pid_t pid, tid_t tid, int sig);
+uptr GetThreadSelf();
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom);
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size);
+
+// Memory management
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
+INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type, /*raw_report*/ true);
+}
+void UnmapOrDie(void *addr, uptr size);
+// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
+// case returns nullptr.
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
+ WARN_UNUSED_RESULT;
+bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
+ const char *name = nullptr) WARN_UNUSED_RESULT;
+void *MmapNoReserveOrDie(uptr size, const char *mem_type);
+void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
+// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
+// that case returns nullptr.
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
+ const char *name = nullptr);
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
+void *MmapNoAccess(uptr size);
+// Map aligned chunk of address space; size and alignment are powers of two.
+// Dies on all but out of memory errors, in the latter case returns nullptr.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type);
+// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
+// unaccessible memory.
+bool MprotectNoAccess(uptr addr, uptr size);
+bool MprotectReadOnly(uptr addr, uptr size);
+
+void MprotectMallocZones(void *addr, int prot);
+
+// Find an available address space.
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found, uptr *max_occupied_addr);
+
+// Used to check if we can map shadow memory to a fixed location.
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+// Releases memory pages entirely within the [beg, end] address range. Noop if
+// the provided range does not contain at least one entire page.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end);
+void IncreaseTotalMmap(uptr size);
+void DecreaseTotalMmap(uptr size);
+uptr GetRSS();
+void SetShadowRegionHugePageMode(uptr addr, uptr length);
+bool DontDumpShadowMemory(uptr addr, uptr length);
+// Check if the built VMA size matches the runtime one.
+void CheckVMASize();
+void RunMallocHooks(const void *ptr, uptr size);
+void RunFreeHooks(const void *ptr);
+
+class ReservedAddressRange {
+ public:
+ uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
+ uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
+ uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
+ uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
+ void Unmap(uptr addr, uptr size);
+ void *base() const { return base_; }
+ uptr size() const { return size_; }
+
+ private:
+ void* base_;
+ uptr size_;
+ const char* name_;
+ uptr os_handle_;
+};
+
+typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
+ /*out*/uptr *stats, uptr stats_size);
+
+// Parse the contents of /proc/self/smaps and generate a memory profile.
+// |cb| is a tool-specific callback that fills the |stats| array containing
+// |stats_size| elements.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
+
+// Simple low-level (mmap-based) allocator for internal use. Doesn't have
+// constructor, so all instances of LowLevelAllocator should be
+// linker initialized.
+class LowLevelAllocator {
+ public:
+ // Requires an external lock.
+ void *Allocate(uptr size);
+ private:
+ char *allocated_end_;
+ char *allocated_current_;
+};
+// Set the min alignment of LowLevelAllocator to at least alignment.
+void SetLowLevelAllocateMinAlignment(uptr alignment);
+typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
+// Allows to register tool-specific callbacks for LowLevelAllocator.
+// Passing NULL removes the callback.
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
+
+// IO
+void CatastrophicErrorWrite(const char *buffer, uptr length);
+void RawWrite(const char *buffer);
+bool ColorizeReports();
+void RemoveANSIEscapeSequencesFromString(char *buffer);
+void Printf(const char *format, ...);
+void Report(const char *format, ...);
+void SetPrintfAndReportCallback(void (*callback)(const char *));
+#define VReport(level, ...) \
+ do { \
+ if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
+ } while (0)
+#define VPrintf(level, ...) \
+ do { \
+ if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
+ } while (0)
+
+// Lock sanitizer error reporting and protects against nested errors.
+class ScopedErrorReportLock {
+ public:
+ ScopedErrorReportLock();
+ ~ScopedErrorReportLock();
+
+ static void CheckLocked();
+};
+
+extern uptr stoptheworld_tracer_pid;
+extern uptr stoptheworld_tracer_ppid;
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size);
+
+// Error report formatting.
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_file_prefix);
+// Strip the directories from the module name.
+const char *StripModuleName(const char *module);
+
+// OS
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
+uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
+const char *GetProcessName();
+void UpdateProcessName();
+void CacheBinaryName();
+void DisableCoreDumperIfNecessary();
+void DumpProcessMap();
+void PrintModuleMap();
+const char *GetEnv(const char *name);
+bool SetEnv(const char *name, const char *value);
+
+u32 GetUid();
+void ReExec();
+void CheckASLR();
+void CheckMPROTECT();
+char **GetArgv();
+char **GetEnviron();
+void PrintCmdline();
+bool StackSizeIsUnlimited();
+void SetStackSizeLimitInBytes(uptr limit);
+bool AddressSpaceIsUnlimited();
+void SetAddressSpaceUnlimited();
+void AdjustStackSize(void *attr);
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void SetSandboxingCallback(void (*f)());
+
+void InitializeCoverage(bool enabled, const char *coverage_dir);
+
+void InitTlsSize();
+uptr GetTlsSize();
+
+// Other
+void SleepForSeconds(int seconds);
+void SleepForMillis(int millis);
+u64 NanoTime();
+u64 MonotonicNanoTime();
+int Atexit(void (*function)(void));
+bool TemplateMatch(const char *templ, const char *str);
+
+// Exit
+void NORETURN Abort();
+void NORETURN Die();
+void NORETURN
+CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
+void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
+ const char *mmap_type, error_t err,
+ bool raw_report = false);
+
+// Specific tools may override behavior of "Die" and "CheckFailed" functions
+// to do tool-specific job.
+typedef void (*DieCallbackType)(void);
+
+// It's possible to add several callbacks that would be run when "Die" is
+// called. The callbacks will be run in the opposite order. The tools are
+// strongly recommended to setup all callbacks during initialization, when there
+// is only a single thread.
+bool AddDieCallback(DieCallbackType callback);
+bool RemoveDieCallback(DieCallbackType callback);
+
+void SetUserDieCallback(DieCallbackType callback);
+
+typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
+ u64, u64);
+void SetCheckFailedCallback(CheckFailedCallbackType callback);
+
+// Callback will be called if soft_rss_limit_mb is given and the limit is
+// exceeded (exceeded==true) or if rss went down below the limit
+// (exceeded==false).
+// The callback should be registered once at the tool init time.
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
+
+// Functions related to signal handling.
+typedef void (*SignalHandlerType)(int, void *, void *);
+HandleSignalMode GetHandleSignalMode(int signum);
+void InstallDeadlySignalHandlers(SignalHandlerType handler);
+
+// Signal reporting.
+// Each sanitizer uses slightly different implementation of stack unwinding.
+typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
+ const void *callback_context,
+ BufferedStackTrace *stack);
+// Print deadly signal report and die.
+void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context);
+
+// Part of HandleDeadlySignal, exposed for asan.
+void StartReportDeadlySignal();
+// Part of HandleDeadlySignal, exposed for asan.
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context);
+
+// Alternative signal stack (POSIX-only).
+void SetAlternateSignalStack();
+void UnsetAlternateSignalStack();
+
+// We don't want a summary too long.
+const int kMaxSummaryLength = 1024;
+// Construct a one-line string:
+// SUMMARY: SanitizerToolName: error_message
+// and pass it to __sanitizer_report_error_summary.
+// If alt_tool_name is provided, it's used in place of SanitizerToolName.
+void ReportErrorSummary(const char *error_message,
+ const char *alt_tool_name = nullptr);
+// Same as above, but construct error_message as:
+// error_type file:line[:column][ function]
+void ReportErrorSummary(const char *error_type, const AddressInfo &info,
+ const char *alt_tool_name = nullptr);
+// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
+void ReportErrorSummary(const char *error_type, const StackTrace *trace,
+ const char *alt_tool_name = nullptr);
+
+void ReportMmapWriteExec(int prot);
+
+// Math
+#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
+extern "C" {
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
+#if defined(_WIN64)
+unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
+unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
+#endif
+}
+#endif
+
+INLINE uptr MostSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up;
+#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+# ifdef _WIN64
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
+# else
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
+# endif
+#elif defined(_WIN64)
+ _BitScanReverse64(&up, x);
+#else
+ _BitScanReverse(&up, x);
+#endif
+ return up;
+}
+
+INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up;
+#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+# ifdef _WIN64
+ up = __builtin_ctzll(x);
+# else
+ up = __builtin_ctzl(x);
+# endif
+#elif defined(_WIN64)
+ _BitScanForward64(&up, x);
+#else
+ _BitScanForward(&up, x);
+#endif
+ return up;
+}
+
+INLINE bool IsPowerOfTwo(uptr x) {
+ return (x & (x - 1)) == 0;
+}
+
+INLINE uptr RoundUpToPowerOfTwo(uptr size) {
+ CHECK(size);
+ if (IsPowerOfTwo(size)) return size;
+
+ uptr up = MostSignificantSetBitIndex(size);
+ CHECK_LT(size, (1ULL << (up + 1)));
+ CHECK_GT(size, (1ULL << up));
+ return 1ULL << (up + 1);
+}
+
+INLINE uptr RoundUpTo(uptr size, uptr boundary) {
+ RAW_CHECK(IsPowerOfTwo(boundary));
+ return (size + boundary - 1) & ~(boundary - 1);
+}
+
+INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+ return x & ~(boundary - 1);
+}
+
+INLINE bool IsAligned(uptr a, uptr alignment) {
+ return (a & (alignment - 1)) == 0;
+}
+
+INLINE uptr Log2(uptr x) {
+ CHECK(IsPowerOfTwo(x));
+ return LeastSignificantSetBitIndex(x);
+}
+
+// Don't use std::min, std::max or std::swap, to minimize dependency
+// on libstdc++.
+template<class T> T Min(T a, T b) { return a < b ? a : b; }
+template<class T> T Max(T a, T b) { return a > b ? a : b; }
+template<class T> void Swap(T& a, T& b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
+// Char handling
+INLINE bool IsSpace(int c) {
+ return (c == ' ') || (c == '\n') || (c == '\t') ||
+ (c == '\f') || (c == '\r') || (c == '\v');
+}
+INLINE bool IsDigit(int c) {
+ return (c >= '0') && (c <= '9');
+}
+INLINE int ToLower(int c) {
+ return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
+}
+
+// A low-level vector based on mmap. May incur a significant memory overhead for
+// small vectors.
+// WARNING: The current implementation supports only POD types.
+template<typename T>
+class InternalMmapVectorNoCtor {
+ public:
+ void Initialize(uptr initial_capacity) {
+ capacity_bytes_ = 0;
+ size_ = 0;
+ data_ = 0;
+ reserve(initial_capacity);
+ }
+ void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
+ T &operator[](uptr i) {
+ CHECK_LT(i, size_);
+ return data_[i];
+ }
+ const T &operator[](uptr i) const {
+ CHECK_LT(i, size_);
+ return data_[i];
+ }
+ void push_back(const T &element) {
+ CHECK_LE(size_, capacity());
+ if (size_ == capacity()) {
+ uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
+ Realloc(new_capacity);
+ }
+ internal_memcpy(&data_[size_++], &element, sizeof(T));
+ }
+ T &back() {
+ CHECK_GT(size_, 0);
+ return data_[size_ - 1];
+ }
+ void pop_back() {
+ CHECK_GT(size_, 0);
+ size_--;
+ }
+ uptr size() const {
+ return size_;
+ }
+ const T *data() const {
+ return data_;
+ }
+ T *data() {
+ return data_;
+ }
+ uptr capacity() const { return capacity_bytes_ / sizeof(T); }
+ void reserve(uptr new_size) {
+ // Never downsize internal buffer.
+ if (new_size > capacity())
+ Realloc(new_size);
+ }
+ void resize(uptr new_size) {
+ if (new_size > size_) {
+ reserve(new_size);
+ internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
+ }
+ size_ = new_size;
+ }
+
+ void clear() { size_ = 0; }
+ bool empty() const { return size() == 0; }
+
+ const T *begin() const {
+ return data();
+ }
+ T *begin() {
+ return data();
+ }
+ const T *end() const {
+ return data() + size();
+ }
+ T *end() {
+ return data() + size();
+ }
+
+ void swap(InternalMmapVectorNoCtor &other) {
+ Swap(data_, other.data_);
+ Swap(capacity_bytes_, other.capacity_bytes_);
+ Swap(size_, other.size_);
+ }
+
+ private:
+ void Realloc(uptr new_capacity) {
+ CHECK_GT(new_capacity, 0);
+ CHECK_LE(size_, new_capacity);
+ uptr new_capacity_bytes =
+ RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
+ T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
+ internal_memcpy(new_data, data_, size_ * sizeof(T));
+ UnmapOrDie(data_, capacity_bytes_);
+ data_ = new_data;
+ capacity_bytes_ = new_capacity_bytes;
+ }
+
+ T *data_;
+ uptr capacity_bytes_;
+ uptr size_;
+};
+
+template <typename T>
+bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
+ const InternalMmapVectorNoCtor<T> &rhs) {
+ if (lhs.size() != rhs.size()) return false;
+ return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
+}
+
+template <typename T>
+bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
+ const InternalMmapVectorNoCtor<T> &rhs) {
+ return !(lhs == rhs);
+}
+
+template<typename T>
+class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
+ public:
+ InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
+ explicit InternalMmapVector(uptr cnt) {
+ InternalMmapVectorNoCtor<T>::Initialize(cnt);
+ this->resize(cnt);
+ }
+ ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
+ // Disallow copies and moves.
+ InternalMmapVector(const InternalMmapVector &) = delete;
+ InternalMmapVector &operator=(const InternalMmapVector &) = delete;
+ InternalMmapVector(InternalMmapVector &&) = delete;
+ InternalMmapVector &operator=(InternalMmapVector &&) = delete;
+};
+
+class InternalScopedString : public InternalMmapVector<char> {
+ public:
+ explicit InternalScopedString(uptr max_length)
+ : InternalMmapVector<char>(max_length), length_(0) {
+ (*this)[0] = '\0';
+ }
+ uptr length() { return length_; }
+ void clear() {
+ (*this)[0] = '\0';
+ length_ = 0;
+ }
+ void append(const char *format, ...);
+
+ private:
+ uptr length_;
+};
+
+template <class T>
+struct CompareLess {
+ bool operator()(const T &a, const T &b) const { return a < b; }
+};
+
+// HeapSort for arrays and InternalMmapVector.
+template <class T, class Compare = CompareLess<T>>
+void Sort(T *v, uptr size, Compare comp = {}) {
+ if (size < 2)
+ return;
+ // Stage 1: insert elements to the heap.
+ for (uptr i = 1; i < size; i++) {
+ uptr j, p;
+ for (j = i; j > 0; j = p) {
+ p = (j - 1) / 2;
+ if (comp(v[p], v[j]))
+ Swap(v[j], v[p]);
+ else
+ break;
+ }
+ }
+ // Stage 2: swap largest element with the last one,
+ // and sink the new top.
+ for (uptr i = size - 1; i > 0; i--) {
+ Swap(v[0], v[i]);
+ uptr j, max_ind;
+ for (j = 0; j < i; j = max_ind) {
+ uptr left = 2 * j + 1;
+ uptr right = 2 * j + 2;
+ max_ind = j;
+ if (left < i && comp(v[max_ind], v[left]))
+ max_ind = left;
+ if (right < i && comp(v[max_ind], v[right]))
+ max_ind = right;
+ if (max_ind != j)
+ Swap(v[j], v[max_ind]);
+ else
+ break;
+ }
+ }
+}
+
+// Works like std::lower_bound: finds the first element that is not less
+// than the val.
+template <class Container, class Value, class Compare>
+uptr InternalLowerBound(const Container &v, uptr first, uptr last,
+ const Value &val, Compare comp) {
+ while (last > first) {
+ uptr mid = (first + last) / 2;
+ if (comp(v[mid], val))
+ first = mid + 1;
+ else
+ last = mid;
+ }
+ return first;
+}
+
+enum ModuleArch {
+ kModuleArchUnknown,
+ kModuleArchI386,
+ kModuleArchX86_64,
+ kModuleArchX86_64H,
+ kModuleArchARMV6,
+ kModuleArchARMV7,
+ kModuleArchARMV7S,
+ kModuleArchARMV7K,
+ kModuleArchARM64
+};
+
+// Opens the file 'file_name" and reads up to 'max_len' bytes.
+// The resulting buffer is mmaped and stored in '*buff'.
+// Returns true if file was successfully opened and read.
+bool ReadFileToVector(const char *file_name,
+ InternalMmapVectorNoCtor<char> *buff,
+ uptr max_len = 1 << 26, error_t *errno_p = nullptr);
+
+// Opens the file 'file_name" and reads up to 'max_len' bytes.
+// This function is less I/O efficient than ReadFileToVector as it may reread
+// file multiple times to avoid mmap during read attempts. It's used to read
+// procmap, so short reads with mmap in between can produce inconsistent result.
+// The resulting buffer is mmaped and stored in '*buff'.
+// The size of the mmaped region is stored in '*buff_size'.
+// The total number of read bytes is stored in '*read_len'.
+// Returns true if file was successfully opened and read.
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len = 1 << 26,
+ error_t *errno_p = nullptr);
+
+// When adding a new architecture, don't forget to also update
+// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
+inline const char *ModuleArchToString(ModuleArch arch) {
+ switch (arch) {
+ case kModuleArchUnknown:
+ return "";
+ case kModuleArchI386:
+ return "i386";
+ case kModuleArchX86_64:
+ return "x86_64";
+ case kModuleArchX86_64H:
+ return "x86_64h";
+ case kModuleArchARMV6:
+ return "armv6";
+ case kModuleArchARMV7:
+ return "armv7";
+ case kModuleArchARMV7S:
+ return "armv7s";
+ case kModuleArchARMV7K:
+ return "armv7k";
+ case kModuleArchARM64:
+ return "arm64";
+ }
+ CHECK(0 && "Invalid module arch");
+ return "";
+}
+
+const uptr kModuleUUIDSize = 16;
+const uptr kMaxSegName = 16;
+
+// Represents a binary loaded into virtual memory (e.g. this can be an
+// executable or a shared object).
+class LoadedModule {
+ public:
+ LoadedModule()
+ : full_name_(nullptr),
+ base_address_(0),
+ max_executable_address_(0),
+ arch_(kModuleArchUnknown),
+ instrumented_(false) {
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ ranges_.clear();
+ }
+ void set(const char *module_name, uptr base_address);
+ void set(const char *module_name, uptr base_address, ModuleArch arch,
+ u8 uuid[kModuleUUIDSize], bool instrumented);
+ void clear();
+ void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
+ const char *name = nullptr);
+ bool containsAddress(uptr address) const;
+
+ const char *full_name() const { return full_name_; }
+ uptr base_address() const { return base_address_; }
+ uptr max_executable_address() const { return max_executable_address_; }
+ ModuleArch arch() const { return arch_; }
+ const u8 *uuid() const { return uuid_; }
+ bool instrumented() const { return instrumented_; }
+
+ struct AddressRange {
+ AddressRange *next;
+ uptr beg;
+ uptr end;
+ bool executable;
+ bool writable;
+ char name[kMaxSegName];
+
+ AddressRange(uptr beg, uptr end, bool executable, bool writable,
+ const char *name)
+ : next(nullptr),
+ beg(beg),
+ end(end),
+ executable(executable),
+ writable(writable) {
+ internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
+ }
+ };
+
+ const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
+
+ private:
+ char *full_name_; // Owned.
+ uptr base_address_;
+ uptr max_executable_address_;
+ ModuleArch arch_;
+ u8 uuid_[kModuleUUIDSize];
+ bool instrumented_;
+ IntrusiveList<AddressRange> ranges_;
+};
+
+// List of LoadedModules. OS-dependent implementation is responsible for
+// filling this information.
+class ListOfModules {
+ public:
+ ListOfModules() : initialized(false) {}
+ ~ListOfModules() { clear(); }
+ void init();
+ void fallbackInit(); // Uses fallback init if available, otherwise clears
+ const LoadedModule *begin() const { return modules_.begin(); }
+ LoadedModule *begin() { return modules_.begin(); }
+ const LoadedModule *end() const { return modules_.end(); }
+ LoadedModule *end() { return modules_.end(); }
+ uptr size() const { return modules_.size(); }
+ const LoadedModule &operator[](uptr i) const {
+ CHECK_LT(i, modules_.size());
+ return modules_[i];
+ }
+
+ private:
+ void clear() {
+ for (auto &module : modules_) module.clear();
+ modules_.clear();
+ }
+ void clearOrInit() {
+ initialized ? clear() : modules_.Initialize(kInitialCapacity);
+ initialized = true;
+ }
+
+ InternalMmapVectorNoCtor<LoadedModule> modules_;
+ // We rarely have more than 16K loaded modules.
+ static const uptr kInitialCapacity = 1 << 14;
+ bool initialized;
+};
+
+// Callback type for iterating over a set of memory ranges.
+typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
+
+enum AndroidApiLevel {
+ ANDROID_NOT_ANDROID = 0,
+ ANDROID_KITKAT = 19,
+ ANDROID_LOLLIPOP_MR1 = 22,
+ ANDROID_POST_LOLLIPOP = 23
+};
+
+void WriteToSyslog(const char *buffer);
+
+#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
+#define SANITIZER_WIN_TRACE 1
+#else
+#define SANITIZER_WIN_TRACE 0
+#endif
+
+#if SANITIZER_MAC || SANITIZER_WIN_TRACE
+void LogFullErrorReport(const char *buffer);
+#else
+INLINE void LogFullErrorReport(const char *buffer) {}
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+void WriteOneLineToSyslog(const char *s);
+void LogMessageOnPrintf(const char *str);
+#else
+INLINE void WriteOneLineToSyslog(const char *s) {}
+INLINE void LogMessageOnPrintf(const char *str) {}
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
+// Initialize Android logging. Any writes before this are silently lost.
+void AndroidLogInit();
+void SetAbortMessage(const char *);
+#else
+INLINE void AndroidLogInit() {}
+// FIXME: MacOS implementation could use CRSetCrashLogMessage.
+INLINE void SetAbortMessage(const char *) {}
+#endif
+
+#if SANITIZER_ANDROID
+void SanitizerInitializeUnwinder();
+AndroidApiLevel AndroidGetApiLevel();
+#else
+INLINE void AndroidLogWrite(const char *buffer_unused) {}
+INLINE void SanitizerInitializeUnwinder() {}
+INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
+#endif
+
+INLINE uptr GetPthreadDestructorIterations() {
+#if SANITIZER_ANDROID
+ return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
+#elif SANITIZER_POSIX
+ return 4;
+#else
+// Unused on Windows.
+ return 0;
+#endif
+}
+
+void *internal_start_thread(void *(*func)(void*), void *arg);
+void internal_join_thread(void *th);
+void MaybeStartBackgroudThread();
+
+// Make the compiler think that something is going on there.
+// Use this inside a loop that looks like memset/memcpy/etc to prevent the
+// compiler from recognising it and turning it into an actual call to
+// memset/memcpy/etc.
+static inline void SanitizerBreakOptimization(void *arg) {
+#if defined(_MSC_VER) && !defined(__clang__)
+ _ReadWriteBarrier();
+#else
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+#endif
+}
+
+struct SignalContext {
+ void *siginfo;
+ void *context;
+ uptr addr;
+ uptr pc;
+ uptr sp;
+ uptr bp;
+ bool is_memory_access;
+ enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
+
+ // In some cases the kernel cannot provide the true faulting address; `addr`
+ // will be zero then. This field allows to distinguish between these cases
+ // and dereferences of null.
+ bool is_true_faulting_addr;
+
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ SignalContext() = default;
+
+ // Creates signal context in a platform-specific manner.
+ // SignalContext is going to keep pointers to siginfo and context without
+ // owning them.
+ SignalContext(void *siginfo, void *context)
+ : siginfo(siginfo),
+ context(context),
+ addr(GetAddress()),
+ is_memory_access(IsMemoryAccess()),
+ write_flag(GetWriteFlag()),
+ is_true_faulting_addr(IsTrueFaultingAddress()) {
+ InitPcSpBp();
+ }
+
+ static void DumpAllRegisters(void *context);
+
+ // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
+ int GetType() const;
+
+ // String description of the signal.
+ const char *Describe() const;
+
+ // Returns true if signal is stack overflow.
+ bool IsStackOverflow() const;
+
+ private:
+ // Platform specific initialization.
+ void InitPcSpBp();
+ uptr GetAddress() const;
+ WriteFlag GetWriteFlag() const;
+ bool IsMemoryAccess() const;
+ bool IsTrueFaultingAddress() const;
+};
+
+void InitializePlatformEarly();
+void MaybeReexec();
+
+template <typename Fn>
+class RunOnDestruction {
+ public:
+ explicit RunOnDestruction(Fn fn) : fn_(fn) {}
+ ~RunOnDestruction() { fn_(); }
+
+ private:
+ Fn fn_;
+};
+
+// A simple scope guard. Usage:
+// auto cleanup = at_scope_exit([]{ do_cleanup; });
+template <typename Fn>
+RunOnDestruction<Fn> at_scope_exit(Fn fn) {
+ return RunOnDestruction<Fn>(fn);
+}
+
+// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
+// if a process uses virtual memory over 4TB (as many sanitizers like
+// to do). This function will abort the process if running on a kernel
+// that looks vulnerable.
+#if SANITIZER_LINUX && SANITIZER_S390_64
+void AvoidCVE_2016_2143();
+#else
+INLINE void AvoidCVE_2016_2143() {}
+#endif
+
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
+// The default value for allocator_release_to_os_interval_ms common flag to
+// indicate that sanitizer allocator should not attempt to release memory to OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
+void CheckNoDeepBind(const char *filename, int flag);
+
+// Returns the requested amount of random data (up to 256 bytes) that can then
+// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
+bool GetRandom(void *buffer, uptr length, bool blocking = true);
+
+// Returns the number of logical processors on the system.
+u32 GetNumberOfCPUs();
+extern u32 NumberOfCPUsCached;
+INLINE u32 GetNumberOfCPUsCached() {
+ if (!NumberOfCPUsCached)
+ NumberOfCPUsCached = GetNumberOfCPUs();
+ return NumberOfCPUsCached;
+}
+
+template <typename T>
+class ArrayRef {
+ public:
+ ArrayRef() {}
+ ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
+
+ T *begin() { return begin_; }
+ T *end() { return end_; }
+
+ private:
+ T *begin_ = nullptr;
+ T *end_ = nullptr;
+};
+
+} // namespace __sanitizer
+
+inline void *operator new(__sanitizer::operator_new_size_type size,
+ __sanitizer::LowLevelAllocator &alloc) { // NOLINT
+ return alloc.Allocate(size);
+}
+
+#endif // SANITIZER_COMMON_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_common_interceptors.inc b/lib/tsan/sanitizer_common/sanitizer_common_interceptors.inc
new file mode 100644
index 0000000000..d7e0bba762
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common_interceptors.inc
@@ -0,0 +1,10175 @@
+//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_ENTER_NOIGNORE
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_INITIALIZE_RANGE
+// COMMON_INTERCEPTOR_DIR_ACQUIRE
+// COMMON_INTERCEPTOR_FD_ACQUIRE
+// COMMON_INTERCEPTOR_FD_RELEASE
+// COMMON_INTERCEPTOR_FD_ACCESS
+// COMMON_INTERCEPTOR_SET_THREAD_NAME
+// COMMON_INTERCEPTOR_ON_DLOPEN
+// COMMON_INTERCEPTOR_ON_EXIT
+// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
+// COMMON_INTERCEPTOR_MUTEX_UNLOCK
+// COMMON_INTERCEPTOR_MUTEX_REPAIR
+// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
+// COMMON_INTERCEPTOR_HANDLE_RECVMSG
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
+// COMMON_INTERCEPTOR_MMAP_IMPL
+// COMMON_INTERCEPTOR_COPY_STRING
+// COMMON_INTERCEPTOR_STRNDUP_IMPL
+// COMMON_INTERCEPTOR_STRERROR
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_addrhashmap.h"
+#include "sanitizer_errno.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_interceptors.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_tls_get_addr.h"
+
+#include <stdarg.h>
+
+#if SANITIZER_INTERCEPTOR_HOOKS
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
+ SANITIZER_INTERFACE_WEAK_DEF(void, f, __VA_ARGS__) {}
+#else
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)
+
+#endif // SANITIZER_INTERCEPTOR_HOOKS
+
+#if SANITIZER_WINDOWS && !defined(va_copy)
+#define va_copy(dst, src) ((dst) = (src))
+#endif // _WIN32
+
+#if SANITIZER_FREEBSD
+#define pthread_setname_np pthread_set_name_np
+#define inet_aton __inet_aton
+#define inet_pton __inet_pton
+#define iconv __bsd_iconv
+#endif
+
+#if SANITIZER_NETBSD
+#define clock_getres __clock_getres50
+#define clock_gettime __clock_gettime50
+#define clock_settime __clock_settime50
+#define ctime __ctime50
+#define ctime_r __ctime_r50
+#define devname __devname50
+#define fgetpos __fgetpos50
+#define fsetpos __fsetpos50
+#define fstatvfs __fstatvfs90
+#define fstatvfs1 __fstatvfs190
+#define fts_children __fts_children60
+#define fts_close __fts_close60
+#define fts_open __fts_open60
+#define fts_read __fts_read60
+#define fts_set __fts_set60
+#define getitimer __getitimer50
+#define getmntinfo __getmntinfo90
+#define getpwent __getpwent50
+#define getpwnam __getpwnam50
+#define getpwnam_r __getpwnam_r50
+#define getpwuid __getpwuid50
+#define getpwuid_r __getpwuid_r50
+#define getutent __getutent50
+#define getutxent __getutxent50
+#define getutxid __getutxid50
+#define getutxline __getutxline50
+#define getvfsstat __getvfsstat90
+#define pututxline __pututxline50
+#define glob __glob30
+#define gmtime __gmtime50
+#define gmtime_r __gmtime_r50
+#define localtime __locatime50
+#define localtime_r __localtime_r50
+#define mktime __mktime50
+#define lstat __lstat50
+#define opendir __opendir30
+#define readdir __readdir30
+#define readdir_r __readdir_r30
+#define scandir __scandir30
+#define setitimer __setitimer50
+#define setlocale __setlocale50
+#define shmctl __shmctl50
+#define sigaltstack __sigaltstack14
+#define sigemptyset __sigemptyset14
+#define sigfillset __sigfillset14
+#define sigpending __sigpending14
+#define sigprocmask __sigprocmask14
+#define sigtimedwait __sigtimedwait50
+#define stat __stat50
+#define statvfs __statvfs90
+#define statvfs1 __statvfs190
+#define time __time50
+#define times __times13
+#define unvis __unvis50
+#define wait3 __wait350
+#define wait4 __wait450
+extern const unsigned short *_ctype_tab_;
+extern const short *_toupper_tab_;
+extern const short *_tolower_tab_;
+#endif
+
+// Platform-specific options.
+#if SANITIZER_MAC
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#endif // SANITIZER_MAC
+
+#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_UNPOISON_PARAM
+#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FD_ACCESS
+#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
+#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_REPAIR
+#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_INVALID
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FILE_OPEN
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FILE_CLOSE
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ENTER_NOIGNORE
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, ...) \
+ COMMON_INTERCEPTOR_ENTER(ctx, __VA_ARGS__)
+#endif
+
+#ifndef COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
+#endif
+
+#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
+ common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
+
+#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ CheckNoDeepBind(filename, flag);
+#endif
+
+#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) *begin = *end = 0;
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ACQUIRE
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_RELEASE
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_START
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_END
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() {}
+#endif
+
+#ifdef SANITIZER_NLDBL_VERSION
+#define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \
+ COMMON_INTERCEPT_FUNCTION_VER(fn, SANITIZER_NLDBL_VERSION)
+#else
+#define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \
+ COMMON_INTERCEPT_FUNCTION(fn)
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MMAP_IMPL
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
+ off) \
+ { return REAL(mmap)(addr, sz, prot, flags, fd, off); }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_COPY_STRING
+#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL
+#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size) \
+ COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size); \
+ uptr copy_length = internal_strnlen(s, size); \
+ char *new_mem = (char *)WRAP(malloc)(copy_length + 1); \
+ if (common_flags()->intercept_strndup) { \
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \
+ } \
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
+ internal_memcpy(new_mem, s, copy_length); \
+ new_mem[copy_length] = '\0'; \
+ return new_mem;
+#endif
+
+#ifndef COMMON_INTERCEPTOR_STRERROR
+#define COMMON_INTERCEPTOR_STRERROR() {}
+#endif
+
+struct FileMetadata {
+ // For open_memstream().
+ char **addr;
+ SIZE_T *size;
+};
+
+struct CommonInterceptorMetadata {
+ enum {
+ CIMT_INVALID = 0,
+ CIMT_FILE
+ } type;
+ union {
+ FileMetadata file;
+ };
+};
+
+#if SI_POSIX
+typedef AddrHashMap<CommonInterceptorMetadata, 31051> MetadataHashMap;
+
+static MetadataHashMap *interceptor_metadata_map;
+
+UNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,
+ const FileMetadata &file) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);
+ CHECK(h.created());
+ h->type = CommonInterceptorMetadata::CIMT_FILE;
+ h->file = file;
+}
+
+UNUSED static const FileMetadata *GetInterceptorMetadata(
+ __sanitizer_FILE *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr,
+ /* remove */ false,
+ /* create */ false);
+ if (addr && h.exists()) {
+ CHECK(!h.created());
+ CHECK(h->type == CommonInterceptorMetadata::CIMT_FILE);
+ return &h->file;
+ } else {
+ return 0;
+ }
+}
+
+UNUSED static void DeleteInterceptorMetadata(void *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);
+ CHECK(h.exists());
+}
+#endif // SI_POSIX
+
+#if SANITIZER_INTERCEPT_STRLEN
+INTERCEPTOR(SIZE_T, strlen, const char *s) {
+ // Sometimes strlen is called prior to InitializeCommonInterceptors,
+ // in which case the REAL(strlen) typically used in
+ // COMMON_INTERCEPTOR_ENTER will fail. We use internal_strlen here
+ // to handle that.
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strlen(s);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strlen, s);
+ SIZE_T result = REAL(strlen)(s);
+ if (common_flags()->intercept_strlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, result + 1);
+ return result;
+}
+#define INIT_STRLEN COMMON_INTERCEPT_FUNCTION(strlen)
+#else
+#define INIT_STRLEN
+#endif
+
+#if SANITIZER_INTERCEPT_STRNLEN
+INTERCEPTOR(SIZE_T, strnlen, const char *s, SIZE_T maxlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnlen, s, maxlen);
+ SIZE_T length = REAL(strnlen)(s, maxlen);
+ if (common_flags()->intercept_strlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, Min(length + 1, maxlen));
+ return length;
+}
+#define INIT_STRNLEN COMMON_INTERCEPT_FUNCTION(strnlen)
+#else
+#define INIT_STRNLEN
+#endif
+
+#if SANITIZER_INTERCEPT_STRNDUP
+INTERCEPTOR(char*, strndup, const char *s, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
+}
+#define INIT_STRNDUP COMMON_INTERCEPT_FUNCTION(strndup)
+#else
+#define INIT_STRNDUP
+#endif // SANITIZER_INTERCEPT_STRNDUP
+
+#if SANITIZER_INTERCEPT___STRNDUP
+INTERCEPTOR(char*, __strndup, const char *s, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
+}
+#define INIT___STRNDUP COMMON_INTERCEPT_FUNCTION(__strndup)
+#else
+#define INIT___STRNDUP
+#endif // SANITIZER_INTERCEPT___STRNDUP
+
+#if SANITIZER_INTERCEPT_TEXTDOMAIN
+INTERCEPTOR(char*, textdomain, const char *domainname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
+ if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
+ char *domain = REAL(textdomain)(domainname);
+ if (domain) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
+ }
+ return domain;
+}
+#define INIT_TEXTDOMAIN COMMON_INTERCEPT_FUNCTION(textdomain)
+#else
+#define INIT_TEXTDOMAIN
+#endif
+
+#if SANITIZER_INTERCEPT_STRCMP
+static inline int CharCmpX(unsigned char c1, unsigned char c2) {
+ return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
+ const char *s1, const char *s2, int result)
+
+INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcmp, s1, s2);
+ unsigned char c1, c2;
+ uptr i;
+ for (i = 0;; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (c1 != c2 || c1 == '\0') break;
+ }
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ int result = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
+ s2, result);
+ return result;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, uptr called_pc,
+ const char *s1, const char *s2, uptr n,
+ int result)
+
+INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strncmp(s1, s2, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0; i < size; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (c1 != c2 || c1 == '\0') break;
+ }
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
+ int result = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
+ s2, size, result);
+ return result;
+}
+
+#define INIT_STRCMP COMMON_INTERCEPT_FUNCTION(strcmp)
+#define INIT_STRNCMP COMMON_INTERCEPT_FUNCTION(strncmp)
+#else
+#define INIT_STRCMP
+#define INIT_STRNCMP
+#endif
+
+#if SANITIZER_INTERCEPT_STRCASECMP
+static inline int CharCaseCmp(unsigned char c1, unsigned char c2) {
+ int c1_low = ToLower(c1);
+ int c2_low = ToLower(c2);
+ return c1_low - c2_low;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, uptr called_pc,
+ const char *s1, const char *s2, int result)
+
+INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcasecmp, s1, s2);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0;; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
+ }
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ int result = CharCaseCmp(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, GET_CALLER_PC(),
+ s1, s2, result);
+ return result;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,
+ const char *s1, const char *s2, uptr size,
+ int result)
+
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0; i < size; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
+ }
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
+ int result = CharCaseCmp(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),
+ s1, s2, size, result);
+ return result;
+}
+
+#define INIT_STRCASECMP COMMON_INTERCEPT_FUNCTION(strcasecmp)
+#define INIT_STRNCASECMP COMMON_INTERCEPT_FUNCTION(strncasecmp)
+#else
+#define INIT_STRCASECMP
+#define INIT_STRNCASECMP
+#endif
+
+#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR
+static inline void StrstrCheck(void *ctx, char *r, const char *s1,
+ const char *s2) {
+ uptr len1 = REAL(strlen)(s1);
+ uptr len2 = REAL(strlen)(s2);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_STRSTR
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,
+ const char *s1, const char *s2, char *result)
+
+INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strstr(s1, s2);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strstr, s1, s2);
+ char *r = REAL(strstr)(s1, s2);
+ if (common_flags()->intercept_strstr)
+ StrstrCheck(ctx, r, s1, s2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, GET_CALLER_PC(), s1,
+ s2, r);
+ return r;
+}
+
+#define INIT_STRSTR COMMON_INTERCEPT_FUNCTION(strstr);
+#else
+#define INIT_STRSTR
+#endif
+
+#if SANITIZER_INTERCEPT_STRCASESTR
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,
+ const char *s1, const char *s2, char *result)
+
+INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcasestr, s1, s2);
+ char *r = REAL(strcasestr)(s1, s2);
+ if (common_flags()->intercept_strstr)
+ StrstrCheck(ctx, r, s1, s2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, GET_CALLER_PC(),
+ s1, s2, r);
+ return r;
+}
+
+#define INIT_STRCASESTR COMMON_INTERCEPT_FUNCTION(strcasestr);
+#else
+#define INIT_STRCASESTR
+#endif
+
+#if SANITIZER_INTERCEPT_STRTOK
+
+INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtok, str, delimiters);
+ if (!common_flags()->intercept_strtok) {
+ return REAL(strtok)(str, delimiters);
+ }
+ if (common_flags()->strict_string_checks) {
+ // If strict_string_checks is enabled, we check the whole first argument
+ // string on the first call (strtok saves this string in a static buffer
+ // for subsequent calls). We do not need to check strtok's result.
+ // As the delimiters can change, we check them every call.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
+ REAL(strlen)(delimiters) + 1);
+ return REAL(strtok)(str, delimiters);
+ } else {
+ // However, when strict_string_checks is disabled we cannot check the
+ // whole string on the first call. Instead, we check the result string
+ // which is guaranteed to be a NULL-terminated substring of the first
+ // argument. We also conservatively check one character of str and the
+ // delimiters.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, str, 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
+ char *result = REAL(strtok)(str, delimiters);
+ if (result != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
+ } else if (str != nullptr) {
+ // No delimiter were found, it's safe to assume that the entire str was
+ // scanned.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ return result;
+ }
+}
+
+#define INIT_STRTOK COMMON_INTERCEPT_FUNCTION(strtok)
+#else
+#define INIT_STRTOK
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMEM
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
+ const void *s1, SIZE_T len1, const void *s2,
+ SIZE_T len2, void *result)
+
+INTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2,
+ SIZE_T len2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memmem, s1, len1, s2, len2);
+ void *r = REAL(memmem)(s1, len1, s2, len2);
+ if (common_flags()->intercept_memmem) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, len1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2);
+ }
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, GET_CALLER_PC(),
+ s1, len1, s2, len2, r);
+ return r;
+}
+
+#define INIT_MEMMEM COMMON_INTERCEPT_FUNCTION(memmem);
+#else
+#define INIT_MEMMEM
+#endif // SANITIZER_INTERCEPT_MEMMEM
+
+#if SANITIZER_INTERCEPT_STRCHR
+INTERCEPTOR(char*, strchr, const char *s, int c) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strchr(s, c);
+ COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c);
+ char *result = REAL(strchr)(s, c);
+ if (common_flags()->intercept_strchr) {
+ // Keep strlen as macro argument, as macro may ignore it.
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s,
+ (result ? result - s : REAL(strlen)(s)) + 1);
+ }
+ return result;
+}
+#define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr)
+#else
+#define INIT_STRCHR
+#endif
+
+#if SANITIZER_INTERCEPT_STRCHRNUL
+INTERCEPTOR(char*, strchrnul, const char *s, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strchrnul, s, c);
+ char *result = REAL(strchrnul)(s, c);
+ uptr len = result - s + 1;
+ if (common_flags()->intercept_strchr)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, len);
+ return result;
+}
+#define INIT_STRCHRNUL COMMON_INTERCEPT_FUNCTION(strchrnul)
+#else
+#define INIT_STRCHRNUL
+#endif
+
+#if SANITIZER_INTERCEPT_STRRCHR
+INTERCEPTOR(char*, strrchr, const char *s, int c) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strrchr(s, c);
+ COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
+ if (common_flags()->intercept_strchr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return REAL(strrchr)(s, c);
+}
+#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
+#else
+#define INIT_STRRCHR
+#endif
+
+#if SANITIZER_INTERCEPT_STRSPN
+INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2);
+ SIZE_T r = REAL(strspn)(s1, s2);
+ if (common_flags()->intercept_strspn) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
+ }
+ return r;
+}
+
+INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2);
+ SIZE_T r = REAL(strcspn)(s1, s2);
+ if (common_flags()->intercept_strspn) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
+ }
+ return r;
+}
+
+#define INIT_STRSPN \
+ COMMON_INTERCEPT_FUNCTION(strspn); \
+ COMMON_INTERCEPT_FUNCTION(strcspn);
+#else
+#define INIT_STRSPN
+#endif
+
+#if SANITIZER_INTERCEPT_STRPBRK
+INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2);
+ char *r = REAL(strpbrk)(s1, s2);
+ if (common_flags()->intercept_strpbrk) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1,
+ r ? r - s1 + 1 : REAL(strlen)(s1) + 1);
+ }
+ return r;
+}
+
+#define INIT_STRPBRK COMMON_INTERCEPT_FUNCTION(strpbrk);
+#else
+#define INIT_STRPBRK
+#endif
+
+#if SANITIZER_INTERCEPT_MEMSET
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
+}
+
+#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
+#else
+#define INIT_MEMSET
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMOVE
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
+#else
+#define INIT_MEMMOVE
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCPY
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+ // On OS X, calling internal_memcpy here will cause memory corruptions,
+ // because memcpy and memmove are actually aliases of the same
+ // implementation. We need to use internal_memmove here.
+ // N.B.: If we switch this to internal_ we'll have to use internal_memmove
+ // due to memcpy being an alias of memmove on OS X.
+ void *ctx;
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+ } else {
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+ }
+}
+
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
+#else
+#define INIT_MEMCPY
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCMP
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
+ const void *s1, const void *s2, uptr n,
+ int result)
+
+// Common code for `memcmp` and `bcmp`.
+int MemcmpInterceptorCommon(void *ctx,
+ int (*real_fn)(const void *, const void *, uptr),
+ const void *a1, const void *a2, uptr size) {
+ if (common_flags()->intercept_memcmp) {
+ if (common_flags()->strict_memcmp) {
+ // Check the entire regions even if the first bytes of the buffers are
+ // different.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, a1, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, a2, size);
+ // Fallthrough to REAL(memcmp) below.
+ } else {
+ unsigned char c1 = 0, c2 = 0;
+ const unsigned char *s1 = (const unsigned char*)a1;
+ const unsigned char *s2 = (const unsigned char*)a2;
+ uptr i;
+ for (i = 0; i < size; i++) {
+ c1 = s1[i];
+ c2 = s2[i];
+ if (c1 != c2) break;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
+ int r = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(),
+ a1, a2, size, r);
+ return r;
+ }
+ }
+ int result = real_fn(a1, a2, size);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1,
+ a2, size, result);
+ return result;
+}
+
+INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memcmp(a1, a2, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size);
+ return MemcmpInterceptorCommon(ctx, REAL(memcmp), a1, a2, size);
+}
+
+#define INIT_MEMCMP COMMON_INTERCEPT_FUNCTION(memcmp)
+#else
+#define INIT_MEMCMP
+#endif
+
+#if SANITIZER_INTERCEPT_BCMP
+INTERCEPTOR(int, bcmp, const void *a1, const void *a2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memcmp(a1, a2, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, bcmp, a1, a2, size);
+ return MemcmpInterceptorCommon(ctx, REAL(bcmp), a1, a2, size);
+}
+
+#define INIT_BCMP COMMON_INTERCEPT_FUNCTION(bcmp)
+#else
+#define INIT_BCMP
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCHR
+INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memchr(s, c, n);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+#if SANITIZER_WINDOWS
+ void *res;
+ if (REAL(memchr)) {
+ res = REAL(memchr)(s, c, n);
+ } else {
+ res = internal_memchr(s, c, n);
+ }
+#else
+ void *res = REAL(memchr)(s, c, n);
+#endif
+ uptr len = res ? (char *)res - (const char *)s + 1 : n;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
+ return res;
+}
+
+#define INIT_MEMCHR COMMON_INTERCEPT_FUNCTION(memchr)
+#else
+#define INIT_MEMCHR
+#endif
+
+#if SANITIZER_INTERCEPT_MEMRCHR
+INTERCEPTOR(void*, memrchr, const void *s, int c, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n);
+ return REAL(memrchr)(s, c, n);
+}
+
+#define INIT_MEMRCHR COMMON_INTERCEPT_FUNCTION(memrchr)
+#else
+#define INIT_MEMRCHR
+#endif
+
+#if SANITIZER_INTERCEPT_FREXP
+INTERCEPTOR(double, frexp, double x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp);
+ // Assuming frexp() always writes to |exp|.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ double res = REAL(frexp)(x, exp);
+ return res;
+}
+
+#define INIT_FREXP COMMON_INTERCEPT_FUNCTION(frexp);
+#else
+#define INIT_FREXP
+#endif // SANITIZER_INTERCEPT_FREXP
+
+#if SANITIZER_INTERCEPT_FREXPF_FREXPL
+INTERCEPTOR(float, frexpf, float x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(frexpf)(x, exp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ return res;
+}
+
+INTERCEPTOR(long double, frexpl, long double x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(frexpl)(x, exp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ return res;
+}
+
+#define INIT_FREXPF_FREXPL \
+ COMMON_INTERCEPT_FUNCTION(frexpf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(frexpl)
+#else
+#define INIT_FREXPF_FREXPL
+#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL
+
+#if SI_POSIX
+static void write_iovec(void *ctx, struct __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
+
+static void read_iovec(void *ctx, struct __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec) * iovlen);
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
+#endif
+
+#if SANITIZER_INTERCEPT_READ
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(read)(fd, ptr, count);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_READ COMMON_INTERCEPT_FUNCTION(read)
+#else
+#define INIT_READ
+#endif
+
+#if SANITIZER_INTERCEPT_FREAD
+INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, file);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(fread)(ptr, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res * size);
+ return res;
+}
+#define INIT_FREAD COMMON_INTERCEPT_FUNCTION(fread)
+#else
+#define INIT_FREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREAD COMMON_INTERCEPT_FUNCTION(pread)
+#else
+#define INIT_PREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREAD64 COMMON_INTERCEPT_FUNCTION(pread64)
+#else
+#define INIT_PREAD64
+#endif
+
+#if SANITIZER_INTERCEPT_READV
+INTERCEPTOR_WITH_SUFFIX(SSIZE_T, readv, int fd, __sanitizer_iovec *iov,
+ int iovcnt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readv, fd, iov, iovcnt);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(readv)(fd, iov, iovcnt);
+ if (res > 0) write_iovec(ctx, iov, iovcnt, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_READV COMMON_INTERCEPT_FUNCTION(readv)
+#else
+#define INIT_READV
+#endif
+
+#if SANITIZER_INTERCEPT_PREADV
+INTERCEPTOR(SSIZE_T, preadv, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, preadv, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(preadv)(fd, iov, iovcnt, offset);
+ if (res > 0) write_iovec(ctx, iov, iovcnt, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREADV COMMON_INTERCEPT_FUNCTION(preadv)
+#else
+#define INIT_PREADV
+#endif
+
+#if SANITIZER_INTERCEPT_PREADV64
+INTERCEPTOR(SSIZE_T, preadv64, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, preadv64, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(preadv64)(fd, iov, iovcnt, offset);
+ if (res > 0) write_iovec(ctx, iov, iovcnt, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREADV64 COMMON_INTERCEPT_FUNCTION(preadv64)
+#else
+#define INIT_PREADV64
+#endif
+
+#if SANITIZER_INTERCEPT_WRITE
+INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(write)(fd, ptr, count);
+ // FIXME: this check should be _before_ the call to REAL(write), not after
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_WRITE COMMON_INTERCEPT_FUNCTION(write)
+#else
+#define INIT_WRITE
+#endif
+
+#if SANITIZER_INTERCEPT_FWRITE
+INTERCEPTOR(SIZE_T, fwrite, const void *p, uptr size, uptr nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, file);
+ SIZE_T res = REAL(fwrite)(p, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, p, res * size);
+ return res;
+}
+#define INIT_FWRITE COMMON_INTERCEPT_FUNCTION(fwrite)
+#else
+#define INIT_FWRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE
+INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_PWRITE COMMON_INTERCEPT_FUNCTION(pwrite)
+#else
+#define INIT_PWRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE64
+INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_PWRITE64 COMMON_INTERCEPT_FUNCTION(pwrite64)
+#else
+#define INIT_PWRITE64
+#endif
+
+#if SANITIZER_INTERCEPT_WRITEV
+INTERCEPTOR_WITH_SUFFIX(SSIZE_T, writev, int fd, __sanitizer_iovec *iov,
+ int iovcnt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, writev, fd, iov, iovcnt);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(writev)(fd, iov, iovcnt);
+ if (res > 0) read_iovec(ctx, iov, iovcnt, res);
+ return res;
+}
+#define INIT_WRITEV COMMON_INTERCEPT_FUNCTION(writev)
+#else
+#define INIT_WRITEV
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITEV
+INTERCEPTOR(SSIZE_T, pwritev, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwritev, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwritev)(fd, iov, iovcnt, offset);
+ if (res > 0) read_iovec(ctx, iov, iovcnt, res);
+ return res;
+}
+#define INIT_PWRITEV COMMON_INTERCEPT_FUNCTION(pwritev)
+#else
+#define INIT_PWRITEV
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITEV64
+INTERCEPTOR(SSIZE_T, pwritev64, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwritev64, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwritev64)(fd, iov, iovcnt, offset);
+ if (res > 0) read_iovec(ctx, iov, iovcnt, res);
+ return res;
+}
+#define INIT_PWRITEV64 COMMON_INTERCEPT_FUNCTION(pwritev64)
+#else
+#define INIT_PWRITEV64
+#endif
+
+#if SANITIZER_INTERCEPT_FGETS
+INTERCEPTOR(char *, fgets, char *s, SIZE_T size, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgets, s, size, file);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(fgets)(s, size, file);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return res;
+}
+#define INIT_FGETS COMMON_INTERCEPT_FUNCTION(fgets)
+#else
+#define INIT_FGETS
+#endif
+
+#if SANITIZER_INTERCEPT_FPUTS
+INTERCEPTOR_WITH_SUFFIX(int, fputs, char *s, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fputs, s, file);
+ if (!SANITIZER_MAC || s) { // `fputs(NULL, file)` is supported on Darwin.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ }
+ return REAL(fputs)(s, file);
+}
+#define INIT_FPUTS COMMON_INTERCEPT_FUNCTION(fputs)
+#else
+#define INIT_FPUTS
+#endif
+
+#if SANITIZER_INTERCEPT_PUTS
+INTERCEPTOR(int, puts, char *s) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
+ if (!SANITIZER_MAC || s) { // `puts(NULL)` is supported on Darwin.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ }
+ return REAL(puts)(s);
+}
+#define INIT_PUTS COMMON_INTERCEPT_FUNCTION(puts)
+#else
+#define INIT_PUTS
+#endif
+
+#if SANITIZER_INTERCEPT_PRCTL
+INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
+ static const int PR_SET_NAME = 15;
+ int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
+ if (option == PR_SET_NAME) {
+ char buff[16];
+ internal_strncpy(buff, (char *)arg2, 15);
+ buff[15] = 0;
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
+ }
+ return res;
+}
+#define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl)
+#else
+#define INIT_PRCTL
+#endif // SANITIZER_INTERCEPT_PRCTL
+
+#if SANITIZER_INTERCEPT_TIME
+INTERCEPTOR(unsigned long, time, unsigned long *t) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, time, t);
+ unsigned long local_t;
+ unsigned long res = REAL(time)(&local_t);
+ if (t && res != (unsigned long)-1) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t));
+ *t = local_t;
+ }
+ return res;
+}
+#define INIT_TIME COMMON_INTERCEPT_FUNCTION(time);
+#else
+#define INIT_TIME
+#endif // SANITIZER_INTERCEPT_TIME
+
+#if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));
+#if !SANITIZER_SOLARIS
+ if (tm->tm_zone) {
+ // Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
+ // can point to shared memory and tsan would report a data race.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
+ REAL(strlen(tm->tm_zone)) + 1);
+ }
+#endif
+}
+INTERCEPTOR(__sanitizer_tm *, localtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime, timep);
+ __sanitizer_tm *res = REAL(localtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_tm *, localtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime_r, timep, result);
+ __sanitizer_tm *res = REAL(localtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_tm *, gmtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime, timep);
+ __sanitizer_tm *res = REAL(gmtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_tm *, gmtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime_r, timep, result);
+ __sanitizer_tm *res = REAL(gmtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(ctime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(ctime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(asctime)(tm);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(asctime_r)(tm, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(long, mktime, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mktime, tm);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_sec, sizeof(tm->tm_sec));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_min, sizeof(tm->tm_min));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_hour, sizeof(tm->tm_hour));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mday, sizeof(tm->tm_mday));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mon, sizeof(tm->tm_mon));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_year, sizeof(tm->tm_year));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_isdst, sizeof(tm->tm_isdst));
+ long res = REAL(mktime)(tm);
+ if (res != -1) unpoison_tm(ctx, tm);
+ return res;
+}
+#define INIT_LOCALTIME_AND_FRIENDS \
+ COMMON_INTERCEPT_FUNCTION(localtime); \
+ COMMON_INTERCEPT_FUNCTION(localtime_r); \
+ COMMON_INTERCEPT_FUNCTION(gmtime); \
+ COMMON_INTERCEPT_FUNCTION(gmtime_r); \
+ COMMON_INTERCEPT_FUNCTION(ctime); \
+ COMMON_INTERCEPT_FUNCTION(ctime_r); \
+ COMMON_INTERCEPT_FUNCTION(asctime); \
+ COMMON_INTERCEPT_FUNCTION(asctime_r); \
+ COMMON_INTERCEPT_FUNCTION(mktime);
+#else
+#define INIT_LOCALTIME_AND_FRIENDS
+#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+
+#if SANITIZER_INTERCEPT_STRPTIME
+INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
+ if (format)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(strptime)(s, format, tm);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, res ? res - s : 0);
+ if (res && tm) {
+ // Do not call unpoison_tm here, because strptime does not, in fact,
+ // initialize the entire struct tm. For example, tm_zone pointer is left
+ // uninitialized.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));
+ }
+ return res;
+}
+#define INIT_STRPTIME COMMON_INTERCEPT_FUNCTION(strptime);
+#else
+#define INIT_STRPTIME
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF || SANITIZER_INTERCEPT_PRINTF
+#include "sanitizer_common_interceptors_format.inc"
+
+#define FORMAT_INTERCEPTOR_IMPL(name, vname, ...) \
+ { \
+ void *ctx; \
+ va_list ap; \
+ va_start(ap, format); \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
+ int res = WRAP(vname)(__VA_ARGS__, ap); \
+ va_end(ap); \
+ return res; \
+ }
+
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF
+
+#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ if (res > 0) \
+ scanf_common(ctx, res, allowGnuMalloc, format, aq); \
+ va_end(aq); \
+ return res; \
+ }
+
+INTERCEPTOR(int, vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vscanf, true, format, ap)
+
+INTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vsscanf, true, str, format, ap)
+
+INTERCEPTOR(int, vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vfscanf, true, stream, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vscanf, false, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsscanf, const char *str, const char *format,
+ va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
+#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
+
+INTERCEPTOR(int, scanf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(scanf, vscanf, format)
+
+INTERCEPTOR(int, fscanf, void *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
+
+INTERCEPTOR(int, sscanf, const char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_scanf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
+
+INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+#endif
+
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF
+#define INIT_SCANF \
+ COMMON_INTERCEPT_FUNCTION_LDBL(scanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf);
+#else
+#define INIT_SCANF
+#endif
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+#define INIT_ISOC99_SCANF \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_scanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_sscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);
+#else
+#define INIT_ISOC99_SCANF
+#endif
+
+#if SANITIZER_INTERCEPT_PRINTF
+
+#define VPRINTF_INTERCEPTOR_ENTER(vname, ...) \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap);
+
+#define VPRINTF_INTERCEPTOR_RETURN() \
+ va_end(aq);
+
+#define VPRINTF_INTERCEPTOR_IMPL(vname, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, __VA_ARGS__); \
+ if (common_flags()->check_printf) \
+ printf_common(ctx, format, aq); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, size, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, Min(size, (SIZE_T)(res + 1))); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, strp, sizeof(char *)); \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(strp, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *strp, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+INTERCEPTOR(int, vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vprintf, format, ap)
+
+INTERCEPTOR(int, vfprintf, __sanitizer_FILE *stream, const char *format,
+ va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __vsnprintf_chk, char *str, SIZE_T size, int flag,
+ SIZE_T size_to, const char *format, va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+#endif
+
+#if SANITIZER_INTERCEPT_PRINTF_L
+INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap)
+
+INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format)
+#endif // SANITIZER_INTERCEPT_PRINTF_L
+
+INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __vsprintf_chk, char *str, int flag, SIZE_T size_to,
+ const char *format, va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
+#endif
+
+INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap)
+VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfprintf, __sanitizer_FILE *stream,
+ const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(__isoc99_vsnprintf, str, size, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsprintf, char *str, const char *format,
+ va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(__isoc99_vsprintf, str, format,
+ ap)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+INTERCEPTOR(int, printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(printf, vprintf, format)
+
+INTERCEPTOR(int, fprintf, __sanitizer_FILE *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(fprintf, vfprintf, stream, format)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __fprintf_chk, __sanitizer_FILE *stream, SIZE_T size,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__fprintf_chk, vfprintf, stream, format)
+#endif
+
+INTERCEPTOR(int, sprintf, char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(sprintf, vsprintf, str, format)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __sprintf_chk, char *str, int flag, SIZE_T size_to,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__sprintf_chk, vsprintf, str, format)
+#endif
+
+INTERCEPTOR(int, snprintf, char *str, SIZE_T size, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf, vsnprintf, str, size, format)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __snprintf_chk, char *str, SIZE_T size, int flag,
+ SIZE_T size_to, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__snprintf_chk, vsnprintf, str, size, format)
+#endif
+
+INTERCEPTOR(int, asprintf, char **strp, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format)
+
+INTERCEPTOR(int, __isoc99_fprintf, __sanitizer_FILE *stream, const char *format,
+ ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fprintf, __isoc99_vfprintf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sprintf, char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sprintf, __isoc99_vsprintf, str, format)
+
+INTERCEPTOR(int, __isoc99_snprintf, char *str, SIZE_T size,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
+ format)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+#endif // SANITIZER_INTERCEPT_PRINTF
+
+#if SANITIZER_INTERCEPT_PRINTF
+#define INIT_PRINTF \
+ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
+#else
+#define INIT_PRINTF
+#endif
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+#define INIT___PRINTF_CHK \
+ COMMON_INTERCEPT_FUNCTION(__sprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__snprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__vsprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__vsnprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__fprintf_chk);
+#else
+#define INIT___PRINTF_CHK
+#endif
+
+#if SANITIZER_INTERCEPT_PRINTF_L
+#define INIT_PRINTF_L \
+ COMMON_INTERCEPT_FUNCTION(snprintf_l); \
+ COMMON_INTERCEPT_FUNCTION(vsnprintf_l);
+#else
+#define INIT_PRINTF_L
+#endif
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define INIT_ISOC99_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_sprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_snprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_fprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfprintf);
+#else
+#define INIT_ISOC99_PRINTF
+#endif
+
+#if SANITIZER_INTERCEPT_IOCTL
+#include "sanitizer_common_interceptors_ioctl.inc"
+#include "sanitizer_interceptors_ioctl_netbsd.inc"
+INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
+ // We need a frame pointer, because we call into ioctl_common_[pre|post] which
+ // can trigger a report and we need to be able to unwind through this
+ // function. On Mac in debug mode we might not have a frame pointer, because
+ // ioctl_common_[pre|post] doesn't get inlined here.
+ ENABLE_FRAME_POINTER;
+
+ void *ctx;
+ va_list ap;
+ va_start(ap, request);
+ void *arg = va_arg(ap, void *);
+ va_end(ap);
+ COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg);
+
+ CHECK(ioctl_initialized);
+
+ // Note: TSan does not use common flags, and they are zero-initialized.
+ // This effectively disables ioctl handling in TSan.
+ if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
+
+ // Although request is unsigned long, the rest of the interceptor uses it
+ // as just "unsigned" to save space, because we know that all values fit in
+ // "unsigned" - they are compile-time constants.
+
+ const ioctl_desc *desc = ioctl_lookup(request);
+ ioctl_desc decoded_desc;
+ if (!desc) {
+ VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+ if (!ioctl_decode(request, &decoded_desc))
+ Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+ else
+ desc = &decoded_desc;
+ }
+
+ if (desc) ioctl_common_pre(ctx, desc, d, request, arg);
+ int res = REAL(ioctl)(d, request, arg);
+ // FIXME: some ioctls have different return values for success and failure.
+ if (desc && res != -1) ioctl_common_post(ctx, desc, res, d, request, arg);
+ return res;
+}
+#define INIT_IOCTL \
+ ioctl_init(); \
+ COMMON_INTERCEPT_FUNCTION(ioctl);
+#else
+#define INIT_IOCTL
+#endif
+
+#if SANITIZER_POSIX
+UNUSED static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
+ if (pwd) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
+ if (pwd->pw_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_name,
+ REAL(strlen)(pwd->pw_name) + 1);
+ if (pwd->pw_passwd)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_passwd,
+ REAL(strlen)(pwd->pw_passwd) + 1);
+#if !SANITIZER_ANDROID
+ if (pwd->pw_gecos)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_gecos,
+ REAL(strlen)(pwd->pw_gecos) + 1);
+#endif
+#if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+ if (pwd->pw_class)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_class,
+ REAL(strlen)(pwd->pw_class) + 1);
+#endif
+ if (pwd->pw_dir)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_dir,
+ REAL(strlen)(pwd->pw_dir) + 1);
+ if (pwd->pw_shell)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_shell,
+ REAL(strlen)(pwd->pw_shell) + 1);
+ }
+}
+
+UNUSED static void unpoison_group(void *ctx, __sanitizer_group *grp) {
+ if (grp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
+ if (grp->gr_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_name,
+ REAL(strlen)(grp->gr_name) + 1);
+ if (grp->gr_passwd)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_passwd,
+ REAL(strlen)(grp->gr_passwd) + 1);
+ char **p = grp->gr_mem;
+ for (; *p; ++p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_mem,
+ (p - grp->gr_mem + 1) * sizeof(*p));
+ }
+}
+#endif // SANITIZER_POSIX
+
+#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ __sanitizer_passwd *res = REAL(getpwnam)(name);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid);
+ __sanitizer_passwd *res = REAL(getpwuid)(uid);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ __sanitizer_group *res = REAL(getgrnam)(name);
+ unpoison_group(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid);
+ __sanitizer_group *res = REAL(getgrgid)(gid);
+ unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_GETPWNAM_AND_FRIENDS \
+ COMMON_INTERCEPT_FUNCTION(getpwnam); \
+ COMMON_INTERCEPT_FUNCTION(getpwuid); \
+ COMMON_INTERCEPT_FUNCTION(getgrnam); \
+ COMMON_INTERCEPT_FUNCTION(getgrgid);
+#else
+#define INIT_GETPWNAM_AND_FRIENDS
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
+ char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
+ if (!res && result)
+ unpoison_passwd(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
+ if (!res && result)
+ unpoison_passwd(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
+ char *buf, SIZE_T buflen, __sanitizer_group **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
+ if (!res && result)
+ unpoison_group(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf,
+ SIZE_T buflen, __sanitizer_group **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
+ if (!res && result)
+ unpoison_group(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+#define INIT_GETPWNAM_R_AND_FRIENDS \
+ COMMON_INTERCEPT_FUNCTION(getpwnam_r); \
+ COMMON_INTERCEPT_FUNCTION(getpwuid_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrnam_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrgid_r);
+#else
+#define INIT_GETPWNAM_R_AND_FRIENDS
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWENT
+INTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy);
+ __sanitizer_passwd *res = REAL(getpwent)(dummy);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy);
+ __sanitizer_group *res = REAL(getgrent)(dummy);
+ unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_GETPWENT \
+ COMMON_INTERCEPT_FUNCTION(getpwent); \
+ COMMON_INTERCEPT_FUNCTION(getgrent);
+#else
+#define INIT_GETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_FGETPWENT
+INTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp);
+ __sanitizer_passwd *res = REAL(fgetpwent)(fp);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp);
+ __sanitizer_group *res = REAL(fgetgrent)(fp);
+ unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_FGETPWENT \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent);
+#else
+#define INIT_FGETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWENT_R
+INTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_passwd(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen,
+ __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_group(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_GETPWENT_R \
+ COMMON_INTERCEPT_FUNCTION(getpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrent_r);
+#else
+#define INIT_GETPWENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_FGETPWENT_R
+INTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_passwd(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_FGETPWENT_R \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent_r);
+#else
+#define INIT_FGETPWENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_FGETGRENT_R
+INTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_group(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_FGETGRENT_R \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent_r);
+#else
+#define INIT_FGETGRENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_SETPWENT
+// The only thing these interceptors do is disable any nested interceptors.
+// These functions may open nss modules and call uninstrumented functions from
+// them, and we don't want things like strlen() to trigger.
+INTERCEPTOR(void, setpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setpwent, dummy);
+ REAL(setpwent)(dummy);
+}
+INTERCEPTOR(void, endpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endpwent, dummy);
+ REAL(endpwent)(dummy);
+}
+INTERCEPTOR(void, setgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setgrent, dummy);
+ REAL(setgrent)(dummy);
+}
+INTERCEPTOR(void, endgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endgrent, dummy);
+ REAL(endgrent)(dummy);
+}
+#define INIT_SETPWENT \
+ COMMON_INTERCEPT_FUNCTION(setpwent); \
+ COMMON_INTERCEPT_FUNCTION(endpwent); \
+ COMMON_INTERCEPT_FUNCTION(setgrent); \
+ COMMON_INTERCEPT_FUNCTION(endgrent);
+#else
+#define INIT_SETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_CLOCK_GETTIME
+INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(clock_getres)(clk_id, tp);
+ if (!res && tp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(clock_gettime)(clk_id, tp);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
+ }
+ return res;
+}
+namespace __sanitizer {
+extern "C" {
+int real_clock_gettime(u32 clk_id, void *tp) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_clock_gettime(clk_id, tp);
+ return REAL(clock_gettime)(clk_id, tp);
+}
+} // extern "C"
+} // namespace __sanitizer
+INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tp, struct_timespec_sz);
+ return REAL(clock_settime)(clk_id, tp);
+}
+#define INIT_CLOCK_GETTIME \
+ COMMON_INTERCEPT_FUNCTION(clock_getres); \
+ COMMON_INTERCEPT_FUNCTION(clock_gettime); \
+ COMMON_INTERCEPT_FUNCTION(clock_settime);
+#else
+#define INIT_CLOCK_GETTIME
+#endif
+
+#if SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
+INTERCEPTOR(int, clock_getcpuclockid, pid_t pid,
+ __sanitizer_clockid_t *clockid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_getcpuclockid, pid, clockid);
+ int res = REAL(clock_getcpuclockid)(pid, clockid);
+ if (!res && clockid) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);
+ }
+ return res;
+}
+
+#define INIT_CLOCK_GETCPUCLOCKID \
+ COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);
+#else
+#define INIT_CLOCK_GETCPUCLOCKID
+#endif
+
+#if SANITIZER_INTERCEPT_GETITIMER
+INTERCEPTOR(int, getitimer, int which, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getitimer)(which, curr_value);
+ if (!res && curr_value) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value);
+ if (new_value) {
+ // itimerval can contain padding that may be legitimately uninitialized
+ const struct __sanitizer_itimerval *nv =
+ (const struct __sanitizer_itimerval *)new_value;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_sec,
+ sizeof(__sanitizer_time_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_usec,
+ sizeof(__sanitizer_suseconds_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_sec,
+ sizeof(__sanitizer_time_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_usec,
+ sizeof(__sanitizer_suseconds_t));
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(setitimer)(which, new_value, old_value);
+ if (!res && old_value) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);
+ }
+ return res;
+}
+#define INIT_GETITIMER \
+ COMMON_INTERCEPT_FUNCTION(getitimer); \
+ COMMON_INTERCEPT_FUNCTION(setitimer);
+#else
+#define INIT_GETITIMER
+#endif
+
+#if SANITIZER_INTERCEPT_GLOB
+static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob));
+ // +1 for NULL pointer at the end.
+ if (pglob->gl_pathv)
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv));
+ for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) {
+ char *p = pglob->gl_pathv[i];
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1);
+ }
+}
+
+#if SANITIZER_SOLARIS
+INTERCEPTOR(int, glob, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ __sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
+ int res = REAL(glob)(pattern, flags, errfunc, pglob);
+ if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
+ return res;
+}
+#else
+static THREADLOCAL __sanitizer_glob_t *pglob_copy;
+
+static void wrapped_gl_closedir(void *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ pglob_copy->gl_closedir(dir);
+}
+
+static void *wrapped_gl_readdir(void *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ return pglob_copy->gl_readdir(dir);
+}
+
+static void *wrapped_gl_opendir(const char *s) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return pglob_copy->gl_opendir(s);
+}
+
+static int wrapped_gl_lstat(const char *s, void *st) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return pglob_copy->gl_lstat(s, st);
+}
+
+static int wrapped_gl_stat(const char *s, void *st) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return pglob_copy->gl_stat(s, st);
+}
+
+static const __sanitizer_glob_t kGlobCopy = {
+ 0, 0, 0,
+ 0, wrapped_gl_closedir, wrapped_gl_readdir,
+ wrapped_gl_opendir, wrapped_gl_lstat, wrapped_gl_stat};
+
+INTERCEPTOR(int, glob, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ __sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
+ __sanitizer_glob_t glob_copy;
+ internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ pglob_copy = &glob_copy;
+ }
+ int res = REAL(glob)(pattern, flags, errfunc, pglob);
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ }
+ pglob_copy = 0;
+ if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
+ return res;
+}
+#endif // SANITIZER_SOLARIS
+#define INIT_GLOB \
+ COMMON_INTERCEPT_FUNCTION(glob);
+#else // SANITIZER_INTERCEPT_GLOB
+#define INIT_GLOB
+#endif // SANITIZER_INTERCEPT_GLOB
+
+#if SANITIZER_INTERCEPT_GLOB64
+INTERCEPTOR(int, glob64, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ __sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
+ __sanitizer_glob_t glob_copy;
+ internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ pglob_copy = &glob_copy;
+ }
+ int res = REAL(glob64)(pattern, flags, errfunc, pglob);
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ }
+ pglob_copy = 0;
+ if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
+ return res;
+}
+#define INIT_GLOB64 \
+ COMMON_INTERCEPT_FUNCTION(glob64);
+#else // SANITIZER_INTERCEPT_GLOB64
+#define INIT_GLOB64
+#endif // SANITIZER_INTERCEPT_GLOB64
+
+#if SANITIZER_INTERCEPT_WAIT
+// According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version
+// suffixes on Darwin. See the declaration of INTERCEPTOR_WITH_SUFFIX for
+// details.
+INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait, status);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wait)(status);
+ if (res != -1 && status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ return res;
+}
+// On FreeBSD id_t is always 64-bit wide.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, long long id, void *infop,
+ int options) {
+#else
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
+ int options) {
+#endif
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(waitid)(idtype, id, infop, options);
+ if (res != -1 && infop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
+ return res;
+}
+INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(waitpid)(pid, status, options);
+ if (res != -1 && status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ return res;
+}
+INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wait3)(status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#if SANITIZER_ANDROID
+INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(__wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(__wait4);
+#else
+INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(wait4);
+#endif // SANITIZER_ANDROID
+#define INIT_WAIT \
+ COMMON_INTERCEPT_FUNCTION(wait); \
+ COMMON_INTERCEPT_FUNCTION(waitid); \
+ COMMON_INTERCEPT_FUNCTION(waitpid); \
+ COMMON_INTERCEPT_FUNCTION(wait3);
+#else
+#define INIT_WAIT
+#define INIT_WAIT4
+#endif
+
+#if SANITIZER_INTERCEPT_INET
+INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_ntop, af, src, dst, size);
+ uptr sz = __sanitizer_in_addr_sz(af);
+ if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);
+ // FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(inet_ntop)(af, src, dst, size);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, src, 0);
+ // FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(inet_pton)(af, src, dst);
+ if (res == 1) {
+ uptr sz = __sanitizer_in_addr_sz(af);
+ if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz);
+ }
+ return res;
+}
+#define INIT_INET \
+ COMMON_INTERCEPT_FUNCTION(inet_ntop); \
+ COMMON_INTERCEPT_FUNCTION(inet_pton);
+#else
+#define INIT_INET
+#endif
+
+#if SANITIZER_INTERCEPT_INET
+INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
+ if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(inet_aton)(cp, dst);
+ if (res != 0) {
+ uptr sz = __sanitizer_in_addr_sz(af_inet);
+ if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz);
+ }
+ return res;
+}
+#define INIT_INET_ATON COMMON_INTERCEPT_FUNCTION(inet_aton);
+#else
+#define INIT_INET_ATON
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
+INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_getschedparam)(thread, policy, param);
+ if (res == 0) {
+ if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
+ if (param) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, sizeof(*param));
+ }
+ return res;
+}
+#define INIT_PTHREAD_GETSCHEDPARAM \
+ COMMON_INTERCEPT_FUNCTION(pthread_getschedparam);
+#else
+#define INIT_PTHREAD_GETSCHEDPARAM
+#endif
+
+#if SANITIZER_INTERCEPT_GETADDRINFO
+INTERCEPTOR(int, getaddrinfo, char *node, char *service,
+ struct __sanitizer_addrinfo *hints,
+ struct __sanitizer_addrinfo **out) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);
+ if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1);
+ if (service)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
+ if (hints)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getaddrinfo)(node, service, hints, out);
+ if (res == 0 && out) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
+ struct __sanitizer_addrinfo *p = *out;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (p->ai_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, p->ai_addrlen);
+ if (p->ai_canonname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,
+ REAL(strlen)(p->ai_canonname) + 1);
+ p = p->ai_next;
+ }
+ }
+ return res;
+}
+#define INIT_GETADDRINFO COMMON_INTERCEPT_FUNCTION(getaddrinfo);
+#else
+#define INIT_GETADDRINFO
+#endif
+
+#if SANITIZER_INTERCEPT_GETNAMEINFO
+INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
+ unsigned hostlen, char *serv, unsigned servlen, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnameinfo, sockaddr, salen, host, hostlen,
+ serv, servlen, flags);
+ // FIXME: consider adding READ_RANGE(sockaddr, salen)
+ // There is padding in in_addr that may make this too noisy
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res =
+ REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
+ if (res == 0) {
+ if (host && hostlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, REAL(strlen)(host) + 1);
+ if (serv && servlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, REAL(strlen)(serv) + 1);
+ }
+ return res;
+}
+#define INIT_GETNAMEINFO COMMON_INTERCEPT_FUNCTION(getnameinfo);
+#else
+#define INIT_GETNAMEINFO
+#endif
+
+#if SANITIZER_INTERCEPT_GETSOCKNAME
+INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ int addrlen_in = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getsockname)(sock_fd, addr, addrlen);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
+ }
+ return res;
+}
+#define INIT_GETSOCKNAME COMMON_INTERCEPT_FUNCTION(getsockname);
+#else
+#define INIT_GETSOCKNAME
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME || SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+static void write_hostent(void *ctx, struct __sanitizer_hostent *h) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));
+ if (h->h_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1);
+ char **p = h->h_aliases;
+ while (*p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ ++p;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, h->h_aliases, (p - h->h_aliases + 1) * sizeof(*h->h_aliases));
+ p = h->h_addr_list;
+ while (*p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, h->h_length);
+ ++p;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, h->h_addr_list, (p - h->h_addr_list + 1) * sizeof(*h->h_addr_list));
+}
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname, char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname, name);
+ struct __sanitizer_hostent *res = REAL(gethostbyname)(name);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyaddr, void *addr, int len,
+ int type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr, addr, len, type);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ struct __sanitizer_hostent *res = REAL(gethostbyaddr)(addr, len, type);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+
+INTERCEPTOR(struct __sanitizer_hostent *, gethostent, int fake) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostent, fake);
+ struct __sanitizer_hostent *res = REAL(gethostent)(fake);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+#define INIT_GETHOSTBYNAME \
+ COMMON_INTERCEPT_FUNCTION(gethostent); \
+ COMMON_INTERCEPT_FUNCTION(gethostbyaddr); \
+ COMMON_INTERCEPT_FUNCTION(gethostbyname);
+#else
+#define INIT_GETHOSTBYNAME
+#endif // SANITIZER_INTERCEPT_GETHOSTBYNAME
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME2
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2, name, af);
+ struct __sanitizer_hostent *res = REAL(gethostbyname2)(name, af);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+#define INIT_GETHOSTBYNAME2 COMMON_INTERCEPT_FUNCTION(gethostbyname2);
+#else
+#define INIT_GETHOSTBYNAME2
+#endif // SANITIZER_INTERCEPT_GETHOSTBYNAME2
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
+ char *buf, SIZE_T buflen, __sanitizer_hostent **result,
+ int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
+ h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYNAME_R COMMON_INTERCEPT_FUNCTION(gethostbyname_r);
+#else
+#define INIT_GETHOSTBYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTENT_R
+INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
+ SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
+ h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTENT_R \
+ COMMON_INTERCEPT_FUNCTION(gethostent_r);
+#else
+#define INIT_GETHOSTENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYADDR_R
+INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
+ struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
+ __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,
+ buflen, result, h_errnop);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
+ h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYADDR_R \
+ COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r);
+#else
+#define INIT_GETHOSTBYADDR_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
+INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
+ struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
+ __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,
+ result, h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res =
+ REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYNAME2_R \
+ COMMON_INTERCEPT_FUNCTION(gethostbyname2_r);
+#else
+#define INIT_GETHOSTBYNAME2_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETSOCKOPT
+INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
+ int *optlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,
+ optlen);
+ if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
+ if (res == 0)
+ if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
+ return res;
+}
+#define INIT_GETSOCKOPT COMMON_INTERCEPT_FUNCTION(getsockopt);
+#else
+#define INIT_GETSOCKOPT
+#endif
+
+#if SANITIZER_INTERCEPT_ACCEPT
+INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, accept, fd, addr, addrlen);
+ unsigned addrlen0 = 0;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addrlen0 = *addrlen;
+ }
+ int fd2 = REAL(accept)(fd, addr, addrlen);
+ if (fd2 >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
+ if (addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
+ }
+ return fd2;
+}
+#define INIT_ACCEPT COMMON_INTERCEPT_FUNCTION(accept);
+#else
+#define INIT_ACCEPT
+#endif
+
+#if SANITIZER_INTERCEPT_ACCEPT4
+INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, accept4, fd, addr, addrlen, f);
+ unsigned addrlen0 = 0;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addrlen0 = *addrlen;
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int fd2 = REAL(accept4)(fd, addr, addrlen, f);
+ if (fd2 >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
+ if (addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
+ }
+ return fd2;
+}
+#define INIT_ACCEPT4 COMMON_INTERCEPT_FUNCTION(accept4);
+#else
+#define INIT_ACCEPT4
+#endif
+
+#if SANITIZER_INTERCEPT_PACCEPT
+INTERCEPTOR(int, paccept, int fd, void *addr, unsigned *addrlen,
+ __sanitizer_sigset_t *set, int f) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, paccept, fd, addr, addrlen, set, f);
+ unsigned addrlen0 = 0;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addrlen0 = *addrlen;
+ }
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ int fd2 = REAL(paccept)(fd, addr, addrlen, set, f);
+ if (fd2 >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
+ if (addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
+ }
+ return fd2;
+}
+#define INIT_PACCEPT COMMON_INTERCEPT_FUNCTION(paccept);
+#else
+#define INIT_PACCEPT
+#endif
+
+#if SANITIZER_INTERCEPT_MODF
+INTERCEPTOR(double, modf, double x, double *iptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ double res = REAL(modf)(x, iptr);
+ if (iptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
+ }
+ return res;
+}
+INTERCEPTOR(float, modff, float x, float *iptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(modff)(x, iptr);
+ if (iptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
+ }
+ return res;
+}
+INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(modfl)(x, iptr);
+ if (iptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
+ }
+ return res;
+}
+#define INIT_MODF \
+ COMMON_INTERCEPT_FUNCTION(modf); \
+ COMMON_INTERCEPT_FUNCTION(modff); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(modfl);
+#else
+#define INIT_MODF
+#endif
+
+#if SANITIZER_INTERCEPT_RECVMSG || SANITIZER_INTERCEPT_RECVMMSG
+static void write_msghdr(void *ctx, struct __sanitizer_msghdr *msg,
+ SSIZE_T maxlen) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg, sizeof(*msg));
+ if (msg->msg_name && msg->msg_namelen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_name, msg->msg_namelen);
+ if (msg->msg_iov && msg->msg_iovlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_iov,
+ sizeof(*msg->msg_iov) * msg->msg_iovlen);
+ write_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);
+ if (msg->msg_control && msg->msg_controllen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_control, msg->msg_controllen);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_RECVMSG
+INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
+ int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
+ if (res >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ if (msg) {
+ write_msghdr(ctx, msg, res);
+ COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg);
+ }
+ }
+ return res;
+}
+#define INIT_RECVMSG COMMON_INTERCEPT_FUNCTION(recvmsg);
+#else
+#define INIT_RECVMSG
+#endif
+
+#if SANITIZER_INTERCEPT_RECVMMSG
+INTERCEPTOR(int, recvmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,
+ unsigned int vlen, int flags, void *timeout) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recvmmsg, fd, msgvec, vlen, flags, timeout);
+ if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
+ int res = REAL(recvmmsg)(fd, msgvec, vlen, flags, timeout);
+ if (res >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ for (int i = 0; i < res; ++i) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len,
+ sizeof(msgvec[i].msg_len));
+ write_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len);
+ COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, &msgvec[i].msg_hdr);
+ }
+ }
+ return res;
+}
+#define INIT_RECVMMSG COMMON_INTERCEPT_FUNCTION(recvmmsg);
+#else
+#define INIT_RECVMMSG
+#endif
+
+#if SANITIZER_INTERCEPT_SENDMSG || SANITIZER_INTERCEPT_SENDMMSG
+static void read_msghdr_control(void *ctx, void *control, uptr controllen) {
+ const unsigned kCmsgDataOffset =
+ RoundUpTo(sizeof(__sanitizer_cmsghdr), sizeof(uptr));
+
+ char *p = (char *)control;
+ char *const control_end = p + controllen;
+ while (true) {
+ if (p + sizeof(__sanitizer_cmsghdr) > control_end) break;
+ __sanitizer_cmsghdr *cmsg = (__sanitizer_cmsghdr *)p;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_len, sizeof(cmsg->cmsg_len));
+
+ if (p + RoundUpTo(cmsg->cmsg_len, sizeof(uptr)) > control_end) break;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_level,
+ sizeof(cmsg->cmsg_level));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_type,
+ sizeof(cmsg->cmsg_type));
+
+ if (cmsg->cmsg_len > kCmsgDataOffset) {
+ char *data = p + kCmsgDataOffset;
+ unsigned data_len = cmsg->cmsg_len - kCmsgDataOffset;
+ if (data_len > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, data, data_len);
+ }
+
+ p += RoundUpTo(cmsg->cmsg_len, sizeof(uptr));
+ }
+}
+
+static void read_msghdr(void *ctx, struct __sanitizer_msghdr *msg,
+ SSIZE_T maxlen) {
+#define R(f) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &msg->msg_##f, sizeof(msg->msg_##f))
+ R(name);
+ R(namelen);
+ R(iov);
+ R(iovlen);
+ R(control);
+ R(controllen);
+ R(flags);
+#undef R
+ if (msg->msg_name && msg->msg_namelen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_name, msg->msg_namelen);
+ if (msg->msg_iov && msg->msg_iovlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_iov,
+ sizeof(*msg->msg_iov) * msg->msg_iovlen);
+ read_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);
+ if (msg->msg_control && msg->msg_controllen)
+ read_msghdr_control(ctx, msg->msg_control, msg->msg_controllen);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_SENDMSG
+INTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg,
+ int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendmsg, fd, msg, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ SSIZE_T res = REAL(sendmsg)(fd, msg, flags);
+ if (common_flags()->intercept_send && res >= 0 && msg)
+ read_msghdr(ctx, msg, res);
+ return res;
+}
+#define INIT_SENDMSG COMMON_INTERCEPT_FUNCTION(sendmsg);
+#else
+#define INIT_SENDMSG
+#endif
+
+#if SANITIZER_INTERCEPT_SENDMMSG
+INTERCEPTOR(int, sendmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,
+ unsigned vlen, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendmmsg, fd, msgvec, vlen, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ int res = REAL(sendmmsg)(fd, msgvec, vlen, flags);
+ if (res >= 0 && msgvec) {
+ for (int i = 0; i < res; ++i) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len,
+ sizeof(msgvec[i].msg_len));
+ if (common_flags()->intercept_send)
+ read_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len);
+ }
+ }
+ return res;
+}
+#define INIT_SENDMMSG COMMON_INTERCEPT_FUNCTION(sendmmsg);
+#else
+#define INIT_SENDMMSG
+#endif
+
+#if SANITIZER_INTERCEPT_SYSMSG
+INTERCEPTOR(int, msgsnd, int msqid, const void *msgp, SIZE_T msgsz,
+ int msgflg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, msgsnd, msqid, msgp, msgsz, msgflg);
+ if (msgp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msgp, sizeof(long) + msgsz);
+ int res = REAL(msgsnd)(msqid, msgp, msgsz, msgflg);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, msgrcv, int msqid, void *msgp, SIZE_T msgsz,
+ long msgtyp, int msgflg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, msgrcv, msqid, msgp, msgsz, msgtyp, msgflg);
+ SSIZE_T len = REAL(msgrcv)(msqid, msgp, msgsz, msgtyp, msgflg);
+ if (len != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msgp, sizeof(long) + len);
+ return len;
+}
+
+#define INIT_SYSMSG \
+ COMMON_INTERCEPT_FUNCTION(msgsnd); \
+ COMMON_INTERCEPT_FUNCTION(msgrcv);
+#else
+#define INIT_SYSMSG
+#endif
+
+#if SANITIZER_INTERCEPT_GETPEERNAME
+INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
+ unsigned addr_sz;
+ if (addrlen) addr_sz = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpeername)(sockfd, addr, addrlen);
+ if (!res && addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
+ return res;
+}
+#define INIT_GETPEERNAME COMMON_INTERCEPT_FUNCTION(getpeername);
+#else
+#define INIT_GETPEERNAME
+#endif
+
+#if SANITIZER_INTERCEPT_SYSINFO
+INTERCEPTOR(int, sysinfo, void *info) {
+ void *ctx;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);
+ int res = REAL(sysinfo)(info);
+ if (!res && info)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, struct_sysinfo_sz);
+ return res;
+}
+#define INIT_SYSINFO COMMON_INTERCEPT_FUNCTION(sysinfo);
+#else
+#define INIT_SYSINFO
+#endif
+
+#if SANITIZER_INTERCEPT_READDIR
+INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ __sanitizer_dirent *res = REAL(opendir)(path);
+ if (res)
+ COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_dirent *res = REAL(readdir)(dirp);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ return res;
+}
+
+INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
+ __sanitizer_dirent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(readdir_r)(dirp, entry, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ }
+ return res;
+}
+
+#define INIT_READDIR \
+ COMMON_INTERCEPT_FUNCTION(opendir); \
+ COMMON_INTERCEPT_FUNCTION(readdir); \
+ COMMON_INTERCEPT_FUNCTION(readdir_r);
+#else
+#define INIT_READDIR
+#endif
+
+#if SANITIZER_INTERCEPT_READDIR64
+INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_dirent64 *res = REAL(readdir64)(dirp);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ return res;
+}
+
+INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
+ __sanitizer_dirent64 **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(readdir64_r)(dirp, entry, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ }
+ return res;
+}
+#define INIT_READDIR64 \
+ COMMON_INTERCEPT_FUNCTION(readdir64); \
+ COMMON_INTERCEPT_FUNCTION(readdir64_r);
+#else
+#define INIT_READDIR64
+#endif
+
+#if SANITIZER_INTERCEPT_PTRACE
+INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ptrace, request, pid, addr, data);
+ __sanitizer_iovec local_iovec;
+
+ if (data) {
+ if (request == ptrace_setregs) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_regs_struct_sz);
+ } else if (request == ptrace_setfpregs) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpregs_struct_sz);
+ } else if (request == ptrace_setfpxregs) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
+ } else if (request == ptrace_setvfpregs) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
+ } else if (request == ptrace_setsiginfo) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, siginfo_t_sz);
+
+ // Some kernel might zero the iovec::iov_base in case of invalid
+ // write access. In this case copy the invalid address for further
+ // inspection.
+ } else if (request == ptrace_setregset || request == ptrace_getregset) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec));
+ local_iovec = *iovec;
+ if (request == ptrace_setregset)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec->iov_base, iovec->iov_len);
+ }
+ }
+
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ uptr res = REAL(ptrace)(request, pid, addr, data);
+
+ if (!res && data) {
+ // Note that PEEK* requests assign different meaning to the return value.
+ // This function does not handle them (nor does it need to).
+ if (request == ptrace_getregs) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_regs_struct_sz);
+ } else if (request == ptrace_getfpregs) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpregs_struct_sz);
+ } else if (request == ptrace_getfpxregs) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
+ } else if (request == ptrace_getvfpregs) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
+ } else if (request == ptrace_getsiginfo) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, siginfo_t_sz);
+ } else if (request == ptrace_geteventmsg) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(unsigned long));
+ } else if (request == ptrace_getregset) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec, sizeof(*iovec));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, local_iovec.iov_base,
+ local_iovec.iov_len);
+ }
+ }
+ return res;
+}
+
+#define INIT_PTRACE COMMON_INTERCEPT_FUNCTION(ptrace);
+#else
+#define INIT_PTRACE
+#endif
+
+#if SANITIZER_INTERCEPT_SETLOCALE
+static void unpoison_ctype_arrays(void *ctx) {
+#if SANITIZER_NETBSD
+ // These arrays contain 256 regular elements in unsigned char range + 1 EOF
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _ctype_tab_, 257 * sizeof(short));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _toupper_tab_, 257 * sizeof(short));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _tolower_tab_, 257 * sizeof(short));
+#endif
+}
+
+INTERCEPTOR(char *, setlocale, int category, char *locale) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, locale);
+ if (locale)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
+ char *res = REAL(setlocale)(category, locale);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ unpoison_ctype_arrays(ctx);
+ }
+ return res;
+}
+
+#define INIT_SETLOCALE COMMON_INTERCEPT_FUNCTION(setlocale);
+#else
+#define INIT_SETLOCALE
+#endif
+
+#if SANITIZER_INTERCEPT_GETCWD
+INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(getcwd)(buf, size);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_GETCWD COMMON_INTERCEPT_FUNCTION(getcwd);
+#else
+#define INIT_GETCWD
+#endif
+
+#if SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME
+INTERCEPTOR(char *, get_current_dir_name, int fake) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(get_current_dir_name)(fake);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+#define INIT_GET_CURRENT_DIR_NAME \
+ COMMON_INTERCEPT_FUNCTION(get_current_dir_name);
+#else
+#define INIT_GET_CURRENT_DIR_NAME
+#endif
+
+UNUSED static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
+ CHECK(endptr);
+ if (nptr == *endptr) {
+ // No digits were found at strtol call, we need to find out the last
+ // symbol accessed by strtoll on our own.
+ // We get this symbol by skipping leading blanks and optional +/- sign.
+ while (IsSpace(*nptr)) nptr++;
+ if (*nptr == '+' || *nptr == '-') nptr++;
+ *endptr = const_cast<char *>(nptr);
+ }
+ CHECK(*endptr >= nptr);
+}
+
+UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
+ char **endptr, char *real_endptr, int base) {
+ if (endptr) {
+ *endptr = real_endptr;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
+ }
+ // If base has unsupported value, strtol can exit with EINVAL
+ // without reading any characters. So do additional checks only
+ // if base is valid.
+ bool is_valid_base = (base == 0) || (2 <= base && base <= 36);
+ if (is_valid_base) {
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ }
+ COMMON_INTERCEPTOR_READ_STRING(ctx, nptr, is_valid_base ?
+ (real_endptr - nptr) + 1 : 0);
+}
+
+
+#if SANITIZER_INTERCEPT_STRTOIMAX
+INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *real_endptr;
+ INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return res;
+}
+
+INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *real_endptr;
+ UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return res;
+}
+
+#define INIT_STRTOIMAX \
+ COMMON_INTERCEPT_FUNCTION(strtoimax); \
+ COMMON_INTERCEPT_FUNCTION(strtoumax);
+#else
+#define INIT_STRTOIMAX
+#endif
+
+#if SANITIZER_INTERCEPT_MBSTOWCS
+INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(mbstowcs)(dest, src, len);
+ if (res != (SIZE_T) - 1 && dest) {
+ SIZE_T write_cnt = res + (res < len);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
+ }
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, mbsrtowcs, wchar_t *dest, const char **src, SIZE_T len,
+ void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mbsrtowcs, dest, src, len, ps);
+ if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);
+ if (res != (SIZE_T)(-1) && dest && src) {
+ // This function, and several others, may or may not write the terminating
+ // \0 character. They write it iff they clear *src.
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
+ }
+ return res;
+}
+
+#define INIT_MBSTOWCS \
+ COMMON_INTERCEPT_FUNCTION(mbstowcs); \
+ COMMON_INTERCEPT_FUNCTION(mbsrtowcs);
+#else
+#define INIT_MBSTOWCS
+#endif
+
+#if SANITIZER_INTERCEPT_MBSNRTOWCS
+INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
+ SIZE_T len, void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mbsnrtowcs, dest, src, nms, len, ps);
+ if (src) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
+ }
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);
+ if (res != (SIZE_T)(-1) && dest && src) {
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
+ }
+ return res;
+}
+
+#define INIT_MBSNRTOWCS COMMON_INTERCEPT_FUNCTION(mbsnrtowcs);
+#else
+#define INIT_MBSNRTOWCS
+#endif
+
+#if SANITIZER_INTERCEPT_WCSTOMBS
+INTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(wcstombs)(dest, src, len);
+ if (res != (SIZE_T) - 1 && dest) {
+ SIZE_T write_cnt = res + (res < len);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
+ }
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, wcsrtombs, char *dest, const wchar_t **src, SIZE_T len,
+ void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsrtombs, dest, src, len, ps);
+ if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);
+ if (res != (SIZE_T) - 1 && dest && src) {
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
+ }
+ return res;
+}
+
+#define INIT_WCSTOMBS \
+ COMMON_INTERCEPT_FUNCTION(wcstombs); \
+ COMMON_INTERCEPT_FUNCTION(wcsrtombs);
+#else
+#define INIT_WCSTOMBS
+#endif
+
+#if SANITIZER_INTERCEPT_WCSNRTOMBS
+INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
+ SIZE_T len, void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsnrtombs, dest, src, nms, len, ps);
+ if (src) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
+ }
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);
+ if (res != ((SIZE_T)-1) && dest && src) {
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
+ }
+ return res;
+}
+
+#define INIT_WCSNRTOMBS COMMON_INTERCEPT_FUNCTION(wcsnrtombs);
+#else
+#define INIT_WCSNRTOMBS
+#endif
+
+
+#if SANITIZER_INTERCEPT_WCRTOMB
+INTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcrtomb, dest, src, ps);
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+
+ if (!dest)
+ return REAL(wcrtomb)(dest, src, ps);
+
+ char local_dest[32];
+ SIZE_T res = REAL(wcrtomb)(local_dest, src, ps);
+ if (res != ((SIZE_T)-1)) {
+ CHECK_LE(res, sizeof(local_dest));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);
+ REAL(memcpy)(dest, local_dest, res);
+ }
+ return res;
+}
+
+#define INIT_WCRTOMB COMMON_INTERCEPT_FUNCTION(wcrtomb);
+#else
+#define INIT_WCRTOMB
+#endif
+
+#if SANITIZER_INTERCEPT_WCTOMB
+INTERCEPTOR(int, wctomb, char *dest, wchar_t src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wctomb, dest, src);
+ if (!dest)
+ return REAL(wctomb)(dest, src);
+
+ char local_dest[32];
+ int res = REAL(wctomb)(local_dest, src);
+ if (res != -1) {
+ CHECK_LE(res, sizeof(local_dest));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);
+ REAL(memcpy)(dest, local_dest, res);
+ }
+ return res;
+}
+
+#define INIT_WCTOMB COMMON_INTERCEPT_FUNCTION(wctomb);
+#else
+#define INIT_WCTOMB
+#endif
+
+#if SANITIZER_INTERCEPT_TCGETATTR
+INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(tcgetattr)(fd, termios_p);
+ if (!res && termios_p)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);
+ return res;
+}
+
+#define INIT_TCGETATTR COMMON_INTERCEPT_FUNCTION(tcgetattr);
+#else
+#define INIT_TCGETATTR
+#endif
+
+#if SANITIZER_INTERCEPT_REALPATH
+INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, realpath, path, resolved_path);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+
+ // Workaround a bug in glibc where dlsym(RTLD_NEXT, ...) returns the oldest
+ // version of a versioned symbol. For realpath(), this gives us something
+ // (called __old_realpath) that does not handle NULL in the second argument.
+ // Handle it as part of the interceptor.
+ char *allocated_path = nullptr;
+ if (!resolved_path)
+ allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);
+
+ char *res = REAL(realpath)(path, resolved_path);
+ if (allocated_path && !res) WRAP(free)(allocated_path);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
+#else
+#define INIT_REALPATH
+#endif
+
+#if SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME
+INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, canonicalize_file_name, path);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ char *res = REAL(canonicalize_file_name)(path);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_CANONICALIZE_FILE_NAME \
+ COMMON_INTERCEPT_FUNCTION(canonicalize_file_name);
+#else
+#define INIT_CANONICALIZE_FILE_NAME
+#endif
+
+#if SANITIZER_INTERCEPT_CONFSTR
+INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(confstr)(name, buf, len);
+ if (buf && res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);
+ return res;
+}
+#define INIT_CONFSTR COMMON_INTERCEPT_FUNCTION(confstr);
+#else
+#define INIT_CONFSTR
+#endif
+
+#if SANITIZER_INTERCEPT_SCHED_GETAFFINITY
+INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);
+ if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
+ return res;
+}
+#define INIT_SCHED_GETAFFINITY COMMON_INTERCEPT_FUNCTION(sched_getaffinity);
+#else
+#define INIT_SCHED_GETAFFINITY
+#endif
+
+#if SANITIZER_INTERCEPT_SCHED_GETPARAM
+INTERCEPTOR(int, sched_getparam, int pid, void *param) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);
+ int res = REAL(sched_getparam)(pid, param);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);
+ return res;
+}
+#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);
+#else
+#define INIT_SCHED_GETPARAM
+#endif
+
+#if SANITIZER_INTERCEPT_STRERROR
+INTERCEPTOR(char *, strerror, int errnum) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
+ COMMON_INTERCEPTOR_STRERROR();
+ char *res = REAL(strerror)(errnum);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
+#else
+#define INIT_STRERROR
+#endif
+
+#if SANITIZER_INTERCEPT_STRERROR_R
+// There are 2 versions of strerror_r:
+// * POSIX version returns 0 on success, negative error code on failure,
+// writes message to buf.
+// * GNU version returns message pointer, which points to either buf or some
+// static storage.
+#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
+ SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+ SANITIZER_FREEBSD || SANITIZER_OPENBSD
+// POSIX version. Spec is not clear on whether buf is NULL-terminated.
+// At least on OSX, buf contents are valid even when the call fails.
+INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(strerror_r)(errnum, buf, buflen);
+
+ SIZE_T sz = internal_strnlen(buf, buflen);
+ if (sz < buflen) ++sz;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
+ return res;
+}
+#else
+// GNU version.
+INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(strerror_r)(errnum, buf, buflen);
+ if (res == buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ else
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#endif //(_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE ||
+ //SANITIZER_MAC
+#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
+#else
+#define INIT_STRERROR_R
+#endif
+
+#if SANITIZER_INTERCEPT_XPG_STRERROR_R
+INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
+ // This version always returns a null-terminated string.
+ if (buf && buflen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ return res;
+}
+#define INIT_XPG_STRERROR_R COMMON_INTERCEPT_FUNCTION(__xpg_strerror_r);
+#else
+#define INIT_XPG_STRERROR_R
+#endif
+
+#if SANITIZER_INTERCEPT_SCANDIR
+typedef int (*scandir_filter_f)(const struct __sanitizer_dirent *);
+typedef int (*scandir_compar_f)(const struct __sanitizer_dirent **,
+ const struct __sanitizer_dirent **);
+
+static THREADLOCAL scandir_filter_f scandir_filter;
+static THREADLOCAL scandir_compar_f scandir_compar;
+
+static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return scandir_filter(dir);
+}
+
+static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
+ const struct __sanitizer_dirent **b) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return scandir_compar(a, b);
+}
+
+INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
+ scandir_filter_f filter, scandir_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ scandir_filter = filter;
+ scandir_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(scandir)(dirp, namelist,
+ filter ? wrapped_scandir_filter : nullptr,
+ compar ? wrapped_scandir_compar : nullptr);
+ scandir_filter = nullptr;
+ scandir_compar = nullptr;
+ if (namelist && res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
+ for (int i = 0; i < res; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
+ (*namelist)[i]->d_reclen);
+ }
+ return res;
+}
+#define INIT_SCANDIR COMMON_INTERCEPT_FUNCTION(scandir);
+#else
+#define INIT_SCANDIR
+#endif
+
+#if SANITIZER_INTERCEPT_SCANDIR64
+typedef int (*scandir64_filter_f)(const struct __sanitizer_dirent64 *);
+typedef int (*scandir64_compar_f)(const struct __sanitizer_dirent64 **,
+ const struct __sanitizer_dirent64 **);
+
+static THREADLOCAL scandir64_filter_f scandir64_filter;
+static THREADLOCAL scandir64_compar_f scandir64_compar;
+
+static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return scandir64_filter(dir);
+}
+
+static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
+ const struct __sanitizer_dirent64 **b) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return scandir64_compar(a, b);
+}
+
+INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
+ scandir64_filter_f filter, scandir64_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ scandir64_filter = filter;
+ scandir64_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res =
+ REAL(scandir64)(dirp, namelist,
+ filter ? wrapped_scandir64_filter : nullptr,
+ compar ? wrapped_scandir64_compar : nullptr);
+ scandir64_filter = nullptr;
+ scandir64_compar = nullptr;
+ if (namelist && res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
+ for (int i = 0; i < res; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
+ (*namelist)[i]->d_reclen);
+ }
+ return res;
+}
+#define INIT_SCANDIR64 COMMON_INTERCEPT_FUNCTION(scandir64);
+#else
+#define INIT_SCANDIR64
+#endif
+
+#if SANITIZER_INTERCEPT_GETGROUPS
+INTERCEPTOR(int, getgroups, int size, u32 *lst) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgroups)(size, lst);
+ if (res >= 0 && lst && size > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
+ return res;
+}
+#define INIT_GETGROUPS COMMON_INTERCEPT_FUNCTION(getgroups);
+#else
+#define INIT_GETGROUPS
+#endif
+
+#if SANITIZER_INTERCEPT_POLL
+static void read_pollfd(void *ctx, __sanitizer_pollfd *fds,
+ __sanitizer_nfds_t nfds) {
+ for (unsigned i = 0; i < nfds; ++i) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].fd, sizeof(fds[i].fd));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].events, sizeof(fds[i].events));
+ }
+}
+
+static void write_pollfd(void *ctx, __sanitizer_pollfd *fds,
+ __sanitizer_nfds_t nfds) {
+ for (unsigned i = 0; i < nfds; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &fds[i].revents,
+ sizeof(fds[i].revents));
+}
+
+INTERCEPTOR(int, poll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
+ int timeout) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, poll, fds, nfds, timeout);
+ if (fds && nfds) read_pollfd(ctx, fds, nfds);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(poll)(fds, nfds, timeout);
+ if (fds && nfds) write_pollfd(ctx, fds, nfds);
+ return res;
+}
+#define INIT_POLL COMMON_INTERCEPT_FUNCTION(poll);
+#else
+#define INIT_POLL
+#endif
+
+#if SANITIZER_INTERCEPT_PPOLL
+INTERCEPTOR(int, ppoll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
+ void *timeout_ts, __sanitizer_sigset_t *sigmask) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ppoll, fds, nfds, timeout_ts, sigmask);
+ if (fds && nfds) read_pollfd(ctx, fds, nfds);
+ if (timeout_ts)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout_ts, struct_timespec_sz);
+ if (sigmask) COMMON_INTERCEPTOR_READ_RANGE(ctx, sigmask, sizeof(*sigmask));
+ int res =
+ COMMON_INTERCEPTOR_BLOCK_REAL(ppoll)(fds, nfds, timeout_ts, sigmask);
+ if (fds && nfds) write_pollfd(ctx, fds, nfds);
+ return res;
+}
+#define INIT_PPOLL COMMON_INTERCEPT_FUNCTION(ppoll);
+#else
+#define INIT_PPOLL
+#endif
+
+#if SANITIZER_INTERCEPT_WORDEXP
+INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
+ if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wordexp)(s, p, flags);
+ if (!res && p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (p->we_wordc)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
+ sizeof(*p->we_wordv) * p->we_wordc);
+ for (uptr i = 0; i < p->we_wordc; ++i) {
+ char *w = p->we_wordv[i];
+ if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, REAL(strlen)(w) + 1);
+ }
+ }
+ return res;
+}
+#define INIT_WORDEXP COMMON_INTERCEPT_FUNCTION(wordexp);
+#else
+#define INIT_WORDEXP
+#endif
+
+#if SANITIZER_INTERCEPT_SIGWAIT
+INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigwait)(set, sig);
+ if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
+ return res;
+}
+#define INIT_SIGWAIT COMMON_INTERCEPT_FUNCTION(sigwait);
+#else
+#define INIT_SIGWAIT
+#endif
+
+#if SANITIZER_INTERCEPT_SIGWAITINFO
+INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigwaitinfo)(set, info);
+ if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
+ return res;
+}
+#define INIT_SIGWAITINFO COMMON_INTERCEPT_FUNCTION(sigwaitinfo);
+#else
+#define INIT_SIGWAITINFO
+#endif
+
+#if SANITIZER_INTERCEPT_SIGTIMEDWAIT
+INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
+ void *timeout) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);
+ if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigtimedwait)(set, info, timeout);
+ if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
+ return res;
+}
+#define INIT_SIGTIMEDWAIT COMMON_INTERCEPT_FUNCTION(sigtimedwait);
+#else
+#define INIT_SIGTIMEDWAIT
+#endif
+
+#if SANITIZER_INTERCEPT_SIGSETOPS
+INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigemptyset)(set);
+ if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
+ return res;
+}
+
+INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigfillset)(set);
+ if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
+ return res;
+}
+#define INIT_SIGSETOPS \
+ COMMON_INTERCEPT_FUNCTION(sigemptyset); \
+ COMMON_INTERCEPT_FUNCTION(sigfillset);
+#else
+#define INIT_SIGSETOPS
+#endif
+
+#if SANITIZER_INTERCEPT_SIGPENDING
+INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigpending)(set);
+ if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
+ return res;
+}
+#define INIT_SIGPENDING COMMON_INTERCEPT_FUNCTION(sigpending);
+#else
+#define INIT_SIGPENDING
+#endif
+
+#if SANITIZER_INTERCEPT_SIGPROCMASK
+INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigprocmask)(how, set, oldset);
+ if (!res && oldset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
+ return res;
+}
+#define INIT_SIGPROCMASK COMMON_INTERCEPT_FUNCTION(sigprocmask);
+#else
+#define INIT_SIGPROCMASK
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_sigmask, how, set, oldset);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_sigmask)(how, set, oldset);
+ if (!res && oldset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
+ return res;
+}
+#define INIT_PTHREAD_SIGMASK COMMON_INTERCEPT_FUNCTION(pthread_sigmask);
+#else
+#define INIT_PTHREAD_SIGMASK
+#endif
+
+#if SANITIZER_INTERCEPT_BACKTRACE
+INTERCEPTOR(int, backtrace, void **buffer, int size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(backtrace)(buffer, size);
+ if (res && buffer)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
+ return res;
+}
+
+INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
+ if (buffer && size)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char **res = REAL(backtrace_symbols)(buffer, size);
+ if (res && size) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
+ for (int i = 0; i < size; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], REAL(strlen(res[i])) + 1);
+ }
+ return res;
+}
+#define INIT_BACKTRACE \
+ COMMON_INTERCEPT_FUNCTION(backtrace); \
+ COMMON_INTERCEPT_FUNCTION(backtrace_symbols);
+#else
+#define INIT_BACKTRACE
+#endif
+
+#if SANITIZER_INTERCEPT__EXIT
+INTERCEPTOR(void, _exit, int status) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _exit, status);
+ COMMON_INTERCEPTOR_USER_CALLBACK_START();
+ int status1 = COMMON_INTERCEPTOR_ON_EXIT(ctx);
+ COMMON_INTERCEPTOR_USER_CALLBACK_END();
+ if (status == 0) status = status1;
+ REAL(_exit)(status);
+}
+#define INIT__EXIT COMMON_INTERCEPT_FUNCTION(_exit);
+#else
+#define INIT__EXIT
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEX
+INTERCEPTOR(int, pthread_mutex_lock, void *m) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
+ COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
+ int res = REAL(pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
+ COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
+ int res = REAL(pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
+}
+
+#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)
+#define INIT_PTHREAD_MUTEX_UNLOCK \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutex_unlock)
+#else
+#define INIT_PTHREAD_MUTEX_LOCK
+#define INIT_PTHREAD_MUTEX_UNLOCK
+#endif
+
+#if SANITIZER_INTERCEPT___PTHREAD_MUTEX
+INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_lock, m);
+ COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
+ int res = REAL(__pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
+}
+
+INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_unlock, m);
+ COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
+ int res = REAL(__pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
+}
+
+#define INIT___PTHREAD_MUTEX_LOCK \
+ COMMON_INTERCEPT_FUNCTION(__pthread_mutex_lock)
+#define INIT___PTHREAD_MUTEX_UNLOCK \
+ COMMON_INTERCEPT_FUNCTION(__pthread_mutex_unlock)
+#else
+#define INIT___PTHREAD_MUTEX_LOCK
+#define INIT___PTHREAD_MUTEX_UNLOCK
+#endif
+
+#if SANITIZER_INTERCEPT___LIBC_MUTEX
+INTERCEPTOR(int, __libc_mutex_lock, void *m)
+ALIAS(WRAPPER_NAME(pthread_mutex_lock));
+
+INTERCEPTOR(int, __libc_mutex_unlock, void *m)
+ALIAS(WRAPPER_NAME(pthread_mutex_unlock));
+
+INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
+ALIAS(WRAPPER_NAME(pthread_setcancelstate));
+
+#define INIT___LIBC_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock)
+#define INIT___LIBC_MUTEX_UNLOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock)
+#define INIT___LIBC_THR_SETCANCELSTATE \
+ COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)
+#else
+#define INIT___LIBC_MUTEX_LOCK
+#define INIT___LIBC_MUTEX_UNLOCK
+#define INIT___LIBC_THR_SETCANCELSTATE
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R
+static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
+ if (mnt->mnt_fsname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_fsname,
+ REAL(strlen)(mnt->mnt_fsname) + 1);
+ if (mnt->mnt_dir)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_dir,
+ REAL(strlen)(mnt->mnt_dir) + 1);
+ if (mnt->mnt_type)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_type,
+ REAL(strlen)(mnt->mnt_type) + 1);
+ if (mnt->mnt_opts)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_opts,
+ REAL(strlen)(mnt->mnt_opts) + 1);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTENT
+INTERCEPTOR(__sanitizer_mntent *, getmntent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getmntent, fp);
+ __sanitizer_mntent *res = REAL(getmntent)(fp);
+ if (res) write_mntent(ctx, res);
+ return res;
+}
+#define INIT_GETMNTENT COMMON_INTERCEPT_FUNCTION(getmntent);
+#else
+#define INIT_GETMNTENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTENT_R
+INTERCEPTOR(__sanitizer_mntent *, getmntent_r, void *fp,
+ __sanitizer_mntent *mntbuf, char *buf, int buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getmntent_r, fp, mntbuf, buf, buflen);
+ __sanitizer_mntent *res = REAL(getmntent_r)(fp, mntbuf, buf, buflen);
+ if (res) write_mntent(ctx, res);
+ return res;
+}
+#define INIT_GETMNTENT_R COMMON_INTERCEPT_FUNCTION(getmntent_r);
+#else
+#define INIT_GETMNTENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_STATFS
+INTERCEPTOR(int, statfs, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statfs)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatfs, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatfs)(fd, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
+ return res;
+}
+#define INIT_STATFS \
+ COMMON_INTERCEPT_FUNCTION(statfs); \
+ COMMON_INTERCEPT_FUNCTION(fstatfs);
+#else
+#define INIT_STATFS
+#endif
+
+#if SANITIZER_INTERCEPT_STATFS64
+INTERCEPTOR(int, statfs64, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statfs64)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatfs64)(fd, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
+ return res;
+}
+#define INIT_STATFS64 \
+ COMMON_INTERCEPT_FUNCTION(statfs64); \
+ COMMON_INTERCEPT_FUNCTION(fstatfs64);
+#else
+#define INIT_STATFS64
+#endif
+
+#if SANITIZER_INTERCEPT_STATVFS
+INTERCEPTOR(int, statvfs, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statvfs)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatvfs)(fd, buf);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+#define INIT_STATVFS \
+ COMMON_INTERCEPT_FUNCTION(statvfs); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs);
+#else
+#define INIT_STATVFS
+#endif
+
+#if SANITIZER_INTERCEPT_STATVFS64
+INTERCEPTOR(int, statvfs64, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statvfs64)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatvfs64)(fd, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
+ return res;
+}
+#define INIT_STATVFS64 \
+ COMMON_INTERCEPT_FUNCTION(statvfs64); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs64);
+#else
+#define INIT_STATVFS64
+#endif
+
+#if SANITIZER_INTERCEPT_INITGROUPS
+INTERCEPTOR(int, initgroups, char *user, u32 group) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, initgroups, user, group);
+ if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ int res = REAL(initgroups)(user, group);
+ return res;
+}
+#define INIT_INITGROUPS COMMON_INTERCEPT_FUNCTION(initgroups);
+#else
+#define INIT_INITGROUPS
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_NTOA_ATON
+INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
+ if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ char *res = REAL(ether_ntoa)(addr);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ __sanitizer_ether_addr *res = REAL(ether_aton)(buf);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
+ return res;
+}
+#define INIT_ETHER_NTOA_ATON \
+ COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
+ COMMON_INTERCEPT_FUNCTION(ether_aton);
+#else
+#define INIT_ETHER_NTOA_ATON
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_HOST
+INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_ntohost, hostname, addr);
+ if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ether_ntohost)(hostname, addr);
+ if (!res && hostname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ return res;
+}
+INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
+ if (hostname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ether_hostton)(hostname, addr);
+ if (!res && addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
+ return res;
+}
+INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
+ char *hostname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
+ if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ether_line)(line, addr, hostname);
+ if (!res) {
+ if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
+ if (hostname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ }
+ return res;
+}
+#define INIT_ETHER_HOST \
+ COMMON_INTERCEPT_FUNCTION(ether_ntohost); \
+ COMMON_INTERCEPT_FUNCTION(ether_hostton); \
+ COMMON_INTERCEPT_FUNCTION(ether_line);
+#else
+#define INIT_ETHER_HOST
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_R
+INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa_r, addr, buf);
+ if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(ether_ntoa_r)(addr, buf);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
+ __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res));
+ return res;
+}
+#define INIT_ETHER_R \
+ COMMON_INTERCEPT_FUNCTION(ether_ntoa_r); \
+ COMMON_INTERCEPT_FUNCTION(ether_aton_r);
+#else
+#define INIT_ETHER_R
+#endif
+
+#if SANITIZER_INTERCEPT_SHMCTL
+INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(shmctl)(shmid, cmd, buf);
+ if (res >= 0) {
+ unsigned sz = 0;
+ if (cmd == shmctl_ipc_stat || cmd == shmctl_shm_stat)
+ sz = sizeof(__sanitizer_shmid_ds);
+ else if (cmd == shmctl_ipc_info)
+ sz = struct_shminfo_sz;
+ else if (cmd == shmctl_shm_info)
+ sz = struct_shm_info_sz;
+ if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
+ }
+ return res;
+}
+#define INIT_SHMCTL COMMON_INTERCEPT_FUNCTION(shmctl);
+#else
+#define INIT_SHMCTL
+#endif
+
+#if SANITIZER_INTERCEPT_RANDOM_R
+INTERCEPTOR(int, random_r, void *buf, u32 *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(random_r)(buf, result);
+ if (!res && result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+#define INIT_RANDOM_R COMMON_INTERCEPT_FUNCTION(random_r);
+#else
+#define INIT_RANDOM_R
+#endif
+
+// FIXME: under ASan the REAL() call below may write to freed memory and corrupt
+// its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED || \
+ SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED || \
+ SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GET
+#define INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(fn, sz) \
+ INTERCEPTOR(int, fn, void *attr, void *r) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, fn, attr, r); \
+ int res = REAL(fn)(attr, r); \
+ if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
+ return res; \
+ }
+#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_attr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_MUTEXATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_rwlockattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_CONDATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_condattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_BARRIERATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_barrierattr_get##what, sz)
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
+INTERCEPTOR_PTHREAD_ATTR_GET(detachstate, sizeof(int))
+INTERCEPTOR_PTHREAD_ATTR_GET(guardsize, sizeof(SIZE_T))
+INTERCEPTOR_PTHREAD_ATTR_GET(scope, sizeof(int))
+INTERCEPTOR_PTHREAD_ATTR_GET(stacksize, sizeof(SIZE_T))
+INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getstack, attr, addr, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_attr_getstack)(attr, addr, size);
+ if (!res) {
+ if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
+ if (size) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, size, sizeof(*size));
+ }
+ return res;
+}
+
+// We may need to call the real pthread_attr_getstack from the run-time
+// in sanitizer_common, but we don't want to include the interception headers
+// there. So, just define this function here.
+namespace __sanitizer {
+extern "C" {
+int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
+ return REAL(pthread_attr_getstack)(attr, addr, size);
+}
+} // extern "C"
+} // namespace __sanitizer
+
+#define INIT_PTHREAD_ATTR_GET \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getguardsize); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getscope); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstacksize); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstack);
+#else
+#define INIT_PTHREAD_ATTR_GET
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED
+INTERCEPTOR_PTHREAD_ATTR_GET(schedparam, struct_sched_param_sz)
+INTERCEPTOR_PTHREAD_ATTR_GET(schedpolicy, sizeof(int))
+
+#define INIT_PTHREAD_ATTR_GET_SCHED \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedparam); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedpolicy);
+#else
+#define INIT_PTHREAD_ATTR_GET_SCHED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED
+INTERCEPTOR_PTHREAD_ATTR_GET(inheritsched, sizeof(int))
+
+#define INIT_PTHREAD_ATTR_GETINHERITSCHED \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getinheritsched);
+#else
+#define INIT_PTHREAD_ATTR_GETINHERITSCHED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP
+INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
+ void *cpuset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getaffinity_np, attr, cpusetsize,
+ cpuset);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset);
+ if (!res && cpusetsize && cpuset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
+ return res;
+}
+
+#define INIT_PTHREAD_ATTR_GETAFFINITY_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getaffinity_np);
+#else
+#define INIT_PTHREAD_ATTR_GETAFFINITY_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getpshared);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_gettype);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(protocol, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprotocol);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(prioceiling, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprioceiling);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust_np, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust_np);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getpshared);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(kind_np, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getkind_np);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_CONDATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getpshared);
+#else
+#define INIT_PTHREAD_CONDATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+INTERCEPTOR_PTHREAD_CONDATTR_GET(clock, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETCLOCK \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getclock);
+#else
+#define INIT_PTHREAD_CONDATTR_GETCLOCK
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_BARRIERATTR_GET(pshared, sizeof(int)) // !mac !android
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_barrierattr_getpshared);
+#else
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_TMPNAM
+INTERCEPTOR(char *, tmpnam, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tmpnam, s);
+ char *res = REAL(tmpnam)(s);
+ if (res) {
+ if (s)
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ else
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_TMPNAM COMMON_INTERCEPT_FUNCTION(tmpnam);
+#else
+#define INIT_TMPNAM
+#endif
+
+#if SANITIZER_INTERCEPT_TMPNAM_R
+INTERCEPTOR(char *, tmpnam_r, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(tmpnam_r)(s);
+ if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return res;
+}
+#define INIT_TMPNAM_R COMMON_INTERCEPT_FUNCTION(tmpnam_r);
+#else
+#define INIT_TMPNAM_R
+#endif
+
+#if SANITIZER_INTERCEPT_TTYNAME
+INTERCEPTOR(char *, ttyname, int fd) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname, fd);
+ char *res = REAL(ttyname)(fd);
+ if (res != nullptr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_TTYNAME COMMON_INTERCEPT_FUNCTION(ttyname);
+#else
+#define INIT_TTYNAME
+#endif
+
+#if SANITIZER_INTERCEPT_TTYNAME_R
+INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
+ int res = REAL(ttyname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
+#else
+#define INIT_TTYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_TEMPNAM
+INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tempnam, dir, pfx);
+ if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
+ if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
+ char *res = REAL(tempnam)(dir, pfx);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
+#else
+#define INIT_TEMPNAM
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP && !SANITIZER_NETBSD
+INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+ COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name);
+ return REAL(pthread_setname_np)(thread, name);
+}
+#define INIT_PTHREAD_SETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_setname_np);
+#elif SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP && SANITIZER_NETBSD
+INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name, void *arg) {
+ void *ctx;
+ char newname[32]; // PTHREAD_MAX_NAMELEN_NP=32
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name, arg);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+ internal_snprintf(newname, sizeof(newname), name, arg);
+ COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, newname);
+ return REAL(pthread_setname_np)(thread, name, arg);
+}
+#define INIT_PTHREAD_SETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_setname_np);
+#else
+#define INIT_PTHREAD_SETNAME_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP
+INTERCEPTOR(int, pthread_getname_np, uptr thread, char *name, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getname_np, thread, name, len);
+ int res = REAL(pthread_getname_np)(thread, name, len);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strnlen(name, len) + 1);
+ return res;
+}
+#define INIT_PTHREAD_GETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_getname_np);
+#else
+#define INIT_PTHREAD_GETNAME_NP
+#endif
+
+#if SANITIZER_INTERCEPT_SINCOS
+INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(sincos)(x, sin, cos);
+ if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
+ if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
+}
+INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(sincosf)(x, sin, cos);
+ if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
+ if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
+}
+INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(sincosl)(x, sin, cos);
+ if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
+ if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
+}
+#define INIT_SINCOS \
+ COMMON_INTERCEPT_FUNCTION(sincos); \
+ COMMON_INTERCEPT_FUNCTION(sincosf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sincosl);
+#else
+#define INIT_SINCOS
+#endif
+
+#if SANITIZER_INTERCEPT_REMQUO
+INTERCEPTOR(double, remquo, double x, double y, int *quo) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ double res = REAL(remquo)(x, y, quo);
+ if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
+ return res;
+}
+INTERCEPTOR(float, remquof, float x, float y, int *quo) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(remquof)(x, y, quo);
+ if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
+ return res;
+}
+#define INIT_REMQUO \
+ COMMON_INTERCEPT_FUNCTION(remquo); \
+ COMMON_INTERCEPT_FUNCTION(remquof);
+#else
+#define INIT_REMQUO
+#endif
+
+#if SANITIZER_INTERCEPT_REMQUOL
+INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(remquol)(x, y, quo);
+ if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
+ return res;
+}
+#define INIT_REMQUOL \
+ COMMON_INTERCEPT_FUNCTION_LDBL(remquol);
+#else
+#define INIT_REMQUOL
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMA
+extern int signgam;
+INTERCEPTOR(double, lgamma, double x) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgamma, x);
+ double res = REAL(lgamma)(x);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));
+ return res;
+}
+INTERCEPTOR(float, lgammaf, float x) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammaf, x);
+ float res = REAL(lgammaf)(x);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));
+ return res;
+}
+#define INIT_LGAMMA \
+ COMMON_INTERCEPT_FUNCTION(lgamma); \
+ COMMON_INTERCEPT_FUNCTION(lgammaf);
+#else
+#define INIT_LGAMMA
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMAL
+INTERCEPTOR(long double, lgammal, long double x) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammal, x);
+ long double res = REAL(lgammal)(x);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));
+ return res;
+}
+#define INIT_LGAMMAL \
+ COMMON_INTERCEPT_FUNCTION_LDBL(lgammal);
+#else
+#define INIT_LGAMMAL
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMA_R
+INTERCEPTOR(double, lgamma_r, double x, int *signp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ double res = REAL(lgamma_r)(x, signp);
+ if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
+ return res;
+}
+INTERCEPTOR(float, lgammaf_r, float x, int *signp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(lgammaf_r)(x, signp);
+ if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
+ return res;
+}
+#define INIT_LGAMMA_R \
+ COMMON_INTERCEPT_FUNCTION(lgamma_r); \
+ COMMON_INTERCEPT_FUNCTION(lgammaf_r);
+#else
+#define INIT_LGAMMA_R
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMAL_R
+INTERCEPTOR(long double, lgammal_r, long double x, int *signp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(lgammal_r)(x, signp);
+ if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
+ return res;
+}
+#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION_LDBL(lgammal_r);
+#else
+#define INIT_LGAMMAL_R
+#endif
+
+#if SANITIZER_INTERCEPT_DRAND48_R
+INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(drand48_r)(buffer, result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(lrand48_r)(buffer, result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+#define INIT_DRAND48_R \
+ COMMON_INTERCEPT_FUNCTION(drand48_r); \
+ COMMON_INTERCEPT_FUNCTION(lrand48_r);
+#else
+#define INIT_DRAND48_R
+#endif
+
+#if SANITIZER_INTERCEPT_RAND_R
+INTERCEPTOR(int, rand_r, unsigned *seedp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rand_r, seedp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, seedp, sizeof(*seedp));
+ return REAL(rand_r)(seedp);
+}
+#define INIT_RAND_R COMMON_INTERCEPT_FUNCTION(rand_r);
+#else
+#define INIT_RAND_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETLINE
+INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(getline)(lineptr, n, stream);
+ if (res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1);
+ }
+ return res;
+}
+
+// FIXME: under ASan the call below may write to freed memory and corrupt its
+// metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define GETDELIM_INTERCEPTOR_IMPL(vname) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, lineptr, n, delim, stream); \
+ SSIZE_T res = REAL(vname)(lineptr, n, delim, stream); \
+ if (res > 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr)); \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1); \
+ } \
+ return res; \
+ }
+
+INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream)
+GETDELIM_INTERCEPTOR_IMPL(__getdelim)
+
+// There's no __getdelim() on FreeBSD so we supply the getdelim() interceptor
+// with its own body.
+INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream)
+GETDELIM_INTERCEPTOR_IMPL(getdelim)
+
+#define INIT_GETLINE \
+ COMMON_INTERCEPT_FUNCTION(getline); \
+ COMMON_INTERCEPT_FUNCTION(__getdelim); \
+ COMMON_INTERCEPT_FUNCTION(getdelim);
+#else
+#define INIT_GETLINE
+#endif
+
+#if SANITIZER_INTERCEPT_ICONV
+INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
+ char **outbuf, SIZE_T *outbytesleft) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, iconv, cd, inbuf, inbytesleft, outbuf,
+ outbytesleft);
+ if (inbytesleft)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, inbytesleft, sizeof(*inbytesleft));
+ if (inbuf && inbytesleft)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *inbuf, *inbytesleft);
+ if (outbytesleft)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft));
+ void *outbuf_orig = outbuf ? *outbuf : nullptr;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
+ if (outbuf && *outbuf > outbuf_orig) {
+ SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, outbuf_orig, sz);
+ }
+ return res;
+}
+#define INIT_ICONV COMMON_INTERCEPT_FUNCTION(iconv);
+#else
+#define INIT_ICONV
+#endif
+
+#if SANITIZER_INTERCEPT_TIMES
+INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, times, tms);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_clock_t res = REAL(times)(tms);
+ if (res != (__sanitizer_clock_t)-1 && tms)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz);
+ return res;
+}
+#define INIT_TIMES COMMON_INTERCEPT_FUNCTION(times);
+#else
+#define INIT_TIMES
+#endif
+
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#if !SANITIZER_S390
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
+// If you see any crashes around this functions, there are 2 known issues with
+// it: 1. __tls_get_addr can be called with mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// 2. It can be called recursively if sanitizer code uses __tls_get_addr
+// to access thread local variables (it should not happen normally,
+// because sanitizers use initial-exec tls model).
+INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
+ void *res = REAL(__tls_get_addr)(arg);
+ uptr tls_begin, tls_end;
+ COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+#if SANITIZER_PPC
+// On PowerPC, we also need to intercept __tls_get_addr_opt, which has
+// mostly the same semantics as __tls_get_addr, but its presence enables
+// some optimizations in linker (which are safe to ignore here).
+extern "C" __attribute__((alias("__interceptor___tls_get_addr"),
+ visibility("default")))
+void *__tls_get_addr_opt(void *arg);
+#endif
+#else // SANITIZER_S390
+// On s390, we have to intercept two functions here:
+// - __tls_get_addr_internal, which is a glibc-internal function that is like
+// the usual __tls_get_addr, but returns a TP-relative offset instead of
+// a proper pointer. It is used by dlsym for TLS symbols.
+// - __tls_get_offset, which is like the above, but also takes a GOT-relative
+// descriptor offset as an argument instead of a pointer. GOT address
+// is passed in r12, so it's necessary to write it in assembly. This is
+// the function used by the compiler.
+extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)
+DEFINE_REAL(uptr, __tls_get_offset, void *arg)
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr __interceptor___tls_get_offset(void *arg);
+INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
+ uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
+ uptr tp = reinterpret_cast<uptr>(__builtin_thread_pointer());
+ void *ptr = reinterpret_cast<void *>(res + tp);
+ uptr tls_begin, tls_end;
+ COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, ptr, tls_begin, tls_end);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+// We need a hidden symbol aliasing the above, so that we can jump
+// directly to it from the assembly below.
+extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
+ visibility("hidden")))
+uptr __tls_get_addr_hidden(void *arg);
+// Now carefully intercept __tls_get_offset.
+asm(
+ ".text\n"
+// The __intercept_ version has to exist, so that gen_dynamic_list.py
+// exports our symbol.
+ ".weak __tls_get_offset\n"
+ ".type __tls_get_offset, @function\n"
+ "__tls_get_offset:\n"
+ ".global __interceptor___tls_get_offset\n"
+ ".type __interceptor___tls_get_offset, @function\n"
+ "__interceptor___tls_get_offset:\n"
+#ifdef __s390x__
+ "la %r2, 0(%r2,%r12)\n"
+ "jg __tls_get_addr_hidden\n"
+#else
+ "basr %r3,0\n"
+ "0: la %r2,0(%r2,%r12)\n"
+ "l %r4,1f-0b(%r3)\n"
+ "b 0(%r4,%r3)\n"
+ "1: .long __tls_get_addr_hidden - 0b\n"
+#endif
+ ".size __interceptor___tls_get_offset, .-__interceptor___tls_get_offset\n"
+// Assembly wrapper to call REAL(__tls_get_offset)(arg)
+ ".type __tls_get_offset_wrapper, @function\n"
+ "__tls_get_offset_wrapper:\n"
+#ifdef __s390x__
+ "sgr %r2,%r12\n"
+#else
+ "sr %r2,%r12\n"
+#endif
+ "br %r3\n"
+ ".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n"
+);
+#endif // SANITIZER_S390
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
+#if SANITIZER_INTERCEPT_LISTXATTR
+INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(listxattr)(path, list, size);
+ // Here and below, size == 0 is a special case where nothing is written to the
+ // buffer, and res contains the desired buffer size.
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(llistxattr)(path, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(flistxattr)(fd, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+#define INIT_LISTXATTR \
+ COMMON_INTERCEPT_FUNCTION(listxattr); \
+ COMMON_INTERCEPT_FUNCTION(llistxattr); \
+ COMMON_INTERCEPT_FUNCTION(flistxattr);
+#else
+#define INIT_LISTXATTR
+#endif
+
+#if SANITIZER_INTERCEPT_GETXATTR
+INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(getxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(lgetxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+#define INIT_GETXATTR \
+ COMMON_INTERCEPT_FUNCTION(getxattr); \
+ COMMON_INTERCEPT_FUNCTION(lgetxattr); \
+ COMMON_INTERCEPT_FUNCTION(fgetxattr);
+#else
+#define INIT_GETXATTR
+#endif
+
+#if SANITIZER_INTERCEPT_GETRESID
+INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getresuid)(ruid, euid, suid);
+ if (res >= 0) {
+ if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);
+ if (euid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, euid, uid_t_sz);
+ if (suid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, suid, uid_t_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getresgid)(rgid, egid, sgid);
+ if (res >= 0) {
+ if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);
+ if (egid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, egid, gid_t_sz);
+ if (sgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sgid, gid_t_sz);
+ }
+ return res;
+}
+#define INIT_GETRESID \
+ COMMON_INTERCEPT_FUNCTION(getresuid); \
+ COMMON_INTERCEPT_FUNCTION(getresgid);
+#else
+#define INIT_GETRESID
+#endif
+
+#if SANITIZER_INTERCEPT_GETIFADDRS
+// As long as getifaddrs()/freeifaddrs() use calloc()/free(), we don't need to
+// intercept freeifaddrs(). If that ceases to be the case, we might need to
+// intercept it to poison the memory again.
+INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getifaddrs)(ifap);
+ if (res == 0 && ifap) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));
+ __sanitizer_ifaddrs *p = *ifap;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
+ if (p->ifa_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
+ REAL(strlen)(p->ifa_name) + 1);
+ if (p->ifa_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
+ if (p->ifa_netmask)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_netmask, struct_sockaddr_sz);
+ // On Linux this is a union, but the other member also points to a
+ // struct sockaddr, so the following is sufficient.
+ if (p->ifa_dstaddr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_dstaddr, struct_sockaddr_sz);
+ // FIXME(smatveev): Unpoison p->ifa_data as well.
+ p = p->ifa_next;
+ }
+ }
+ return res;
+}
+#define INIT_GETIFADDRS \
+ COMMON_INTERCEPT_FUNCTION(getifaddrs);
+#else
+#define INIT_GETIFADDRS
+#endif
+
+#if SANITIZER_INTERCEPT_IF_INDEXTONAME
+INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(if_indextoname)(ifindex, ifname);
+ if (res && ifname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return res;
+}
+INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
+ if (ifname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return REAL(if_nametoindex)(ifname);
+}
+#define INIT_IF_INDEXTONAME \
+ COMMON_INTERCEPT_FUNCTION(if_indextoname); \
+ COMMON_INTERCEPT_FUNCTION(if_nametoindex);
+#else
+#define INIT_IF_INDEXTONAME
+#endif
+
+#if SANITIZER_INTERCEPT_CAPGET
+INTERCEPTOR(int, capget, void *hdrp, void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(capget)(hdrp, datap);
+ if (res == 0 && datap)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ // We can also return -1 and write to hdrp->version if the version passed in
+ // hdrp->version is unsupported. But that's not a trivial condition to check,
+ // and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.
+ return res;
+}
+INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ if (datap)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ return REAL(capset)(hdrp, datap);
+}
+#define INIT_CAPGET \
+ COMMON_INTERCEPT_FUNCTION(capget); \
+ COMMON_INTERCEPT_FUNCTION(capset);
+#else
+#define INIT_CAPGET
+#endif
+
+#if SANITIZER_INTERCEPT_AEABI_MEM
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
+
+#if SANITIZER_INTERCEPT___BZERO
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_BZERO
+INTERCEPTOR(void *, bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
+#else
+#define INIT_BZERO
+#endif // SANITIZER_INTERCEPT_BZERO
+
+#if SANITIZER_INTERCEPT_FTIME
+INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ftime)(tp);
+ if (tp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));
+ return res;
+}
+#define INIT_FTIME COMMON_INTERCEPT_FUNCTION(ftime);
+#else
+#define INIT_FTIME
+#endif // SANITIZER_INTERCEPT_FTIME
+
+#if SANITIZER_INTERCEPT_XDR
+INTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr,
+ unsigned size, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(xdrmem_create)(xdrs, addr, size, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+ if (op == __sanitizer_XDR_ENCODE) {
+ // It's not obvious how much data individual xdr_ routines write.
+ // Simply unpoison the entire target buffer in advance.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (void *)addr, size);
+ }
+}
+
+INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(xdrstdio_create)(xdrs, file, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+}
+
+// FIXME: under ASan the call below may write to freed memory and corrupt
+// its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define XDR_INTERCEPTOR(F, T) \
+ INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, F, xdrs, p); \
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); \
+ int res = REAL(F)(xdrs, p); \
+ if (res && p && xdrs->x_op == __sanitizer_XDR_DECODE) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); \
+ return res; \
+ }
+
+XDR_INTERCEPTOR(xdr_short, short)
+XDR_INTERCEPTOR(xdr_u_short, unsigned short)
+XDR_INTERCEPTOR(xdr_int, int)
+XDR_INTERCEPTOR(xdr_u_int, unsigned)
+XDR_INTERCEPTOR(xdr_long, long)
+XDR_INTERCEPTOR(xdr_u_long, unsigned long)
+XDR_INTERCEPTOR(xdr_hyper, long long)
+XDR_INTERCEPTOR(xdr_u_hyper, unsigned long long)
+XDR_INTERCEPTOR(xdr_longlong_t, long long)
+XDR_INTERCEPTOR(xdr_u_longlong_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_int8_t, u8)
+XDR_INTERCEPTOR(xdr_uint8_t, u8)
+XDR_INTERCEPTOR(xdr_int16_t, u16)
+XDR_INTERCEPTOR(xdr_uint16_t, u16)
+XDR_INTERCEPTOR(xdr_int32_t, u32)
+XDR_INTERCEPTOR(xdr_uint32_t, u32)
+XDR_INTERCEPTOR(xdr_int64_t, u64)
+XDR_INTERCEPTOR(xdr_uint64_t, u64)
+XDR_INTERCEPTOR(xdr_quad_t, long long)
+XDR_INTERCEPTOR(xdr_u_quad_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_bool, bool)
+XDR_INTERCEPTOR(xdr_enum, int)
+XDR_INTERCEPTOR(xdr_char, char)
+XDR_INTERCEPTOR(xdr_u_char, unsigned char)
+XDR_INTERCEPTOR(xdr_float, float)
+XDR_INTERCEPTOR(xdr_double, double)
+
+// FIXME: intercept xdr_array, opaque, union, vector, reference, pointer,
+// wrapstring, sizeof
+
+INTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_bytes, xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizep, sizeof(*sizep));
+ if (res && *p && *sizep) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, *sizep);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(xdr_string)(xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (res && *p)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ return res;
+}
+
+#define INIT_XDR \
+ COMMON_INTERCEPT_FUNCTION(xdrmem_create); \
+ COMMON_INTERCEPT_FUNCTION(xdrstdio_create); \
+ COMMON_INTERCEPT_FUNCTION(xdr_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bool); \
+ COMMON_INTERCEPT_FUNCTION(xdr_enum); \
+ COMMON_INTERCEPT_FUNCTION(xdr_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_float); \
+ COMMON_INTERCEPT_FUNCTION(xdr_double); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bytes); \
+ COMMON_INTERCEPT_FUNCTION(xdr_string);
+#else
+#define INIT_XDR
+#endif // SANITIZER_INTERCEPT_XDR
+
+#if SANITIZER_INTERCEPT_TSEARCH
+INTERCEPTOR(void *, tsearch, void *key, void **rootp,
+ int (*compar)(const void *, const void *)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ void *res = REAL(tsearch)(key, rootp, compar);
+ if (res && *(void **)res == key)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));
+ return res;
+}
+#define INIT_TSEARCH COMMON_INTERCEPT_FUNCTION(tsearch);
+#else
+#define INIT_TSEARCH
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS || SANITIZER_INTERCEPT_FOPEN || \
+ SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+void unpoison_file(__sanitizer_FILE *fp) {
+#if SANITIZER_HAS_STRUCT_FILE
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp, sizeof(*fp));
+#if SANITIZER_NETBSD
+ if (fp->_bf._base && fp->_bf._size > 0)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_bf._base,
+ fp->_bf._size);
+#else
+ if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
+ fp->_IO_read_end - fp->_IO_read_base);
+#endif
+#endif // SANITIZER_HAS_STRUCT_FILE
+}
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS
+// These guys are called when a .c source is built with -O2.
+INTERCEPTOR(int, __uflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __uflow, fp);
+ int res = REAL(__uflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __underflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __underflow, fp);
+ int res = REAL(__underflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __overflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __overflow, fp, ch);
+ int res = REAL(__overflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wuflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wuflow, fp);
+ int res = REAL(__wuflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wunderflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wunderflow, fp);
+ int res = REAL(__wunderflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __woverflow, fp, ch);
+ int res = REAL(__woverflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+#define INIT_LIBIO_INTERNALS \
+ COMMON_INTERCEPT_FUNCTION(__uflow); \
+ COMMON_INTERCEPT_FUNCTION(__underflow); \
+ COMMON_INTERCEPT_FUNCTION(__overflow); \
+ COMMON_INTERCEPT_FUNCTION(__wuflow); \
+ COMMON_INTERCEPT_FUNCTION(__wunderflow); \
+ COMMON_INTERCEPT_FUNCTION(__woverflow);
+#else
+#define INIT_LIBIO_INTERNALS
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN
+INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fdopen)(fd, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN \
+ COMMON_INTERCEPT_FUNCTION(fopen); \
+ COMMON_INTERCEPT_FUNCTION(fdopen); \
+ COMMON_INTERCEPT_FUNCTION(freopen);
+#else
+#define INIT_FOPEN
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN64
+INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen64)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN64 \
+ COMMON_INTERCEPT_FUNCTION(fopen64); \
+ COMMON_INTERCEPT_FUNCTION(freopen64);
+#else
+#define INIT_FOPEN64
+#endif
+
+#if SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, open_wmemstream, wchar_t **ptr,
+ SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_wmemstream, ptr, sizeloc);
+ __sanitizer_FILE *res = REAL(open_wmemstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {(char **)ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size,
+ const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_OPEN_MEMSTREAM \
+ COMMON_INTERCEPT_FUNCTION(open_memstream); \
+ COMMON_INTERCEPT_FUNCTION(open_wmemstream); \
+ COMMON_INTERCEPT_FUNCTION(fmemopen);
+#else
+#define INIT_OPEN_MEMSTREAM
+#endif
+
+#if SANITIZER_INTERCEPT_OBSTACK
+static void initialize_obstack(__sanitizer_obstack *obstack) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack, sizeof(*obstack));
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack->chunk,
+ sizeof(*obstack->chunk));
+}
+
+INTERCEPTOR(int, _obstack_begin_1, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr arg, uptr sz),
+ void (*free_fn)(uptr arg, void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin_1, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin_1)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(int, _obstack_begin, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr sz), void (*free_fn)(void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_newchunk, obstack, length);
+ REAL(_obstack_newchunk)(obstack, length);
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(
+ obstack->chunk, obstack->next_free - (char *)obstack->chunk);
+}
+#define INIT_OBSTACK \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin_1); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_newchunk);
+#else
+#define INIT_OBSTACK
+#endif
+
+#if SANITIZER_INTERCEPT_FFLUSH
+INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
+ int res = REAL(fflush)(fp);
+ // FIXME: handle fp == NULL
+ if (fp) {
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (m) COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ }
+ return res;
+}
+#define INIT_FFLUSH COMMON_INTERCEPT_FUNCTION(fflush);
+#else
+#define INIT_FFLUSH
+#endif
+
+#if SANITIZER_INTERCEPT_FCLOSE
+INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ int res = REAL(fclose)(fp);
+ if (m) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ DeleteInterceptorMetadata(fp);
+ }
+ return res;
+}
+#define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose);
+#else
+#define INIT_FCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
+ if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
+ COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
+ void *res = REAL(dlopen)(filename, flag);
+ Symbolizer::GetOrInit()->InvalidateModuleList();
+ COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
+ return res;
+}
+
+INTERCEPTOR(int, dlclose, void *handle) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle);
+ int res = REAL(dlclose)(handle);
+ Symbolizer::GetOrInit()->InvalidateModuleList();
+ COMMON_INTERCEPTOR_LIBRARY_UNLOADED();
+ return res;
+}
+#define INIT_DLOPEN_DLCLOSE \
+ COMMON_INTERCEPT_FUNCTION(dlopen); \
+ COMMON_INTERCEPT_FUNCTION(dlclose);
+#else
+#define INIT_DLOPEN_DLCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_GETPASS
+INTERCEPTOR(char *, getpass, const char *prompt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
+ if (prompt)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+ char *res = REAL(getpass)(prompt);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+ return res;
+}
+
+#define INIT_GETPASS COMMON_INTERCEPT_FUNCTION(getpass);
+#else
+#define INIT_GETPASS
+#endif
+
+#if SANITIZER_INTERCEPT_TIMERFD
+INTERCEPTOR(int, timerfd_settime, int fd, int flags, void *new_value,
+ void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_settime, fd, flags, new_value,
+ old_value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerspec_sz);
+ int res = REAL(timerfd_settime)(fd, flags, new_value, old_value);
+ if (res != -1 && old_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerspec_sz);
+ return res;
+}
+
+INTERCEPTOR(int, timerfd_gettime, int fd, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_gettime, fd, curr_value);
+ int res = REAL(timerfd_gettime)(fd, curr_value);
+ if (res != -1 && curr_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerspec_sz);
+ return res;
+}
+#define INIT_TIMERFD \
+ COMMON_INTERCEPT_FUNCTION(timerfd_settime); \
+ COMMON_INTERCEPT_FUNCTION(timerfd_gettime);
+#else
+#define INIT_TIMERFD
+#endif
+
+#if SANITIZER_INTERCEPT_MLOCKX
+// Linux kernel has a bug that leads to kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+ static atomic_uint8_t printed;
+ if (atomic_exchange(&printed, 1, memory_order_relaxed))
+ return;
+ VPrintf(1, "%s ignores mlock/mlockall/munlock/munlockall\n",
+ SanitizerToolName);
+}
+
+INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, mlockall, int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlockall, void) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+#define INIT_MLOCKX \
+ COMMON_INTERCEPT_FUNCTION(mlock); \
+ COMMON_INTERCEPT_FUNCTION(munlock); \
+ COMMON_INTERCEPT_FUNCTION(mlockall); \
+ COMMON_INTERCEPT_FUNCTION(munlockall);
+
+#else
+#define INIT_MLOCKX
+#endif // SANITIZER_INTERCEPT_MLOCKX
+
+#if SANITIZER_INTERCEPT_FOPENCOOKIE
+struct WrappedCookie {
+ void *real_cookie;
+ __sanitizer_cookie_io_functions_t real_io_funcs;
+};
+
+static uptr wrapped_read(void *cookie, char *buf, uptr size) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_read real_read = wrapped_cookie->real_io_funcs.read;
+ return real_read ? real_read(wrapped_cookie->real_cookie, buf, size) : 0;
+}
+
+static uptr wrapped_write(void *cookie, const char *buf, uptr size) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_write real_write = wrapped_cookie->real_io_funcs.write;
+ return real_write ? real_write(wrapped_cookie->real_cookie, buf, size) : size;
+}
+
+static int wrapped_seek(void *cookie, u64 *offset, int whence) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(offset, sizeof(*offset));
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_seek real_seek = wrapped_cookie->real_io_funcs.seek;
+ return real_seek ? real_seek(wrapped_cookie->real_cookie, offset, whence)
+ : -1;
+}
+
+static int wrapped_close(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_close real_close = wrapped_cookie->real_io_funcs.close;
+ int res = real_close ? real_close(wrapped_cookie->real_cookie) : 0;
+ InternalFree(wrapped_cookie);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_FILE *, fopencookie, void *cookie, const char *mode,
+ __sanitizer_cookie_io_functions_t io_funcs) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopencookie, cookie, mode, io_funcs);
+ WrappedCookie *wrapped_cookie =
+ (WrappedCookie *)InternalAlloc(sizeof(WrappedCookie));
+ wrapped_cookie->real_cookie = cookie;
+ wrapped_cookie->real_io_funcs = io_funcs;
+ __sanitizer_FILE *res =
+ REAL(fopencookie)(wrapped_cookie, mode, {wrapped_read, wrapped_write,
+ wrapped_seek, wrapped_close});
+ return res;
+}
+
+#define INIT_FOPENCOOKIE COMMON_INTERCEPT_FUNCTION(fopencookie);
+#else
+#define INIT_FOPENCOOKIE
+#endif // SANITIZER_INTERCEPT_FOPENCOOKIE
+
+#if SANITIZER_INTERCEPT_SEM
+INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
+ // Workaround a bug in glibc's "old" semaphore implementation by
+ // zero-initializing the sem_t contents. This has to be done here because
+ // interceptors bind to the lowest symbols version by default, hitting the
+ // buggy code path while the non-sanitized build of the same code works fine.
+ REAL(memset)(s, 0, sizeof(*s));
+ int res = REAL(sem_init)(s, pshared, value);
+ return res;
+}
+
+INTERCEPTOR(int, sem_destroy, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_destroy, s);
+ int res = REAL(sem_destroy)(s);
+ return res;
+}
+
+INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_wait, s);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_wait)(s);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_timedwait, __sanitizer_sem_t *s, void *abstime) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_timedwait, s, abstime);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, abstime, struct_timespec_sz);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_timedwait)(s, abstime);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_post, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_post, s);
+ COMMON_INTERCEPTOR_RELEASE(ctx, (uptr)s);
+ int res = REAL(sem_post)(s);
+ return res;
+}
+
+INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_getvalue, s, sval);
+ int res = REAL(sem_getvalue)(s, sval);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sval, sizeof(*sval));
+ }
+ return res;
+}
+#define INIT_SEM \
+ COMMON_INTERCEPT_FUNCTION(sem_init); \
+ COMMON_INTERCEPT_FUNCTION(sem_destroy); \
+ COMMON_INTERCEPT_FUNCTION(sem_wait); \
+ COMMON_INTERCEPT_FUNCTION(sem_trywait); \
+ COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
+ COMMON_INTERCEPT_FUNCTION(sem_post); \
+ COMMON_INTERCEPT_FUNCTION(sem_getvalue);
+#else
+#define INIT_SEM
+#endif // SANITIZER_INTERCEPT_SEM
+
+#if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
+INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcancelstate, state, oldstate);
+ int res = REAL(pthread_setcancelstate)(state, oldstate);
+ if (res == 0 && oldstate != nullptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldstate, sizeof(*oldstate));
+ return res;
+}
+
+INTERCEPTOR(int, pthread_setcanceltype, int type, int *oldtype) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcanceltype, type, oldtype);
+ int res = REAL(pthread_setcanceltype)(type, oldtype);
+ if (res == 0 && oldtype != nullptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldtype, sizeof(*oldtype));
+ return res;
+}
+#define INIT_PTHREAD_SETCANCEL \
+ COMMON_INTERCEPT_FUNCTION(pthread_setcancelstate); \
+ COMMON_INTERCEPT_FUNCTION(pthread_setcanceltype);
+#else
+#define INIT_PTHREAD_SETCANCEL
+#endif
+
+#if SANITIZER_INTERCEPT_MINCORE
+INTERCEPTOR(int, mincore, void *addr, uptr length, unsigned char *vec) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mincore, addr, length, vec);
+ int res = REAL(mincore)(addr, length, vec);
+ if (res == 0) {
+ uptr page_size = GetPageSizeCached();
+ uptr vec_size = ((length + page_size - 1) & (~(page_size - 1))) / page_size;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, vec, vec_size);
+ }
+ return res;
+}
+#define INIT_MINCORE COMMON_INTERCEPT_FUNCTION(mincore);
+#else
+#define INIT_MINCORE
+#endif
+
+#if SANITIZER_INTERCEPT_PROCESS_VM_READV
+INTERCEPTOR(SSIZE_T, process_vm_readv, int pid, __sanitizer_iovec *local_iov,
+ uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,
+ uptr flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, process_vm_readv, pid, local_iov, liovcnt,
+ remote_iov, riovcnt, flags);
+ SSIZE_T res = REAL(process_vm_readv)(pid, local_iov, liovcnt, remote_iov,
+ riovcnt, flags);
+ if (res > 0)
+ write_iovec(ctx, local_iov, liovcnt, res);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, process_vm_writev, int pid, __sanitizer_iovec *local_iov,
+ uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,
+ uptr flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, process_vm_writev, pid, local_iov, liovcnt,
+ remote_iov, riovcnt, flags);
+ SSIZE_T res = REAL(process_vm_writev)(pid, local_iov, liovcnt, remote_iov,
+ riovcnt, flags);
+ if (res > 0)
+ read_iovec(ctx, local_iov, liovcnt, res);
+ return res;
+}
+#define INIT_PROCESS_VM_READV \
+ COMMON_INTERCEPT_FUNCTION(process_vm_readv); \
+ COMMON_INTERCEPT_FUNCTION(process_vm_writev);
+#else
+#define INIT_PROCESS_VM_READV
+#endif
+
+#if SANITIZER_INTERCEPT_CTERMID
+INTERCEPTOR(char *, ctermid, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s);
+ char *res = REAL(ctermid)(s);
+ if (res) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_CTERMID COMMON_INTERCEPT_FUNCTION(ctermid);
+#else
+#define INIT_CTERMID
+#endif
+
+#if SANITIZER_INTERCEPT_CTERMID_R
+INTERCEPTOR(char *, ctermid_r, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s);
+ char *res = REAL(ctermid_r)(s);
+ if (res) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_CTERMID_R COMMON_INTERCEPT_FUNCTION(ctermid_r);
+#else
+#define INIT_CTERMID_R
+#endif
+
+#if SANITIZER_INTERCEPT_RECV_RECVFROM
+INTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recv, fd, buf, len, flags);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(recv)(fd, buf, len, flags);
+ if (res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ }
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags,
+ void *srcaddr, int *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recvfrom, fd, buf, len, flags, srcaddr,
+ addrlen);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SIZE_T srcaddr_sz;
+ if (srcaddr) srcaddr_sz = *addrlen;
+ (void)srcaddr_sz; // prevent "set but not used" warning
+ SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ if (res >= 0 && srcaddr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(srcaddr,
+ Min((SIZE_T)*addrlen, srcaddr_sz));
+ return res;
+}
+#define INIT_RECV_RECVFROM \
+ COMMON_INTERCEPT_FUNCTION(recv); \
+ COMMON_INTERCEPT_FUNCTION(recvfrom);
+#else
+#define INIT_RECV_RECVFROM
+#endif
+
+#if SANITIZER_INTERCEPT_SEND_SENDTO
+INTERCEPTOR(SSIZE_T, send, int fd, void *buf, SIZE_T len, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, send, fd, buf, len, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ SSIZE_T res = REAL(send)(fd, buf, len, flags);
+ if (common_flags()->intercept_send && res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags,
+ void *dstaddr, int addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendto, fd, buf, len, flags, dstaddr, addrlen);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ // Can't check dstaddr as it may have uninitialized padding at the end.
+ SSIZE_T res = REAL(sendto)(fd, buf, len, flags, dstaddr, addrlen);
+ if (common_flags()->intercept_send && res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ return res;
+}
+#define INIT_SEND_SENDTO \
+ COMMON_INTERCEPT_FUNCTION(send); \
+ COMMON_INTERCEPT_FUNCTION(sendto);
+#else
+#define INIT_SEND_SENDTO
+#endif
+
+#if SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
+INTERCEPTOR(int, eventfd_read, int fd, u64 *value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, eventfd_read, fd, value);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ int res = REAL(eventfd_read)(fd, value);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, sizeof(*value));
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+INTERCEPTOR(int, eventfd_write, int fd, u64 value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, eventfd_write, fd, value);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ int res = REAL(eventfd_write)(fd, value);
+ return res;
+}
+#define INIT_EVENTFD_READ_WRITE \
+ COMMON_INTERCEPT_FUNCTION(eventfd_read); \
+ COMMON_INTERCEPT_FUNCTION(eventfd_write)
+#else
+#define INIT_EVENTFD_READ_WRITE
+#endif
+
+#if SANITIZER_INTERCEPT_STAT
+INTERCEPTOR(int, stat, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, stat, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(stat)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT_STAT COMMON_INTERCEPT_FUNCTION(stat)
+#else
+#define INIT_STAT
+#endif
+
+#if SANITIZER_INTERCEPT_LSTAT
+INTERCEPTOR(int, lstat, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lstat, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(lstat)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT_LSTAT COMMON_INTERCEPT_FUNCTION(lstat)
+#else
+#define INIT_LSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___XSTAT
+INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xstat, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__xstat)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT___XSTAT COMMON_INTERCEPT_FUNCTION(__xstat)
+#else
+#define INIT___XSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___XSTAT64
+INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xstat64, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__xstat64)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT___XSTAT64 COMMON_INTERCEPT_FUNCTION(__xstat64)
+#else
+#define INIT___XSTAT64
+#endif
+
+#if SANITIZER_INTERCEPT___LXSTAT
+INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __lxstat, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__lxstat)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT___LXSTAT COMMON_INTERCEPT_FUNCTION(__lxstat)
+#else
+#define INIT___LXSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___LXSTAT64
+INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __lxstat64, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__lxstat64)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT___LXSTAT64 COMMON_INTERCEPT_FUNCTION(__lxstat64)
+#else
+#define INIT___LXSTAT64
+#endif
+
+// FIXME: add other *stat interceptor
+
+#if SANITIZER_INTERCEPT_UTMP
+INTERCEPTOR(void *, getutent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy);
+ void *res = REAL(getutent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut);
+ void *res = REAL(getutid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut);
+ void *res = REAL(getutline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+#define INIT_UTMP \
+ COMMON_INTERCEPT_FUNCTION(getutent); \
+ COMMON_INTERCEPT_FUNCTION(getutid); \
+ COMMON_INTERCEPT_FUNCTION(getutline);
+#else
+#define INIT_UTMP
+#endif
+
+#if SANITIZER_INTERCEPT_UTMPX
+INTERCEPTOR(void *, getutxent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy);
+ void *res = REAL(getutxent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut);
+ void *res = REAL(getutxid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut);
+ void *res = REAL(getutxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, pututxline, const void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pututxline, ut);
+ if (ut)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ut, __sanitizer::struct_utmpx_sz);
+ void *res = REAL(pututxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+#define INIT_UTMPX \
+ COMMON_INTERCEPT_FUNCTION(getutxent); \
+ COMMON_INTERCEPT_FUNCTION(getutxid); \
+ COMMON_INTERCEPT_FUNCTION(getutxline); \
+ COMMON_INTERCEPT_FUNCTION(pututxline);
+#else
+#define INIT_UTMPX
+#endif
+
+#if SANITIZER_INTERCEPT_GETLOADAVG
+INTERCEPTOR(int, getloadavg, double *loadavg, int nelem) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getloadavg, loadavg, nelem);
+ int res = REAL(getloadavg)(loadavg, nelem);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, loadavg, res * sizeof(*loadavg));
+ return res;
+}
+#define INIT_GETLOADAVG \
+ COMMON_INTERCEPT_FUNCTION(getloadavg);
+#else
+#define INIT_GETLOADAVG
+#endif
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif
+
+INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
+ SIZE_T res = REAL(wcslen)(s);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (res + 1));
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsnlen, s, n);
+ SIZE_T res = REAL(wcsnlen)(s, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * Min(res + 1, n));
+ return res;
+}
+#define INIT_WCSLEN \
+ COMMON_INTERCEPT_FUNCTION(wcslen); \
+ COMMON_INTERCEPT_FUNCTION(wcsnlen);
+
+#if SANITIZER_INTERCEPT_WCSCAT
+INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);
+ SIZE_T src_size = REAL(wcslen)(src);
+ SIZE_T dst_size = REAL(wcslen)(dst);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+ (src_size + 1) * sizeof(wchar_t));
+ return REAL(wcscat)(dst, src);
+}
+
+INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);
+ SIZE_T src_size = REAL(wcsnlen)(src, n);
+ SIZE_T dst_size = REAL(wcslen)(dst);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src,
+ Min(src_size + 1, n) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+ (src_size + 1) * sizeof(wchar_t));
+ return REAL(wcsncat)(dst, src, n);
+}
+#define INIT_WCSCAT \
+ COMMON_INTERCEPT_FUNCTION(wcscat); \
+ COMMON_INTERCEPT_FUNCTION(wcsncat);
+#else
+#define INIT_WCSCAT
+#endif
+
+#if SANITIZER_INTERCEPT_WCSDUP
+INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsdup, s);
+ SIZE_T len = REAL(wcslen)(s);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (len + 1));
+ wchar_t *result = REAL(wcsdup)(s);
+ if (result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(wchar_t) * (len + 1));
+ return result;
+}
+
+#define INIT_WCSDUP COMMON_INTERCEPT_FUNCTION(wcsdup);
+#else
+#define INIT_WCSDUP
+#endif
+
+#if SANITIZER_INTERCEPT_STRXFRM
+static SIZE_T RealStrLen(const char *str) { return REAL(strlen)(str); }
+
+static SIZE_T RealStrLen(const wchar_t *str) { return REAL(wcslen)(str); }
+
+#define STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, strxfrm, dest, src, len, ##__VA_ARGS__); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, \
+ sizeof(*src) * (RealStrLen(src) + 1)); \
+ SIZE_T res = REAL(strxfrm)(dest, src, len, ##__VA_ARGS__); \
+ if (res < len) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, sizeof(*src) * (res + 1)); \
+ return res; \
+ }
+
+INTERCEPTOR(SIZE_T, strxfrm, char *dest, const char *src, SIZE_T len) {
+ STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len);
+}
+
+INTERCEPTOR(SIZE_T, strxfrm_l, char *dest, const char *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(strxfrm_l, dest, src, len, locale);
+}
+
+#define INIT_STRXFRM \
+ COMMON_INTERCEPT_FUNCTION(strxfrm); \
+ COMMON_INTERCEPT_FUNCTION(strxfrm_l);
+#else
+#define INIT_STRXFRM
+#endif
+
+#if SANITIZER_INTERCEPT___STRXFRM_L
+INTERCEPTOR(SIZE_T, __strxfrm_l, char *dest, const char *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(__strxfrm_l, dest, src, len, locale);
+}
+
+#define INIT___STRXFRM_L COMMON_INTERCEPT_FUNCTION(__strxfrm_l);
+#else
+#define INIT___STRXFRM_L
+#endif
+
+#if SANITIZER_INTERCEPT_WCSXFRM
+INTERCEPTOR(SIZE_T, wcsxfrm, wchar_t *dest, const wchar_t *src, SIZE_T len) {
+ STRXFRM_INTERCEPTOR_IMPL(wcsxfrm, dest, src, len);
+}
+
+INTERCEPTOR(SIZE_T, wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(wcsxfrm_l, dest, src, len, locale);
+}
+
+#define INIT_WCSXFRM \
+ COMMON_INTERCEPT_FUNCTION(wcsxfrm); \
+ COMMON_INTERCEPT_FUNCTION(wcsxfrm_l);
+#else
+#define INIT_WCSXFRM
+#endif
+
+#if SANITIZER_INTERCEPT___WCSXFRM_L
+INTERCEPTOR(SIZE_T, __wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(__wcsxfrm_l, dest, src, len, locale);
+}
+
+#define INIT___WCSXFRM_L COMMON_INTERCEPT_FUNCTION(__wcsxfrm_l);
+#else
+#define INIT___WCSXFRM_L
+#endif
+
+#if SANITIZER_INTERCEPT_ACCT
+INTERCEPTOR(int, acct, const char *file) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, acct, file);
+ if (file)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ return REAL(acct)(file);
+}
+#define INIT_ACCT COMMON_INTERCEPT_FUNCTION(acct)
+#else
+#define INIT_ACCT
+#endif
+
+#if SANITIZER_INTERCEPT_USER_FROM_UID
+INTERCEPTOR(const char *, user_from_uid, u32 uid, int nouser) {
+ void *ctx;
+ const char *user;
+ COMMON_INTERCEPTOR_ENTER(ctx, user_from_uid, uid, nouser);
+ user = REAL(user_from_uid)(uid, nouser);
+ if (user)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ return user;
+}
+#define INIT_USER_FROM_UID COMMON_INTERCEPT_FUNCTION(user_from_uid)
+#else
+#define INIT_USER_FROM_UID
+#endif
+
+#if SANITIZER_INTERCEPT_UID_FROM_USER
+INTERCEPTOR(int, uid_from_user, const char *name, u32 *uid) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, uid_from_user, name, uid);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ res = REAL(uid_from_user)(name, uid);
+ if (uid)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, uid, sizeof(*uid));
+ return res;
+}
+#define INIT_UID_FROM_USER COMMON_INTERCEPT_FUNCTION(uid_from_user)
+#else
+#define INIT_UID_FROM_USER
+#endif
+
+#if SANITIZER_INTERCEPT_GROUP_FROM_GID
+INTERCEPTOR(const char *, group_from_gid, u32 gid, int nogroup) {
+ void *ctx;
+ const char *group;
+ COMMON_INTERCEPTOR_ENTER(ctx, group_from_gid, gid, nogroup);
+ group = REAL(group_from_gid)(gid, nogroup);
+ if (group)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ return group;
+}
+#define INIT_GROUP_FROM_GID COMMON_INTERCEPT_FUNCTION(group_from_gid)
+#else
+#define INIT_GROUP_FROM_GID
+#endif
+
+#if SANITIZER_INTERCEPT_GID_FROM_GROUP
+INTERCEPTOR(int, gid_from_group, const char *group, u32 *gid) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, gid_from_group, group, gid);
+ if (group)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ res = REAL(gid_from_group)(group, gid);
+ if (gid)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, gid, sizeof(*gid));
+ return res;
+}
+#define INIT_GID_FROM_GROUP COMMON_INTERCEPT_FUNCTION(gid_from_group)
+#else
+#define INIT_GID_FROM_GROUP
+#endif
+
+#if SANITIZER_INTERCEPT_ACCESS
+INTERCEPTOR(int, access, const char *path, int mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, access, path, mode);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return REAL(access)(path, mode);
+}
+#define INIT_ACCESS COMMON_INTERCEPT_FUNCTION(access)
+#else
+#define INIT_ACCESS
+#endif
+
+#if SANITIZER_INTERCEPT_FACCESSAT
+INTERCEPTOR(int, faccessat, int fd, const char *path, int mode, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, faccessat, fd, path, mode, flags);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return REAL(faccessat)(fd, path, mode, flags);
+}
+#define INIT_FACCESSAT COMMON_INTERCEPT_FUNCTION(faccessat)
+#else
+#define INIT_FACCESSAT
+#endif
+
+#if SANITIZER_INTERCEPT_GETGROUPLIST
+INTERCEPTOR(int, getgrouplist, const char *name, u32 basegid, u32 *groups,
+ int *ngroups) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrouplist, name, basegid, groups, ngroups);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (ngroups)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ngroups, sizeof(*ngroups));
+ res = REAL(getgrouplist)(name, basegid, groups, ngroups);
+ if (!res && groups && ngroups) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups));
+ }
+ return res;
+}
+
+#define INIT_GETGROUPLIST COMMON_INTERCEPT_FUNCTION(getgrouplist);
+#else
+#define INIT_GETGROUPLIST
+#endif
+
+#if SANITIZER_INTERCEPT_GETGROUPMEMBERSHIP
+INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
+ int maxgrp, int *ngroups) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgroupmembership, name, basegid, groups,
+ maxgrp, ngroups);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ res = REAL(getgroupmembership)(name, basegid, groups, maxgrp, ngroups);
+ if (!res && groups && ngroups) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups));
+ }
+ return res;
+}
+
+#define INIT_GETGROUPMEMBERSHIP COMMON_INTERCEPT_FUNCTION(getgroupmembership);
+#else
+#define INIT_GETGROUPMEMBERSHIP
+#endif
+
+#if SANITIZER_INTERCEPT_READLINK
+INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readlink, path, buf, bufsiz);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ SSIZE_T res = REAL(readlink)(path, buf, bufsiz);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
+ return res;
+}
+
+#define INIT_READLINK COMMON_INTERCEPT_FUNCTION(readlink)
+#else
+#define INIT_READLINK
+#endif
+
+#if SANITIZER_INTERCEPT_READLINKAT
+INTERCEPTOR(SSIZE_T, readlinkat, int dirfd, const char *path, char *buf,
+ SIZE_T bufsiz) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readlinkat, dirfd, path, buf, bufsiz);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ SSIZE_T res = REAL(readlinkat)(dirfd, path, buf, bufsiz);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
+ return res;
+}
+
+#define INIT_READLINKAT COMMON_INTERCEPT_FUNCTION(readlinkat)
+#else
+#define INIT_READLINKAT
+#endif
+
+#if SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT
+INTERCEPTOR(int, name_to_handle_at, int dirfd, const char *pathname,
+ struct file_handle *handle, int *mount_id, int flags) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, name_to_handle_at, dirfd, pathname, handle,
+ mount_id, flags);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, REAL(strlen)(pathname) + 1);
+
+ __sanitizer_file_handle *sanitizer_handle =
+ reinterpret_cast<__sanitizer_file_handle*>(handle);
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->handle_bytes,
+ sizeof(sanitizer_handle->handle_bytes));
+
+ int res = REAL(name_to_handle_at)(dirfd, pathname, handle, mount_id, flags);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, &sanitizer_handle->handle_bytes,
+ sizeof(sanitizer_handle->handle_bytes));
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, &sanitizer_handle->handle_type,
+ sizeof(sanitizer_handle->handle_type));
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mount_id, sizeof(*mount_id));
+ }
+ return res;
+}
+
+#define INIT_NAME_TO_HANDLE_AT COMMON_INTERCEPT_FUNCTION(name_to_handle_at)
+#else
+#define INIT_NAME_TO_HANDLE_AT
+#endif
+
+#if SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT
+INTERCEPTOR(int, open_by_handle_at, int mount_fd, struct file_handle* handle,
+ int flags) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_by_handle_at, mount_fd, handle, flags);
+
+ __sanitizer_file_handle *sanitizer_handle =
+ reinterpret_cast<__sanitizer_file_handle*>(handle);
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->handle_bytes,
+ sizeof(sanitizer_handle->handle_bytes));
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->handle_type,
+ sizeof(sanitizer_handle->handle_type));
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes);
+
+ return REAL(open_by_handle_at)(mount_fd, handle, flags);
+}
+
+#define INIT_OPEN_BY_HANDLE_AT COMMON_INTERCEPT_FUNCTION(open_by_handle_at)
+#else
+#define INIT_OPEN_BY_HANDLE_AT
+#endif
+
+#if SANITIZER_INTERCEPT_STRLCPY
+INTERCEPTOR(SIZE_T, strlcpy, char *dst, char *src, SIZE_T size) {
+ void *ctx;
+ SIZE_T res;
+ COMMON_INTERCEPTOR_ENTER(ctx, strlcpy, dst, src, size);
+ if (src) {
+ // Keep strnlen as macro argument, as macro may ignore it.
+ COMMON_INTERCEPTOR_READ_STRING(
+ ctx, src, Min(internal_strnlen(src, size), size - 1) + 1);
+ }
+ res = REAL(strlcpy)(dst, src, size);
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, REAL(strlen)(dst) + 1);
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, strlcat, char *dst, char *src, SIZE_T size) {
+ void *ctx;
+ SIZE_T len = 0;
+ COMMON_INTERCEPTOR_ENTER(ctx, strlcat, dst, src, size);
+ // src is checked in the strlcpy() interceptor
+ if (dst) {
+ len = internal_strnlen(dst, size);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, dst, Min(len, size - 1) + 1);
+ }
+ // Reuse the rest of the code in the strlcpy() interceptor
+ return WRAP(strlcpy)(dst + len, src, size - len) + len;
+}
+#define INIT_STRLCPY \
+ COMMON_INTERCEPT_FUNCTION(strlcpy); \
+ COMMON_INTERCEPT_FUNCTION(strlcat);
+#else
+#define INIT_STRLCPY
+#endif
+
+#if SANITIZER_INTERCEPT_MMAP
+INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd,
+ OFF_T off) {
+ void *ctx;
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off);
+}
+
+INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
+ void *ctx;
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (int)internal_mprotect(addr, sz, prot);
+ COMMON_INTERCEPTOR_ENTER(ctx, mprotect, addr, sz, prot);
+ MprotectMallocZones(addr, prot);
+ return REAL(mprotect)(addr, sz, prot);
+}
+#define INIT_MMAP \
+ COMMON_INTERCEPT_FUNCTION(mmap); \
+ COMMON_INTERCEPT_FUNCTION(mprotect);
+#else
+#define INIT_MMAP
+#endif
+
+#if SANITIZER_INTERCEPT_MMAP64
+INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, int fd,
+ OFF64_T off) {
+ void *ctx;
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap64, addr, sz, prot, flags, fd, off);
+}
+#define INIT_MMAP64 COMMON_INTERCEPT_FUNCTION(mmap64);
+#else
+#define INIT_MMAP64
+#endif
+
+#if SANITIZER_INTERCEPT_DEVNAME
+INTERCEPTOR(char *, devname, u64 dev, u32 type) {
+ void *ctx;
+ char *name;
+ COMMON_INTERCEPTOR_ENTER(ctx, devname, dev, type);
+ name = REAL(devname)(dev, type);
+ if (name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return name;
+}
+#define INIT_DEVNAME COMMON_INTERCEPT_FUNCTION(devname);
+#else
+#define INIT_DEVNAME
+#endif
+
+#if SANITIZER_INTERCEPT_DEVNAME_R
+#if SANITIZER_NETBSD
+#define DEVNAME_R_RETTYPE int
+#define DEVNAME_R_SUCCESS(x) (!(x))
+#else
+#define DEVNAME_R_RETTYPE char*
+#define DEVNAME_R_SUCCESS(x) (x)
+#endif
+INTERCEPTOR(DEVNAME_R_RETTYPE, devname_r, u64 dev, u32 type, char *path,
+ uptr len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, devname_r, dev, type, path, len);
+ DEVNAME_R_RETTYPE res = REAL(devname_r)(dev, type, path, len);
+ if (DEVNAME_R_SUCCESS(res))
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return res;
+}
+#define INIT_DEVNAME_R COMMON_INTERCEPT_FUNCTION(devname_r);
+#else
+#define INIT_DEVNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_FGETLN
+INTERCEPTOR(char *, fgetln, __sanitizer_FILE *stream, SIZE_T *len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetln, stream, len);
+ char *str = REAL(fgetln)(stream, len);
+ if (str && len) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, *len);
+ }
+ return str;
+}
+#define INIT_FGETLN COMMON_INTERCEPT_FUNCTION(fgetln)
+#else
+#define INIT_FGETLN
+#endif
+
+#if SANITIZER_INTERCEPT_STRMODE
+INTERCEPTOR(void, strmode, u32 mode, char *bp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strmode, mode, bp);
+ REAL(strmode)(mode, bp);
+ if (bp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, REAL(strlen)(bp) + 1);
+}
+#define INIT_STRMODE COMMON_INTERCEPT_FUNCTION(strmode)
+#else
+#define INIT_STRMODE
+#endif
+
+#if SANITIZER_INTERCEPT_TTYENT
+INTERCEPTOR(struct __sanitizer_ttyent *, getttyent, void) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getttyent);
+ struct __sanitizer_ttyent *ttyent = REAL(getttyent)();
+ if (ttyent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
+ return ttyent;
+}
+INTERCEPTOR(struct __sanitizer_ttyent *, getttynam, char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getttynam, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ struct __sanitizer_ttyent *ttyent = REAL(getttynam)(name);
+ if (ttyent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
+ return ttyent;
+}
+INTERCEPTOR(int, setttyentpath, char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setttyentpath, path);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return REAL(setttyentpath)(path);
+}
+#define INIT_TTYENT \
+ COMMON_INTERCEPT_FUNCTION(getttyent); \
+ COMMON_INTERCEPT_FUNCTION(getttynam); \
+ COMMON_INTERCEPT_FUNCTION(setttyentpath)
+#else
+#define INIT_TTYENT
+#endif
+
+#if SANITIZER_INTERCEPT_PROTOENT
+static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+
+ SIZE_T pp_size = 1; // One handles the trailing \0
+
+ for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
+ pp_size * sizeof(char **));
+}
+
+INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
+ struct __sanitizer_protoent *p = REAL(getprotoent)();
+ if (p)
+ write_protoent(ctx, p);
+ return p;
+}
+
+INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
+ if (p)
+ write_protoent(ctx, p);
+ return p;
+}
+
+INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber, proto);
+ struct __sanitizer_protoent *p = REAL(getprotobynumber)(proto);
+ if (p)
+ write_protoent(ctx, p);
+ return p;
+}
+#define INIT_PROTOENT \
+ COMMON_INTERCEPT_FUNCTION(getprotoent); \
+ COMMON_INTERCEPT_FUNCTION(getprotobyname); \
+ COMMON_INTERCEPT_FUNCTION(getprotobynumber)
+#else
+#define INIT_PROTOENT
+#endif
+
+#if SANITIZER_INTERCEPT_PROTOENT_R
+INTERCEPTOR(int, getprotoent_r, struct __sanitizer_protoent *result_buf,
+ char *buf, SIZE_T buflen, struct __sanitizer_protoent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotoent_r, result_buf, buf, buflen,
+ result);
+ int res = REAL(getprotoent_r)(result_buf, buf, buflen, result);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
+ if (!res && *result)
+ write_protoent(ctx, *result);
+ return res;
+}
+
+INTERCEPTOR(int, getprotobyname_r, const char *name,
+ struct __sanitizer_protoent *result_buf, char *buf, SIZE_T buflen,
+ struct __sanitizer_protoent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf,
+ buflen, result);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
+ if (!res && *result)
+ write_protoent(ctx, *result);
+ return res;
+}
+
+INTERCEPTOR(int, getprotobynumber_r, int num,
+ struct __sanitizer_protoent *result_buf, char *buf,
+ SIZE_T buflen, struct __sanitizer_protoent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber_r, num, result_buf, buf,
+ buflen, result);
+ int res = REAL(getprotobynumber_r)(num, result_buf, buf, buflen, result);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
+ if (!res && *result)
+ write_protoent(ctx, *result);
+ return res;
+}
+
+#define INIT_PROTOENT_R \
+ COMMON_INTERCEPT_FUNCTION(getprotoent_r); \
+ COMMON_INTERCEPT_FUNCTION(getprotobyname_r); \
+ COMMON_INTERCEPT_FUNCTION(getprotobynumber_r);
+#else
+#define INIT_PROTOENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_NETENT
+INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnetent);
+ struct __sanitizer_netent *n = REAL(getnetent)();
+ if (n) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+
+ SIZE_T nn_size = 1; // One handles the trailing \0
+
+ for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
+ nn_size * sizeof(char **));
+ }
+ return n;
+}
+
+INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnetbyname, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ struct __sanitizer_netent *n = REAL(getnetbyname)(name);
+ if (n) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+
+ SIZE_T nn_size = 1; // One handles the trailing \0
+
+ for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
+ nn_size * sizeof(char **));
+ }
+ return n;
+}
+
+INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnetbyaddr, net, type);
+ struct __sanitizer_netent *n = REAL(getnetbyaddr)(net, type);
+ if (n) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+
+ SIZE_T nn_size = 1; // One handles the trailing \0
+
+ for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
+ nn_size * sizeof(char **));
+ }
+ return n;
+}
+#define INIT_NETENT \
+ COMMON_INTERCEPT_FUNCTION(getnetent); \
+ COMMON_INTERCEPT_FUNCTION(getnetbyname); \
+ COMMON_INTERCEPT_FUNCTION(getnetbyaddr)
+#else
+#define INIT_NETENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTINFO
+INTERCEPTOR(int, getmntinfo, void **mntbufp, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getmntinfo, mntbufp, flags);
+ int cnt = REAL(getmntinfo)(mntbufp, flags);
+ if (cnt > 0 && mntbufp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mntbufp, sizeof(void *));
+ if (*mntbufp)
+#if SANITIZER_NETBSD
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statvfs_sz);
+#else
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statfs_sz);
+#endif
+ }
+ return cnt;
+}
+#define INIT_GETMNTINFO COMMON_INTERCEPT_FUNCTION(getmntinfo)
+#else
+#define INIT_GETMNTINFO
+#endif
+
+#if SANITIZER_INTERCEPT_MI_VECTOR_HASH
+INTERCEPTOR(void, mi_vector_hash, const void *key, SIZE_T len, u32 seed,
+ u32 hashes[3]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mi_vector_hash, key, len, seed, hashes);
+ if (key)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, len);
+ REAL(mi_vector_hash)(key, len, seed, hashes);
+ if (hashes)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hashes, sizeof(hashes[0]) * 3);
+}
+#define INIT_MI_VECTOR_HASH COMMON_INTERCEPT_FUNCTION(mi_vector_hash)
+#else
+#define INIT_MI_VECTOR_HASH
+#endif
+
+#if SANITIZER_INTERCEPT_SETVBUF
+INTERCEPTOR(int, setvbuf, __sanitizer_FILE *stream, char *buf, int mode,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setvbuf, stream, buf, mode, size);
+ int ret = REAL(setvbuf)(stream, buf, mode, size);
+ if (buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
+ if (stream)
+ unpoison_file(stream);
+ return ret;
+}
+
+INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setbuf, stream, buf);
+ REAL(setbuf)(stream, buf);
+ if (buf) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
+ }
+ if (stream)
+ unpoison_file(stream);
+}
+
+INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, int mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, mode);
+ REAL(setbuffer)(stream, buf, mode);
+ if (buf) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
+ }
+ if (stream)
+ unpoison_file(stream);
+}
+
+INTERCEPTOR(void, setlinebuf, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setlinebuf, stream);
+ REAL(setlinebuf)(stream);
+ if (stream)
+ unpoison_file(stream);
+}
+#define INIT_SETVBUF COMMON_INTERCEPT_FUNCTION(setvbuf); \
+ COMMON_INTERCEPT_FUNCTION(setbuf); \
+ COMMON_INTERCEPT_FUNCTION(setbuffer); \
+ COMMON_INTERCEPT_FUNCTION(setlinebuf)
+#else
+#define INIT_SETVBUF
+#endif
+
+#if SANITIZER_INTERCEPT_GETVFSSTAT
+INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getvfsstat, buf, bufsize, flags);
+ int ret = REAL(getvfsstat)(buf, bufsize, flags);
+ if (buf && ret > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, ret * struct_statvfs_sz);
+ return ret;
+}
+#define INIT_GETVFSSTAT COMMON_INTERCEPT_FUNCTION(getvfsstat)
+#else
+#define INIT_GETVFSSTAT
+#endif
+
+#if SANITIZER_INTERCEPT_REGEX
+INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regcomp, preg, pattern, cflags);
+ if (pattern)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, REAL(strlen)(pattern) + 1);
+ int res = REAL(regcomp)(preg, pattern, cflags);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz);
+ return res;
+}
+INTERCEPTOR(int, regexec, const void *preg, const char *string, SIZE_T nmatch,
+ struct __sanitizer_regmatch *pmatch[], int eflags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regexec, preg, string, nmatch, pmatch, eflags);
+ if (preg)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
+ if (string)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, string, REAL(strlen)(string) + 1);
+ int res = REAL(regexec)(preg, string, nmatch, pmatch, eflags);
+ if (!res && pmatch)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pmatch, nmatch * struct_regmatch_sz);
+ return res;
+}
+INTERCEPTOR(SIZE_T, regerror, int errcode, const void *preg, char *errbuf,
+ SIZE_T errbuf_size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regerror, errcode, preg, errbuf, errbuf_size);
+ if (preg)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
+ SIZE_T res = REAL(regerror)(errcode, preg, errbuf, errbuf_size);
+ if (errbuf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, REAL(strlen)(errbuf) + 1);
+ return res;
+}
+INTERCEPTOR(void, regfree, const void *preg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regfree, preg);
+ if (preg)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
+ REAL(regfree)(preg);
+}
+#define INIT_REGEX \
+ COMMON_INTERCEPT_FUNCTION(regcomp); \
+ COMMON_INTERCEPT_FUNCTION(regexec); \
+ COMMON_INTERCEPT_FUNCTION(regerror); \
+ COMMON_INTERCEPT_FUNCTION(regfree);
+#else
+#define INIT_REGEX
+#endif
+
+#if SANITIZER_INTERCEPT_REGEXSUB
+INTERCEPTOR(SSIZE_T, regnsub, char *buf, SIZE_T bufsiz, const char *sub,
+ const struct __sanitizer_regmatch *rm, const char *str) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regnsub, buf, bufsiz, sub, rm, str);
+ if (sub)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+ // The implementation demands and hardcodes 10 elements
+ if (rm)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
+ if (str)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ SSIZE_T res = REAL(regnsub)(buf, bufsiz, sub, rm, str);
+ if (res > 0 && buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
+ const struct __sanitizer_regmatch *rm, const char *sstr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regasub, buf, sub, rm, sstr);
+ if (sub)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+ // Hardcode 10 elements as this is hardcoded size
+ if (rm)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
+ if (sstr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, REAL(strlen)(sstr) + 1);
+ SSIZE_T res = REAL(regasub)(buf, sub, rm, sstr);
+ if (res > 0 && buf) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sizeof(char *));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, REAL(strlen)(*buf) + 1);
+ }
+ return res;
+}
+
+#define INIT_REGEXSUB \
+ COMMON_INTERCEPT_FUNCTION(regnsub); \
+ COMMON_INTERCEPT_FUNCTION(regasub);
+#else
+#define INIT_REGEXSUB
+#endif
+
+#if SANITIZER_INTERCEPT_FTS
+INTERCEPTOR(void *, fts_open, char *const *path_argv, int options,
+ int (*compar)(void **, void **)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_open, path_argv, options, compar);
+ if (path_argv) {
+ for (char *const *pa = path_argv; ; ++pa) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
+ if (!*pa)
+ break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ }
+ }
+ // TODO(kamil): handle compar callback
+ void *fts = REAL(fts_open)(path_argv, options, compar);
+ if (fts)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, fts, struct_FTS_sz);
+ return fts;
+}
+
+INTERCEPTOR(void *, fts_read, void *ftsp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_read, ftsp);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ void *ftsent = REAL(fts_read)(ftsp);
+ if (ftsent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ftsent, struct_FTSENT_sz);
+ return ftsent;
+}
+
+INTERCEPTOR(void *, fts_children, void *ftsp, int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_children, ftsp, options);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ void *ftsent = REAL(fts_children)(ftsp, options);
+ if (ftsent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ftsent, struct_FTSENT_sz);
+ return ftsent;
+}
+
+INTERCEPTOR(int, fts_set, void *ftsp, void *f, int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_set, ftsp, f, options);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ if (f)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, f, struct_FTSENT_sz);
+ return REAL(fts_set)(ftsp, f, options);
+}
+
+INTERCEPTOR(int, fts_close, void *ftsp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_close, ftsp);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ return REAL(fts_close)(ftsp);
+}
+#define INIT_FTS \
+ COMMON_INTERCEPT_FUNCTION(fts_open); \
+ COMMON_INTERCEPT_FUNCTION(fts_read); \
+ COMMON_INTERCEPT_FUNCTION(fts_children); \
+ COMMON_INTERCEPT_FUNCTION(fts_set); \
+ COMMON_INTERCEPT_FUNCTION(fts_close);
+#else
+#define INIT_FTS
+#endif
+
+#if SANITIZER_INTERCEPT_SYSCTL
+INTERCEPTOR(int, sysctl, int *name, unsigned int namelen, void *oldp,
+ SIZE_T *oldlenp, void *newp, SIZE_T newlen) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_sysctl(name, namelen, oldp, oldlenp, newp, newlen);
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctl, name, namelen, oldp, oldlenp, newp,
+ newlen);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, namelen * sizeof(*name));
+ if (oldlenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (newp && newlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, newp, newlen);
+ int res = REAL(sysctl)(name, namelen, oldp, oldlenp, newp, newlen);
+ if (!res) {
+ if (oldlenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (oldp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldp, *oldlenp);
+ }
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sysctlbyname, char *sname, void *oldp, SIZE_T *oldlenp,
+ void *newp, SIZE_T newlen) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_sysctlbyname(sname, oldp, oldlenp, newp, newlen);
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctlbyname, sname, oldp, oldlenp, newp,
+ newlen);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ if (oldlenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (newp && newlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, newp, newlen);
+ int res = REAL(sysctlbyname)(sname, oldp, oldlenp, newp, newlen);
+ if (!res) {
+ if (oldlenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (oldp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldp, *oldlenp);
+ }
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sysctlnametomib, const char *sname, int *name,
+ SIZE_T *namelenp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctlnametomib, sname, name, namelenp);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ if (namelenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
+ int res = REAL(sysctlnametomib)(sname, name, namelenp);
+ if (!res) {
+ if (namelenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelenp, sizeof(*namelenp));
+ if (name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, *namelenp * sizeof(*name));
+ }
+ }
+ return res;
+}
+
+#define INIT_SYSCTL \
+ COMMON_INTERCEPT_FUNCTION(sysctl); \
+ COMMON_INTERCEPT_FUNCTION(sysctlbyname); \
+ COMMON_INTERCEPT_FUNCTION(sysctlnametomib);
+#else
+#define INIT_SYSCTL
+#endif
+
+#if SANITIZER_INTERCEPT_ASYSCTL
+INTERCEPTOR(void *, asysctl, const int *name, SIZE_T namelen, SIZE_T *len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asysctl, name, namelen, len);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, sizeof(*name) * namelen);
+ void *res = REAL(asysctl)(name, namelen, len);
+ if (res && len) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, *len);
+ }
+ return res;
+}
+
+INTERCEPTOR(void *, asysctlbyname, const char *sname, SIZE_T *len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asysctlbyname, sname, len);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ void *res = REAL(asysctlbyname)(sname, len);
+ if (res && len) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, *len);
+ }
+ return res;
+}
+#define INIT_ASYSCTL \
+ COMMON_INTERCEPT_FUNCTION(asysctl); \
+ COMMON_INTERCEPT_FUNCTION(asysctlbyname);
+#else
+#define INIT_ASYSCTL
+#endif
+
+#if SANITIZER_INTERCEPT_SYSCTLGETMIBINFO
+INTERCEPTOR(int, sysctlgetmibinfo, char *sname, int *name,
+ unsigned int *namelenp, char *cname, SIZE_T *csz, void **rnode,
+ int v) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctlgetmibinfo, sname, name, namelenp, cname,
+ csz, rnode, v);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ if (namelenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
+ if (csz)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, csz, sizeof(*csz));
+ // Skip rnode, it's rarely used and not trivial to sanitize
+ // It's also used mostly internally
+ int res = REAL(sysctlgetmibinfo)(sname, name, namelenp, cname, csz, rnode, v);
+ if (!res) {
+ if (namelenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelenp, sizeof(*namelenp));
+ if (name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, *namelenp * sizeof(*name));
+ }
+ if (csz) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, csz, sizeof(*csz));
+ if (cname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cname, *csz);
+ }
+ }
+ return res;
+}
+#define INIT_SYSCTLGETMIBINFO \
+ COMMON_INTERCEPT_FUNCTION(sysctlgetmibinfo);
+#else
+#define INIT_SYSCTLGETMIBINFO
+#endif
+
+#if SANITIZER_INTERCEPT_NL_LANGINFO
+INTERCEPTOR(char *, nl_langinfo, long item) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, nl_langinfo, item);
+ char *ret = REAL(nl_langinfo)(item);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ return ret;
+}
+#define INIT_NL_LANGINFO COMMON_INTERCEPT_FUNCTION(nl_langinfo)
+#else
+#define INIT_NL_LANGINFO
+#endif
+
+#if SANITIZER_INTERCEPT_MODCTL
+INTERCEPTOR(int, modctl, int operation, void *argp) {
+ void *ctx;
+ int ret;
+ COMMON_INTERCEPTOR_ENTER(ctx, modctl, operation, argp);
+
+ if (operation == modctl_load) {
+ if (argp) {
+ __sanitizer_modctl_load_t *ml = (__sanitizer_modctl_load_t *)argp;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ml, sizeof(*ml));
+ if (ml->ml_filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_filename,
+ REAL(strlen)(ml->ml_filename) + 1);
+ if (ml->ml_props)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_props, ml->ml_propslen);
+ }
+ ret = REAL(modctl)(operation, argp);
+ } else if (operation == modctl_unload) {
+ if (argp) {
+ const char *name = (const char *)argp;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ }
+ ret = REAL(modctl)(operation, argp);
+ } else if (operation == modctl_stat) {
+ uptr iov_len;
+ struct __sanitizer_iovec *iov = (struct __sanitizer_iovec *)argp;
+ if (iov) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iov, sizeof(*iov));
+ iov_len = iov->iov_len;
+ }
+ ret = REAL(modctl)(operation, argp);
+ if (iov)
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, iov->iov_base, Min(iov_len, iov->iov_len));
+ } else if (operation == modctl_exists) {
+ ret = REAL(modctl)(operation, argp);
+ } else {
+ ret = REAL(modctl)(operation, argp);
+ }
+
+ return ret;
+}
+#define INIT_MODCTL COMMON_INTERCEPT_FUNCTION(modctl)
+#else
+#define INIT_MODCTL
+#endif
+
+#if SANITIZER_INTERCEPT_STRTONUM
+INTERCEPTOR(long long, strtonum, const char *nptr, long long minval,
+ long long maxval, const char **errstr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtonum, nptr, minval, maxval, errstr);
+
+ // TODO(kamil): Implement strtoll as a common inteceptor
+ char *real_endptr;
+ long long ret = (long long)REAL(strtoimax)(nptr, &real_endptr, 10);
+ StrtolFixAndCheck(ctx, nptr, nullptr, real_endptr, 10);
+
+ ret = REAL(strtonum)(nptr, minval, maxval, errstr);
+ if (errstr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errstr, sizeof(const char *));
+ if (*errstr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, REAL(strlen)(*errstr) + 1);
+ }
+ return ret;
+}
+#define INIT_STRTONUM COMMON_INTERCEPT_FUNCTION(strtonum)
+#else
+#define INIT_STRTONUM
+#endif
+
+#if SANITIZER_INTERCEPT_FPARSELN
+INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
+ SIZE_T *lineno, const char delim[3], int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fparseln, stream, len, lineno, delim, flags);
+ if (lineno)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, lineno, sizeof(*lineno));
+ if (delim)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delim, sizeof(delim[0]) * 3);
+ char *ret = REAL(fparseln)(stream, len, lineno, delim, flags);
+ if (ret) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ if (len)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ if (lineno)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineno, sizeof(*lineno));
+ }
+ return ret;
+}
+#define INIT_FPARSELN COMMON_INTERCEPT_FUNCTION(fparseln)
+#else
+#define INIT_FPARSELN
+#endif
+
+#if SANITIZER_INTERCEPT_STATVFS1
+INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ int res = REAL(statvfs1)(path, buf, flags);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatvfs1, int fd, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs1, fd, buf, flags);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ int res = REAL(fstatvfs1)(fd, buf, flags);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+#define INIT_STATVFS1 \
+ COMMON_INTERCEPT_FUNCTION(statvfs1); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs1);
+#else
+#define INIT_STATVFS1
+#endif
+
+#if SANITIZER_INTERCEPT_STRTOI
+INTERCEPTOR(INTMAX_T, strtoi, const char *nptr, char **endptr, int base,
+ INTMAX_T low, INTMAX_T high, int *rstatus) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoi, nptr, endptr, base, low, high, rstatus);
+ char *real_endptr;
+ INTMAX_T ret = REAL(strtoi)(nptr, &real_endptr, base, low, high, rstatus);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ if (rstatus)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rstatus, sizeof(*rstatus));
+ return ret;
+}
+
+INTERCEPTOR(UINTMAX_T, strtou, const char *nptr, char **endptr, int base,
+ UINTMAX_T low, UINTMAX_T high, int *rstatus) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtou, nptr, endptr, base, low, high, rstatus);
+ char *real_endptr;
+ UINTMAX_T ret = REAL(strtou)(nptr, &real_endptr, base, low, high, rstatus);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ if (rstatus)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rstatus, sizeof(*rstatus));
+ return ret;
+}
+#define INIT_STRTOI \
+ COMMON_INTERCEPT_FUNCTION(strtoi); \
+ COMMON_INTERCEPT_FUNCTION(strtou)
+#else
+#define INIT_STRTOI
+#endif
+
+#if SANITIZER_INTERCEPT_CAPSICUM
+#define CAP_RIGHTS_INIT_INTERCEPTOR(cap_rights_init, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_init, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ __sanitizer_cap_rights_t *ret = \
+ REAL(cap_rights_init)(rights, ##__VA_ARGS__); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret)); \
+ return ret; \
+ }
+
+#define CAP_RIGHTS_SET_INTERCEPTOR(cap_rights_set, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_set, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ __sanitizer_cap_rights_t *ret = \
+ REAL(cap_rights_set)(rights, ##__VA_ARGS__); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret)); \
+ return ret; \
+ }
+
+#define CAP_RIGHTS_CLEAR_INTERCEPTOR(cap_rights_clear, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_clear, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ __sanitizer_cap_rights_t *ret = \
+ REAL(cap_rights_clear)(rights, ##__VA_ARGS__); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret)); \
+ return ret; \
+ }
+
+#define CAP_RIGHTS_IS_SET_INTERCEPTOR(cap_rights_is_set, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_is_set, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ return REAL(cap_rights_is_set)(rights, ##__VA_ARGS__); \
+ }
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_init,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_INIT_INTERCEPTOR(cap_rights_init, rights);
+}
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_set,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_SET_INTERCEPTOR(cap_rights_set, rights);
+}
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_clear,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_CLEAR_INTERCEPTOR(cap_rights_clear, rights);
+}
+
+INTERCEPTOR(bool, cap_rights_is_set,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_IS_SET_INTERCEPTOR(cap_rights_is_set, rights);
+}
+
+INTERCEPTOR(int, cap_rights_limit, int fd,
+ const __sanitizer_cap_rights_t *rights) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_limit, fd, rights);
+ if (rights)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));
+
+ return REAL(cap_rights_limit)(fd, rights);
+}
+
+INTERCEPTOR(int, cap_rights_get, int fd, __sanitizer_cap_rights_t *rights) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_get, fd, rights);
+ int ret = REAL(cap_rights_get)(fd, rights);
+ if (!ret && rights)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rights, sizeof(*rights));
+
+ return ret;
+}
+
+INTERCEPTOR(bool, cap_rights_is_valid, const __sanitizer_cap_rights_t *rights) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_is_valid, rights);
+ if (rights)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));
+
+ return REAL(cap_rights_is_valid(rights));
+}
+
+INTERCEPTOR(__sanitizer_cap_rights *, cap_rights_merge,
+ __sanitizer_cap_rights *dst, const __sanitizer_cap_rights *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_merge, dst, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+
+ __sanitizer_cap_rights *ret = REAL(cap_rights_merge)(dst, src);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+
+ return ret;
+}
+
+INTERCEPTOR(__sanitizer_cap_rights *, cap_rights_remove,
+ __sanitizer_cap_rights *dst, const __sanitizer_cap_rights *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_remove, dst, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+
+ __sanitizer_cap_rights *ret = REAL(cap_rights_remove)(dst, src);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+
+ return ret;
+}
+
+INTERCEPTOR(bool, cap_rights_contains, const __sanitizer_cap_rights *big,
+ const __sanitizer_cap_rights *little) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_contains, big, little);
+ if (little)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, little, sizeof(*little));
+ if (big)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, big, sizeof(*big));
+
+ return REAL(cap_rights_contains)(big, little);
+}
+
+INTERCEPTOR(int, cap_ioctls_limit, int fd, const uptr *cmds, SIZE_T ncmds) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_ioctls_limit, fd, cmds, ncmds);
+ if (cmds)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cmds, sizeof(*cmds) * ncmds);
+
+ return REAL(cap_ioctls_limit)(fd, cmds, ncmds);
+}
+
+INTERCEPTOR(int, cap_ioctls_get, int fd, uptr *cmds, SIZE_T maxcmds) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_ioctls_get, fd, cmds, maxcmds);
+ int ret = REAL(cap_ioctls_get)(fd, cmds, maxcmds);
+ if (!ret && cmds)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cmds, sizeof(*cmds) * maxcmds);
+
+ return ret;
+}
+#define INIT_CAPSICUM \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_init); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_set); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_clear); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_is_set); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_get); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_limit); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_contains); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_remove); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_merge); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_is_valid); \
+ COMMON_INTERCEPT_FUNCTION(cap_ioctls_get); \
+ COMMON_INTERCEPT_FUNCTION(cap_ioctls_limit)
+#else
+#define INIT_CAPSICUM
+#endif
+
+#if SANITIZER_INTERCEPT_SHA1
+INTERCEPTOR(void, SHA1Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Init, context);
+ REAL(SHA1Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA1_CTX_sz);
+}
+INTERCEPTOR(void, SHA1Update, void *context, const u8 *data, unsigned len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
+ REAL(SHA1Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA1_CTX_sz);
+}
+INTERCEPTOR(void, SHA1Final, u8 digest[20], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
+ REAL(SHA1Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(u8) * 20);
+}
+INTERCEPTOR(void, SHA1Transform, u32 state[5], u8 buffer[64]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Transform, state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, state, sizeof(u32) * 5);
+ if (buffer)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, sizeof(u8) * 64);
+ REAL(SHA1Transform)(state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, state, sizeof(u32) * 5);
+}
+INTERCEPTOR(char *, SHA1End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
+ char *ret = REAL(SHA1End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, SHA1File, char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(SHA1File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, SHA1FileChunk, char *filename, char *buf, OFF_T offset,
+ OFF_T length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1FileChunk, filename, buf, offset, length);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(SHA1FileChunk)(filename, buf, offset, length);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, SHA1Data, u8 *data, SIZE_T len, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Data, data, len, buf);
+ if (data)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(SHA1Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+#define INIT_SHA1 \
+ COMMON_INTERCEPT_FUNCTION(SHA1Init); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Update); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Final); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Transform); \
+ COMMON_INTERCEPT_FUNCTION(SHA1End); \
+ COMMON_INTERCEPT_FUNCTION(SHA1File); \
+ COMMON_INTERCEPT_FUNCTION(SHA1FileChunk); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Data)
+#else
+#define INIT_SHA1
+#endif
+
+#if SANITIZER_INTERCEPT_MD4
+INTERCEPTOR(void, MD4Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Init, context);
+ REAL(MD4Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD4_CTX_sz);
+}
+
+INTERCEPTOR(void, MD4Update, void *context, const unsigned char *data,
+ unsigned int len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);
+ REAL(MD4Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD4_CTX_sz);
+}
+
+INTERCEPTOR(void, MD4Final, unsigned char digest[16], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);
+ REAL(MD4Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);
+}
+
+INTERCEPTOR(char *, MD4End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);
+ char *ret = REAL(MD4End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD4File, const char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(MD4File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD4Data, const unsigned char *data, unsigned int len,
+ char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(MD4Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
+ return ret;
+}
+
+#define INIT_MD4 \
+ COMMON_INTERCEPT_FUNCTION(MD4Init); \
+ COMMON_INTERCEPT_FUNCTION(MD4Update); \
+ COMMON_INTERCEPT_FUNCTION(MD4Final); \
+ COMMON_INTERCEPT_FUNCTION(MD4End); \
+ COMMON_INTERCEPT_FUNCTION(MD4File); \
+ COMMON_INTERCEPT_FUNCTION(MD4Data)
+#else
+#define INIT_MD4
+#endif
+
+#if SANITIZER_INTERCEPT_RMD160
+INTERCEPTOR(void, RMD160Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Init, context);
+ REAL(RMD160Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, RMD160_CTX_sz);
+}
+INTERCEPTOR(void, RMD160Update, void *context, const u8 *data, unsigned len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);
+ REAL(RMD160Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, RMD160_CTX_sz);
+}
+INTERCEPTOR(void, RMD160Final, u8 digest[20], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);
+ REAL(RMD160Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(u8) * 20);
+}
+INTERCEPTOR(void, RMD160Transform, u32 state[5], u16 buffer[16]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Transform, state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, state, sizeof(u32) * 5);
+ if (buffer)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, sizeof(u32) * 16);
+ REAL(RMD160Transform)(state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, state, sizeof(u32) * 5);
+}
+INTERCEPTOR(char *, RMD160End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);
+ char *ret = REAL(RMD160End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, RMD160File, char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(RMD160File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, RMD160FileChunk, char *filename, char *buf, OFF_T offset,
+ OFF_T length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160FileChunk, filename, buf, offset, length);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(RMD160FileChunk)(filename, buf, offset, length);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, RMD160Data, u8 *data, SIZE_T len, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(RMD160Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+#define INIT_RMD160 \
+ COMMON_INTERCEPT_FUNCTION(RMD160Init); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Update); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Final); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Transform); \
+ COMMON_INTERCEPT_FUNCTION(RMD160End); \
+ COMMON_INTERCEPT_FUNCTION(RMD160File); \
+ COMMON_INTERCEPT_FUNCTION(RMD160FileChunk); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Data)
+#else
+#define INIT_RMD160
+#endif
+
+#if SANITIZER_INTERCEPT_MD5
+INTERCEPTOR(void, MD5Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Init, context);
+ REAL(MD5Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD5_CTX_sz);
+}
+
+INTERCEPTOR(void, MD5Update, void *context, const unsigned char *data,
+ unsigned int len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);
+ REAL(MD5Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD5_CTX_sz);
+}
+
+INTERCEPTOR(void, MD5Final, unsigned char digest[16], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);
+ REAL(MD5Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);
+}
+
+INTERCEPTOR(char *, MD5End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);
+ char *ret = REAL(MD5End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD5File, const char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(MD5File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD5Data, const unsigned char *data, unsigned int len,
+ char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(MD5Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
+ return ret;
+}
+
+#define INIT_MD5 \
+ COMMON_INTERCEPT_FUNCTION(MD5Init); \
+ COMMON_INTERCEPT_FUNCTION(MD5Update); \
+ COMMON_INTERCEPT_FUNCTION(MD5Final); \
+ COMMON_INTERCEPT_FUNCTION(MD5End); \
+ COMMON_INTERCEPT_FUNCTION(MD5File); \
+ COMMON_INTERCEPT_FUNCTION(MD5Data)
+#else
+#define INIT_MD5
+#endif
+
+#if SANITIZER_INTERCEPT_FSEEK
+INTERCEPTOR(int, fseek, __sanitizer_FILE *stream, long int offset, int whence) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fseek, stream, offset, whence);
+ return REAL(fseek)(stream, offset, whence);
+}
+INTERCEPTOR(int, fseeko, __sanitizer_FILE *stream, OFF_T offset, int whence) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fseeko, stream, offset, whence);
+ return REAL(fseeko)(stream, offset, whence);
+}
+INTERCEPTOR(long int, ftell, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftell, stream);
+ return REAL(ftell)(stream);
+}
+INTERCEPTOR(OFF_T, ftello, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftello, stream);
+ return REAL(ftello)(stream);
+}
+INTERCEPTOR(void, rewind, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rewind, stream);
+ return REAL(rewind)(stream);
+}
+INTERCEPTOR(int, fgetpos, __sanitizer_FILE *stream, void *pos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpos, stream, pos);
+ int ret = REAL(fgetpos)(stream, pos);
+ if (pos && !ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pos, fpos_t_sz);
+ return ret;
+}
+INTERCEPTOR(int, fsetpos, __sanitizer_FILE *stream, const void *pos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fsetpos, stream, pos);
+ if (pos)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pos, fpos_t_sz);
+ return REAL(fsetpos)(stream, pos);
+}
+#define INIT_FSEEK \
+ COMMON_INTERCEPT_FUNCTION(fseek); \
+ COMMON_INTERCEPT_FUNCTION(fseeko); \
+ COMMON_INTERCEPT_FUNCTION(ftell); \
+ COMMON_INTERCEPT_FUNCTION(ftello); \
+ COMMON_INTERCEPT_FUNCTION(rewind); \
+ COMMON_INTERCEPT_FUNCTION(fgetpos); \
+ COMMON_INTERCEPT_FUNCTION(fsetpos)
+#else
+#define INIT_FSEEK
+#endif
+
+#if SANITIZER_INTERCEPT_MD2
+INTERCEPTOR(void, MD2Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Init, context);
+ REAL(MD2Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD2_CTX_sz);
+}
+
+INTERCEPTOR(void, MD2Update, void *context, const unsigned char *data,
+ unsigned int len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);
+ REAL(MD2Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD2_CTX_sz);
+}
+
+INTERCEPTOR(void, MD2Final, unsigned char digest[16], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);
+ REAL(MD2Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);
+}
+
+INTERCEPTOR(char *, MD2End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);
+ char *ret = REAL(MD2End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD2File, const char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(MD2File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
+ char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(MD2Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
+ return ret;
+}
+
+#define INIT_MD2 \
+ COMMON_INTERCEPT_FUNCTION(MD2Init); \
+ COMMON_INTERCEPT_FUNCTION(MD2Update); \
+ COMMON_INTERCEPT_FUNCTION(MD2Final); \
+ COMMON_INTERCEPT_FUNCTION(MD2End); \
+ COMMON_INTERCEPT_FUNCTION(MD2File); \
+ COMMON_INTERCEPT_FUNCTION(MD2Data)
+#else
+#define INIT_MD2
+#endif
+
+#if SANITIZER_INTERCEPT_SHA2
+#define SHA2_INTERCEPTORS(LEN, SHA2_STATE_T) \
+ INTERCEPTOR(void, SHA##LEN##_Init, void *context) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Init, context); \
+ REAL(SHA##LEN##_Init)(context); \
+ if (context) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ } \
+ INTERCEPTOR(void, SHA##LEN##_Update, void *context, \
+ const u8 *data, SIZE_T len) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Update, context, data, len); \
+ if (data && len > 0) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len); \
+ if (context) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ REAL(SHA##LEN##_Update)(context, data, len); \
+ if (context) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ } \
+ INTERCEPTOR(void, SHA##LEN##_Final, u8 digest[LEN/8], \
+ void *context) { \
+ void *ctx; \
+ CHECK_EQ(SHA##LEN##_digest_length, LEN/8); \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Final, digest, context); \
+ if (context) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ REAL(SHA##LEN##_Final)(digest, context); \
+ if (digest) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, \
+ sizeof(digest[0]) * \
+ SHA##LEN##_digest_length); \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_End, void *context, char *buf) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_End, context, buf); \
+ if (context) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ char *ret = REAL(SHA##LEN##_End)(context, buf); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_File, const char *filename, char *buf) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_File, filename, buf); \
+ if (filename) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ char *ret = REAL(SHA##LEN##_File)(filename, buf); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_FileChunk, const char *filename, char *buf, \
+ OFF_T offset, OFF_T length) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_FileChunk, filename, buf, offset, \
+ length); \
+ if (filename) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ char *ret = REAL(SHA##LEN##_FileChunk)(filename, buf, offset, length); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_Data, u8 *data, SIZE_T len, char *buf) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Data, data, len, buf); \
+ if (data && len > 0) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len); \
+ char *ret = REAL(SHA##LEN##_Data)(data, len, buf); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ }
+
+SHA2_INTERCEPTORS(224, u32);
+SHA2_INTERCEPTORS(256, u32);
+SHA2_INTERCEPTORS(384, u64);
+SHA2_INTERCEPTORS(512, u64);
+
+#define INIT_SHA2_INTECEPTORS(LEN) \
+ COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Init); \
+ COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Update); \
+ COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Final); \
+ COMMON_INTERCEPT_FUNCTION(SHA##LEN##_End); \
+ COMMON_INTERCEPT_FUNCTION(SHA##LEN##_File); \
+ COMMON_INTERCEPT_FUNCTION(SHA##LEN##_FileChunk); \
+ COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Data)
+
+#define INIT_SHA2 \
+ INIT_SHA2_INTECEPTORS(224); \
+ INIT_SHA2_INTECEPTORS(256); \
+ INIT_SHA2_INTECEPTORS(384); \
+ INIT_SHA2_INTECEPTORS(512)
+#undef SHA2_INTERCEPTORS
+#else
+#define INIT_SHA2
+#endif
+
+#if SANITIZER_INTERCEPT_VIS
+INTERCEPTOR(char *, vis, char *dst, int c, int flag, int nextc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vis, dst, c, flag, nextc);
+ char *end = REAL(vis)(dst, c, flag, nextc);
+ // dst is NULL terminated and end points to the NULL char
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
+ return end;
+}
+INTERCEPTOR(char *, nvis, char *dst, SIZE_T dlen, int c, int flag, int nextc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, nvis, dst, dlen, c, flag, nextc);
+ char *end = REAL(nvis)(dst, dlen, c, flag, nextc);
+ // nvis cannot make sure the dst is NULL terminated
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
+ return end;
+}
+INTERCEPTOR(int, strvis, char *dst, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strvis, dst, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int len = REAL(strvis)(dst, src, flag);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, stravis, char **dst, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, stravis, dst, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int len = REAL(stravis)(dst, src, flag);
+ if (dst) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(char *));
+ if (*dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *dst, len + 1);
+ }
+ return len;
+}
+INTERCEPTOR(int, strnvis, char *dst, SIZE_T dlen, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnvis, dst, dlen, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int len = REAL(strnvis)(dst, dlen, src, flag);
+ // The interface will be valid even if there is no space for NULL char
+ if (dst && len > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, strvisx, char *dst, const char *src, SIZE_T len, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strvisx, dst, src, len, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ int ret = REAL(strvisx)(dst, src, len, flag);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
+ int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnvisx, dst, dlen, src, len, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ int ret = REAL(strnvisx)(dst, dlen, src, len, flag);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strenvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
+ int flag, int *cerr_ptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strenvisx, dst, dlen, src, len, flag, cerr_ptr);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ // FIXME: only need to be checked when "flag | VIS_NOLOCALE" doesn't hold
+ // according to the implementation
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cerr_ptr, sizeof(int));
+ int ret = REAL(strenvisx)(dst, dlen, src, len, flag, cerr_ptr);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cerr_ptr, sizeof(int));
+ return ret;
+}
+INTERCEPTOR(char *, svis, char *dst, int c, int flag, int nextc,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, svis, dst, c, flag, nextc, extra);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ char *end = REAL(svis)(dst, c, flag, nextc, extra);
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
+ return end;
+}
+INTERCEPTOR(char *, snvis, char *dst, SIZE_T dlen, int c, int flag, int nextc,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, snvis, dst, dlen, c, flag, nextc, extra);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ char *end = REAL(snvis)(dst, dlen, c, flag, nextc, extra);
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst,
+ Min((SIZE_T)(end - dst + 1), dlen));
+ return end;
+}
+INTERCEPTOR(int, strsvis, char *dst, const char *src, int flag,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsvis, dst, src, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int len = REAL(strsvis)(dst, src, flag, extra);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, strsnvis, char *dst, SIZE_T dlen, const char *src, int flag,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsnvis, dst, dlen, src, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int len = REAL(strsnvis)(dst, dlen, src, flag, extra);
+ // The interface will be valid even if there is no space for NULL char
+ if (dst && len >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, strsvisx, char *dst, const char *src, SIZE_T len, int flag,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsvisx, dst, src, len, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int ret = REAL(strsvisx)(dst, src, len, flag, extra);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strsnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
+ int flag, const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsnvisx, dst, dlen, src, len, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int ret = REAL(strsnvisx)(dst, dlen, src, len, flag, extra);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strsenvisx, char *dst, SIZE_T dlen, const char *src,
+ SIZE_T len, int flag, const char *extra, int *cerr_ptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsenvisx, dst, dlen, src, len, flag, extra,
+ cerr_ptr);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ // FIXME: only need to be checked when "flag | VIS_NOLOCALE" doesn't hold
+ // according to the implementation
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cerr_ptr, sizeof(int));
+ int ret = REAL(strsenvisx)(dst, dlen, src, len, flag, extra, cerr_ptr);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cerr_ptr, sizeof(int));
+ return ret;
+}
+INTERCEPTOR(int, unvis, char *cp, int c, int *astate, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, unvis, cp, c, astate, flag);
+ if (astate)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, astate, sizeof(*astate));
+ int ret = REAL(unvis)(cp, c, astate, flag);
+ if (ret == unvis_valid || ret == unvis_validpush) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cp, sizeof(*cp));
+ }
+ return ret;
+}
+INTERCEPTOR(int, strunvis, char *dst, const char *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strunvis, dst, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strunvis)(dst, src);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strnunvis, char *dst, SIZE_T dlen, const char *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnunvis, dst, dlen, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strnunvis)(dst, dlen, src);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strunvisx, char *dst, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strunvisx, dst, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strunvisx)(dst, src, flag);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strnunvisx, char *dst, SIZE_T dlen, const char *src,
+ int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnunvisx, dst, dlen, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strnunvisx)(dst, dlen, src, flag);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+#define INIT_VIS \
+ COMMON_INTERCEPT_FUNCTION(vis); \
+ COMMON_INTERCEPT_FUNCTION(nvis); \
+ COMMON_INTERCEPT_FUNCTION(strvis); \
+ COMMON_INTERCEPT_FUNCTION(stravis); \
+ COMMON_INTERCEPT_FUNCTION(strnvis); \
+ COMMON_INTERCEPT_FUNCTION(strvisx); \
+ COMMON_INTERCEPT_FUNCTION(strnvisx); \
+ COMMON_INTERCEPT_FUNCTION(strenvisx); \
+ COMMON_INTERCEPT_FUNCTION(svis); \
+ COMMON_INTERCEPT_FUNCTION(snvis); \
+ COMMON_INTERCEPT_FUNCTION(strsvis); \
+ COMMON_INTERCEPT_FUNCTION(strsnvis); \
+ COMMON_INTERCEPT_FUNCTION(strsvisx); \
+ COMMON_INTERCEPT_FUNCTION(strsnvisx); \
+ COMMON_INTERCEPT_FUNCTION(strsenvisx); \
+ COMMON_INTERCEPT_FUNCTION(unvis); \
+ COMMON_INTERCEPT_FUNCTION(strunvis); \
+ COMMON_INTERCEPT_FUNCTION(strnunvis); \
+ COMMON_INTERCEPT_FUNCTION(strunvisx); \
+ COMMON_INTERCEPT_FUNCTION(strnunvisx)
+#else
+#define INIT_VIS
+#endif
+
+#if SANITIZER_INTERCEPT_CDB
+INTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open, const char *path, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open, path, flags);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ struct __sanitizer_cdbr *cdbr = REAL(cdbr_open)(path, flags);
+ if (cdbr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));
+ return cdbr;
+}
+
+INTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open_mem, void *base, SIZE_T size,
+ int flags, void (*unmap)(void *, void *, SIZE_T), void *cookie) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open_mem, base, size, flags, unmap,
+ cookie);
+ if (base && size)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, base, size);
+ struct __sanitizer_cdbr *cdbr =
+ REAL(cdbr_open_mem)(base, size, flags, unmap, cookie);
+ if (cdbr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));
+ return cdbr;
+}
+
+INTERCEPTOR(u32, cdbr_entries, struct __sanitizer_cdbr *cdbr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_entries, cdbr);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ return REAL(cdbr_entries)(cdbr);
+}
+
+INTERCEPTOR(int, cdbr_get, struct __sanitizer_cdbr *cdbr, u32 index,
+ const void **data, SIZE_T *datalen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_get, cdbr, index, data, datalen);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ int ret = REAL(cdbr_get)(cdbr, index, data, datalen);
+ if (!ret) {
+ if (data)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(*data));
+ if (datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datalen, sizeof(*datalen));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *data, *datalen);
+ }
+ return ret;
+}
+
+INTERCEPTOR(int, cdbr_find, struct __sanitizer_cdbr *cdbr, const void *key,
+ SIZE_T keylen, const void **data, SIZE_T *datalen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_find, cdbr, key, keylen, data, datalen);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ if (key)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);
+ int ret = REAL(cdbr_find)(cdbr, key, keylen, data, datalen);
+ if (!ret) {
+ if (data)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(*data));
+ if (datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datalen, sizeof(*datalen));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *data, *datalen);
+ }
+ return ret;
+}
+
+INTERCEPTOR(void, cdbr_close, struct __sanitizer_cdbr *cdbr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_close, cdbr);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ REAL(cdbr_close)(cdbr);
+}
+
+INTERCEPTOR(struct __sanitizer_cdbw *, cdbw_open) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_open);
+ struct __sanitizer_cdbw *ret = REAL(cdbw_open)();
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret));
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_put, struct __sanitizer_cdbw *cdbw, const void *key,
+ SIZE_T keylen, const void *data, SIZE_T datalen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put, cdbw, key, keylen, data, datalen);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, datalen);
+ if (key && keylen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);
+ int ret = REAL(cdbw_put)(cdbw, key, keylen, data, datalen);
+ if (!ret && cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_put_data, struct __sanitizer_cdbw *cdbw, const void *data,
+ SIZE_T datalen, u32 *index) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put_data, cdbw, data, datalen, index);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, datalen);
+ int ret = REAL(cdbw_put_data)(cdbw, data, datalen, index);
+ if (!ret) {
+ if (index)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, index, sizeof(*index));
+ if (cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ }
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_put_key, struct __sanitizer_cdbw *cdbw, const void *key,
+ SIZE_T keylen, u32 index) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put_key, cdbw, key, keylen, index);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (key && keylen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);
+ int ret = REAL(cdbw_put_key)(cdbw, key, keylen, index);
+ if (!ret && cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_output, struct __sanitizer_cdbw *cdbw, int output,
+ const char descr[16], u32 (*seedgen)(void)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_output, cdbw, output, descr, seedgen);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, output);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (descr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, descr, internal_strnlen(descr, 16));
+ if (seedgen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (void *)seedgen, sizeof(seedgen));
+ int ret = REAL(cdbw_output)(cdbw, output, descr, seedgen);
+ if (!ret) {
+ if (cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (output >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, output);
+ }
+ return ret;
+}
+
+INTERCEPTOR(void, cdbw_close, struct __sanitizer_cdbw *cdbw) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_close, cdbw);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ REAL(cdbw_close)(cdbw);
+}
+
+#define INIT_CDB \
+ COMMON_INTERCEPT_FUNCTION(cdbr_open); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_open_mem); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_entries); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_get); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_find); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_close); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_open); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_put); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_put_data); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_put_key); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_output); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_close)
+#else
+#define INIT_CDB
+#endif
+
+#if SANITIZER_INTERCEPT_GETFSENT
+INTERCEPTOR(void *, getfsent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getfsent);
+ void *ret = REAL(getfsent)();
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
+ return ret;
+}
+
+INTERCEPTOR(void *, getfsspec, const char *spec) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getfsspec, spec);
+ if (spec)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, REAL(strlen)(spec) + 1);
+ void *ret = REAL(getfsspec)(spec);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
+ return ret;
+}
+
+INTERCEPTOR(void *, getfsfile, const char *file) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getfsfile, file);
+ if (file)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ void *ret = REAL(getfsfile)(file);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
+ return ret;
+}
+
+#define INIT_GETFSENT \
+ COMMON_INTERCEPT_FUNCTION(getfsent); \
+ COMMON_INTERCEPT_FUNCTION(getfsspec); \
+ COMMON_INTERCEPT_FUNCTION(getfsfile);
+#else
+#define INIT_GETFSENT
+#endif
+
+#if SANITIZER_INTERCEPT_ARC4RANDOM
+INTERCEPTOR(void, arc4random_buf, void *buf, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, arc4random_buf, buf, len);
+ REAL(arc4random_buf)(buf, len);
+ if (buf && len)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, len);
+}
+
+INTERCEPTOR(void, arc4random_addrandom, u8 *dat, int datlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, arc4random_addrandom, dat, datlen);
+ if (dat && datlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dat, datlen);
+ REAL(arc4random_addrandom)(dat, datlen);
+}
+
+#define INIT_ARC4RANDOM \
+ COMMON_INTERCEPT_FUNCTION(arc4random_buf); \
+ COMMON_INTERCEPT_FUNCTION(arc4random_addrandom);
+#else
+#define INIT_ARC4RANDOM
+#endif
+
+#if SANITIZER_INTERCEPT_POPEN
+INTERCEPTOR(__sanitizer_FILE *, popen, const char *command, const char *type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, popen, command, type);
+ if (command)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, command, REAL(strlen)(command) + 1);
+ if (type)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ __sanitizer_FILE *res = REAL(popen)(command, type);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_POPEN COMMON_INTERCEPT_FUNCTION(popen)
+#else
+#define INIT_POPEN
+#endif
+
+#if SANITIZER_INTERCEPT_POPENVE
+INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
+ char *const *argv, char *const *envp, const char *type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, popenve, path, argv, envp, type);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (argv) {
+ for (char *const *pa = argv; ; ++pa) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
+ if (!*pa)
+ break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ }
+ }
+ if (envp) {
+ for (char *const *pa = envp; ; ++pa) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
+ if (!*pa)
+ break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ }
+ }
+ if (type)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ __sanitizer_FILE *res = REAL(popenve)(path, argv, envp, type);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_POPENVE COMMON_INTERCEPT_FUNCTION(popenve)
+#else
+#define INIT_POPENVE
+#endif
+
+#if SANITIZER_INTERCEPT_PCLOSE
+INTERCEPTOR(int, pclose, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pclose, fp);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ int res = REAL(pclose)(fp);
+ if (m) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ DeleteInterceptorMetadata(fp);
+ }
+ return res;
+}
+#define INIT_PCLOSE COMMON_INTERCEPT_FUNCTION(pclose);
+#else
+#define INIT_PCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_FUNOPEN
+typedef int (*funopen_readfn)(void *cookie, char *buf, int len);
+typedef int (*funopen_writefn)(void *cookie, const char *buf, int len);
+typedef OFF_T (*funopen_seekfn)(void *cookie, OFF_T offset, int whence);
+typedef int (*funopen_closefn)(void *cookie);
+
+struct WrappedFunopenCookie {
+ void *real_cookie;
+ funopen_readfn real_read;
+ funopen_writefn real_write;
+ funopen_seekfn real_seek;
+ funopen_closefn real_close;
+};
+
+static int wrapped_funopen_read(void *cookie, char *buf, int len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_readfn real_read = wrapped_cookie->real_read;
+ return real_read(wrapped_cookie->real_cookie, buf, len);
+}
+
+static int wrapped_funopen_write(void *cookie, const char *buf, int len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_writefn real_write = wrapped_cookie->real_write;
+ return real_write(wrapped_cookie->real_cookie, buf, len);
+}
+
+static OFF_T wrapped_funopen_seek(void *cookie, OFF_T offset, int whence) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_seekfn real_seek = wrapped_cookie->real_seek;
+ return real_seek(wrapped_cookie->real_cookie, offset, whence);
+}
+
+static int wrapped_funopen_close(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_closefn real_close = wrapped_cookie->real_close;
+ int res = real_close(wrapped_cookie->real_cookie);
+ InternalFree(wrapped_cookie);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_FILE *, funopen, void *cookie, funopen_readfn readfn,
+ funopen_writefn writefn, funopen_seekfn seekfn,
+ funopen_closefn closefn) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, funopen, cookie, readfn, writefn, seekfn,
+ closefn);
+
+ WrappedFunopenCookie *wrapped_cookie =
+ (WrappedFunopenCookie *)InternalAlloc(sizeof(WrappedFunopenCookie));
+ wrapped_cookie->real_cookie = cookie;
+ wrapped_cookie->real_read = readfn;
+ wrapped_cookie->real_write = writefn;
+ wrapped_cookie->real_seek = seekfn;
+ wrapped_cookie->real_close = closefn;
+
+ __sanitizer_FILE *res =
+ REAL(funopen)(wrapped_cookie,
+ readfn ? wrapped_funopen_read : nullptr,
+ writefn ? wrapped_funopen_write : nullptr,
+ seekfn ? wrapped_funopen_seek : nullptr,
+ closefn ? wrapped_funopen_close : nullptr);
+ if (res)
+ unpoison_file(res);
+ return res;
+}
+#define INIT_FUNOPEN COMMON_INTERCEPT_FUNCTION(funopen)
+#else
+#define INIT_FUNOPEN
+#endif
+
+#if SANITIZER_INTERCEPT_FUNOPEN2
+typedef SSIZE_T (*funopen2_readfn)(void *cookie, void *buf, SIZE_T len);
+typedef SSIZE_T (*funopen2_writefn)(void *cookie, const void *buf, SIZE_T len);
+typedef OFF_T (*funopen2_seekfn)(void *cookie, OFF_T offset, int whence);
+typedef int (*funopen2_flushfn)(void *cookie);
+typedef int (*funopen2_closefn)(void *cookie);
+
+struct WrappedFunopen2Cookie {
+ void *real_cookie;
+ funopen2_readfn real_read;
+ funopen2_writefn real_write;
+ funopen2_seekfn real_seek;
+ funopen2_flushfn real_flush;
+ funopen2_closefn real_close;
+};
+
+static SSIZE_T wrapped_funopen2_read(void *cookie, void *buf, SIZE_T len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_readfn real_read = wrapped_cookie->real_read;
+ return real_read(wrapped_cookie->real_cookie, buf, len);
+}
+
+static SSIZE_T wrapped_funopen2_write(void *cookie, const void *buf,
+ SIZE_T len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_writefn real_write = wrapped_cookie->real_write;
+ return real_write(wrapped_cookie->real_cookie, buf, len);
+}
+
+static OFF_T wrapped_funopen2_seek(void *cookie, OFF_T offset, int whence) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_seekfn real_seek = wrapped_cookie->real_seek;
+ return real_seek(wrapped_cookie->real_cookie, offset, whence);
+}
+
+static int wrapped_funopen2_flush(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_flushfn real_flush = wrapped_cookie->real_flush;
+ return real_flush(wrapped_cookie->real_cookie);
+}
+
+static int wrapped_funopen2_close(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_closefn real_close = wrapped_cookie->real_close;
+ int res = real_close(wrapped_cookie->real_cookie);
+ InternalFree(wrapped_cookie);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_FILE *, funopen2, void *cookie, funopen2_readfn readfn,
+ funopen2_writefn writefn, funopen2_seekfn seekfn,
+ funopen2_flushfn flushfn, funopen2_closefn closefn) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, funopen2, cookie, readfn, writefn, seekfn,
+ flushfn, closefn);
+
+ WrappedFunopen2Cookie *wrapped_cookie =
+ (WrappedFunopen2Cookie *)InternalAlloc(sizeof(WrappedFunopen2Cookie));
+ wrapped_cookie->real_cookie = cookie;
+ wrapped_cookie->real_read = readfn;
+ wrapped_cookie->real_write = writefn;
+ wrapped_cookie->real_seek = seekfn;
+ wrapped_cookie->real_flush = flushfn;
+ wrapped_cookie->real_close = closefn;
+
+ __sanitizer_FILE *res =
+ REAL(funopen2)(wrapped_cookie,
+ readfn ? wrapped_funopen2_read : nullptr,
+ writefn ? wrapped_funopen2_write : nullptr,
+ seekfn ? wrapped_funopen2_seek : nullptr,
+ flushfn ? wrapped_funopen2_flush : nullptr,
+ closefn ? wrapped_funopen2_close : nullptr);
+ if (res)
+ unpoison_file(res);
+ return res;
+}
+#define INIT_FUNOPEN2 COMMON_INTERCEPT_FUNCTION(funopen2)
+#else
+#define INIT_FUNOPEN2
+#endif
+
+#if SANITIZER_INTERCEPT_FDEVNAME
+INTERCEPTOR(char *, fdevname, int fd) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdevname, fd);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ char *name = REAL(fdevname)(fd);
+ if (name) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (fd > 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return name;
+}
+
+INTERCEPTOR(char *, fdevname_r, int fd, char *buf, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdevname_r, fd, buf, len);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ char *name = REAL(fdevname_r)(fd, buf, len);
+ if (name && buf && len > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ if (fd > 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return name;
+}
+
+#define INIT_FDEVNAME \
+ COMMON_INTERCEPT_FUNCTION(fdevname); \
+ COMMON_INTERCEPT_FUNCTION(fdevname_r);
+#else
+#define INIT_FDEVNAME
+#endif
+
+#if SANITIZER_INTERCEPT_GETUSERSHELL
+INTERCEPTOR(char *, getusershell) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getusershell);
+ char *res = REAL(getusershell)();
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+#define INIT_GETUSERSHELL COMMON_INTERCEPT_FUNCTION(getusershell);
+#else
+#define INIT_GETUSERSHELL
+#endif
+
+#if SANITIZER_INTERCEPT_SL_INIT
+INTERCEPTOR(void *, sl_init) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_init);
+ void *res = REAL(sl_init)();
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer::struct_StringList_sz);
+ return res;
+}
+
+INTERCEPTOR(int, sl_add, void *sl, char *item) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_add, sl, item);
+ if (sl)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ if (item)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ int res = REAL(sl_add)(sl, item);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ return res;
+}
+
+INTERCEPTOR(char *, sl_find, void *sl, const char *item) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_find, sl, item);
+ if (sl)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ if (item)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ char *res = REAL(sl_find)(sl, item);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+INTERCEPTOR(void, sl_free, void *sl, int freeall) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_free, sl, freeall);
+ if (sl)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ REAL(sl_free)(sl, freeall);
+}
+
+#define INIT_SL_INIT \
+ COMMON_INTERCEPT_FUNCTION(sl_init); \
+ COMMON_INTERCEPT_FUNCTION(sl_add); \
+ COMMON_INTERCEPT_FUNCTION(sl_find); \
+ COMMON_INTERCEPT_FUNCTION(sl_free);
+#else
+#define INIT_SL_INIT
+#endif
+
+#if SANITIZER_INTERCEPT_GETRANDOM
+INTERCEPTOR(SSIZE_T, getrandom, void *buf, SIZE_T buflen, unsigned int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getrandom, buf, buflen, flags);
+ SSIZE_T n = REAL(getrandom)(buf, buflen, flags);
+ if (n > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, n);
+ }
+ return n;
+}
+#define INIT_GETRANDOM COMMON_INTERCEPT_FUNCTION(getrandom)
+#else
+#define INIT_GETRANDOM
+#endif
+
+#if SANITIZER_INTERCEPT_CRYPT
+INTERCEPTOR(char *, crypt, char *key, char *salt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, crypt, key, salt);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
+ char *res = REAL(crypt)(key, salt);
+ if (res != nullptr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
+ return res;
+}
+#define INIT_CRYPT COMMON_INTERCEPT_FUNCTION(crypt);
+#else
+#define INIT_CRYPT
+#endif
+
+#if SANITIZER_INTERCEPT_CRYPT_R
+INTERCEPTOR(char *, crypt_r, char *key, char *salt, void *data) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, crypt_r, key, salt, data);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
+ char *res = REAL(crypt_r)(key, salt, data);
+ if (res != nullptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data,
+ __sanitizer::struct_crypt_data_sz);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
+ }
+ return res;
+}
+#define INIT_CRYPT_R COMMON_INTERCEPT_FUNCTION(crypt_r);
+#else
+#define INIT_CRYPT_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETENTROPY
+INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getentropy, buf, buflen);
+ int r = REAL(getentropy)(buf, buflen);
+ if (r == 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ return r;
+}
+#define INIT_GETENTROPY COMMON_INTERCEPT_FUNCTION(getentropy)
+#else
+#define INIT_GETENTROPY
+#endif
+
+#if SANITIZER_INTERCEPT_QSORT
+// Glibc qsort uses a temporary buffer allocated either on stack or on heap.
+// Poisoned memory from there may get copied into the comparator arguments,
+// where it needs to be dealt with. But even that is not enough - the results of
+// the sort may be copied into the input/output array based on the results of
+// the comparator calls, but directly from the temp memory, bypassing the
+// unpoisoning done in wrapped_qsort_compar. We deal with this by, again,
+// unpoisoning the entire array after the sort is done.
+//
+// We can not check that the entire array is initialized at the beginning. IMHO,
+// it's fine for parts of the sorted objects to contain uninitialized memory,
+// ex. as padding in structs.
+typedef int (*qsort_compar_f)(const void *, const void *);
+static THREADLOCAL qsort_compar_f qsort_compar;
+static THREADLOCAL SIZE_T qsort_size;
+int wrapped_qsort_compar(const void *a, const void *b) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_size);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_size);
+ return qsort_compar(a, b);
+}
+
+INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort, base, nmemb, size, compar);
+ // Run the comparator over all array elements to detect any memory issues.
+ if (nmemb > 1) {
+ for (SIZE_T i = 0; i < nmemb - 1; ++i) {
+ void *p = (void *)((char *)base + i * size);
+ void *q = (void *)((char *)base + (i + 1) * size);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ compar(p, q);
+ }
+ }
+ qsort_compar_f old_compar = qsort_compar;
+ qsort_compar = compar;
+ SIZE_T old_size = qsort_size;
+ qsort_size = size;
+ REAL(qsort)(base, nmemb, size, wrapped_qsort_compar);
+ qsort_compar = old_compar;
+ qsort_size = old_size;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+}
+#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+#else
+#define INIT_QSORT
+#endif
+
+#if SANITIZER_INTERCEPT_QSORT_R
+typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
+static THREADLOCAL qsort_r_compar_f qsort_r_compar;
+static THREADLOCAL SIZE_T qsort_r_size;
+int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_r_size);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_r_size);
+ return qsort_r_compar(a, b, arg);
+}
+
+INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_r_compar_f compar, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
+ // Run the comparator over all array elements to detect any memory issues.
+ if (nmemb > 1) {
+ for (SIZE_T i = 0; i < nmemb - 1; ++i) {
+ void *p = (void *)((char *)base + i * size);
+ void *q = (void *)((char *)base + (i + 1) * size);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ compar(p, q, arg);
+ }
+ }
+ qsort_r_compar_f old_compar = qsort_r_compar;
+ qsort_r_compar = compar;
+ SIZE_T old_size = qsort_r_size;
+ qsort_r_size = size;
+ REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
+ qsort_r_compar = old_compar;
+ qsort_r_size = old_size;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+}
+#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+#else
+#define INIT_QSORT_R
+#endif
+
+#if SANITIZER_INTERCEPT_SIGALTSTACK
+INTERCEPTOR(int, sigaltstack, void *ss, void *oss) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigaltstack, ss, oss);
+ int r = REAL(sigaltstack)(ss, oss);
+ if (r == 0 && oss != nullptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oss, struct_stack_t_sz);
+ }
+ return r;
+}
+#define INIT_SIGALTSTACK COMMON_INTERCEPT_FUNCTION(sigaltstack)
+#else
+#define INIT_SIGALTSTACK
+#endif
+
+#if SANITIZER_INTERCEPT_UNAME
+INTERCEPTOR(int, uname, struct utsname *utsname) {
+#if SANITIZER_LINUX
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_uname(utsname);
+#endif
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, uname, utsname);
+ int res = REAL(uname)(utsname);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, utsname,
+ __sanitizer::struct_utsname_sz);
+ return res;
+}
+#define INIT_UNAME COMMON_INTERCEPT_FUNCTION(uname)
+#else
+#define INIT_UNAME
+#endif
+
+#if SANITIZER_INTERCEPT___XUNAME
+// FreeBSD's <sys/utsname.h> define uname() as
+// static __inline int uname(struct utsname *name) {
+// return __xuname(SYS_NMLN, (void*)name);
+// }
+INTERCEPTOR(int, __xuname, int size, void *utsname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xuname, size, utsname);
+ int res = REAL(__xuname)(size, utsname);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, utsname,
+ __sanitizer::struct_utsname_sz);
+ return res;
+}
+#define INIT___XUNAME COMMON_INTERCEPT_FUNCTION(__xuname)
+#else
+#define INIT___XUNAME
+#endif
+
+#include "sanitizer_common_interceptors_netbsd_compat.inc"
+
+static void InitializeCommonInterceptors() {
+#if SI_POSIX
+ static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
+ interceptor_metadata_map = new ((void *)&metadata_mem) MetadataHashMap();
+#endif
+
+ INIT_MMAP;
+ INIT_MMAP64;
+ INIT_TEXTDOMAIN;
+ INIT_STRLEN;
+ INIT_STRNLEN;
+ INIT_STRNDUP;
+ INIT___STRNDUP;
+ INIT_STRCMP;
+ INIT_STRNCMP;
+ INIT_STRCASECMP;
+ INIT_STRNCASECMP;
+ INIT_STRSTR;
+ INIT_STRCASESTR;
+ INIT_STRCHR;
+ INIT_STRCHRNUL;
+ INIT_STRRCHR;
+ INIT_STRSPN;
+ INIT_STRTOK;
+ INIT_STRPBRK;
+ INIT_STRXFRM;
+ INIT___STRXFRM_L;
+ INIT_MEMSET;
+ INIT_MEMMOVE;
+ INIT_MEMCPY;
+ INIT_MEMCHR;
+ INIT_MEMCMP;
+ INIT_BCMP;
+ INIT_MEMRCHR;
+ INIT_MEMMEM;
+ INIT_READ;
+ INIT_FREAD;
+ INIT_PREAD;
+ INIT_PREAD64;
+ INIT_READV;
+ INIT_PREADV;
+ INIT_PREADV64;
+ INIT_WRITE;
+ INIT_FWRITE;
+ INIT_PWRITE;
+ INIT_PWRITE64;
+ INIT_WRITEV;
+ INIT_PWRITEV;
+ INIT_PWRITEV64;
+ INIT_FGETS;
+ INIT_FPUTS;
+ INIT_PUTS;
+ INIT_PRCTL;
+ INIT_LOCALTIME_AND_FRIENDS;
+ INIT_STRPTIME;
+ INIT_SCANF;
+ INIT_ISOC99_SCANF;
+ INIT_PRINTF;
+ INIT_PRINTF_L;
+ INIT_ISOC99_PRINTF;
+ INIT_FREXP;
+ INIT_FREXPF_FREXPL;
+ INIT_GETPWNAM_AND_FRIENDS;
+ INIT_GETPWNAM_R_AND_FRIENDS;
+ INIT_GETPWENT;
+ INIT_FGETPWENT;
+ INIT_GETPWENT_R;
+ INIT_FGETPWENT_R;
+ INIT_FGETGRENT_R;
+ INIT_SETPWENT;
+ INIT_CLOCK_GETTIME;
+ INIT_CLOCK_GETCPUCLOCKID;
+ INIT_GETITIMER;
+ INIT_TIME;
+ INIT_GLOB;
+ INIT_GLOB64;
+ INIT_WAIT;
+ INIT_WAIT4;
+ INIT_INET;
+ INIT_PTHREAD_GETSCHEDPARAM;
+ INIT_GETADDRINFO;
+ INIT_GETNAMEINFO;
+ INIT_GETSOCKNAME;
+ INIT_GETHOSTBYNAME;
+ INIT_GETHOSTBYNAME2;
+ INIT_GETHOSTBYNAME_R;
+ INIT_GETHOSTBYNAME2_R;
+ INIT_GETHOSTBYADDR_R;
+ INIT_GETHOSTENT_R;
+ INIT_GETSOCKOPT;
+ INIT_ACCEPT;
+ INIT_ACCEPT4;
+ INIT_PACCEPT;
+ INIT_MODF;
+ INIT_RECVMSG;
+ INIT_SENDMSG;
+ INIT_RECVMMSG;
+ INIT_SENDMMSG;
+ INIT_SYSMSG;
+ INIT_GETPEERNAME;
+ INIT_IOCTL;
+ INIT_INET_ATON;
+ INIT_SYSINFO;
+ INIT_READDIR;
+ INIT_READDIR64;
+ INIT_PTRACE;
+ INIT_SETLOCALE;
+ INIT_GETCWD;
+ INIT_GET_CURRENT_DIR_NAME;
+ INIT_STRTOIMAX;
+ INIT_MBSTOWCS;
+ INIT_MBSNRTOWCS;
+ INIT_WCSTOMBS;
+ INIT_WCSNRTOMBS;
+ INIT_WCRTOMB;
+ INIT_WCTOMB;
+ INIT_TCGETATTR;
+ INIT_REALPATH;
+ INIT_CANONICALIZE_FILE_NAME;
+ INIT_CONFSTR;
+ INIT_SCHED_GETAFFINITY;
+ INIT_SCHED_GETPARAM;
+ INIT_STRERROR;
+ INIT_STRERROR_R;
+ INIT_XPG_STRERROR_R;
+ INIT_SCANDIR;
+ INIT_SCANDIR64;
+ INIT_GETGROUPS;
+ INIT_POLL;
+ INIT_PPOLL;
+ INIT_WORDEXP;
+ INIT_SIGWAIT;
+ INIT_SIGWAITINFO;
+ INIT_SIGTIMEDWAIT;
+ INIT_SIGSETOPS;
+ INIT_SIGPENDING;
+ INIT_SIGPROCMASK;
+ INIT_PTHREAD_SIGMASK;
+ INIT_BACKTRACE;
+ INIT__EXIT;
+ INIT_PTHREAD_MUTEX_LOCK;
+ INIT_PTHREAD_MUTEX_UNLOCK;
+ INIT___PTHREAD_MUTEX_LOCK;
+ INIT___PTHREAD_MUTEX_UNLOCK;
+ INIT___LIBC_MUTEX_LOCK;
+ INIT___LIBC_MUTEX_UNLOCK;
+ INIT___LIBC_THR_SETCANCELSTATE;
+ INIT_GETMNTENT;
+ INIT_GETMNTENT_R;
+ INIT_STATFS;
+ INIT_STATFS64;
+ INIT_STATVFS;
+ INIT_STATVFS64;
+ INIT_INITGROUPS;
+ INIT_ETHER_NTOA_ATON;
+ INIT_ETHER_HOST;
+ INIT_ETHER_R;
+ INIT_SHMCTL;
+ INIT_RANDOM_R;
+ INIT_PTHREAD_ATTR_GET;
+ INIT_PTHREAD_ATTR_GET_SCHED;
+ INIT_PTHREAD_ATTR_GETINHERITSCHED;
+ INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_PTHREAD_MUTEXATTR_GETPSHARED;
+ INIT_PTHREAD_MUTEXATTR_GETTYPE;
+ INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;
+ INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST_NP;
+ INIT_PTHREAD_RWLOCKATTR_GETPSHARED;
+ INIT_PTHREAD_RWLOCKATTR_GETKIND_NP;
+ INIT_PTHREAD_CONDATTR_GETPSHARED;
+ INIT_PTHREAD_CONDATTR_GETCLOCK;
+ INIT_PTHREAD_BARRIERATTR_GETPSHARED;
+ INIT_TMPNAM;
+ INIT_TMPNAM_R;
+ INIT_TTYNAME;
+ INIT_TTYNAME_R;
+ INIT_TEMPNAM;
+ INIT_PTHREAD_SETNAME_NP;
+ INIT_PTHREAD_GETNAME_NP;
+ INIT_SINCOS;
+ INIT_REMQUO;
+ INIT_REMQUOL;
+ INIT_LGAMMA;
+ INIT_LGAMMAL;
+ INIT_LGAMMA_R;
+ INIT_LGAMMAL_R;
+ INIT_DRAND48_R;
+ INIT_RAND_R;
+ INIT_GETLINE;
+ INIT_ICONV;
+ INIT_TIMES;
+ INIT_TLS_GET_ADDR;
+ INIT_LISTXATTR;
+ INIT_GETXATTR;
+ INIT_GETRESID;
+ INIT_GETIFADDRS;
+ INIT_IF_INDEXTONAME;
+ INIT_CAPGET;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_BZERO;
+ INIT_FTIME;
+ INIT_XDR;
+ INIT_TSEARCH;
+ INIT_LIBIO_INTERNALS;
+ INIT_FOPEN;
+ INIT_FOPEN64;
+ INIT_OPEN_MEMSTREAM;
+ INIT_OBSTACK;
+ INIT_FFLUSH;
+ INIT_FCLOSE;
+ INIT_DLOPEN_DLCLOSE;
+ INIT_GETPASS;
+ INIT_TIMERFD;
+ INIT_MLOCKX;
+ INIT_FOPENCOOKIE;
+ INIT_SEM;
+ INIT_PTHREAD_SETCANCEL;
+ INIT_MINCORE;
+ INIT_PROCESS_VM_READV;
+ INIT_CTERMID;
+ INIT_CTERMID_R;
+ INIT_RECV_RECVFROM;
+ INIT_SEND_SENDTO;
+ INIT_STAT;
+ INIT_EVENTFD_READ_WRITE;
+ INIT_LSTAT;
+ INIT___XSTAT;
+ INIT___XSTAT64;
+ INIT___LXSTAT;
+ INIT___LXSTAT64;
+ // FIXME: add other *stat interceptors.
+ INIT_UTMP;
+ INIT_UTMPX;
+ INIT_GETLOADAVG;
+ INIT_WCSLEN;
+ INIT_WCSCAT;
+ INIT_WCSDUP;
+ INIT_WCSXFRM;
+ INIT___WCSXFRM_L;
+ INIT_ACCT;
+ INIT_USER_FROM_UID;
+ INIT_UID_FROM_USER;
+ INIT_GROUP_FROM_GID;
+ INIT_GID_FROM_GROUP;
+ INIT_ACCESS;
+ INIT_FACCESSAT;
+ INIT_GETGROUPLIST;
+ INIT_GETGROUPMEMBERSHIP;
+ INIT_READLINK;
+ INIT_READLINKAT;
+ INIT_NAME_TO_HANDLE_AT;
+ INIT_OPEN_BY_HANDLE_AT;
+ INIT_STRLCPY;
+ INIT_DEVNAME;
+ INIT_DEVNAME_R;
+ INIT_FGETLN;
+ INIT_STRMODE;
+ INIT_TTYENT;
+ INIT_PROTOENT;
+ INIT_PROTOENT_R;
+ INIT_NETENT;
+ INIT_GETMNTINFO;
+ INIT_MI_VECTOR_HASH;
+ INIT_SETVBUF;
+ INIT_GETVFSSTAT;
+ INIT_REGEX;
+ INIT_REGEXSUB;
+ INIT_FTS;
+ INIT_SYSCTL;
+ INIT_ASYSCTL;
+ INIT_SYSCTLGETMIBINFO;
+ INIT_NL_LANGINFO;
+ INIT_MODCTL;
+ INIT_STRTONUM;
+ INIT_FPARSELN;
+ INIT_STATVFS1;
+ INIT_STRTOI;
+ INIT_CAPSICUM;
+ INIT_SHA1;
+ INIT_MD4;
+ INIT_RMD160;
+ INIT_MD5;
+ INIT_FSEEK;
+ INIT_MD2;
+ INIT_SHA2;
+ INIT_VIS;
+ INIT_CDB;
+ INIT_GETFSENT;
+ INIT_ARC4RANDOM;
+ INIT_POPEN;
+ INIT_POPENVE;
+ INIT_PCLOSE;
+ INIT_FUNOPEN;
+ INIT_FUNOPEN2;
+ INIT_FDEVNAME;
+ INIT_GETUSERSHELL;
+ INIT_SL_INIT;
+ INIT_GETRANDOM;
+ INIT_CRYPT;
+ INIT_CRYPT_R;
+ INIT_GETENTROPY;
+ INIT_QSORT;
+ INIT_QSORT_R;
+ INIT_SIGALTSTACK;
+ INIT_UNAME;
+ INIT___XUNAME;
+
+ INIT___PRINTF_CHK;
+}
diff --git a/lib/tsan/sanitizer_common/sanitizer_common_interceptors_format.inc b/lib/tsan/sanitizer_common/sanitizer_common_interceptors_format.inc
new file mode 100644
index 0000000000..bbbedda8fb
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -0,0 +1,562 @@
+//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf/printf implementation for use in *Sanitizer interceptors.
+// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
+// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html
+// with a few common GNU extensions.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdarg.h>
+
+static const char *parse_number(const char *p, int *out) {
+ *out = internal_atoll(p);
+ while (*p >= '0' && *p <= '9')
+ ++p;
+ return p;
+}
+
+static const char *maybe_parse_param_index(const char *p, int *out) {
+ // n$
+ if (*p >= '0' && *p <= '9') {
+ int number;
+ const char *q = parse_number(p, &number);
+ CHECK(q);
+ if (*q == '$') {
+ *out = number;
+ p = q + 1;
+ }
+ }
+
+ // Otherwise, do not change p. This will be re-parsed later as the field
+ // width.
+ return p;
+}
+
+static bool char_is_one_of(char c, const char *s) {
+ return !!internal_strchr(s, c);
+}
+
+static const char *maybe_parse_length_modifier(const char *p, char ll[2]) {
+ if (char_is_one_of(*p, "jztLq")) {
+ ll[0] = *p;
+ ++p;
+ } else if (*p == 'h') {
+ ll[0] = 'h';
+ ++p;
+ if (*p == 'h') {
+ ll[1] = 'h';
+ ++p;
+ }
+ } else if (*p == 'l') {
+ ll[0] = 'l';
+ ++p;
+ if (*p == 'l') {
+ ll[1] = 'l';
+ ++p;
+ }
+ }
+ return p;
+}
+
+// Returns true if the character is an integer conversion specifier.
+static bool format_is_integer_conv(char c) {
+ return char_is_one_of(c, "diouxXn");
+}
+
+// Returns true if the character is an floating point conversion specifier.
+static bool format_is_float_conv(char c) {
+ return char_is_one_of(c, "aAeEfFgG");
+}
+
+// Returns string output character size for string-like conversions,
+// or 0 if the conversion is invalid.
+static int format_get_char_size(char convSpecifier,
+ const char lengthModifier[2]) {
+ if (char_is_one_of(convSpecifier, "CS")) {
+ return sizeof(wchar_t);
+ }
+
+ if (char_is_one_of(convSpecifier, "cs[")) {
+ if (lengthModifier[0] == 'l' && lengthModifier[1] == '\0')
+ return sizeof(wchar_t);
+ else if (lengthModifier[0] == '\0')
+ return sizeof(char);
+ }
+
+ return 0;
+}
+
+enum FormatStoreSize {
+ // Store size not known in advance; can be calculated as wcslen() of the
+ // destination buffer.
+ FSS_WCSLEN = -2,
+ // Store size not known in advance; can be calculated as strlen() of the
+ // destination buffer.
+ FSS_STRLEN = -1,
+ // Invalid conversion specifier.
+ FSS_INVALID = 0
+};
+
+// Returns the memory size of a format directive (if >0), or a value of
+// FormatStoreSize.
+static int format_get_value_size(char convSpecifier,
+ const char lengthModifier[2],
+ bool promote_float) {
+ if (format_is_integer_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'h':
+ return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
+ case 'q':
+ return sizeof(long long);
+ case 'L':
+ return sizeof(long long);
+ case 'j':
+ return sizeof(INTMAX_T);
+ case 'z':
+ return sizeof(SIZE_T);
+ case 't':
+ return sizeof(PTRDIFF_T);
+ case 0:
+ return sizeof(int);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (format_is_float_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'L':
+ case 'q':
+ return sizeof(long double);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long double)
+ : sizeof(double);
+ case 0:
+ // Printf promotes floats to doubles but scanf does not
+ return promote_float ? sizeof(double) : sizeof(float);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (convSpecifier == 'p') {
+ if (lengthModifier[0] != 0)
+ return FSS_INVALID;
+ return sizeof(void *);
+ }
+
+ return FSS_INVALID;
+}
+
+struct ScanfDirective {
+ int argIdx; // argument index, or -1 if not specified ("%n$")
+ int fieldWidth;
+ const char *begin;
+ const char *end;
+ bool suppressed; // suppress assignment ("*")
+ bool allocate; // allocate space ("m")
+ char lengthModifier[2];
+ char convSpecifier;
+ bool maybeGnuMalloc;
+};
+
+// Parse scanf format string. If a valid directive in encountered, it is
+// returned in dir. This function returns the pointer to the first
+// unprocessed character, or 0 in case of error.
+// In case of the end-of-string, a pointer to the closing \0 is returned.
+static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
+ ScanfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return nullptr;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->argIdx);
+ CHECK(p);
+ // *
+ if (*p == '*') {
+ dir->suppressed = true;
+ ++p;
+ }
+ // Field width
+ if (*p >= '0' && *p <= '9') {
+ p = parse_number(p, &dir->fieldWidth);
+ CHECK(p);
+ if (dir->fieldWidth <= 0) // Width if at all must be non-zero
+ return nullptr;
+ }
+ // m
+ if (*p == 'm') {
+ dir->allocate = true;
+ ++p;
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ // Consume %[...] expression.
+ if (dir->convSpecifier == '[') {
+ if (*p == '^')
+ ++p;
+ if (*p == ']')
+ ++p;
+ while (*p && *p != ']')
+ ++p;
+ if (*p == 0)
+ return nullptr; // unexpected end of string
+ // Consume the closing ']'.
+ ++p;
+ }
+ // This is unfortunately ambiguous between old GNU extension
+ // of %as, %aS and %a[...] and newer POSIX %a followed by
+ // letters s, S or [.
+ if (allowGnuMalloc && dir->convSpecifier == 'a' &&
+ !dir->lengthModifier[0]) {
+ if (*p == 's' || *p == 'S') {
+ dir->maybeGnuMalloc = true;
+ ++p;
+ } else if (*p == '[') {
+ // Watch for %a[h-j%d], if % appears in the
+ // [...] range, then we need to give up, we don't know
+ // if scanf will parse it as POSIX %a [h-j %d ] or
+ // GNU allocation of string with range dh-j plus %.
+ const char *q = p + 1;
+ if (*q == '^')
+ ++q;
+ if (*q == ']')
+ ++q;
+ while (*q && *q != ']' && *q != '%')
+ ++q;
+ if (*q == 0 || *q == '%')
+ return nullptr;
+ p = q + 1; // Consume the closing ']'.
+ dir->maybeGnuMalloc = true;
+ }
+ }
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int scanf_get_value_size(ScanfDirective *dir) {
+ if (dir->allocate) {
+ if (!char_is_one_of(dir->convSpecifier, "cCsS["))
+ return FSS_INVALID;
+ return sizeof(char *);
+ }
+
+ if (dir->maybeGnuMalloc) {
+ if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
+ return FSS_INVALID;
+ // This is ambiguous, so check the smaller size of char * (if it is
+ // a GNU extension of %as, %aS or %a[...]) and float (if it is
+ // POSIX %a followed by s, S or [ letters).
+ return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS[")) {
+ bool needsTerminator = char_is_one_of(dir->convSpecifier, "sS[");
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (dir->fieldWidth == 0) {
+ if (!needsTerminator)
+ return charSize;
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return (dir->fieldWidth + needsTerminator) * charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);
+}
+
+// Common part of *scanf interceptors.
+// Process format string and va_list, and report all store ranges.
+// Stops when "consuming" n_inputs input items.
+static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
+ const char *format, va_list aq) {
+ CHECK_GT(n_inputs, 0);
+ const char *p = format;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ while (*p) {
+ ScanfDirective dir;
+ p = scanf_parse_next(p, allowGnuMalloc, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.suppressed)
+ continue;
+ int size = scanf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("%s: WARNING: unexpected format specifier in scanf interceptor: ",
+ SanitizerToolName, "%.*s\n", dir.end - dir.begin, dir.begin);
+ break;
+ }
+ void *argp = va_arg(aq, void *);
+ if (dir.convSpecifier != 'n')
+ --n_inputs;
+ if (n_inputs < 0)
+ break;
+ if (size == FSS_STRLEN) {
+ size = internal_strlen((const char *)argp) + 1;
+ } else if (size == FSS_WCSLEN) {
+ // FIXME: actually use wcslen() to calculate it.
+ size = 0;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ }
+}
+
+#if SANITIZER_INTERCEPT_PRINTF
+
+struct PrintfDirective {
+ int fieldWidth;
+ int fieldPrecision;
+ int argIdx; // width argument index, or -1 if not specified ("%*n$")
+ int precisionIdx; // precision argument index, or -1 if not specified (".*n$")
+ const char *begin;
+ const char *end;
+ bool starredWidth;
+ bool starredPrecision;
+ char lengthModifier[2];
+ char convSpecifier;
+};
+
+static const char *maybe_parse_number(const char *p, int *out) {
+ if (*p >= '0' && *p <= '9')
+ p = parse_number(p, out);
+ return p;
+}
+
+static const char *maybe_parse_number_or_star(const char *p, int *out,
+ bool *star) {
+ if (*p == '*') {
+ *star = true;
+ ++p;
+ } else {
+ *star = false;
+ p = maybe_parse_number(p, out);
+ }
+ return p;
+}
+
+// Parse printf format string. Same as scanf_parse_next.
+static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+ dir->precisionIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return nullptr;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ // Flags
+ while (char_is_one_of(*p, "'-+ #0")) {
+ ++p;
+ }
+ // Field width
+ p = maybe_parse_number_or_star(p, &dir->fieldWidth,
+ &dir->starredWidth);
+ if (!p)
+ return nullptr;
+ // Precision
+ if (*p == '.') {
+ ++p;
+ // Actual precision is optional (surprise!)
+ p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
+ &dir->starredPrecision);
+ if (!p)
+ return nullptr;
+ // m$
+ if (dir->starredPrecision) {
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ }
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int printf_get_value_size(PrintfDirective *dir) {
+ if (char_is_one_of(dir->convSpecifier, "cCsS")) {
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (char_is_one_of(dir->convSpecifier, "sS")) {
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);
+}
+
+#define SKIP_SCALAR_ARG(aq, convSpecifier, size) \
+ do { \
+ if (format_is_float_conv(convSpecifier)) { \
+ switch (size) { \
+ case 8: \
+ va_arg(*aq, double); \
+ break; \
+ case 12: \
+ va_arg(*aq, long double); \
+ break; \
+ case 16: \
+ va_arg(*aq, long double); \
+ break; \
+ default: \
+ Report("WARNING: unexpected floating-point arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } else { \
+ switch (size) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ va_arg(*aq, u32); \
+ break; \
+ case 8: \
+ va_arg(*aq, u64); \
+ break; \
+ default: \
+ Report("WARNING: unexpected arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } \
+ } while (0)
+
+// Common part of *printf interceptors.
+// Process format string and va_list, and report all load ranges.
+static void printf_common(void *ctx, const char *format, va_list aq) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ const char *p = format;
+
+ while (*p) {
+ PrintfDirective dir;
+ p = printf_parse_next(p, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1 || dir.precisionIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.starredWidth) {
+ // Dynamic width
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ if (dir.starredPrecision) {
+ // Dynamic precision
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ // %m does not require an argument: strlen(errno).
+ if (dir.convSpecifier == 'm')
+ continue;
+ int size = printf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ static int ReportedOnce;
+ if (!ReportedOnce++)
+ Report(
+ "%s: WARNING: unexpected format specifier in printf "
+ "interceptor: %.*s (reported once per process)\n",
+ SanitizerToolName, dir.end - dir.begin, dir.begin);
+ break;
+ }
+ if (dir.convSpecifier == 'n') {
+ void *argp = va_arg(aq, void *);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ continue;
+ } else if (size == FSS_STRLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ if (dir.starredPrecision) {
+ // FIXME: properly support starred precision for strings.
+ size = 0;
+ } else if (dir.fieldPrecision > 0) {
+ // Won't read more than "precision" symbols.
+ size = internal_strnlen((const char *)argp, dir.fieldPrecision);
+ if (size < dir.fieldPrecision) size++;
+ } else {
+ // Whole string will be accessed.
+ size = internal_strlen((const char *)argp) + 1;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else if (size == FSS_WCSLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ // FIXME: Properly support wide-character strings (via wcsrtombs).
+ size = 0;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else {
+ // Skip non-pointer args
+ SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);
+ }
+ }
+}
+
+#endif // SANITIZER_INTERCEPT_PRINTF
diff --git a/lib/tsan/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/lib/tsan/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
new file mode 100644
index 0000000000..490a04b218
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -0,0 +1,609 @@
+//===-- sanitizer_common_interceptors_ioctl.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Ioctl handling in common sanitizer interceptors.
+//===----------------------------------------------------------------------===//
+
+#if !SANITIZER_NETBSD
+
+#include "sanitizer_flags.h"
+
+struct ioctl_desc {
+ unsigned req;
+ // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
+ // same thing.
+ // XXX: The declarations below may use WRITE instead of READWRITE, unless
+ // explicitly noted.
+ enum {
+ NONE,
+ READ,
+ WRITE,
+ READWRITE,
+ CUSTOM
+ } type : 3;
+ unsigned size : 29;
+ const char* name;
+};
+
+const unsigned ioctl_table_max = 500;
+static ioctl_desc ioctl_table[ioctl_table_max];
+static unsigned ioctl_table_size = 0;
+
+// This can not be declared as a global, because references to struct_*_sz
+// require a global initializer. And this table must be available before global
+// initializers are run.
+static void ioctl_table_fill() {
+#define _(rq, tp, sz) \
+ if (IOCTL_##rq != IOCTL_NOT_PRESENT) { \
+ CHECK(ioctl_table_size < ioctl_table_max); \
+ ioctl_table[ioctl_table_size].req = IOCTL_##rq; \
+ ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \
+ ioctl_table[ioctl_table_size].size = sz; \
+ ioctl_table[ioctl_table_size].name = #rq; \
+ ++ioctl_table_size; \
+ }
+
+ _(FIOASYNC, READ, sizeof(int));
+ _(FIOCLEX, NONE, 0);
+ _(FIOGETOWN, WRITE, sizeof(int));
+ _(FIONBIO, READ, sizeof(int));
+ _(FIONCLEX, NONE, 0);
+ _(FIOSETOWN, READ, sizeof(int));
+ _(SIOCATMARK, WRITE, sizeof(int));
+ _(SIOCGIFCONF, CUSTOM, 0);
+ _(SIOCGPGRP, WRITE, sizeof(int));
+ _(SIOCSPGRP, READ, sizeof(int));
+#if !SANITIZER_SOLARIS
+ _(TIOCCONS, NONE, 0);
+#endif
+ _(TIOCEXCL, NONE, 0);
+ _(TIOCGETD, WRITE, sizeof(int));
+ _(TIOCGPGRP, WRITE, pid_t_sz);
+ _(TIOCGWINSZ, WRITE, struct_winsize_sz);
+ _(TIOCMBIC, READ, sizeof(int));
+ _(TIOCMBIS, READ, sizeof(int));
+ _(TIOCMGET, WRITE, sizeof(int));
+ _(TIOCMSET, READ, sizeof(int));
+ _(TIOCNOTTY, NONE, 0);
+ _(TIOCNXCL, NONE, 0);
+ _(TIOCOUTQ, WRITE, sizeof(int));
+ _(TIOCPKT, READ, sizeof(int));
+ _(TIOCSCTTY, NONE, 0);
+ _(TIOCSETD, READ, sizeof(int));
+ _(TIOCSPGRP, READ, pid_t_sz);
+ _(TIOCSTI, READ, sizeof(char));
+ _(TIOCSWINSZ, READ, struct_winsize_sz);
+
+#if !SANITIZER_IOS
+ _(SIOCADDMULTI, READ, struct_ifreq_sz);
+ _(SIOCDELMULTI, READ, struct_ifreq_sz);
+ _(SIOCGIFADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMTU, WRITE, struct_ifreq_sz);
+ _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
+ _(SIOCSIFADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
+ _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
+ _(SIOCSIFMTU, READ, struct_ifreq_sz);
+ _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
+#endif
+
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+ _(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
+ _(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
+#endif
+
+#if SANITIZER_LINUX
+ // Conflicting request ids.
+ // _(CDROMAUDIOBUFSIZ, NONE, 0);
+ // _(SNDCTL_TMR_CONTINUE, NONE, 0);
+ // _(SNDCTL_TMR_START, NONE, 0);
+ // _(SNDCTL_TMR_STOP, NONE, 0);
+ // _(SOUND_MIXER_READ_LOUD, WRITE, sizeof(int)); // same as ...READ_ENHANCE
+ // _(SOUND_MIXER_READ_MUTE, WRITE, sizeof(int)); // same as ...READ_ENHANCE
+ // _(SOUND_MIXER_WRITE_LOUD, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
+ // _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
+ _(BLKFLSBUF, NONE, 0);
+ _(BLKGETSIZE, WRITE, sizeof(uptr));
+ _(BLKRAGET, WRITE, sizeof(int));
+ _(BLKRASET, NONE, 0);
+ _(BLKROGET, WRITE, sizeof(int));
+ _(BLKROSET, READ, sizeof(int));
+ _(BLKRRPART, NONE, 0);
+ _(CDROMEJECT, NONE, 0);
+ _(CDROMEJECT_SW, NONE, 0);
+ _(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);
+ _(CDROMPAUSE, NONE, 0);
+ _(CDROMPLAYMSF, READ, struct_cdrom_msf_sz);
+ _(CDROMPLAYTRKIND, READ, struct_cdrom_ti_sz);
+ _(CDROMREADAUDIO, READ, struct_cdrom_read_audio_sz);
+ _(CDROMREADCOOKED, READ, struct_cdrom_msf_sz);
+ _(CDROMREADMODE1, READ, struct_cdrom_msf_sz);
+ _(CDROMREADMODE2, READ, struct_cdrom_msf_sz);
+ _(CDROMREADRAW, READ, struct_cdrom_msf_sz);
+ _(CDROMREADTOCENTRY, WRITE, struct_cdrom_tocentry_sz);
+ _(CDROMREADTOCHDR, WRITE, struct_cdrom_tochdr_sz);
+ _(CDROMRESET, NONE, 0);
+ _(CDROMRESUME, NONE, 0);
+ _(CDROMSEEK, READ, struct_cdrom_msf_sz);
+ _(CDROMSTART, NONE, 0);
+ _(CDROMSTOP, NONE, 0);
+ _(CDROMSUBCHNL, WRITE, struct_cdrom_subchnl_sz);
+ _(CDROMVOLCTRL, READ, struct_cdrom_volctrl_sz);
+ _(CDROMVOLREAD, WRITE, struct_cdrom_volctrl_sz);
+ _(CDROM_GET_UPC, WRITE, 8);
+ _(EVIOCGABS, WRITE, struct_input_absinfo_sz); // fixup
+ _(EVIOCGBIT, WRITE, struct_input_id_sz); // fixup
+ _(EVIOCGEFFECTS, WRITE, sizeof(int));
+ _(EVIOCGID, WRITE, struct_input_id_sz);
+ _(EVIOCGKEY, WRITE, 0);
+ _(EVIOCGKEYCODE, WRITE, sizeof(int) * 2);
+ _(EVIOCGLED, WRITE, 0);
+ _(EVIOCGNAME, WRITE, 0);
+ _(EVIOCGPHYS, WRITE, 0);
+ _(EVIOCGRAB, READ, sizeof(int));
+ _(EVIOCGREP, WRITE, sizeof(int) * 2);
+ _(EVIOCGSND, WRITE, 0);
+ _(EVIOCGSW, WRITE, 0);
+ _(EVIOCGUNIQ, WRITE, 0);
+ _(EVIOCGVERSION, WRITE, sizeof(int));
+ _(EVIOCRMFF, READ, sizeof(int));
+ _(EVIOCSABS, READ, struct_input_absinfo_sz); // fixup
+ _(EVIOCSFF, READ, struct_ff_effect_sz);
+ _(EVIOCSKEYCODE, READ, sizeof(int) * 2);
+ _(EVIOCSREP, READ, sizeof(int) * 2);
+ _(FDCLRPRM, NONE, 0);
+ _(FDDEFPRM, READ, struct_floppy_struct_sz);
+ _(FDFLUSH, NONE, 0);
+ _(FDFMTBEG, NONE, 0);
+ _(FDFMTEND, NONE, 0);
+ _(FDFMTTRK, READ, struct_format_descr_sz);
+ _(FDGETDRVPRM, WRITE, struct_floppy_drive_params_sz);
+ _(FDGETDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
+ _(FDGETDRVTYP, WRITE, 16);
+ _(FDGETFDCSTAT, WRITE, struct_floppy_fdc_state_sz);
+ _(FDGETMAXERRS, WRITE, struct_floppy_max_errors_sz);
+ _(FDGETPRM, WRITE, struct_floppy_struct_sz);
+ _(FDMSGOFF, NONE, 0);
+ _(FDMSGON, NONE, 0);
+ _(FDPOLLDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
+ _(FDRAWCMD, WRITE, struct_floppy_raw_cmd_sz);
+ _(FDRESET, NONE, 0);
+ _(FDSETDRVPRM, READ, struct_floppy_drive_params_sz);
+ _(FDSETEMSGTRESH, NONE, 0);
+ _(FDSETMAXERRS, READ, struct_floppy_max_errors_sz);
+ _(FDSETPRM, READ, struct_floppy_struct_sz);
+ _(FDTWADDLE, NONE, 0);
+ _(FDWERRORCLR, NONE, 0);
+ _(FDWERRORGET, WRITE, struct_floppy_write_errors_sz);
+ _(HDIO_DRIVE_CMD, WRITE, sizeof(int));
+ _(HDIO_GETGEO, WRITE, struct_hd_geometry_sz);
+ _(HDIO_GET_32BIT, WRITE, sizeof(int));
+ _(HDIO_GET_DMA, WRITE, sizeof(int));
+ _(HDIO_GET_IDENTITY, WRITE, struct_hd_driveid_sz);
+ _(HDIO_GET_KEEPSETTINGS, WRITE, sizeof(int));
+ _(HDIO_GET_MULTCOUNT, WRITE, sizeof(int));
+ _(HDIO_GET_NOWERR, WRITE, sizeof(int));
+ _(HDIO_GET_UNMASKINTR, WRITE, sizeof(int));
+ _(HDIO_SET_32BIT, NONE, 0);
+ _(HDIO_SET_DMA, NONE, 0);
+ _(HDIO_SET_KEEPSETTINGS, NONE, 0);
+ _(HDIO_SET_MULTCOUNT, NONE, 0);
+ _(HDIO_SET_NOWERR, NONE, 0);
+ _(HDIO_SET_UNMASKINTR, NONE, 0);
+ _(MTIOCGET, WRITE, struct_mtget_sz);
+ _(MTIOCPOS, WRITE, struct_mtpos_sz);
+ _(MTIOCTOP, READ, struct_mtop_sz);
+ _(PPPIOCGASYNCMAP, WRITE, sizeof(int));
+ _(PPPIOCGDEBUG, WRITE, sizeof(int));
+ _(PPPIOCGFLAGS, WRITE, sizeof(int));
+ _(PPPIOCGUNIT, WRITE, sizeof(int));
+ _(PPPIOCGXASYNCMAP, WRITE, sizeof(int) * 8);
+ _(PPPIOCSASYNCMAP, READ, sizeof(int));
+ _(PPPIOCSDEBUG, READ, sizeof(int));
+ _(PPPIOCSFLAGS, READ, sizeof(int));
+ _(PPPIOCSMAXCID, READ, sizeof(int));
+ _(PPPIOCSMRU, READ, sizeof(int));
+ _(PPPIOCSXASYNCMAP, READ, sizeof(int) * 8);
+ _(SIOCADDRT, READ, struct_rtentry_sz);
+ _(SIOCDARP, READ, struct_arpreq_sz);
+ _(SIOCDELRT, READ, struct_rtentry_sz);
+ _(SIOCDRARP, READ, struct_arpreq_sz);
+ _(SIOCGARP, WRITE, struct_arpreq_sz);
+ _(SIOCGIFENCAP, WRITE, sizeof(int));
+ _(SIOCGIFHWADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMAP, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMEM, WRITE, struct_ifreq_sz);
+ _(SIOCGIFNAME, NONE, 0);
+ _(SIOCGIFSLAVE, NONE, 0);
+ _(SIOCGRARP, WRITE, struct_arpreq_sz);
+ _(SIOCGSTAMP, WRITE, timeval_sz);
+ _(SIOCSARP, READ, struct_arpreq_sz);
+ _(SIOCSIFENCAP, READ, sizeof(int));
+ _(SIOCSIFHWADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFLINK, NONE, 0);
+ _(SIOCSIFMAP, READ, struct_ifreq_sz);
+ _(SIOCSIFMEM, READ, struct_ifreq_sz);
+ _(SIOCSIFSLAVE, NONE, 0);
+ _(SIOCSRARP, READ, struct_arpreq_sz);
+ _(SNDCTL_COPR_HALT, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_LOAD, READ, struct_copr_buffer_sz);
+ _(SNDCTL_COPR_RCODE, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_RCVMSG, WRITE, struct_copr_msg_sz);
+ _(SNDCTL_COPR_RDATA, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_RESET, NONE, 0);
+ _(SNDCTL_COPR_RUN, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_SENDMSG, READ, struct_copr_msg_sz);
+ _(SNDCTL_COPR_WCODE, READ, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_WDATA, READ, struct_copr_debug_buf_sz);
+ _(SNDCTL_DSP_GETBLKSIZE, WRITE, sizeof(int));
+ _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_NONBLOCK, NONE, 0);
+ _(SNDCTL_DSP_POST, NONE, 0);
+ _(SNDCTL_DSP_RESET, NONE, 0);
+ _(SNDCTL_DSP_SETFMT, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SETFRAGMENT, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SPEED, WRITE, sizeof(int));
+ _(SNDCTL_DSP_STEREO, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SUBDIVIDE, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SYNC, NONE, 0);
+ _(SNDCTL_FM_4OP_ENABLE, READ, sizeof(int));
+ _(SNDCTL_FM_LOAD_INSTR, READ, struct_sbi_instrument_sz);
+ _(SNDCTL_MIDI_INFO, WRITE, struct_midi_info_sz);
+ _(SNDCTL_MIDI_PRETIME, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_CTRLRATE, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_GETINCOUNT, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_GETOUTCOUNT, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_NRMIDIS, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_NRSYNTHS, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_OUTOFBAND, READ, struct_seq_event_rec_sz);
+ _(SNDCTL_SEQ_PANIC, NONE, 0);
+ _(SNDCTL_SEQ_PERCMODE, NONE, 0);
+ _(SNDCTL_SEQ_RESET, NONE, 0);
+ _(SNDCTL_SEQ_RESETSAMPLES, READ, sizeof(int));
+ _(SNDCTL_SEQ_SYNC, NONE, 0);
+ _(SNDCTL_SEQ_TESTMIDI, READ, sizeof(int));
+ _(SNDCTL_SEQ_THRESHOLD, READ, sizeof(int));
+ _(SNDCTL_SYNTH_INFO, WRITE, struct_synth_info_sz);
+ _(SNDCTL_SYNTH_MEMAVL, WRITE, sizeof(int));
+ _(SNDCTL_TMR_METRONOME, READ, sizeof(int));
+ _(SNDCTL_TMR_SELECT, WRITE, sizeof(int));
+ _(SNDCTL_TMR_SOURCE, WRITE, sizeof(int));
+ _(SNDCTL_TMR_TEMPO, WRITE, sizeof(int));
+ _(SNDCTL_TMR_TIMEBASE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_ALTPCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_BASS, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_CAPS, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_CD, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_DEVMASK, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_ENHANCE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_IGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_IMIX, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE1, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE2, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE3, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_MIC, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_OGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_PCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_RECLEV, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_RECMASK, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_RECSRC, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_SPEAKER, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_STEREODEVS, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_SYNTH, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_TREBLE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_VOLUME, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_ALTPCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_BASS, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_CD, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_ENHANCE, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_IGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_IMIX, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE1, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE2, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE3, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_MIC, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_OGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_PCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_RECLEV, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_RECSRC, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_SPEAKER, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_SYNTH, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_TREBLE, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_VOLUME, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_BITS, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_RATE, WRITE, sizeof(int));
+ _(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
+ _(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
+ _(TCFLSH, NONE, 0);
+ _(TCGETA, WRITE, struct_termio_sz);
+ _(TCGETS, WRITE, struct_termios_sz);
+ _(TCSBRK, NONE, 0);
+ _(TCSBRKP, NONE, 0);
+ _(TCSETA, READ, struct_termio_sz);
+ _(TCSETAF, READ, struct_termio_sz);
+ _(TCSETAW, READ, struct_termio_sz);
+ _(TCSETS, READ, struct_termios_sz);
+ _(TCSETSF, READ, struct_termios_sz);
+ _(TCSETSW, READ, struct_termios_sz);
+ _(TCXONC, NONE, 0);
+ _(TIOCGLCKTRMIOS, WRITE, struct_termios_sz);
+ _(TIOCGSOFTCAR, WRITE, sizeof(int));
+ _(TIOCINQ, WRITE, sizeof(int));
+ _(TIOCLINUX, READ, sizeof(char));
+ _(TIOCSERCONFIG, NONE, 0);
+ _(TIOCSERGETLSR, WRITE, sizeof(int));
+ _(TIOCSERGWILD, WRITE, sizeof(int));
+ _(TIOCSERSWILD, READ, sizeof(int));
+ _(TIOCSLCKTRMIOS, READ, struct_termios_sz);
+ _(TIOCSSOFTCAR, READ, sizeof(int));
+ _(VT_ACTIVATE, NONE, 0);
+ _(VT_DISALLOCATE, NONE, 0);
+ _(VT_GETMODE, WRITE, struct_vt_mode_sz);
+ _(VT_GETSTATE, WRITE, struct_vt_stat_sz);
+ _(VT_OPENQRY, WRITE, sizeof(int));
+ _(VT_RELDISP, NONE, 0);
+ _(VT_RESIZE, READ, struct_vt_sizes_sz);
+ _(VT_RESIZEX, READ, struct_vt_consize_sz);
+ _(VT_SENDSIG, NONE, 0);
+ _(VT_SETMODE, READ, struct_vt_mode_sz);
+ _(VT_WAITACTIVE, NONE, 0);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ // _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
+ _(CYGETDEFTHRESH, WRITE, sizeof(int));
+ _(CYGETDEFTIMEOUT, WRITE, sizeof(int));
+ _(CYGETMON, WRITE, struct_cyclades_monitor_sz);
+ _(CYGETTHRESH, WRITE, sizeof(int));
+ _(CYGETTIMEOUT, WRITE, sizeof(int));
+ _(CYSETDEFTHRESH, NONE, 0);
+ _(CYSETDEFTIMEOUT, NONE, 0);
+ _(CYSETTHRESH, NONE, 0);
+ _(CYSETTIMEOUT, NONE, 0);
+ _(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);
+ _(EQL_ENSLAVE, WRITE, struct_ifreq_sz);
+ _(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);
+ _(EQL_GETSLAVECFG, WRITE, struct_ifreq_sz);
+ _(EQL_SETMASTRCFG, WRITE, struct_ifreq_sz);
+ _(EQL_SETSLAVECFG, WRITE, struct_ifreq_sz);
+ _(EVIOCGKEYCODE_V2, WRITE, struct_input_keymap_entry_sz);
+ _(EVIOCGPROP, WRITE, 0);
+ _(EVIOCSKEYCODE_V2, READ, struct_input_keymap_entry_sz);
+ _(FS_IOC_GETFLAGS, WRITE, sizeof(int));
+ _(FS_IOC_GETVERSION, WRITE, sizeof(int));
+ _(FS_IOC_SETFLAGS, READ, sizeof(int));
+ _(FS_IOC_SETVERSION, READ, sizeof(int));
+ _(GIO_CMAP, WRITE, 48);
+ _(GIO_FONT, WRITE, 8192);
+ _(GIO_SCRNMAP, WRITE, e_tabsz);
+ _(GIO_UNIMAP, WRITE, struct_unimapdesc_sz);
+ _(GIO_UNISCRNMAP, WRITE, sizeof(short) * e_tabsz);
+ _(KDADDIO, NONE, 0);
+ _(KDDELIO, NONE, 0);
+ _(KDDISABIO, NONE, 0);
+ _(KDENABIO, NONE, 0);
+ _(KDGETKEYCODE, WRITE, struct_kbkeycode_sz);
+ _(KDGETLED, WRITE, 1);
+ _(KDGETMODE, WRITE, sizeof(int));
+ _(KDGKBDIACR, WRITE, struct_kbdiacrs_sz);
+ _(KDGKBENT, WRITE, struct_kbentry_sz);
+ _(KDGKBLED, WRITE, sizeof(int));
+ _(KDGKBMETA, WRITE, sizeof(int));
+ _(KDGKBMODE, WRITE, sizeof(int));
+ _(KDGKBSENT, WRITE, struct_kbsentry_sz);
+ _(KDGKBTYPE, WRITE, 1);
+ _(KDMAPDISP, NONE, 0);
+ _(KDMKTONE, NONE, 0);
+ _(KDSETKEYCODE, READ, struct_kbkeycode_sz);
+ _(KDSETLED, NONE, 0);
+ _(KDSETMODE, NONE, 0);
+ _(KDSIGACCEPT, NONE, 0);
+ _(KDSKBDIACR, READ, struct_kbdiacrs_sz);
+ _(KDSKBENT, READ, struct_kbentry_sz);
+ _(KDSKBLED, NONE, 0);
+ _(KDSKBMETA, NONE, 0);
+ _(KDSKBMODE, NONE, 0);
+ _(KDSKBSENT, READ, struct_kbsentry_sz);
+ _(KDUNMAPDISP, NONE, 0);
+ _(KIOCSOUND, NONE, 0);
+ _(LPABORT, NONE, 0);
+ _(LPABORTOPEN, NONE, 0);
+ _(LPCAREFUL, NONE, 0);
+ _(LPCHAR, NONE, 0);
+ _(LPGETIRQ, WRITE, sizeof(int));
+ _(LPGETSTATUS, WRITE, sizeof(int));
+ _(LPRESET, NONE, 0);
+ _(LPSETIRQ, NONE, 0);
+ _(LPTIME, NONE, 0);
+ _(LPWAIT, NONE, 0);
+ _(MTIOCGETCONFIG, WRITE, struct_mtconfiginfo_sz);
+ _(MTIOCSETCONFIG, READ, struct_mtconfiginfo_sz);
+ _(PIO_CMAP, NONE, 0);
+ _(PIO_FONT, READ, 8192);
+ _(PIO_SCRNMAP, READ, e_tabsz);
+ _(PIO_UNIMAP, READ, struct_unimapdesc_sz);
+ _(PIO_UNIMAPCLR, READ, struct_unimapinit_sz);
+ _(PIO_UNISCRNMAP, READ, sizeof(short) * e_tabsz);
+ _(SCSI_IOCTL_PROBE_HOST, READ, sizeof(int));
+ _(SCSI_IOCTL_TAGGED_DISABLE, NONE, 0);
+ _(SCSI_IOCTL_TAGGED_ENABLE, NONE, 0);
+ _(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);
+ _(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);
+ _(TIOCGSERIAL, WRITE, struct_serial_struct_sz);
+ _(TIOCSERGETMULTI, WRITE, struct_serial_multiport_struct_sz);
+ _(TIOCSERSETMULTI, READ, struct_serial_multiport_struct_sz);
+ _(TIOCSSERIAL, READ, struct_serial_struct_sz);
+
+ // The following ioctl requests are shared between AX25, IPX, netrom and
+ // mrouted.
+ // _(SIOCAIPXITFCRT, READ, sizeof(char));
+ // _(SIOCAX25GETUID, READ, struct_sockaddr_ax25_sz);
+ // _(SIOCNRGETPARMS, WRITE, struct_nr_parms_struct_sz);
+ // _(SIOCAIPXPRISLT, READ, sizeof(char));
+ // _(SIOCNRSETPARMS, READ, struct_nr_parms_struct_sz);
+ // _(SIOCAX25ADDUID, READ, struct_sockaddr_ax25_sz);
+ // _(SIOCNRDECOBS, NONE, 0);
+ // _(SIOCAX25DELUID, READ, struct_sockaddr_ax25_sz);
+ // _(SIOCIPXCFGDATA, WRITE, struct_ipx_config_data_sz);
+ // _(SIOCAX25NOUID, READ, sizeof(int));
+ // _(SIOCNRRTCTL, READ, sizeof(int));
+ // _(SIOCAX25DIGCTL, READ, sizeof(int));
+ // _(SIOCAX25GETPARMS, WRITE, struct_ax25_parms_struct_sz);
+ // _(SIOCAX25SETPARMS, READ, struct_ax25_parms_struct_sz);
+#endif
+#undef _
+}
+
+static bool ioctl_initialized = false;
+
+struct ioctl_desc_compare {
+ bool operator()(const ioctl_desc& left, const ioctl_desc& right) const {
+ return left.req < right.req;
+ }
+};
+
+static void ioctl_init() {
+ ioctl_table_fill();
+ Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare());
+
+ bool bad = false;
+ for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {
+ if (ioctl_table[i].req >= ioctl_table[i + 1].req) {
+ Printf("Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\n",
+ ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,
+ ioctl_table[i + 1].name);
+ bad = true;
+ }
+ }
+
+ if (bad) Die();
+
+ ioctl_initialized = true;
+}
+
+// Handle the most evil ioctls that encode argument value as part of request id.
+static unsigned ioctl_request_fixup(unsigned req) {
+#if SANITIZER_LINUX
+ // Strip size and event number.
+ const unsigned kEviocgbitMask =
+ (IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;
+ if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)
+ return IOCTL_EVIOCGBIT;
+ // Strip absolute axis number.
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)
+ return IOCTL_EVIOCGABS;
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)
+ return IOCTL_EVIOCSABS;
+#endif
+ return req;
+}
+
+static const ioctl_desc *ioctl_table_lookup(unsigned req) {
+ int left = 0;
+ int right = ioctl_table_size;
+ while (left < right) {
+ int mid = (left + right) / 2;
+ if (ioctl_table[mid].req < req)
+ left = mid + 1;
+ else
+ right = mid;
+ }
+ if (left == right && ioctl_table[left].req == req)
+ return ioctl_table + left;
+ else
+ return nullptr;
+}
+
+static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
+ CHECK(desc);
+ desc->req = req;
+ desc->name = "<DECODED_IOCTL>";
+ desc->size = IOC_SIZE(req);
+ // Sanity check.
+ if (desc->size > 0xFFFF) return false;
+ unsigned dir = IOC_DIR(req);
+ switch (dir) {
+ case IOC_NONE:
+ desc->type = ioctl_desc::NONE;
+ break;
+ case IOC_READ | IOC_WRITE:
+ desc->type = ioctl_desc::READWRITE;
+ break;
+ case IOC_READ:
+ desc->type = ioctl_desc::WRITE;
+ break;
+ case IOC_WRITE:
+ desc->type = ioctl_desc::READ;
+ break;
+ default:
+ return false;
+ }
+ // Size can be 0 iff type is NONE.
+ if ((desc->type == IOC_NONE) != (desc->size == 0)) return false;
+ // Sanity check.
+ if (IOC_TYPE(req) == 0) return false;
+ return true;
+}
+
+static const ioctl_desc *ioctl_lookup(unsigned req) {
+ req = ioctl_request_fixup(req);
+ const ioctl_desc *desc = ioctl_table_lookup(req);
+ if (desc) return desc;
+
+ // Try stripping access size from the request id.
+ desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
+ // Sanity check: requests that encode access size are either read or write and
+ // have size of 0 in the table.
+ if (desc && desc->size == 0 &&
+ (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
+ desc->type == ioctl_desc::READ))
+ return desc;
+ return nullptr;
+}
+
+static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
+ }
+}
+
+static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
+ // FIXME: add verbose output
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
+ }
+}
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc b/lib/tsan/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
new file mode 100644
index 0000000000..6aa73ec8c6
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
@@ -0,0 +1,128 @@
+//===-- sanitizer_common_interceptors_netbsd_compat.inc ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// Interceptors for NetBSD old function calls that have been versioned.
+//
+// NetBSD minimal version supported 9.0.
+// NetBSD current version supported 9.99.26.
+//
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_NETBSD
+
+// First undef all mangled symbols.
+// Next, define compat interceptors.
+// Finally, undef INIT_ and redefine it.
+// This allows to avoid preprocessor issues.
+
+#undef fstatvfs
+#undef fstatvfs1
+#undef getmntinfo
+#undef getvfsstat
+#undef statvfs
+#undef statvfs1
+
+INTERCEPTOR(int, statvfs, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statvfs)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ return res;
+}
+
+INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatvfs)(fd, buf);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+
+#undef INIT_STATVFS
+#define INIT_STATVFS \
+ COMMON_INTERCEPT_FUNCTION(statvfs); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs); \
+ COMMON_INTERCEPT_FUNCTION(__statvfs90); \
+ COMMON_INTERCEPT_FUNCTION(__fstatvfs90)
+
+INTERCEPTOR(int, __getmntinfo13, void **mntbufp, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __getmntinfo13, mntbufp, flags);
+ int cnt = REAL(__getmntinfo13)(mntbufp, flags);
+ if (cnt > 0 && mntbufp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mntbufp, sizeof(void *));
+ if (*mntbufp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statvfs90_sz);
+ }
+ return cnt;
+}
+
+#undef INIT_GETMNTINFO
+#define INIT_GETMNTINFO \
+ COMMON_INTERCEPT_FUNCTION(__getmntinfo13); \
+ COMMON_INTERCEPT_FUNCTION(__getmntinfo90)
+
+INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getvfsstat, buf, bufsize, flags);
+ int ret = REAL(getvfsstat)(buf, bufsize, flags);
+ if (buf && ret > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, ret * struct_statvfs90_sz);
+ return ret;
+}
+
+#undef INIT_GETVFSSTAT
+#define INIT_GETVFSSTAT \
+ COMMON_INTERCEPT_FUNCTION(getvfsstat); \
+ COMMON_INTERCEPT_FUNCTION(__getvfsstat90)
+
+INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ int res = REAL(statvfs1)(path, buf, flags);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ return res;
+}
+
+INTERCEPTOR(int, fstatvfs1, int fd, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs1, fd, buf, flags);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ int res = REAL(fstatvfs1)(fd, buf, flags);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+
+#undef INIT_STATVFS1
+#define INIT_STATVFS1 \
+ COMMON_INTERCEPT_FUNCTION(statvfs1); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs1); \
+ COMMON_INTERCEPT_FUNCTION(__statvfs190); \
+ COMMON_INTERCEPT_FUNCTION(__fstatvfs190)
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_common_interface.inc b/lib/tsan/sanitizer_common/sanitizer_common_interface.inc
new file mode 100644
index 0000000000..c78b6e10b6
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common_interface.inc
@@ -0,0 +1,41 @@
+//===-- sanitizer_common_interface.inc ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_acquire_crash_state)
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
+INTERFACE_FUNCTION(__sanitizer_set_death_callback)
+INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_set_report_fd)
+INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
+INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
+INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
+// Sanitizer weak hooks
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_memcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strncmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strstr)
+// Stacktrace interface.
+INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
+INTERFACE_FUNCTION(__sanitizer_symbolize_global)
+INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
+// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_heap_size)
+INTERFACE_FUNCTION(__sanitizer_get_ownership)
+INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
+INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_purge_allocator)
+INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
+INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
+INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
diff --git a/lib/tsan/sanitizer_common/sanitizer_common_interface_posix.inc b/lib/tsan/sanitizer_common/sanitizer_common_interface_posix.inc
new file mode 100644
index 0000000000..38f9531148
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common_interface_posix.inc
@@ -0,0 +1,13 @@
+//===-- sanitizer_common_interface_posix.inc ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list only available for Posix systems.
+//===----------------------------------------------------------------------===//
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
diff --git a/lib/tsan/sanitizer_common/sanitizer_common_syscalls.inc b/lib/tsan/sanitizer_common/sanitizer_common_syscalls.inc
new file mode 100644
index 0000000000..532ac9ead3
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_common_syscalls.inc
@@ -0,0 +1,2914 @@
+//===-- sanitizer_common_syscalls.inc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common syscalls handlers for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define it's own macros:
+// COMMON_SYSCALL_PRE_READ_RANGE
+// Called in prehook for regions that will be read by the kernel and
+// must be initialized.
+// COMMON_SYSCALL_PRE_WRITE_RANGE
+// Called in prehook for regions that will be written to by the kernel
+// and must be addressable. The actual write range may be smaller than
+// reported in the prehook. See POST_WRITE_RANGE.
+// COMMON_SYSCALL_POST_READ_RANGE
+// Called in posthook for regions that were read by the kernel. Does
+// not make much sense.
+// COMMON_SYSCALL_POST_WRITE_RANGE
+// Called in posthook for regions that were written to by the kernel
+// and are now initialized.
+// COMMON_SYSCALL_ACQUIRE(addr)
+// Acquire memory visibility from addr.
+// COMMON_SYSCALL_RELEASE(addr)
+// Release memory visibility to addr.
+// COMMON_SYSCALL_FD_CLOSE(fd)
+// Called before closing file descriptor fd.
+// COMMON_SYSCALL_FD_ACQUIRE(fd)
+// Acquire memory visibility from fd.
+// COMMON_SYSCALL_FD_RELEASE(fd)
+// Release memory visibility to fd.
+// COMMON_SYSCALL_PRE_FORK()
+// Called before fork syscall.
+// COMMON_SYSCALL_POST_FORK(long res)
+// Called after fork syscall.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_libc.h"
+
+#define PRE_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
+#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+
+#define POST_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
+#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+
+#ifndef COMMON_SYSCALL_ACQUIRE
+# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_RELEASE
+# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_CLOSE
+# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_ACQUIRE
+# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_RELEASE
+# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_PRE_FORK
+# define COMMON_SYSCALL_PRE_FORK() {}
+#endif
+
+#ifndef COMMON_SYSCALL_POST_FORK
+# define COMMON_SYSCALL_POST_FORK(res) {}
+#endif
+
+// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
+
+extern "C" {
+struct sanitizer_kernel_iovec {
+ void *iov_base;
+ unsigned long iov_len;
+};
+
+struct sanitizer_kernel_msghdr {
+ void *msg_name;
+ int msg_namelen;
+ struct sanitizer_kernel_iovec *msg_iov;
+ unsigned long msg_iovlen;
+ void *msg_control;
+ unsigned long msg_controllen;
+ unsigned msg_flags;
+};
+
+struct sanitizer_kernel_mmsghdr {
+ struct sanitizer_kernel_msghdr msg_hdr;
+ unsigned msg_len;
+};
+
+struct sanitizer_kernel_timespec {
+ long tv_sec;
+ long tv_nsec;
+};
+
+struct sanitizer_kernel_timeval {
+ long tv_sec;
+ long tv_usec;
+};
+
+struct sanitizer_kernel_rusage {
+ struct sanitizer_kernel_timeval ru_timeval[2];
+ long ru_long[14];
+};
+
+struct sanitizer_kernel_sockaddr {
+ unsigned short sa_family;
+ char sa_data[14];
+};
+
+// Real sigset size is always passed as a syscall argument.
+// Declare it "void" to catch sizeof(kernel_sigset_t).
+typedef void kernel_sigset_t;
+
+static void kernel_write_iovec(const __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ POST_WRITE(iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
+
+// This functions uses POST_READ, because it needs to run after syscall to know
+// the real read range.
+static void kernel_read_iovec(const __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ POST_READ(iovec, sizeof(*iovec) * iovlen);
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ POST_READ(iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
+
+PRE_SYSCALL(recvmsg)(long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
+ PRE_READ(msg, sizeof(*msg));
+}
+
+POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
+ long flags) {
+ if (res >= 0) {
+ if (msg) {
+ for (unsigned long i = 0; i < msg->msg_iovlen; ++i) {
+ POST_WRITE(msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
+ }
+ POST_WRITE(msg->msg_control, msg->msg_controllen);
+ }
+ }
+}
+
+PRE_SYSCALL(recvmmsg)(long fd, sanitizer_kernel_mmsghdr *msg, long vlen,
+ long flags, void *timeout) {
+ PRE_READ(msg, vlen * sizeof(*msg));
+}
+
+POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
+ long vlen, long flags, void *timeout) {
+ if (res >= 0) {
+ if (msg) {
+ for (unsigned long i = 0; i < msg->msg_hdr.msg_iovlen; ++i) {
+ POST_WRITE(msg->msg_hdr.msg_iov[i].iov_base,
+ msg->msg_hdr.msg_iov[i].iov_len);
+ }
+ POST_WRITE(msg->msg_hdr.msg_control, msg->msg_hdr.msg_controllen);
+ POST_WRITE(&msg->msg_len, sizeof(msg->msg_len));
+ }
+ if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(read)(long fd, void *buf, uptr count) {
+ if (buf) {
+ PRE_WRITE(buf, count);
+ }
+}
+
+POST_SYSCALL(read)(long res, long fd, void *buf, uptr count) {
+ if (res > 0 && buf) {
+ POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(time)(void *tloc) {}
+
+POST_SYSCALL(time)(long res, void *tloc) {
+ if (res >= 0) {
+ if (tloc) POST_WRITE(tloc, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(stime)(void *tptr) {}
+
+POST_SYSCALL(stime)(long res, void *tptr) {
+ if (res >= 0) {
+ if (tptr) POST_WRITE(tptr, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(gettimeofday)(void *tv, void *tz) {}
+
+POST_SYSCALL(gettimeofday)(long res, void *tv, void *tz) {
+ if (res >= 0) {
+ if (tv) POST_WRITE(tv, timeval_sz);
+ if (tz) POST_WRITE(tz, struct_timezone_sz);
+ }
+}
+
+PRE_SYSCALL(settimeofday)(void *tv, void *tz) {}
+
+POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
+ if (res >= 0) {
+ if (tv) POST_WRITE(tv, timeval_sz);
+ if (tz) POST_WRITE(tz, struct_timezone_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(adjtimex)(void *txc_p) {}
+
+POST_SYSCALL(adjtimex)(long res, void *txc_p) {
+ if (res >= 0) {
+ if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(times)(void *tbuf) {}
+
+POST_SYSCALL(times)(long res, void *tbuf) {
+ if (res >= 0) {
+ if (tbuf) POST_WRITE(tbuf, struct_tms_sz);
+ }
+}
+
+PRE_SYSCALL(gettid)() {}
+
+POST_SYSCALL(gettid)(long res) {}
+
+PRE_SYSCALL(nanosleep)(void *rqtp, void *rmtp) {}
+
+POST_SYSCALL(nanosleep)(long res, void *rqtp, void *rmtp) {
+ if (res >= 0) {
+ if (rqtp) POST_WRITE(rqtp, struct_timespec_sz);
+ if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(alarm)(long seconds) {}
+
+POST_SYSCALL(alarm)(long res, long seconds) {}
+
+PRE_SYSCALL(getpid)() {}
+
+POST_SYSCALL(getpid)(long res) {}
+
+PRE_SYSCALL(getppid)() {}
+
+POST_SYSCALL(getppid)(long res) {}
+
+PRE_SYSCALL(getuid)() {}
+
+POST_SYSCALL(getuid)(long res) {}
+
+PRE_SYSCALL(geteuid)() {}
+
+POST_SYSCALL(geteuid)(long res) {}
+
+PRE_SYSCALL(getgid)() {}
+
+POST_SYSCALL(getgid)(long res) {}
+
+PRE_SYSCALL(getegid)() {}
+
+POST_SYSCALL(getegid)(long res) {}
+
+PRE_SYSCALL(getresuid)(void *ruid, void *euid, void *suid) {}
+
+POST_SYSCALL(getresuid)(long res, void *ruid, void *euid, void *suid) {
+ if (res >= 0) {
+ if (ruid) POST_WRITE(ruid, sizeof(unsigned));
+ if (euid) POST_WRITE(euid, sizeof(unsigned));
+ if (suid) POST_WRITE(suid, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getresgid)(void *rgid, void *egid, void *sgid) {}
+
+POST_SYSCALL(getresgid)(long res, void *rgid, void *egid, void *sgid) {
+ if (res >= 0) {
+ if (rgid) POST_WRITE(rgid, sizeof(unsigned));
+ if (egid) POST_WRITE(egid, sizeof(unsigned));
+ if (sgid) POST_WRITE(sgid, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getpgid)(long pid) {}
+
+POST_SYSCALL(getpgid)(long res, long pid) {}
+
+PRE_SYSCALL(getpgrp)() {}
+
+POST_SYSCALL(getpgrp)(long res) {}
+
+PRE_SYSCALL(getsid)(long pid) {}
+
+POST_SYSCALL(getsid)(long res, long pid) {}
+
+PRE_SYSCALL(getgroups)(long gidsetsize, void *grouplist) {}
+
+POST_SYSCALL(getgroups)(long res, long gidsetsize,
+ __sanitizer___kernel_gid_t *grouplist) {
+ if (res >= 0) {
+ if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ }
+}
+
+PRE_SYSCALL(setregid)(long rgid, long egid) {}
+
+POST_SYSCALL(setregid)(long res, long rgid, long egid) {}
+
+PRE_SYSCALL(setgid)(long gid) {}
+
+POST_SYSCALL(setgid)(long res, long gid) {}
+
+PRE_SYSCALL(setreuid)(long ruid, long euid) {}
+
+POST_SYSCALL(setreuid)(long res, long ruid, long euid) {}
+
+PRE_SYSCALL(setuid)(long uid) {}
+
+POST_SYSCALL(setuid)(long res, long uid) {}
+
+PRE_SYSCALL(setresuid)(long ruid, long euid, long suid) {}
+
+POST_SYSCALL(setresuid)(long res, long ruid, long euid, long suid) {}
+
+PRE_SYSCALL(setresgid)(long rgid, long egid, long sgid) {}
+
+POST_SYSCALL(setresgid)(long res, long rgid, long egid, long sgid) {}
+
+PRE_SYSCALL(setfsuid)(long uid) {}
+
+POST_SYSCALL(setfsuid)(long res, long uid) {}
+
+PRE_SYSCALL(setfsgid)(long gid) {}
+
+POST_SYSCALL(setfsgid)(long res, long gid) {}
+
+PRE_SYSCALL(setpgid)(long pid, long pgid) {}
+
+POST_SYSCALL(setpgid)(long res, long pid, long pgid) {}
+
+PRE_SYSCALL(setsid)() {}
+
+POST_SYSCALL(setsid)(long res) {}
+
+PRE_SYSCALL(setgroups)(long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
+ if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+}
+
+POST_SYSCALL(setgroups)(long res, long gidsetsize,
+ __sanitizer___kernel_gid_t *grouplist) {}
+
+PRE_SYSCALL(acct)(const void *name) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(acct)(long res, const void *name) {}
+
+PRE_SYSCALL(capget)(void *header, void *dataptr) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
+}
+
+POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
+ if (res >= 0)
+ if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
+}
+
+PRE_SYSCALL(capset)(void *header, const void *data) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
+ if (data) PRE_READ(data, __user_cap_data_struct_sz);
+}
+
+POST_SYSCALL(capset)(long res, void *header, const void *data) {}
+
+PRE_SYSCALL(personality)(long personality) {}
+
+POST_SYSCALL(personality)(long res, long personality) {}
+
+PRE_SYSCALL(sigpending)(void *set) {}
+
+POST_SYSCALL(sigpending)(long res, void *set) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, old_sigset_t_sz);
+ }
+}
+
+PRE_SYSCALL(sigprocmask)(long how, void *set, void *oset) {}
+
+POST_SYSCALL(sigprocmask)(long res, long how, void *set, void *oset) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, old_sigset_t_sz);
+ if (oset) POST_WRITE(oset, old_sigset_t_sz);
+ }
+}
+
+PRE_SYSCALL(getitimer)(long which, void *value) {}
+
+POST_SYSCALL(getitimer)(long res, long which, void *value) {
+ if (res >= 0) {
+ if (value) POST_WRITE(value, struct_itimerval_sz);
+ }
+}
+
+PRE_SYSCALL(setitimer)(long which, void *value, void *ovalue) {}
+
+POST_SYSCALL(setitimer)(long res, long which, void *value, void *ovalue) {
+ if (res >= 0) {
+ if (value) POST_WRITE(value, struct_itimerval_sz);
+ if (ovalue) POST_WRITE(ovalue, struct_itimerval_sz);
+ }
+}
+
+PRE_SYSCALL(timer_create)(long which_clock, void *timer_event_spec,
+ void *created_timer_id) {}
+
+POST_SYSCALL(timer_create)(long res, long which_clock, void *timer_event_spec,
+ void *created_timer_id) {
+ if (res >= 0) {
+ if (timer_event_spec) POST_WRITE(timer_event_spec, struct_sigevent_sz);
+ if (created_timer_id) POST_WRITE(created_timer_id, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(timer_gettime)(long timer_id, void *setting) {}
+
+POST_SYSCALL(timer_gettime)(long res, long timer_id, void *setting) {
+ if (res >= 0) {
+ if (setting) POST_WRITE(setting, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(timer_getoverrun)(long timer_id) {}
+
+POST_SYSCALL(timer_getoverrun)(long res, long timer_id) {}
+
+PRE_SYSCALL(timer_settime)(long timer_id, long flags, const void *new_setting,
+ void *old_setting) {
+ if (new_setting) PRE_READ(new_setting, struct_itimerspec_sz);
+}
+
+POST_SYSCALL(timer_settime)(long res, long timer_id, long flags,
+ const void *new_setting, void *old_setting) {
+ if (res >= 0) {
+ if (old_setting) POST_WRITE(old_setting, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(timer_delete)(long timer_id) {}
+
+POST_SYSCALL(timer_delete)(long res, long timer_id) {}
+
+PRE_SYSCALL(clock_settime)(long which_clock, const void *tp) {
+ if (tp) PRE_READ(tp, struct_timespec_sz);
+}
+
+POST_SYSCALL(clock_settime)(long res, long which_clock, const void *tp) {}
+
+PRE_SYSCALL(clock_gettime)(long which_clock, void *tp) {}
+
+POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
+ if (res >= 0) {
+ if (tp) POST_WRITE(tp, struct_timespec_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
+
+POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
+ if (res >= 0) {
+ if (tx) POST_WRITE(tx, struct_timex_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
+
+POST_SYSCALL(clock_getres)(long res, long which_clock, void *tp) {
+ if (res >= 0) {
+ if (tp) POST_WRITE(tp, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(clock_nanosleep)(long which_clock, long flags, const void *rqtp,
+ void *rmtp) {
+ if (rqtp) PRE_READ(rqtp, struct_timespec_sz);
+}
+
+POST_SYSCALL(clock_nanosleep)(long res, long which_clock, long flags,
+ const void *rqtp, void *rmtp) {
+ if (res >= 0) {
+ if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(nice)(long increment) {}
+
+POST_SYSCALL(nice)(long res, long increment) {}
+
+PRE_SYSCALL(sched_setscheduler)(long pid, long policy, void *param) {}
+
+POST_SYSCALL(sched_setscheduler)(long res, long pid, long policy, void *param) {
+ if (res >= 0) {
+ if (param) POST_WRITE(param, struct_sched_param_sz);
+ }
+}
+
+PRE_SYSCALL(sched_setparam)(long pid, void *param) {
+ if (param) PRE_READ(param, struct_sched_param_sz);
+}
+
+POST_SYSCALL(sched_setparam)(long res, long pid, void *param) {}
+
+PRE_SYSCALL(sched_getscheduler)(long pid) {}
+
+POST_SYSCALL(sched_getscheduler)(long res, long pid) {}
+
+PRE_SYSCALL(sched_getparam)(long pid, void *param) {}
+
+POST_SYSCALL(sched_getparam)(long res, long pid, void *param) {
+ if (res >= 0) {
+ if (param) POST_WRITE(param, struct_sched_param_sz);
+ }
+}
+
+PRE_SYSCALL(sched_setaffinity)(long pid, long len, void *user_mask_ptr) {
+ if (user_mask_ptr) PRE_READ(user_mask_ptr, len);
+}
+
+POST_SYSCALL(sched_setaffinity)(long res, long pid, long len,
+ void *user_mask_ptr) {}
+
+PRE_SYSCALL(sched_getaffinity)(long pid, long len, void *user_mask_ptr) {}
+
+POST_SYSCALL(sched_getaffinity)(long res, long pid, long len,
+ void *user_mask_ptr) {
+ if (res >= 0) {
+ if (user_mask_ptr) POST_WRITE(user_mask_ptr, len);
+ }
+}
+
+PRE_SYSCALL(sched_yield)() {}
+
+POST_SYSCALL(sched_yield)(long res) {}
+
+PRE_SYSCALL(sched_get_priority_max)(long policy) {}
+
+POST_SYSCALL(sched_get_priority_max)(long res, long policy) {}
+
+PRE_SYSCALL(sched_get_priority_min)(long policy) {}
+
+POST_SYSCALL(sched_get_priority_min)(long res, long policy) {}
+
+PRE_SYSCALL(sched_rr_get_interval)(long pid, void *interval) {}
+
+POST_SYSCALL(sched_rr_get_interval)(long res, long pid, void *interval) {
+ if (res >= 0) {
+ if (interval) POST_WRITE(interval, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(setpriority)(long which, long who, long niceval) {}
+
+POST_SYSCALL(setpriority)(long res, long which, long who, long niceval) {}
+
+PRE_SYSCALL(getpriority)(long which, long who) {}
+
+POST_SYSCALL(getpriority)(long res, long which, long who) {}
+
+PRE_SYSCALL(shutdown)(long arg0, long arg1) {}
+
+POST_SYSCALL(shutdown)(long res, long arg0, long arg1) {}
+
+PRE_SYSCALL(reboot)(long magic1, long magic2, long cmd, void *arg) {}
+
+POST_SYSCALL(reboot)(long res, long magic1, long magic2, long cmd, void *arg) {}
+
+PRE_SYSCALL(restart_syscall)() {}
+
+POST_SYSCALL(restart_syscall)(long res) {}
+
+PRE_SYSCALL(kexec_load)(long entry, long nr_segments, void *segments,
+ long flags) {}
+
+POST_SYSCALL(kexec_load)(long res, long entry, long nr_segments, void *segments,
+ long flags) {
+ if (res >= 0) {
+ if (segments) POST_WRITE(segments, struct_kexec_segment_sz);
+ }
+}
+
+PRE_SYSCALL(exit)(long error_code) {}
+
+POST_SYSCALL(exit)(long res, long error_code) {}
+
+PRE_SYSCALL(exit_group)(long error_code) {}
+
+POST_SYSCALL(exit_group)(long res, long error_code) {}
+
+PRE_SYSCALL(wait4)(long pid, void *stat_addr, long options, void *ru) {}
+
+POST_SYSCALL(wait4)(long res, long pid, void *stat_addr, long options,
+ void *ru) {
+ if (res >= 0) {
+ if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
+ if (ru) POST_WRITE(ru, struct_rusage_sz);
+ }
+}
+
+PRE_SYSCALL(waitid)(long which, long pid, void *infop, long options, void *ru) {
+}
+
+POST_SYSCALL(waitid)(long res, long which, long pid, void *infop, long options,
+ void *ru) {
+ if (res >= 0) {
+ if (infop) POST_WRITE(infop, siginfo_t_sz);
+ if (ru) POST_WRITE(ru, struct_rusage_sz);
+ }
+}
+
+PRE_SYSCALL(waitpid)(long pid, void *stat_addr, long options) {}
+
+POST_SYSCALL(waitpid)(long res, long pid, void *stat_addr, long options) {
+ if (res >= 0) {
+ if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(set_tid_address)(void *tidptr) {}
+
+POST_SYSCALL(set_tid_address)(long res, void *tidptr) {
+ if (res >= 0) {
+ if (tidptr) POST_WRITE(tidptr, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(init_module)(void *umod, long len, const void *uargs) {
+ if (uargs)
+ PRE_READ(uargs, __sanitizer::internal_strlen((const char *)uargs) + 1);
+}
+
+POST_SYSCALL(init_module)(long res, void *umod, long len, const void *uargs) {}
+
+PRE_SYSCALL(delete_module)(const void *name_user, long flags) {
+ if (name_user)
+ PRE_READ(name_user,
+ __sanitizer::internal_strlen((const char *)name_user) + 1);
+}
+
+POST_SYSCALL(delete_module)(long res, const void *name_user, long flags) {}
+
+PRE_SYSCALL(rt_sigprocmask)(long how, void *set, void *oset, long sigsetsize) {}
+
+POST_SYSCALL(rt_sigprocmask)(long res, long how, kernel_sigset_t *set,
+ kernel_sigset_t *oset, long sigsetsize) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, sigsetsize);
+ if (oset) POST_WRITE(oset, sigsetsize);
+ }
+}
+
+PRE_SYSCALL(rt_sigpending)(void *set, long sigsetsize) {}
+
+POST_SYSCALL(rt_sigpending)(long res, kernel_sigset_t *set, long sigsetsize) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, sigsetsize);
+ }
+}
+
+PRE_SYSCALL(rt_sigtimedwait)(const kernel_sigset_t *uthese, void *uinfo,
+ const void *uts, long sigsetsize) {
+ if (uthese) PRE_READ(uthese, sigsetsize);
+ if (uts) PRE_READ(uts, struct_timespec_sz);
+}
+
+POST_SYSCALL(rt_sigtimedwait)(long res, const void *uthese, void *uinfo,
+ const void *uts, long sigsetsize) {
+ if (res >= 0) {
+ if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ }
+}
+
+PRE_SYSCALL(rt_tgsigqueueinfo)(long tgid, long pid, long sig, void *uinfo) {}
+
+POST_SYSCALL(rt_tgsigqueueinfo)(long res, long tgid, long pid, long sig,
+ void *uinfo) {
+ if (res >= 0) {
+ if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ }
+}
+
+PRE_SYSCALL(kill)(long pid, long sig) {}
+
+POST_SYSCALL(kill)(long res, long pid, long sig) {}
+
+PRE_SYSCALL(tgkill)(long tgid, long pid, long sig) {}
+
+POST_SYSCALL(tgkill)(long res, long tgid, long pid, long sig) {}
+
+PRE_SYSCALL(tkill)(long pid, long sig) {}
+
+POST_SYSCALL(tkill)(long res, long pid, long sig) {}
+
+PRE_SYSCALL(rt_sigqueueinfo)(long pid, long sig, void *uinfo) {}
+
+POST_SYSCALL(rt_sigqueueinfo)(long res, long pid, long sig, void *uinfo) {
+ if (res >= 0) {
+ if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ }
+}
+
+PRE_SYSCALL(sgetmask)() {}
+
+POST_SYSCALL(sgetmask)(long res) {}
+
+PRE_SYSCALL(ssetmask)(long newmask) {}
+
+POST_SYSCALL(ssetmask)(long res, long newmask) {}
+
+PRE_SYSCALL(signal)(long sig, long handler) {}
+
+POST_SYSCALL(signal)(long res, long sig, long handler) {}
+
+PRE_SYSCALL(pause)() {}
+
+POST_SYSCALL(pause)(long res) {}
+
+PRE_SYSCALL(sync)() {}
+
+POST_SYSCALL(sync)(long res) {}
+
+PRE_SYSCALL(fsync)(long fd) {}
+
+POST_SYSCALL(fsync)(long res, long fd) {}
+
+PRE_SYSCALL(fdatasync)(long fd) {}
+
+POST_SYSCALL(fdatasync)(long res, long fd) {}
+
+PRE_SYSCALL(bdflush)(long func, long data) {}
+
+POST_SYSCALL(bdflush)(long res, long func, long data) {}
+
+PRE_SYSCALL(mount)(void *dev_name, void *dir_name, void *type, long flags,
+ void *data) {}
+
+POST_SYSCALL(mount)(long res, void *dev_name, void *dir_name, void *type,
+ long flags, void *data) {
+ if (res >= 0) {
+ if (dev_name)
+ POST_WRITE(dev_name,
+ __sanitizer::internal_strlen((const char *)dev_name) + 1);
+ if (dir_name)
+ POST_WRITE(dir_name,
+ __sanitizer::internal_strlen((const char *)dir_name) + 1);
+ if (type)
+ POST_WRITE(type, __sanitizer::internal_strlen((const char *)type) + 1);
+ }
+}
+
+PRE_SYSCALL(umount)(void *name, long flags) {}
+
+POST_SYSCALL(umount)(long res, void *name, long flags) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(oldumount)(void *name) {}
+
+POST_SYSCALL(oldumount)(long res, void *name) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(truncate)(const void *path, long length) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(truncate)(long res, const void *path, long length) {}
+
+PRE_SYSCALL(ftruncate)(long fd, long length) {}
+
+POST_SYSCALL(ftruncate)(long res, long fd, long length) {}
+
+PRE_SYSCALL(stat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(statfs)(const void *path, void *buf) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(statfs)(long res, const void *path, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs_sz);
+ }
+}
+
+PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ }
+}
+
+PRE_SYSCALL(fstatfs)(long fd, void *buf) {}
+
+POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs_sz);
+ }
+}
+
+PRE_SYSCALL(fstatfs64)(long fd, long sz, void *buf) {}
+
+POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ }
+}
+#endif // !SANITIZER_ANDROID
+
+PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lstat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(fstat)(long fd, void *statbuf) {}
+
+POST_SYSCALL(fstat)(long res, long fd, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(newstat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(newstat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(newlstat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(newlstat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(newfstat)(long fd, void *statbuf) {}
+
+POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
+
+POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
+ if (res >= 0) {
+ if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
+ }
+}
+#endif // !SANITIZER_ANDROID
+
+PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(stat64)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(fstat64)(long fd, void *statbuf) {}
+
+POST_SYSCALL(fstat64)(long res, long fd, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(lstat64)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lstat64)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(setxattr)(const void *path, const void *name, const void *value,
+ long size, long flags) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ if (value) PRE_READ(value, size);
+}
+
+POST_SYSCALL(setxattr)(long res, const void *path, const void *name,
+ const void *value, long size, long flags) {}
+
+PRE_SYSCALL(lsetxattr)(const void *path, const void *name, const void *value,
+ long size, long flags) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ if (value) PRE_READ(value, size);
+}
+
+POST_SYSCALL(lsetxattr)(long res, const void *path, const void *name,
+ const void *value, long size, long flags) {}
+
+PRE_SYSCALL(fsetxattr)(long fd, const void *name, const void *value, long size,
+ long flags) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ if (value) PRE_READ(value, size);
+}
+
+POST_SYSCALL(fsetxattr)(long res, long fd, const void *name, const void *value,
+ long size, long flags) {}
+
+PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
+ long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
+ void *value, long size) {
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
+ }
+}
+
+PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
+ long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
+ void *value, long size) {
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
+ }
+}
+
+PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
+ long size) {
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
+ }
+}
+
+PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
+ }
+}
+
+PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
+ }
+}
+
+PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
+
+POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
+ }
+}
+
+PRE_SYSCALL(removexattr)(const void *path, const void *name) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(removexattr)(long res, const void *path, const void *name) {}
+
+PRE_SYSCALL(lremovexattr)(const void *path, const void *name) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(lremovexattr)(long res, const void *path, const void *name) {}
+
+PRE_SYSCALL(fremovexattr)(long fd, const void *name) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(fremovexattr)(long res, long fd, const void *name) {}
+
+PRE_SYSCALL(brk)(long brk) {}
+
+POST_SYSCALL(brk)(long res, long brk) {}
+
+PRE_SYSCALL(mprotect)(long start, long len, long prot) {}
+
+POST_SYSCALL(mprotect)(long res, long start, long len, long prot) {}
+
+PRE_SYSCALL(mremap)(long addr, long old_len, long new_len, long flags,
+ long new_addr) {}
+
+POST_SYSCALL(mremap)(long res, long addr, long old_len, long new_len,
+ long flags, long new_addr) {}
+
+PRE_SYSCALL(remap_file_pages)(long start, long size, long prot, long pgoff,
+ long flags) {}
+
+POST_SYSCALL(remap_file_pages)(long res, long start, long size, long prot,
+ long pgoff, long flags) {}
+
+PRE_SYSCALL(msync)(long start, long len, long flags) {}
+
+POST_SYSCALL(msync)(long res, long start, long len, long flags) {}
+
+PRE_SYSCALL(munmap)(long addr, long len) {}
+
+POST_SYSCALL(munmap)(long res, long addr, long len) {}
+
+PRE_SYSCALL(mlock)(long start, long len) {}
+
+POST_SYSCALL(mlock)(long res, long start, long len) {}
+
+PRE_SYSCALL(munlock)(long start, long len) {}
+
+POST_SYSCALL(munlock)(long res, long start, long len) {}
+
+PRE_SYSCALL(mlockall)(long flags) {}
+
+POST_SYSCALL(mlockall)(long res, long flags) {}
+
+PRE_SYSCALL(munlockall)() {}
+
+POST_SYSCALL(munlockall)(long res) {}
+
+PRE_SYSCALL(madvise)(long start, long len, long behavior) {}
+
+POST_SYSCALL(madvise)(long res, long start, long len, long behavior) {}
+
+PRE_SYSCALL(mincore)(long start, long len, void *vec) {}
+
+POST_SYSCALL(mincore)(long res, long start, long len, void *vec) {
+ if (res >= 0) {
+ if (vec) {
+ POST_WRITE(vec, (len + GetPageSizeCached() - 1) / GetPageSizeCached());
+ }
+ }
+}
+
+PRE_SYSCALL(pivot_root)(const void *new_root, const void *put_old) {
+ if (new_root)
+ PRE_READ(new_root,
+ __sanitizer::internal_strlen((const char *)new_root) + 1);
+ if (put_old)
+ PRE_READ(put_old, __sanitizer::internal_strlen((const char *)put_old) + 1);
+}
+
+POST_SYSCALL(pivot_root)(long res, const void *new_root, const void *put_old) {}
+
+PRE_SYSCALL(chroot)(const void *filename) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chroot)(long res, const void *filename) {}
+
+PRE_SYSCALL(mknod)(const void *filename, long mode, long dev) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(mknod)(long res, const void *filename, long mode, long dev) {}
+
+PRE_SYSCALL(link)(const void *oldname, const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(link)(long res, const void *oldname, const void *newname) {}
+
+PRE_SYSCALL(symlink)(const void *old, const void *new_) {
+ if (old) PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
+ if (new_)
+ PRE_READ(new_, __sanitizer::internal_strlen((const char *)new_) + 1);
+}
+
+POST_SYSCALL(symlink)(long res, const void *old, const void *new_) {}
+
+PRE_SYSCALL(unlink)(const void *pathname) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(unlink)(long res, const void *pathname) {}
+
+PRE_SYSCALL(rename)(const void *oldname, const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(rename)(long res, const void *oldname, const void *newname) {}
+
+PRE_SYSCALL(chmod)(const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chmod)(long res, const void *filename, long mode) {}
+
+PRE_SYSCALL(fchmod)(long fd, long mode) {}
+
+POST_SYSCALL(fchmod)(long res, long fd, long mode) {}
+
+PRE_SYSCALL(fcntl)(long fd, long cmd, long arg) {}
+
+POST_SYSCALL(fcntl)(long res, long fd, long cmd, long arg) {}
+
+PRE_SYSCALL(fcntl64)(long fd, long cmd, long arg) {}
+
+POST_SYSCALL(fcntl64)(long res, long fd, long cmd, long arg) {}
+
+PRE_SYSCALL(pipe)(void *fildes) {}
+
+POST_SYSCALL(pipe)(long res, void *fildes) {
+ if (res >= 0)
+ if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+}
+
+PRE_SYSCALL(pipe2)(void *fildes, long flags) {}
+
+POST_SYSCALL(pipe2)(long res, void *fildes, long flags) {
+ if (res >= 0)
+ if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+}
+
+PRE_SYSCALL(dup)(long fildes) {}
+
+POST_SYSCALL(dup)(long res, long fildes) {}
+
+PRE_SYSCALL(dup2)(long oldfd, long newfd) {}
+
+POST_SYSCALL(dup2)(long res, long oldfd, long newfd) {}
+
+PRE_SYSCALL(dup3)(long oldfd, long newfd, long flags) {}
+
+POST_SYSCALL(dup3)(long res, long oldfd, long newfd, long flags) {}
+
+PRE_SYSCALL(ioperm)(long from, long num, long on) {}
+
+POST_SYSCALL(ioperm)(long res, long from, long num, long on) {}
+
+PRE_SYSCALL(ioctl)(long fd, long cmd, long arg) {}
+
+POST_SYSCALL(ioctl)(long res, long fd, long cmd, long arg) {}
+
+PRE_SYSCALL(flock)(long fd, long cmd) {}
+
+POST_SYSCALL(flock)(long res, long fd, long cmd) {}
+
+PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
+ if (ctx) PRE_WRITE(ctx, sizeof(*ctx));
+}
+
+POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
+ if (res >= 0) {
+ if (ctx) POST_WRITE(ctx, sizeof(*ctx));
+ // (*ctx) is actually a pointer to a kernel mapped page, and there are
+ // people out there who are crazy enough to peek into that page's 32-byte
+ // header.
+ if (*ctx) POST_WRITE(*ctx, 32);
+ }
+}
+
+PRE_SYSCALL(io_destroy)(long ctx) {}
+
+POST_SYSCALL(io_destroy)(long res, long ctx) {}
+
+PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr,
+ __sanitizer_io_event *ioevpp, void *timeout) {
+ if (timeout) PRE_READ(timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
+ __sanitizer_io_event *ioevpp, void *timeout) {
+ if (res >= 0) {
+ if (ioevpp) POST_WRITE(ioevpp, res * sizeof(*ioevpp));
+ if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ }
+ for (long i = 0; i < res; i++) {
+ // We synchronize io_submit -> io_getevents/io_cancel using the
+ // user-provided data context. Data is not necessary a pointer, it can be
+ // an int, 0 or whatever; acquire/release will correctly handle this.
+ // This scheme can lead to false negatives, e.g. when all operations
+ // synchronize on 0. But there does not seem to be a better solution
+ // (except wrapping all operations in own context, which is unreliable).
+ // We can not reliably extract fildes in io_getevents.
+ COMMON_SYSCALL_ACQUIRE((void*)ioevpp[i].data);
+ }
+}
+
+PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
+ for (long i = 0; i < nr; ++i) {
+ uptr op = iocbpp[i]->aio_lio_opcode;
+ void *data = (void*)iocbpp[i]->aio_data;
+ void *buf = (void*)iocbpp[i]->aio_buf;
+ uptr len = (uptr)iocbpp[i]->aio_nbytes;
+ if (op == iocb_cmd_pwrite && buf && len) {
+ PRE_READ(buf, len);
+ } else if (op == iocb_cmd_pread && buf && len) {
+ POST_WRITE(buf, len);
+ } else if (op == iocb_cmd_pwritev) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ for (uptr v = 0; v < len; v++)
+ PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
+ } else if (op == iocb_cmd_preadv) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ for (uptr v = 0; v < len; v++)
+ POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
+ }
+ // See comment in io_getevents.
+ COMMON_SYSCALL_RELEASE(data);
+ }
+}
+
+POST_SYSCALL(io_submit)(long res, long ctx_id, long nr,
+ __sanitizer_iocb **iocbpp) {}
+
+PRE_SYSCALL(io_cancel)(long ctx_id, __sanitizer_iocb *iocb,
+ __sanitizer_io_event *result) {
+}
+
+POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
+ __sanitizer_io_event *result) {
+ if (res == 0) {
+ if (result) {
+ // See comment in io_getevents.
+ COMMON_SYSCALL_ACQUIRE((void*)result->data);
+ POST_WRITE(result, sizeof(*result));
+ }
+ if (iocb)
+ POST_WRITE(iocb, sizeof(*iocb));
+ }
+}
+
+PRE_SYSCALL(sendfile)(long out_fd, long in_fd, void *offset, long count) {}
+
+POST_SYSCALL(sendfile)(long res, long out_fd, long in_fd,
+ __sanitizer___kernel_off_t *offset, long count) {
+ if (res >= 0) {
+ if (offset) POST_WRITE(offset, sizeof(*offset));
+ }
+}
+
+PRE_SYSCALL(sendfile64)(long out_fd, long in_fd, void *offset, long count) {}
+
+POST_SYSCALL(sendfile64)(long res, long out_fd, long in_fd,
+ __sanitizer___kernel_loff_t *offset, long count) {
+ if (res >= 0) {
+ if (offset) POST_WRITE(offset, sizeof(*offset));
+ }
+}
+
+PRE_SYSCALL(readlink)(const void *path, void *buf, long bufsiz) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(readlink)(long res, const void *path, void *buf, long bufsiz) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(creat)(const void *pathname, long mode) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(creat)(long res, const void *pathname, long mode) {}
+
+PRE_SYSCALL(open)(const void *filename, long flags, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(open)(long res, const void *filename, long flags, long mode) {}
+
+PRE_SYSCALL(close)(long fd) {
+ COMMON_SYSCALL_FD_CLOSE((int)fd);
+}
+
+POST_SYSCALL(close)(long res, long fd) {}
+
+PRE_SYSCALL(access)(const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(access)(long res, const void *filename, long mode) {}
+
+PRE_SYSCALL(vhangup)() {}
+
+POST_SYSCALL(vhangup)(long res) {}
+
+PRE_SYSCALL(chown)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chown)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(lchown)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lchown)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(fchown)(long fd, long user, long group) {}
+
+POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
+
+#if SANITIZER_USES_UID16_SYSCALLS
+PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chown16)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(lchown16)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lchown16)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(fchown16)(long fd, long user, long group) {}
+
+POST_SYSCALL(fchown16)(long res, long fd, long user, long group) {}
+
+PRE_SYSCALL(setregid16)(long rgid, long egid) {}
+
+POST_SYSCALL(setregid16)(long res, long rgid, long egid) {}
+
+PRE_SYSCALL(setgid16)(long gid) {}
+
+POST_SYSCALL(setgid16)(long res, long gid) {}
+
+PRE_SYSCALL(setreuid16)(long ruid, long euid) {}
+
+POST_SYSCALL(setreuid16)(long res, long ruid, long euid) {}
+
+PRE_SYSCALL(setuid16)(long uid) {}
+
+POST_SYSCALL(setuid16)(long res, long uid) {}
+
+PRE_SYSCALL(setresuid16)(long ruid, long euid, long suid) {}
+
+POST_SYSCALL(setresuid16)(long res, long ruid, long euid, long suid) {}
+
+PRE_SYSCALL(getresuid16)(void *ruid, void *euid, void *suid) {}
+
+POST_SYSCALL(getresuid16)(long res, __sanitizer___kernel_old_uid_t *ruid,
+ __sanitizer___kernel_old_uid_t *euid,
+ __sanitizer___kernel_old_uid_t *suid) {
+ if (res >= 0) {
+ if (ruid) POST_WRITE(ruid, sizeof(*ruid));
+ if (euid) POST_WRITE(euid, sizeof(*euid));
+ if (suid) POST_WRITE(suid, sizeof(*suid));
+ }
+}
+
+PRE_SYSCALL(setresgid16)(long rgid, long egid, long sgid) {}
+
+POST_SYSCALL(setresgid16)(long res, long rgid, long egid, long sgid) {}
+
+PRE_SYSCALL(getresgid16)(void *rgid, void *egid, void *sgid) {}
+
+POST_SYSCALL(getresgid16)(long res, __sanitizer___kernel_old_gid_t *rgid,
+ __sanitizer___kernel_old_gid_t *egid,
+ __sanitizer___kernel_old_gid_t *sgid) {
+ if (res >= 0) {
+ if (rgid) POST_WRITE(rgid, sizeof(*rgid));
+ if (egid) POST_WRITE(egid, sizeof(*egid));
+ if (sgid) POST_WRITE(sgid, sizeof(*sgid));
+ }
+}
+
+PRE_SYSCALL(setfsuid16)(long uid) {}
+
+POST_SYSCALL(setfsuid16)(long res, long uid) {}
+
+PRE_SYSCALL(setfsgid16)(long gid) {}
+
+POST_SYSCALL(setfsgid16)(long res, long gid) {}
+
+PRE_SYSCALL(getgroups16)(long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {}
+
+POST_SYSCALL(getgroups16)(long res, long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {
+ if (res >= 0) {
+ if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ }
+}
+
+PRE_SYSCALL(setgroups16)(long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {
+ if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+}
+
+POST_SYSCALL(setgroups16)(long res, long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {}
+
+PRE_SYSCALL(getuid16)() {}
+
+POST_SYSCALL(getuid16)(long res) {}
+
+PRE_SYSCALL(geteuid16)() {}
+
+POST_SYSCALL(geteuid16)(long res) {}
+
+PRE_SYSCALL(getgid16)() {}
+
+POST_SYSCALL(getgid16)(long res) {}
+
+PRE_SYSCALL(getegid16)() {}
+
+POST_SYSCALL(getegid16)(long res) {}
+#endif // SANITIZER_USES_UID16_SYSCALLS
+
+PRE_SYSCALL(utime)(void *filename, void *times) {}
+
+POST_SYSCALL(utime)(long res, void *filename, void *times) {
+ if (res >= 0) {
+ if (filename)
+ POST_WRITE(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+ if (times) POST_WRITE(times, struct_utimbuf_sz);
+ }
+}
+
+PRE_SYSCALL(utimes)(void *filename, void *utimes) {}
+
+POST_SYSCALL(utimes)(long res, void *filename, void *utimes) {
+ if (res >= 0) {
+ if (filename)
+ POST_WRITE(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+ if (utimes) POST_WRITE(utimes, timeval_sz);
+ }
+}
+
+PRE_SYSCALL(lseek)(long fd, long offset, long origin) {}
+
+POST_SYSCALL(lseek)(long res, long fd, long offset, long origin) {}
+
+PRE_SYSCALL(llseek)(long fd, long offset_high, long offset_low, void *result,
+ long origin) {}
+
+POST_SYSCALL(llseek)(long res, long fd, long offset_high, long offset_low,
+ void *result, long origin) {
+ if (res >= 0) {
+ if (result) POST_WRITE(result, sizeof(long long));
+ }
+}
+
+PRE_SYSCALL(readv)(long fd, const __sanitizer_iovec *vec, long vlen) {}
+
+POST_SYSCALL(readv)(long res, long fd, const __sanitizer_iovec *vec,
+ long vlen) {
+ if (res >= 0) {
+ if (vec) kernel_write_iovec(vec, vlen, res);
+ }
+}
+
+PRE_SYSCALL(write)(long fd, const void *buf, long count) {
+ if (buf) PRE_READ(buf, count);
+}
+
+POST_SYSCALL(write)(long res, long fd, const void *buf, long count) {}
+
+PRE_SYSCALL(writev)(long fd, const __sanitizer_iovec *vec, long vlen) {}
+
+POST_SYSCALL(writev)(long res, long fd, const __sanitizer_iovec *vec,
+ long vlen) {
+ if (res >= 0) {
+ if (vec) kernel_read_iovec(vec, vlen, res);
+ }
+}
+
+#ifdef _LP64
+PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos) {}
+
+POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos) {
+ if (buf) PRE_READ(buf, count);
+}
+
+POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
+ long pos) {}
+#else
+PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos0, long pos1) {}
+
+POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos0,
+ long pos1) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos0,
+ long pos1) {
+ if (buf) PRE_READ(buf, count);
+}
+
+POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
+ long pos0, long pos1) {}
+#endif
+
+PRE_SYSCALL(preadv)(long fd, const __sanitizer_iovec *vec, long vlen,
+ long pos_l, long pos_h) {}
+
+POST_SYSCALL(preadv)(long res, long fd, const __sanitizer_iovec *vec, long vlen,
+ long pos_l, long pos_h) {
+ if (res >= 0) {
+ if (vec) kernel_write_iovec(vec, vlen, res);
+ }
+}
+
+PRE_SYSCALL(pwritev)(long fd, const __sanitizer_iovec *vec, long vlen,
+ long pos_l, long pos_h) {}
+
+POST_SYSCALL(pwritev)(long res, long fd, const __sanitizer_iovec *vec,
+ long vlen, long pos_l, long pos_h) {
+ if (res >= 0) {
+ if (vec) kernel_read_iovec(vec, vlen, res);
+ }
+}
+
+PRE_SYSCALL(getcwd)(void *buf, long size) {}
+
+POST_SYSCALL(getcwd)(long res, void *buf, long size) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(mkdir)(const void *pathname, long mode) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(mkdir)(long res, const void *pathname, long mode) {}
+
+PRE_SYSCALL(chdir)(const void *filename) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chdir)(long res, const void *filename) {}
+
+PRE_SYSCALL(fchdir)(long fd) {}
+
+POST_SYSCALL(fchdir)(long res, long fd) {}
+
+PRE_SYSCALL(rmdir)(const void *pathname) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(rmdir)(long res, const void *pathname) {}
+
+PRE_SYSCALL(lookup_dcookie)(u64 cookie64, void *buf, long len) {}
+
+POST_SYSCALL(lookup_dcookie)(long res, u64 cookie64, void *buf, long len) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(quotactl)(long cmd, const void *special, long id, void *addr) {
+ if (special)
+ PRE_READ(special, __sanitizer::internal_strlen((const char *)special) + 1);
+}
+
+POST_SYSCALL(quotactl)(long res, long cmd, const void *special, long id,
+ void *addr) {}
+
+PRE_SYSCALL(getdents)(long fd, void *dirent, long count) {}
+
+POST_SYSCALL(getdents)(long res, long fd, void *dirent, long count) {
+ if (res >= 0) {
+ if (dirent) POST_WRITE(dirent, res);
+ }
+}
+
+PRE_SYSCALL(getdents64)(long fd, void *dirent, long count) {}
+
+POST_SYSCALL(getdents64)(long res, long fd, void *dirent, long count) {
+ if (res >= 0) {
+ if (dirent) POST_WRITE(dirent, res);
+ }
+}
+
+PRE_SYSCALL(setsockopt)(long fd, long level, long optname, void *optval,
+ long optlen) {}
+
+POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
+ void *optval, long optlen) {
+ if (res >= 0) {
+ if (optval)
+ POST_WRITE(optval,
+ __sanitizer::internal_strlen((const char *)optval) + 1);
+ }
+}
+
+PRE_SYSCALL(getsockopt)(long fd, long level, long optname, void *optval,
+ void *optlen) {}
+
+POST_SYSCALL(getsockopt)(long res, long fd, long level, long optname,
+ void *optval, void *optlen) {
+ if (res >= 0) {
+ if (optval)
+ POST_WRITE(optval,
+ __sanitizer::internal_strlen((const char *)optval) + 1);
+ if (optlen) POST_WRITE(optlen, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(bind)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
+
+POST_SYSCALL(bind)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ long arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ }
+}
+
+PRE_SYSCALL(connect)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
+
+POST_SYSCALL(connect)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ long arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ }
+}
+
+PRE_SYSCALL(accept)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
+
+POST_SYSCALL(accept)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(accept4)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2,
+ long arg3) {}
+
+POST_SYSCALL(accept4)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2, long arg3) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getsockname)(long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {}
+
+POST_SYSCALL(getsockname)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getpeername)(long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {}
+
+POST_SYSCALL(getpeername)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(send)(long arg0, void *arg1, long arg2, long arg3) {}
+
+POST_SYSCALL(send)(long res, long arg0, void *arg1, long arg2, long arg3) {
+ if (res) {
+ if (arg1) POST_READ(arg1, res);
+ }
+}
+
+PRE_SYSCALL(sendto)(long arg0, void *arg1, long arg2, long arg3,
+ sanitizer_kernel_sockaddr *arg4, long arg5) {}
+
+POST_SYSCALL(sendto)(long res, long arg0, void *arg1, long arg2, long arg3,
+ sanitizer_kernel_sockaddr *arg4, long arg5) {
+ if (res >= 0) {
+ if (arg1) POST_READ(arg1, res);
+ if (arg4) POST_WRITE(arg4, sizeof(*arg4));
+ }
+}
+
+PRE_SYSCALL(sendmsg)(long fd, void *msg, long flags) {}
+
+POST_SYSCALL(sendmsg)(long res, long fd, void *msg, long flags) {
+ // FIXME: POST_READ
+}
+
+PRE_SYSCALL(sendmmsg)(long fd, void *msg, long vlen, long flags) {}
+
+POST_SYSCALL(sendmmsg)(long res, long fd, void *msg, long vlen, long flags) {
+ // FIXME: POST_READ
+}
+
+PRE_SYSCALL(recv)(long arg0, void *buf, long len, long flags) {}
+
+POST_SYSCALL(recv)(long res, void *buf, long len, long flags) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(recvfrom)(long arg0, void *buf, long len, long flags,
+ sanitizer_kernel_sockaddr *arg4, void *arg5) {}
+
+POST_SYSCALL(recvfrom)(long res, long arg0, void *buf, long len, long flags,
+ sanitizer_kernel_sockaddr *arg4, void *arg5) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ if (arg4) POST_WRITE(arg4, sizeof(*arg4));
+ if (arg5) POST_WRITE(arg5, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(socket)(long arg0, long arg1, long arg2) {}
+
+POST_SYSCALL(socket)(long res, long arg0, long arg1, long arg2) {}
+
+PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}
+
+POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {
+ if (res >= 0)
+ if (sv) POST_WRITE(sv, sizeof(int) * 2);
+}
+
+PRE_SYSCALL(socketcall)(long call, void *args) {}
+
+POST_SYSCALL(socketcall)(long res, long call, void *args) {
+ if (res >= 0) {
+ if (args) POST_WRITE(args, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(listen)(long arg0, long arg1) {}
+
+POST_SYSCALL(listen)(long res, long arg0, long arg1) {}
+
+PRE_SYSCALL(poll)(void *ufds, long nfds, long timeout) {}
+
+POST_SYSCALL(poll)(long res, __sanitizer_pollfd *ufds, long nfds,
+ long timeout) {
+ if (res >= 0) {
+ if (ufds) POST_WRITE(ufds, nfds * sizeof(*ufds));
+ }
+}
+
+PRE_SYSCALL(select)(long n, __sanitizer___kernel_fd_set *inp,
+ __sanitizer___kernel_fd_set *outp,
+ __sanitizer___kernel_fd_set *exp, void *tvp) {}
+
+POST_SYSCALL(select)(long res, long n, __sanitizer___kernel_fd_set *inp,
+ __sanitizer___kernel_fd_set *outp,
+ __sanitizer___kernel_fd_set *exp, void *tvp) {
+ if (res >= 0) {
+ if (inp) POST_WRITE(inp, sizeof(*inp));
+ if (outp) POST_WRITE(outp, sizeof(*outp));
+ if (exp) POST_WRITE(exp, sizeof(*exp));
+ if (tvp) POST_WRITE(tvp, timeval_sz);
+ }
+}
+
+PRE_SYSCALL(old_select)(void *arg) {}
+
+POST_SYSCALL(old_select)(long res, void *arg) {}
+
+PRE_SYSCALL(epoll_create)(long size) {}
+
+POST_SYSCALL(epoll_create)(long res, long size) {}
+
+PRE_SYSCALL(epoll_create1)(long flags) {}
+
+POST_SYSCALL(epoll_create1)(long res, long flags) {}
+
+PRE_SYSCALL(epoll_ctl)(long epfd, long op, long fd, void *event) {}
+
+POST_SYSCALL(epoll_ctl)(long res, long epfd, long op, long fd, void *event) {
+ if (res >= 0) {
+ if (event) POST_WRITE(event, struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(epoll_wait)(long epfd, void *events, long maxevents, long timeout) {
+}
+
+POST_SYSCALL(epoll_wait)(long res, long epfd, void *events, long maxevents,
+ long timeout) {
+ if (res >= 0) {
+ if (events) POST_WRITE(events, struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(epoll_pwait)(long epfd, void *events, long maxevents, long timeout,
+ const kernel_sigset_t *sigmask, long sigsetsize) {
+ if (sigmask) PRE_READ(sigmask, sigsetsize);
+}
+
+POST_SYSCALL(epoll_pwait)(long res, long epfd, void *events, long maxevents,
+ long timeout, const void *sigmask, long sigsetsize) {
+ if (res >= 0) {
+ if (events) POST_WRITE(events, struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(gethostname)(void *name, long len) {}
+
+POST_SYSCALL(gethostname)(long res, void *name, long len) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(sethostname)(void *name, long len) {}
+
+POST_SYSCALL(sethostname)(long res, void *name, long len) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(setdomainname)(void *name, long len) {}
+
+POST_SYSCALL(setdomainname)(long res, void *name, long len) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(newuname)(void *name) {}
+
+POST_SYSCALL(newuname)(long res, void *name) {
+ if (res >= 0) {
+ if (name) POST_WRITE(name, struct_new_utsname_sz);
+ }
+}
+
+PRE_SYSCALL(uname)(void *arg0) {}
+
+POST_SYSCALL(uname)(long res, void *arg0) {
+ if (res >= 0) {
+ if (arg0) POST_WRITE(arg0, struct_old_utsname_sz);
+ }
+}
+
+PRE_SYSCALL(olduname)(void *arg0) {}
+
+POST_SYSCALL(olduname)(long res, void *arg0) {
+ if (res >= 0) {
+ if (arg0) POST_WRITE(arg0, struct_oldold_utsname_sz);
+ }
+}
+
+PRE_SYSCALL(getrlimit)(long resource, void *rlim) {}
+
+POST_SYSCALL(getrlimit)(long res, long resource, void *rlim) {
+ if (res >= 0) {
+ if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ }
+}
+
+PRE_SYSCALL(old_getrlimit)(long resource, void *rlim) {}
+
+POST_SYSCALL(old_getrlimit)(long res, long resource, void *rlim) {
+ if (res >= 0) {
+ if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ }
+}
+
+PRE_SYSCALL(setrlimit)(long resource, void *rlim) {}
+
+POST_SYSCALL(setrlimit)(long res, long resource, void *rlim) {
+ if (res >= 0) {
+ if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(prlimit64)(long pid, long resource, const void *new_rlim,
+ void *old_rlim) {
+ if (new_rlim) PRE_READ(new_rlim, struct_rlimit64_sz);
+}
+
+POST_SYSCALL(prlimit64)(long res, long pid, long resource, const void *new_rlim,
+ void *old_rlim) {
+ if (res >= 0) {
+ if (old_rlim) POST_WRITE(old_rlim, struct_rlimit64_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(getrusage)(long who, void *ru) {}
+
+POST_SYSCALL(getrusage)(long res, long who, void *ru) {
+ if (res >= 0) {
+ if (ru) POST_WRITE(ru, struct_rusage_sz);
+ }
+}
+
+PRE_SYSCALL(umask)(long mask) {}
+
+POST_SYSCALL(umask)(long res, long mask) {}
+
+PRE_SYSCALL(msgget)(long key, long msgflg) {}
+
+POST_SYSCALL(msgget)(long res, long key, long msgflg) {}
+
+PRE_SYSCALL(msgsnd)(long msqid, void *msgp, long msgsz, long msgflg) {
+ if (msgp) PRE_READ(msgp, msgsz);
+}
+
+POST_SYSCALL(msgsnd)(long res, long msqid, void *msgp, long msgsz,
+ long msgflg) {}
+
+PRE_SYSCALL(msgrcv)(long msqid, void *msgp, long msgsz, long msgtyp,
+ long msgflg) {}
+
+POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
+ long msgflg) {
+ if (res >= 0) {
+ if (msgp) POST_WRITE(msgp, res);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
+
+POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
+
+POST_SYSCALL(semget)(long res, long key, long nsems, long semflg) {}
+
+PRE_SYSCALL(semop)(long semid, void *sops, long nsops) {}
+
+POST_SYSCALL(semop)(long res, long semid, void *sops, long nsops) {}
+
+PRE_SYSCALL(semctl)(long semid, long semnum, long cmd, void *arg) {}
+
+POST_SYSCALL(semctl)(long res, long semid, long semnum, long cmd, void *arg) {}
+
+PRE_SYSCALL(semtimedop)(long semid, void *sops, long nsops,
+ const void *timeout) {
+ if (timeout) PRE_READ(timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(semtimedop)(long res, long semid, void *sops, long nsops,
+ const void *timeout) {}
+
+PRE_SYSCALL(shmat)(long shmid, void *shmaddr, long shmflg) {}
+
+POST_SYSCALL(shmat)(long res, long shmid, void *shmaddr, long shmflg) {
+ if (res >= 0) {
+ if (shmaddr)
+ POST_WRITE(shmaddr,
+ __sanitizer::internal_strlen((const char *)shmaddr) + 1);
+ }
+}
+
+PRE_SYSCALL(shmget)(long key, long size, long flag) {}
+
+POST_SYSCALL(shmget)(long res, long key, long size, long flag) {}
+
+PRE_SYSCALL(shmdt)(void *shmaddr) {}
+
+POST_SYSCALL(shmdt)(long res, void *shmaddr) {
+ if (res >= 0) {
+ if (shmaddr)
+ POST_WRITE(shmaddr,
+ __sanitizer::internal_strlen((const char *)shmaddr) + 1);
+ }
+}
+
+PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr,
+ long fifth) {}
+
+POST_SYSCALL(ipc)(long res, long call, long first, long second, long third,
+ void *ptr, long fifth) {}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
+
+POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
+ }
+}
+
+PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(mq_open)(long res, const void *name, long oflag, long mode,
+ void *attr) {
+ if (res >= 0) {
+ if (attr) POST_WRITE(attr, struct_mq_attr_sz);
+ }
+}
+
+PRE_SYSCALL(mq_unlink)(const void *name) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(mq_unlink)(long res, const void *name) {}
+
+PRE_SYSCALL(mq_timedsend)(long mqdes, const void *msg_ptr, long msg_len,
+ long msg_prio, const void *abs_timeout) {
+ if (msg_ptr) PRE_READ(msg_ptr, msg_len);
+ if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(mq_timedsend)(long res, long mqdes, const void *msg_ptr,
+ long msg_len, long msg_prio,
+ const void *abs_timeout) {}
+
+PRE_SYSCALL(mq_timedreceive)(long mqdes, void *msg_ptr, long msg_len,
+ void *msg_prio, const void *abs_timeout) {
+ if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(mq_timedreceive)(long res, long mqdes, void *msg_ptr, long msg_len,
+ int *msg_prio, const void *abs_timeout) {
+ if (res >= 0) {
+ if (msg_ptr) POST_WRITE(msg_ptr, res);
+ if (msg_prio) POST_WRITE(msg_prio, sizeof(*msg_prio));
+ }
+}
+
+PRE_SYSCALL(mq_notify)(long mqdes, const void *notification) {
+ if (notification) PRE_READ(notification, struct_sigevent_sz);
+}
+
+POST_SYSCALL(mq_notify)(long res, long mqdes, const void *notification) {}
+
+PRE_SYSCALL(mq_getsetattr)(long mqdes, const void *mqstat, void *omqstat) {
+ if (mqstat) PRE_READ(mqstat, struct_mq_attr_sz);
+}
+
+POST_SYSCALL(mq_getsetattr)(long res, long mqdes, const void *mqstat,
+ void *omqstat) {
+ if (res >= 0) {
+ if (omqstat) POST_WRITE(omqstat, struct_mq_attr_sz);
+ }
+}
+#endif // SANITIZER_ANDROID
+
+PRE_SYSCALL(pciconfig_iobase)(long which, long bus, long devfn) {}
+
+POST_SYSCALL(pciconfig_iobase)(long res, long which, long bus, long devfn) {}
+
+PRE_SYSCALL(pciconfig_read)(long bus, long dfn, long off, long len, void *buf) {
+}
+
+POST_SYSCALL(pciconfig_read)(long res, long bus, long dfn, long off, long len,
+ void *buf) {}
+
+PRE_SYSCALL(pciconfig_write)(long bus, long dfn, long off, long len,
+ void *buf) {}
+
+POST_SYSCALL(pciconfig_write)(long res, long bus, long dfn, long off, long len,
+ void *buf) {}
+
+PRE_SYSCALL(swapon)(const void *specialfile, long swap_flags) {
+ if (specialfile)
+ PRE_READ(specialfile,
+ __sanitizer::internal_strlen((const char *)specialfile) + 1);
+}
+
+POST_SYSCALL(swapon)(long res, const void *specialfile, long swap_flags) {}
+
+PRE_SYSCALL(swapoff)(const void *specialfile) {
+ if (specialfile)
+ PRE_READ(specialfile,
+ __sanitizer::internal_strlen((const char *)specialfile) + 1);
+}
+
+POST_SYSCALL(swapoff)(long res, const void *specialfile) {}
+
+PRE_SYSCALL(sysctl)(__sanitizer___sysctl_args *args) {
+ if (args) {
+ if (args->name) PRE_READ(args->name, args->nlen * sizeof(*args->name));
+ if (args->newval) PRE_READ(args->name, args->newlen);
+ }
+}
+
+POST_SYSCALL(sysctl)(long res, __sanitizer___sysctl_args *args) {
+ if (res >= 0) {
+ if (args && args->oldval && args->oldlenp) {
+ POST_WRITE(args->oldlenp, sizeof(*args->oldlenp));
+ POST_WRITE(args->oldval, *args->oldlenp);
+ }
+ }
+}
+
+PRE_SYSCALL(sysinfo)(void *info) {}
+
+POST_SYSCALL(sysinfo)(long res, void *info) {
+ if (res >= 0) {
+ if (info) POST_WRITE(info, struct_sysinfo_sz);
+ }
+}
+
+PRE_SYSCALL(sysfs)(long option, long arg1, long arg2) {}
+
+POST_SYSCALL(sysfs)(long res, long option, long arg1, long arg2) {}
+
+PRE_SYSCALL(syslog)(long type, void *buf, long len) {}
+
+POST_SYSCALL(syslog)(long res, long type, void *buf, long len) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(uselib)(const void *library) {
+ if (library)
+ PRE_READ(library, __sanitizer::internal_strlen((const char *)library) + 1);
+}
+
+POST_SYSCALL(uselib)(long res, const void *library) {}
+
+PRE_SYSCALL(ni_syscall)() {}
+
+POST_SYSCALL(ni_syscall)(long res) {}
+
+PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+ if (data) {
+ if (request == ptrace_setregs) {
+ PRE_READ((void *)data, struct_user_regs_struct_sz);
+ } else if (request == ptrace_setfpregs) {
+ PRE_READ((void *)data, struct_user_fpregs_struct_sz);
+ } else if (request == ptrace_setfpxregs) {
+ PRE_READ((void *)data, struct_user_fpxregs_struct_sz);
+ } else if (request == ptrace_setsiginfo) {
+ PRE_READ((void *)data, siginfo_t_sz);
+ } else if (request == ptrace_setregset) {
+ __sanitizer_iovec *iov = (__sanitizer_iovec *)data;
+ PRE_READ(iov->iov_base, iov->iov_len);
+ }
+ }
+#endif
+}
+
+POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+ if (res >= 0 && data) {
+ // Note that this is different from the interceptor in
+ // sanitizer_common_interceptors.inc.
+ // PEEK* requests return resulting values through data pointer.
+ if (request == ptrace_getregs) {
+ POST_WRITE((void *)data, struct_user_regs_struct_sz);
+ } else if (request == ptrace_getfpregs) {
+ POST_WRITE((void *)data, struct_user_fpregs_struct_sz);
+ } else if (request == ptrace_getfpxregs) {
+ POST_WRITE((void *)data, struct_user_fpxregs_struct_sz);
+ } else if (request == ptrace_getsiginfo) {
+ POST_WRITE((void *)data, siginfo_t_sz);
+ } else if (request == ptrace_getregset) {
+ __sanitizer_iovec *iov = (__sanitizer_iovec *)data;
+ POST_WRITE(iov->iov_base, iov->iov_len);
+ } else if (request == ptrace_peekdata || request == ptrace_peektext ||
+ request == ptrace_peekuser) {
+ POST_WRITE((void *)data, sizeof(void *));
+ }
+ }
+#endif
+}
+
+PRE_SYSCALL(add_key)(const void *_type, const void *_description,
+ const void *_payload, long plen, long destringid) {
+ if (_type)
+ PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
+ if (_description)
+ PRE_READ(_description,
+ __sanitizer::internal_strlen((const char *)_description) + 1);
+}
+
+POST_SYSCALL(add_key)(long res, const void *_type, const void *_description,
+ const void *_payload, long plen, long destringid) {}
+
+PRE_SYSCALL(request_key)(const void *_type, const void *_description,
+ const void *_callout_info, long destringid) {
+ if (_type)
+ PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
+ if (_description)
+ PRE_READ(_description,
+ __sanitizer::internal_strlen((const char *)_description) + 1);
+ if (_callout_info)
+ PRE_READ(_callout_info,
+ __sanitizer::internal_strlen((const char *)_callout_info) + 1);
+}
+
+POST_SYSCALL(request_key)(long res, const void *_type, const void *_description,
+ const void *_callout_info, long destringid) {}
+
+PRE_SYSCALL(keyctl)(long cmd, long arg2, long arg3, long arg4, long arg5) {}
+
+POST_SYSCALL(keyctl)(long res, long cmd, long arg2, long arg3, long arg4,
+ long arg5) {}
+
+PRE_SYSCALL(ioprio_set)(long which, long who, long ioprio) {}
+
+POST_SYSCALL(ioprio_set)(long res, long which, long who, long ioprio) {}
+
+PRE_SYSCALL(ioprio_get)(long which, long who) {}
+
+POST_SYSCALL(ioprio_get)(long res, long which, long who) {}
+
+PRE_SYSCALL(set_mempolicy)(long mode, void *nmask, long maxnode) {}
+
+POST_SYSCALL(set_mempolicy)(long res, long mode, void *nmask, long maxnode) {
+ if (res >= 0) {
+ if (nmask) POST_WRITE(nmask, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(migrate_pages)(long pid, long maxnode, const void *from,
+ const void *to) {
+ if (from) PRE_READ(from, sizeof(long));
+ if (to) PRE_READ(to, sizeof(long));
+}
+
+POST_SYSCALL(migrate_pages)(long res, long pid, long maxnode, const void *from,
+ const void *to) {}
+
+PRE_SYSCALL(move_pages)(long pid, long nr_pages, const void **pages,
+ const int *nodes, int *status, long flags) {
+ if (pages) PRE_READ(pages, nr_pages * sizeof(*pages));
+ if (nodes) PRE_READ(nodes, nr_pages * sizeof(*nodes));
+}
+
+POST_SYSCALL(move_pages)(long res, long pid, long nr_pages, const void **pages,
+ const int *nodes, int *status, long flags) {
+ if (res >= 0) {
+ if (status) POST_WRITE(status, nr_pages * sizeof(*status));
+ }
+}
+
+PRE_SYSCALL(mbind)(long start, long len, long mode, void *nmask, long maxnode,
+ long flags) {}
+
+POST_SYSCALL(mbind)(long res, long start, long len, long mode, void *nmask,
+ long maxnode, long flags) {
+ if (res >= 0) {
+ if (nmask) POST_WRITE(nmask, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(get_mempolicy)(void *policy, void *nmask, long maxnode, long addr,
+ long flags) {}
+
+POST_SYSCALL(get_mempolicy)(long res, void *policy, void *nmask, long maxnode,
+ long addr, long flags) {
+ if (res >= 0) {
+ if (policy) POST_WRITE(policy, sizeof(int));
+ if (nmask) POST_WRITE(nmask, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(inotify_init)() {}
+
+POST_SYSCALL(inotify_init)(long res) {}
+
+PRE_SYSCALL(inotify_init1)(long flags) {}
+
+POST_SYSCALL(inotify_init1)(long res, long flags) {}
+
+PRE_SYSCALL(inotify_add_watch)(long fd, const void *path, long mask) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(inotify_add_watch)(long res, long fd, const void *path,
+ long mask) {}
+
+PRE_SYSCALL(inotify_rm_watch)(long fd, long wd) {}
+
+POST_SYSCALL(inotify_rm_watch)(long res, long fd, long wd) {}
+
+PRE_SYSCALL(spu_run)(long fd, void *unpc, void *ustatus) {}
+
+POST_SYSCALL(spu_run)(long res, long fd, unsigned *unpc, unsigned *ustatus) {
+ if (res >= 0) {
+ if (unpc) POST_WRITE(unpc, sizeof(*unpc));
+ if (ustatus) POST_WRITE(ustatus, sizeof(*ustatus));
+ }
+}
+
+PRE_SYSCALL(spu_create)(const void *name, long flags, long mode, long fd) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(spu_create)(long res, const void *name, long flags, long mode,
+ long fd) {}
+
+PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(mknodat)(long res, long dfd, const void *filename, long mode,
+ long dev) {}
+
+PRE_SYSCALL(mkdirat)(long dfd, const void *pathname, long mode) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(mkdirat)(long res, long dfd, const void *pathname, long mode) {}
+
+PRE_SYSCALL(unlinkat)(long dfd, const void *pathname, long flag) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(unlinkat)(long res, long dfd, const void *pathname, long flag) {}
+
+PRE_SYSCALL(symlinkat)(const void *oldname, long newdfd, const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(symlinkat)(long res, const void *oldname, long newdfd,
+ const void *newname) {}
+
+PRE_SYSCALL(linkat)(long olddfd, const void *oldname, long newdfd,
+ const void *newname, long flags) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(linkat)(long res, long olddfd, const void *oldname, long newdfd,
+ const void *newname, long flags) {}
+
+PRE_SYSCALL(renameat)(long olddfd, const void *oldname, long newdfd,
+ const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(renameat)(long res, long olddfd, const void *oldname, long newdfd,
+ const void *newname) {}
+
+PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(futimesat)(long res, long dfd, const void *filename,
+ void *utimes) {
+ if (res >= 0) {
+ if (utimes) POST_WRITE(utimes, timeval_sz);
+ }
+}
+
+PRE_SYSCALL(faccessat)(long dfd, const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(faccessat)(long res, long dfd, const void *filename, long mode) {}
+
+PRE_SYSCALL(fchmodat)(long dfd, const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(fchmodat)(long res, long dfd, const void *filename, long mode) {}
+
+PRE_SYSCALL(fchownat)(long dfd, const void *filename, long user, long group,
+ long flag) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(fchownat)(long res, long dfd, const void *filename, long user,
+ long group, long flag) {}
+
+PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(openat)(long res, long dfd, const void *filename, long flags,
+ long mode) {}
+
+PRE_SYSCALL(newfstatat)(long dfd, const void *filename, void *statbuf,
+ long flag) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(newfstatat)(long res, long dfd, const void *filename,
+ void *statbuf, long flag) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(fstatat64)(long dfd, const void *filename, void *statbuf,
+ long flag) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(fstatat64)(long res, long dfd, const void *filename, void *statbuf,
+ long flag) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(readlinkat)(long dfd, const void *path, void *buf, long bufsiz) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(readlinkat)(long res, long dfd, const void *path, void *buf,
+ long bufsiz) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(utimensat)(long dfd, const void *filename, void *utimes,
+ long flags) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(utimensat)(long res, long dfd, const void *filename, void *utimes,
+ long flags) {
+ if (res >= 0) {
+ if (utimes) POST_WRITE(utimes, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(unshare)(long unshare_flags) {}
+
+POST_SYSCALL(unshare)(long res, long unshare_flags) {}
+
+PRE_SYSCALL(splice)(long fd_in, void *off_in, long fd_out, void *off_out,
+ long len, long flags) {}
+
+POST_SYSCALL(splice)(long res, long fd_in, void *off_in, long fd_out,
+ void *off_out, long len, long flags) {
+ if (res >= 0) {
+ if (off_in) POST_WRITE(off_in, sizeof(long long));
+ if (off_out) POST_WRITE(off_out, sizeof(long long));
+ }
+}
+
+PRE_SYSCALL(vmsplice)(long fd, const __sanitizer_iovec *iov, long nr_segs,
+ long flags) {}
+
+POST_SYSCALL(vmsplice)(long res, long fd, const __sanitizer_iovec *iov,
+ long nr_segs, long flags) {
+ if (res >= 0) {
+ if (iov) kernel_read_iovec(iov, nr_segs, res);
+ }
+}
+
+PRE_SYSCALL(tee)(long fdin, long fdout, long len, long flags) {}
+
+POST_SYSCALL(tee)(long res, long fdin, long fdout, long len, long flags) {}
+
+PRE_SYSCALL(get_robust_list)(long pid, void *head_ptr, void *len_ptr) {}
+
+POST_SYSCALL(get_robust_list)(long res, long pid, void *head_ptr,
+ void *len_ptr) {}
+
+PRE_SYSCALL(set_robust_list)(void *head, long len) {}
+
+POST_SYSCALL(set_robust_list)(long res, void *head, long len) {}
+
+PRE_SYSCALL(getcpu)(void *cpu, void *node, void *cache) {}
+
+POST_SYSCALL(getcpu)(long res, void *cpu, void *node, void *cache) {
+ if (res >= 0) {
+ if (cpu) POST_WRITE(cpu, sizeof(unsigned));
+ if (node) POST_WRITE(node, sizeof(unsigned));
+ // The third argument to this system call is nowadays unused.
+ }
+}
+
+PRE_SYSCALL(signalfd)(long ufd, void *user_mask, long sizemask) {}
+
+POST_SYSCALL(signalfd)(long res, long ufd, kernel_sigset_t *user_mask,
+ long sizemask) {
+ if (res >= 0) {
+ if (user_mask) POST_WRITE(user_mask, sizemask);
+ }
+}
+
+PRE_SYSCALL(signalfd4)(long ufd, void *user_mask, long sizemask, long flags) {}
+
+POST_SYSCALL(signalfd4)(long res, long ufd, kernel_sigset_t *user_mask,
+ long sizemask, long flags) {
+ if (res >= 0) {
+ if (user_mask) POST_WRITE(user_mask, sizemask);
+ }
+}
+
+PRE_SYSCALL(timerfd_create)(long clockid, long flags) {}
+
+POST_SYSCALL(timerfd_create)(long res, long clockid, long flags) {}
+
+PRE_SYSCALL(timerfd_settime)(long ufd, long flags, const void *utmr,
+ void *otmr) {
+ if (utmr) PRE_READ(utmr, struct_itimerspec_sz);
+}
+
+POST_SYSCALL(timerfd_settime)(long res, long ufd, long flags, const void *utmr,
+ void *otmr) {
+ if (res >= 0) {
+ if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(timerfd_gettime)(long ufd, void *otmr) {}
+
+POST_SYSCALL(timerfd_gettime)(long res, long ufd, void *otmr) {
+ if (res >= 0) {
+ if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(eventfd)(long count) {}
+
+POST_SYSCALL(eventfd)(long res, long count) {}
+
+PRE_SYSCALL(eventfd2)(long count, long flags) {}
+
+POST_SYSCALL(eventfd2)(long res, long count, long flags) {}
+
+PRE_SYSCALL(old_readdir)(long arg0, void *arg1, long arg2) {}
+
+POST_SYSCALL(old_readdir)(long res, long arg0, void *arg1, long arg2) {
+ // Missing definition of 'struct old_linux_dirent'.
+}
+
+PRE_SYSCALL(pselect6)(long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2,
+ __sanitizer___kernel_fd_set *arg3, void *arg4,
+ void *arg5) {}
+
+POST_SYSCALL(pselect6)(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2,
+ __sanitizer___kernel_fd_set *arg3, void *arg4,
+ void *arg5) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(*arg2));
+ if (arg3) POST_WRITE(arg3, sizeof(*arg3));
+ if (arg4) POST_WRITE(arg4, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(ppoll)(__sanitizer_pollfd *arg0, long arg1, void *arg2,
+ const kernel_sigset_t *arg3, long arg4) {
+ if (arg3) PRE_READ(arg3, arg4);
+}
+
+POST_SYSCALL(ppoll)(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2,
+ const void *arg3, long arg4) {
+ if (res >= 0) {
+ if (arg0) POST_WRITE(arg0, sizeof(*arg0));
+ if (arg2) POST_WRITE(arg2, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(syncfs)(long fd) {}
+
+POST_SYSCALL(syncfs)(long res, long fd) {}
+
+PRE_SYSCALL(perf_event_open)(__sanitizer_perf_event_attr *attr_uptr, long pid,
+ long cpu, long group_fd, long flags) {
+ if (attr_uptr) PRE_READ(attr_uptr, attr_uptr->size);
+}
+
+POST_SYSCALL(perf_event_open)(long res, __sanitizer_perf_event_attr *attr_uptr,
+ long pid, long cpu, long group_fd, long flags) {}
+
+PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd,
+ long pgoff) {}
+
+POST_SYSCALL(mmap_pgoff)(long res, long addr, long len, long prot, long flags,
+ long fd, long pgoff) {}
+
+PRE_SYSCALL(old_mmap)(void *arg) {}
+
+POST_SYSCALL(old_mmap)(long res, void *arg) {}
+
+PRE_SYSCALL(name_to_handle_at)(long dfd, const void *name, void *handle,
+ void *mnt_id, long flag) {}
+
+POST_SYSCALL(name_to_handle_at)(long res, long dfd, const void *name,
+ void *handle, void *mnt_id, long flag) {}
+
+PRE_SYSCALL(open_by_handle_at)(long mountdirfd, void *handle, long flags) {}
+
+POST_SYSCALL(open_by_handle_at)(long res, long mountdirfd, void *handle,
+ long flags) {}
+
+PRE_SYSCALL(setns)(long fd, long nstype) {}
+
+POST_SYSCALL(setns)(long res, long fd, long nstype) {}
+
+PRE_SYSCALL(process_vm_readv)(long pid, const __sanitizer_iovec *lvec,
+ long liovcnt, const void *rvec, long riovcnt,
+ long flags) {}
+
+POST_SYSCALL(process_vm_readv)(long res, long pid,
+ const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
+ if (res >= 0) {
+ if (lvec) kernel_write_iovec(lvec, liovcnt, res);
+ }
+}
+
+PRE_SYSCALL(process_vm_writev)(long pid, const __sanitizer_iovec *lvec,
+ long liovcnt, const void *rvec, long riovcnt,
+ long flags) {}
+
+POST_SYSCALL(process_vm_writev)(long res, long pid,
+ const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
+ if (res >= 0) {
+ if (lvec) kernel_read_iovec(lvec, liovcnt, res);
+ }
+}
+
+PRE_SYSCALL(fork)() {
+ COMMON_SYSCALL_PRE_FORK();
+}
+
+POST_SYSCALL(fork)(long res) {
+ COMMON_SYSCALL_POST_FORK(res);
+}
+
+PRE_SYSCALL(vfork)() {
+ COMMON_SYSCALL_PRE_FORK();
+}
+
+POST_SYSCALL(vfork)(long res) {
+ COMMON_SYSCALL_POST_FORK(res);
+}
+
+PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (act) {
+ PRE_READ(&act->sigaction, sizeof(act->sigaction));
+ PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
+ PRE_READ(&act->sa_mask, sizeof(act->sa_mask));
+ }
+}
+
+POST_SYSCALL(sigaction)(long res, long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (res >= 0 && oldact) POST_WRITE(oldact, sizeof(*oldact));
+}
+
+PRE_SYSCALL(rt_sigaction)(long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+ if (act) {
+ PRE_READ(&act->sigaction, sizeof(act->sigaction));
+ PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
+ PRE_READ(&act->sa_mask, sz);
+ }
+}
+
+POST_SYSCALL(rt_sigaction)(long res, long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+ if (res >= 0 && oldact) {
+ SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;
+ POST_WRITE(oldact, oldact_sz);
+ }
+}
+
+PRE_SYSCALL(getrandom)(void *buf, uptr count, long flags) {
+ if (buf) {
+ PRE_WRITE(buf, count);
+ }
+}
+
+POST_SYSCALL(getrandom)(long res, void *buf, uptr count, long flags) {
+ if (res > 0 && buf) {
+ POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(sigaltstack)(const void *ss, void *oss) {
+ if (ss != nullptr) {
+ PRE_READ(ss, struct_stack_t_sz);
+ }
+ if (oss != nullptr) {
+ PRE_WRITE(oss, struct_stack_t_sz);
+ }
+}
+
+POST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) {
+ if (res == 0) {
+ if (oss != nullptr) {
+ POST_WRITE(oss, struct_stack_t_sz);
+ }
+ }
+}
+} // extern "C"
+
+#undef PRE_SYSCALL
+#undef PRE_READ
+#undef PRE_WRITE
+#undef POST_SYSCALL
+#undef POST_READ
+#undef POST_WRITE
+
+#endif // SANITIZER_LINUX
diff --git a/lib/tsan/sanitizer_common/sanitizer_coverage_interface.inc b/lib/tsan/sanitizer_common/sanitizer_coverage_interface.inc
new file mode 100644
index 0000000000..d7ab0c3d98
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_coverage_interface.inc
@@ -0,0 +1,33 @@
+//===-- sanitizer_coverage_interface.inc ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_cov_reset)
+INTERFACE_FUNCTION(__sanitizer_dump_coverage)
+INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
+INTERFACE_WEAK_FUNCTION(__sancov_default_options)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_bool_flag_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_pcs_init)
diff --git a/lib/tsan/sanitizer_common/sanitizer_dbghelp.h b/lib/tsan/sanitizer_common/sanitizer_dbghelp.h
new file mode 100644
index 0000000000..00a5399800
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_dbghelp.h
@@ -0,0 +1,41 @@
+//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Wrappers for lazy loaded dbghelp.dll. Provides function pointers and a
+// callback to initialize them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_WIN_H
+#define SANITIZER_SYMBOLIZER_WIN_H
+
+#if !SANITIZER_WINDOWS
+#error "sanitizer_dbghelp.h is a Windows-only header"
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <dbghelp.h>
+
+namespace __sanitizer {
+
+extern decltype(::StackWalk64) *StackWalk64;
+extern decltype(::SymCleanup) *SymCleanup;
+extern decltype(::SymFromAddr) *SymFromAddr;
+extern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+extern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+extern decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+extern decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+extern decltype(::SymInitialize) *SymInitialize;
+extern decltype(::SymSetOptions) *SymSetOptions;
+extern decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+extern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_WIN_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_deadlock_detector.h b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector.h
new file mode 100644
index 0000000000..b80cff460e
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector.h
@@ -0,0 +1,410 @@
+//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// The deadlock detector maintains a directed graph of lock acquisitions.
+// When a lock event happens, the detector checks if the locks already held by
+// the current thread are reachable from the newly acquired lock.
+//
+// The detector can handle only a fixed amount of simultaneously live locks
+// (a lock is alive if it has been locked at least once and has not been
+// destroyed). When the maximal number of locks is reached the entire graph
+// is flushed and the new lock epoch is started. The node ids from the old
+// epochs can not be used with any of the detector methods except for
+// nodeBelongsToCurrentEpoch().
+//
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_H
+#define SANITIZER_DEADLOCK_DETECTOR_H
+
+#include "sanitizer_bvgraph.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Thread-local state for DeadlockDetector.
+// It contains the locks currently held by the owning thread.
+template <class BV>
+class DeadlockDetectorTLS {
+ public:
+ // No CTOR.
+ void clear() {
+ bv_.clear();
+ epoch_ = 0;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ bool empty() const { return bv_.empty(); }
+
+ void ensureCurrentEpoch(uptr current_epoch) {
+ if (epoch_ == current_epoch) return;
+ bv_.clear();
+ epoch_ = current_epoch;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ uptr getEpoch() const { return epoch_; }
+
+ // Returns true if this is the first (non-recursive) acquisition of this lock.
+ bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
+ CHECK_EQ(epoch_, current_epoch);
+ if (!bv_.setBit(lock_id)) {
+ // The lock is already held by this thread, it must be recursive.
+ CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
+ recursive_locks[n_recursive_locks++] = lock_id;
+ return false;
+ }
+ CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
+ // lock_id < BV::kSize, can cast to a smaller int.
+ u32 lock_id_short = static_cast<u32>(lock_id);
+ LockWithContext l = {lock_id_short, stk};
+ all_locks_with_contexts_[n_all_locks_++] = l;
+ return true;
+ }
+
+ void removeLock(uptr lock_id) {
+ if (n_recursive_locks) {
+ for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
+ if (recursive_locks[i] == lock_id) {
+ n_recursive_locks--;
+ Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
+ return;
+ }
+ }
+ }
+ if (!bv_.clearBit(lock_id))
+ return; // probably addLock happened before flush
+ if (n_all_locks_) {
+ for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
+ Swap(all_locks_with_contexts_[i],
+ all_locks_with_contexts_[n_all_locks_ - 1]);
+ n_all_locks_--;
+ break;
+ }
+ }
+ }
+ }
+
+ u32 findLockContext(uptr lock_id) {
+ for (uptr i = 0; i < n_all_locks_; i++)
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
+ return all_locks_with_contexts_[i].stk;
+ return 0;
+ }
+
+ const BV &getLocks(uptr current_epoch) const {
+ CHECK_EQ(epoch_, current_epoch);
+ return bv_;
+ }
+
+ uptr getNumLocks() const { return n_all_locks_; }
+ uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }
+
+ private:
+ BV bv_;
+ uptr epoch_;
+ uptr recursive_locks[64];
+ uptr n_recursive_locks;
+ struct LockWithContext {
+ u32 lock;
+ u32 stk;
+ };
+ LockWithContext all_locks_with_contexts_[64];
+ uptr n_all_locks_;
+};
+
+// DeadlockDetector.
+// For deadlock detection to work we need one global DeadlockDetector object
+// and one DeadlockDetectorTLS object per evey thread.
+// This class is not thread safe, all concurrent accesses should be guarded
+// by an external lock.
+// Most of the methods of this class are not thread-safe (i.e. should
+// be protected by an external lock) unless explicitly told otherwise.
+template <class BV>
+class DeadlockDetector {
+ public:
+ typedef BV BitVector;
+
+ uptr size() const { return g_.size(); }
+
+ // No CTOR.
+ void clear() {
+ current_epoch_ = 0;
+ available_nodes_.clear();
+ recycled_nodes_.clear();
+ g_.clear();
+ n_edges_ = 0;
+ }
+
+ // Allocate new deadlock detector node.
+ // If we are out of available nodes first try to recycle some.
+ // If there is nothing to recycle, flush the graph and increment the epoch.
+ // Associate 'data' (opaque user's object) with the new node.
+ uptr newNode(uptr data) {
+ if (!available_nodes_.empty())
+ return getAvailableNode(data);
+ if (!recycled_nodes_.empty()) {
+ for (sptr i = n_edges_ - 1; i >= 0; i--) {
+ if (recycled_nodes_.getBit(edges_[i].from) ||
+ recycled_nodes_.getBit(edges_[i].to)) {
+ Swap(edges_[i], edges_[n_edges_ - 1]);
+ n_edges_--;
+ }
+ }
+ CHECK(available_nodes_.empty());
+ // removeEdgesFrom was called in removeNode.
+ g_.removeEdgesTo(recycled_nodes_);
+ available_nodes_.setUnion(recycled_nodes_);
+ recycled_nodes_.clear();
+ return getAvailableNode(data);
+ }
+ // We are out of vacant nodes. Flush and increment the current_epoch_.
+ current_epoch_ += size();
+ recycled_nodes_.clear();
+ available_nodes_.setAll();
+ g_.clear();
+ n_edges_ = 0;
+ return getAvailableNode(data);
+ }
+
+ // Get data associated with the node created by newNode().
+ uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }
+
+ bool nodeBelongsToCurrentEpoch(uptr node) {
+ return node && (node / size() * size()) == current_epoch_;
+ }
+
+ void removeNode(uptr node) {
+ uptr idx = nodeToIndex(node);
+ CHECK(!available_nodes_.getBit(idx));
+ CHECK(recycled_nodes_.setBit(idx));
+ g_.removeEdgesFrom(idx);
+ }
+
+ void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
+ dtls->ensureCurrentEpoch(current_epoch_);
+ }
+
+ // Returns true if there is a cycle in the graph after this lock event.
+ // Ideally should be called before the lock is acquired so that we can
+ // report a deadlock before a real deadlock happens.
+ bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
+ }
+
+ u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ return dtls->findLockContext(nodeToIndex(node));
+ }
+
+ // Add cur_node to the set of locks held currently by dtls.
+ void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ }
+
+ // Experimental *racy* fast path function.
+ // Returns true if all edges from the currently held locks to cur_node exist.
+ bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ uptr local_epoch = dtls->getEpoch();
+ // Read from current_epoch_ is racy.
+ if (cur_node && local_epoch == current_epoch_ &&
+ local_epoch == nodeToEpoch(cur_node)) {
+ uptr cur_idx = nodeToIndexUnchecked(cur_node);
+ for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
+ if (!g_.hasEdge(dtls->getLock(i), cur_idx))
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ // Adds edges from currently held locks to cur_node,
+ // returns the number of added edges, and puts the sources of added edges
+ // into added_edges[].
+ // Should be called before onLockAfter.
+ uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
+ int unique_tid) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ uptr added_edges[40];
+ uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
+ added_edges, ARRAY_SIZE(added_edges));
+ for (uptr i = 0; i < n_added_edges; i++) {
+ if (n_edges_ < ARRAY_SIZE(edges_)) {
+ Edge e = {(u16)added_edges[i], (u16)cur_idx,
+ dtls->findLockContext(added_edges[i]), stk,
+ unique_tid};
+ edges_[n_edges_++] = e;
+ }
+ }
+ return n_added_edges;
+ }
+
+ bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
+ int *unique_tid) {
+ uptr from_idx = nodeToIndex(from_node);
+ uptr to_idx = nodeToIndex(to_node);
+ for (uptr i = 0; i < n_edges_; i++) {
+ if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
+ *stk_from = edges_[i].stk_from;
+ *stk_to = edges_[i].stk_to;
+ *unique_tid = edges_[i].unique_tid;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Test-only function. Handles the before/after lock events,
+ // returns true if there is a cycle.
+ bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
+ addEdges(dtls, cur_node, stk, 0);
+ onLockAfter(dtls, cur_node, stk);
+ return is_reachable;
+ }
+
+ // Handles the try_lock event, returns false.
+ // When a try_lock event happens (i.e. a try_lock call succeeds) we need
+ // to add this lock to the currently held locks, but we should not try to
+ // change the lock graph or to detect a cycle. We may want to investigate
+ // whether a more aggressive strategy is possible for try_lock.
+ bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ return false;
+ }
+
+ // Returns true iff dtls is empty (no locks are currently held) and we can
+ // add the node to the currently held locks w/o chanding the global state.
+ // This operation is thread-safe as it only touches the dtls.
+ bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (!dtls->empty()) return false;
+ if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ // Finds a path between the lock 'cur_node' (currently not held in dtls)
+ // and some currently held lock, returns the length of the path
+ // or 0 on failure.
+ uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
+ uptr path_size) {
+ tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
+ uptr idx = nodeToIndex(cur_node);
+ CHECK(!tmp_bv_.getBit(idx));
+ uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
+ for (uptr i = 0; i < res; i++)
+ path[i] = indexToNode(path[i]);
+ if (res)
+ CHECK_EQ(path[0], cur_node);
+ return res;
+ }
+
+ // Handle the unlock event.
+ // This operation is thread-safe as it only touches the dtls.
+ void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ if (dtls->getEpoch() == nodeToEpoch(node))
+ dtls->removeLock(nodeToIndexUnchecked(node));
+ }
+
+ // Tries to handle the lock event w/o writing to global state.
+ // Returns true on success.
+ // This operation is thread-safe as it only touches the dtls
+ // (modulo racy nature of hasAllEdges).
+ bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (hasAllEdges(dtls, node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
+ return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
+ }
+
+ uptr testOnlyGetEpoch() const { return current_epoch_; }
+ bool testOnlyHasEdge(uptr l1, uptr l2) {
+ return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
+ }
+ // idx1 and idx2 are raw indices to g_, not lock IDs.
+ bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
+ return g_.hasEdge(idx1, idx2);
+ }
+
+ void Print() {
+ for (uptr from = 0; from < size(); from++)
+ for (uptr to = 0; to < size(); to++)
+ if (g_.hasEdge(from, to))
+ Printf(" %zx => %zx\n", from, to);
+ }
+
+ private:
+ void check_idx(uptr idx) const { CHECK_LT(idx, size()); }
+
+ void check_node(uptr node) const {
+ CHECK_GE(node, size());
+ CHECK_EQ(current_epoch_, nodeToEpoch(node));
+ }
+
+ uptr indexToNode(uptr idx) const {
+ check_idx(idx);
+ return idx + current_epoch_;
+ }
+
+ uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }
+
+ uptr nodeToIndex(uptr node) const {
+ check_node(node);
+ return nodeToIndexUnchecked(node);
+ }
+
+ uptr nodeToEpoch(uptr node) const { return node / size() * size(); }
+
+ uptr getAvailableNode(uptr data) {
+ uptr idx = available_nodes_.getAndClearFirstOne();
+ data_[idx] = data;
+ return indexToNode(idx);
+ }
+
+ struct Edge {
+ u16 from;
+ u16 to;
+ u32 stk_from;
+ u32 stk_to;
+ int unique_tid;
+ };
+
+ uptr current_epoch_;
+ BV available_nodes_;
+ BV recycled_nodes_;
+ BV tmp_bv_;
+ BVGraph<BV> g_;
+ uptr data_[BV::kSize];
+ Edge edges_[BV::kSize * 32];
+ uptr n_edges_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_deadlock_detector1.cpp b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector1.cpp
new file mode 100644
index 0000000000..d4a325bea4
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector1.cpp
@@ -0,0 +1,194 @@
+//===-- sanitizer_deadlock_detector1.cpp ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on NxN adjacency bit matrix.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_deadlock_detector.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+
+namespace __sanitizer {
+
+typedef TwoLevelBitVector<> DDBV; // DeadlockDetector's bit vector.
+
+struct DDPhysicalThread {
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ DeadlockDetectorTLS<DDBV> dd;
+ DDReport rep;
+ bool report_pending;
+};
+
+struct DD : public DDetector {
+ SpinMutex mtx;
+ DeadlockDetector<DDBV> dd;
+ DDFlags flags;
+
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread *CreatePhysicalThread() override;
+ void DestroyPhysicalThread(DDPhysicalThread *pt) override;
+
+ DDLogicalThread *CreateLogicalThread(u64 ctx) override;
+ void DestroyLogicalThread(DDLogicalThread *lt) override;
+
+ void MutexInit(DDCallback *cb, DDMutex *m) override;
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) override;
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
+ void MutexDestroy(DDCallback *cb, DDMutex *m) override;
+
+ DDReport *GetReport(DDCallback *cb) override;
+
+ void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
+ void ReportDeadlock(DDCallback *cb, DDMutex *m);
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags) {
+ dd.clear();
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ return nullptr;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
+ lt->ctx = ctx;
+ lt->dd.clear();
+ lt->report_pending = false;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ m->id = 0;
+ m->stk = cb->Unwind();
+}
+
+void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
+ if (!dd.nodeBelongsToCurrentEpoch(m->id))
+ m->id = dd.newNode(reinterpret_cast<uptr>(m));
+ dd.ensureCurrentEpoch(&lt->dd);
+}
+
+void DD::MutexBeforeLock(DDCallback *cb,
+ DDMutex *m, bool wlock) {
+ DDLogicalThread *lt = cb->lt;
+ if (lt->dd.empty()) return; // This will be the first lock held by lt.
+ if (dd.hasAllEdges(&lt->dd, m->id)) return; // We already have all edges.
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (dd.isHeld(&lt->dd, m->id))
+ return; // FIXME: allow this only for recursive locks.
+ if (dd.onLockBefore(&lt->dd, m->id)) {
+ // Actually add this edge now so that we have all the stack traces.
+ dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
+ ReportDeadlock(cb, m);
+ }
+}
+
+void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
+ DDLogicalThread *lt = cb->lt;
+ uptr path[20];
+ uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
+ if (len == 0U) {
+ // A cycle of 20+ locks? Well, that's a bit odd...
+ Printf("WARNING: too long mutex cycle found\n");
+ return;
+ }
+ CHECK_EQ(m->id, path[0]);
+ lt->report_pending = true;
+ len = Min<uptr>(len, DDReport::kMaxLoopSize);
+ DDReport *rep = &lt->rep;
+ rep->n = len;
+ for (uptr i = 0; i < len; i++) {
+ uptr from = path[i];
+ uptr to = path[(i + 1) % len];
+ DDMutex *m0 = (DDMutex*)dd.getData(from);
+ DDMutex *m1 = (DDMutex*)dd.getData(to);
+
+ u32 stk_from = -1U, stk_to = -1U;
+ int unique_tid = 0;
+ dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
+ // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
+ // unique_tid);
+ rep->loop[i].thr_ctx = unique_tid;
+ rep->loop[i].mtx_ctx0 = m0->ctx;
+ rep->loop[i].mtx_ctx1 = m1->ctx;
+ rep->loop[i].stk[0] = stk_to;
+ rep->loop[i].stk[1] = stk_from;
+ }
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
+ DDLogicalThread *lt = cb->lt;
+ u32 stk = 0;
+ if (flags.second_deadlock_stack)
+ stk = cb->Unwind();
+ // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
+ if (dd.onFirstLock(&lt->dd, m->id, stk))
+ return;
+ if (dd.onLockFast(&lt->dd, m->id, stk))
+ return;
+
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (wlock) // Only a recursive rlock may be held.
+ CHECK(!dd.isHeld(&lt->dd, m->id));
+ if (!trylock)
+ dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
+ dd.onLockAfter(&lt->dd, m->id, stk);
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
+ dd.onUnlock(&cb->lt->dd, m->id);
+}
+
+void DD::MutexDestroy(DDCallback *cb,
+ DDMutex *m) {
+ if (!m->id) return;
+ SpinMutexLock lk(&mtx);
+ if (dd.nodeBelongsToCurrentEpoch(m->id))
+ dd.removeNode(m->id);
+ m->id = 0;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->lt->report_pending)
+ return nullptr;
+ cb->lt->report_pending = false;
+ return &cb->lt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
diff --git a/lib/tsan/sanitizer_common/sanitizer_deadlock_detector2.cpp b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector2.cpp
new file mode 100644
index 0000000000..4026739d4e
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector2.cpp
@@ -0,0 +1,423 @@
+//===-- sanitizer_deadlock_detector2.cpp ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on adjacency lists.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+
+namespace __sanitizer {
+
+const int kMaxNesting = 64;
+const u32 kNoId = -1;
+const u32 kEndId = -2;
+const int kMaxLink = 8;
+const int kL1Size = 1024;
+const int kL2Size = 1024;
+const int kMaxMutex = kL1Size * kL2Size;
+
+struct Id {
+ u32 id;
+ u32 seq;
+
+ explicit Id(u32 id = 0, u32 seq = 0)
+ : id(id)
+ , seq(seq) {
+ }
+};
+
+struct Link {
+ u32 id;
+ u32 seq;
+ u32 tid;
+ u32 stk0;
+ u32 stk1;
+
+ explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
+ : id(id)
+ , seq(seq)
+ , tid(tid)
+ , stk0(s0)
+ , stk1(s1) {
+ }
+};
+
+struct DDPhysicalThread {
+ DDReport rep;
+ bool report_pending;
+ bool visited[kMaxMutex];
+ Link pending[kMaxMutex];
+ Link path[kMaxMutex];
+};
+
+struct ThreadMutex {
+ u32 id;
+ u32 stk;
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ ThreadMutex locked[kMaxNesting];
+ int nlocked;
+};
+
+struct Mutex {
+ StaticSpinMutex mtx;
+ u32 seq;
+ int nlink;
+ Link link[kMaxLink];
+};
+
+struct DD : public DDetector {
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread* CreatePhysicalThread();
+ void DestroyPhysicalThread(DDPhysicalThread *pt);
+
+ DDLogicalThread* CreateLogicalThread(u64 ctx);
+ void DestroyLogicalThread(DDLogicalThread *lt);
+
+ void MutexInit(DDCallback *cb, DDMutex *m);
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock);
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexDestroy(DDCallback *cb, DDMutex *m);
+
+ DDReport *GetReport(DDCallback *cb);
+
+ void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
+ void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
+ u32 allocateId(DDCallback *cb);
+ Mutex *getMutex(u32 id);
+ u32 getMutexId(Mutex *m);
+
+ DDFlags flags;
+
+ Mutex* mutex[kL1Size];
+
+ SpinMutex mtx;
+ InternalMmapVector<u32> free_id;
+ int id_gen = 0;
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
+ "deadlock detector (physical thread)");
+ return pt;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+ pt->~DDPhysicalThread();
+ UnmapOrDie(pt, sizeof(DDPhysicalThread));
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
+ sizeof(DDLogicalThread));
+ lt->ctx = ctx;
+ lt->nlocked = 0;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
+ m->id = kNoId;
+ m->recursion = 0;
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+}
+
+Mutex *DD::getMutex(u32 id) {
+ return &mutex[id / kL2Size][id % kL2Size];
+}
+
+u32 DD::getMutexId(Mutex *m) {
+ for (int i = 0; i < kL1Size; i++) {
+ Mutex *tab = mutex[i];
+ if (tab == 0)
+ break;
+ if (m >= tab && m < tab + kL2Size)
+ return i * kL2Size + (m - tab);
+ }
+ return -1;
+}
+
+u32 DD::allocateId(DDCallback *cb) {
+ u32 id = -1;
+ SpinMutexLock l(&mtx);
+ if (free_id.size() > 0) {
+ id = free_id.back();
+ free_id.pop_back();
+ } else {
+ CHECK_LT(id_gen, kMaxMutex);
+ if ((id_gen % kL2Size) == 0) {
+ mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
+ "deadlock detector (mutex table)");
+ }
+ id = id_gen++;
+ }
+ CHECK_LE(id, kMaxMutex);
+ VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
+ return id;
+}
+
+void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDPhysicalThread *pt = cb->pt;
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+
+ // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+ if (lt->nlocked == 1) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ bool added = false;
+ Mutex *mtx = getMutex(m->id);
+ for (int i = 0; i < lt->nlocked - 1; i++) {
+ u32 id1 = lt->locked[i].id;
+ u32 stk1 = lt->locked[i].stk;
+ Mutex *mtx1 = getMutex(id1);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->nlink == kMaxLink) {
+ // FIXME(dvyukov): check stale links
+ continue;
+ }
+ int li = 0;
+ for (; li < mtx1->nlink; li++) {
+ Link *link = &mtx1->link[li];
+ if (link->id == m->id) {
+ if (link->seq != mtx->seq) {
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ break;
+ }
+ }
+ if (li == mtx1->nlink) {
+ // FIXME(dvyukov): check stale links
+ Link *link = &mtx1->link[mtx1->nlink++];
+ link->id = m->id;
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ }
+
+ if (!added || mtx->nlink == 0) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CycleCheck(pt, lt, m);
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {
+ VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
+ CHECK(wlock);
+ m->recursion++;
+ return;
+ }
+ CHECK_EQ(owner, 0);
+ if (wlock) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
+ CHECK_EQ(m->recursion, 0);
+ m->recursion = 1;
+ atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
+ }
+
+ if (!trylock)
+ return;
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
+ if (--m->recursion > 0)
+ return;
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+ }
+ CHECK_NE(m->id, kNoId);
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (cb->lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+}
+
+void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
+ cb->lt->ctx, m);
+ DDLogicalThread *lt = cb->lt;
+
+ if (m->id == kNoId)
+ return;
+
+ // Remove the mutex from lt->locked if there.
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+
+ // Clear and invalidate the mutex descriptor.
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ mtx->seq++;
+ mtx->nlink = 0;
+ }
+
+ // Return id to cache.
+ {
+ SpinMutexLock l(&mtx);
+ free_id.push_back(m->id);
+ }
+}
+
+void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
+ DDMutex *m) {
+ internal_memset(pt->visited, 0, sizeof(pt->visited));
+ int npath = 0;
+ int npending = 0;
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ for (int li = 0; li < mtx->nlink; li++)
+ pt->pending[npending++] = mtx->link[li];
+ }
+ while (npending > 0) {
+ Link link = pt->pending[--npending];
+ if (link.id == kEndId) {
+ npath--;
+ continue;
+ }
+ if (pt->visited[link.id])
+ continue;
+ Mutex *mtx1 = getMutex(link.id);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->seq != link.seq)
+ continue;
+ pt->visited[link.id] = true;
+ if (mtx1->nlink == 0)
+ continue;
+ pt->path[npath++] = link;
+ pt->pending[npending++] = Link(kEndId);
+ if (link.id == m->id)
+ return Report(pt, lt, npath); // Bingo!
+ for (int li = 0; li < mtx1->nlink; li++) {
+ Link *link1 = &mtx1->link[li];
+ // Mutex *mtx2 = getMutex(link->id);
+ // FIXME(dvyukov): fast seq check
+ // FIXME(dvyukov): fast nlink != 0 check
+ // FIXME(dvyukov): fast pending check?
+ // FIXME(dvyukov): npending can be larger than kMaxMutex
+ pt->pending[npending++] = *link1;
+ }
+ }
+}
+
+void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
+ DDReport *rep = &pt->rep;
+ rep->n = npath;
+ for (int i = 0; i < npath; i++) {
+ Link *link = &pt->path[i];
+ Link *link0 = &pt->path[i ? i - 1 : npath - 1];
+ rep->loop[i].thr_ctx = link->tid;
+ rep->loop[i].mtx_ctx0 = link0->id;
+ rep->loop[i].mtx_ctx1 = link->id;
+ rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
+ rep->loop[i].stk[1] = link->stk1;
+ }
+ pt->report_pending = true;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->pt->report_pending)
+ return 0;
+ cb->pt->report_pending = false;
+ return &cb->pt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
diff --git a/lib/tsan/sanitizer_common/sanitizer_deadlock_detector_interface.h b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector_interface.h
new file mode 100644
index 0000000000..a4722b080e
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_deadlock_detector_interface.h
@@ -0,0 +1,92 @@
+//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// Abstract deadlock detector interface.
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION
+# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1
+#endif
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+// dd - deadlock detector.
+// lt - logical (user) thread.
+// pt - physical (OS) thread.
+
+struct DDPhysicalThread;
+struct DDLogicalThread;
+
+struct DDMutex {
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+ uptr id;
+ u32 stk; // creation stack
+#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+ u32 id;
+ u32 recursion;
+ atomic_uintptr_t owner;
+#else
+# error "BAD SANITIZER_DEADLOCK_DETECTOR_VERSION"
+#endif
+ u64 ctx;
+};
+
+struct DDFlags {
+ bool second_deadlock_stack;
+};
+
+struct DDReport {
+ enum { kMaxLoopSize = 20 };
+ int n; // number of entries in loop
+ struct {
+ u64 thr_ctx; // user thread context
+ u64 mtx_ctx0; // user mutex context, start of the edge
+ u64 mtx_ctx1; // user mutex context, end of the edge
+ u32 stk[2]; // stack ids for the edge
+ } loop[kMaxLoopSize];
+};
+
+struct DDCallback {
+ DDPhysicalThread *pt;
+ DDLogicalThread *lt;
+
+ virtual u32 Unwind() { return 0; }
+ virtual int UniqueTid() { return 0; }
+};
+
+struct DDetector {
+ static DDetector *Create(const DDFlags *flags);
+
+ virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; }
+ virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}
+
+ virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; }
+ virtual void DestroyLogicalThread(DDLogicalThread *lt) {}
+
+ virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
+ virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {}
+ virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
+
+ virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_errno.cpp b/lib/tsan/sanitizer_common/sanitizer_errno.cpp
new file mode 100644
index 0000000000..cbadf4d924
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_errno.cpp
@@ -0,0 +1,34 @@
+//===-- sanitizer_errno.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+// Defines errno to avoid including errno.h and its dependencies into other
+// files (e.g. interceptors are not supposed to include any system headers).
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_errno_codes.h"
+#include "sanitizer_internal_defs.h"
+
+#include <errno.h>
+
+namespace __sanitizer {
+
+COMPILER_CHECK(errno_ENOMEM == ENOMEM);
+COMPILER_CHECK(errno_EBUSY == EBUSY);
+COMPILER_CHECK(errno_EINVAL == EINVAL);
+
+// EOWNERDEAD is not present in some older platforms.
+#if defined(EOWNERDEAD)
+extern const int errno_EOWNERDEAD = EOWNERDEAD;
+#else
+extern const int errno_EOWNERDEAD = -1;
+#endif
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_errno.h b/lib/tsan/sanitizer_common/sanitizer_errno.h
new file mode 100644
index 0000000000..584e66e4a8
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_errno.h
@@ -0,0 +1,39 @@
+//===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+// Defines errno to avoid including errno.h and its dependencies into sensitive
+// files (e.g. interceptors are not supposed to include any system headers).
+// It's ok to use errno.h directly when your file already depend on other system
+// includes though.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ERRNO_H
+#define SANITIZER_ERRNO_H
+
+#include "sanitizer_errno_codes.h"
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+# define __errno_location __error
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD || SANITIZER_OPENBSD || \
+ SANITIZER_RTEMS
+# define __errno_location __errno
+#elif SANITIZER_SOLARIS
+# define __errno_location ___errno
+#elif SANITIZER_WINDOWS
+# define __errno_location _errno
+#endif
+
+extern "C" int *__errno_location();
+
+#define errno (*__errno_location())
+
+#endif // SANITIZER_ERRNO_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_errno_codes.h b/lib/tsan/sanitizer_common/sanitizer_errno_codes.h
new file mode 100644
index 0000000000..f388d0d364
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_errno_codes.h
@@ -0,0 +1,33 @@
+//===-- sanitizer_errno_codes.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+// Defines errno codes to avoid including errno.h and its dependencies into
+// sensitive files (e.g. interceptors are not supposed to include any system
+// headers).
+// It's ok to use errno.h directly when your file already depend on other system
+// includes though.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ERRNO_CODES_H
+#define SANITIZER_ERRNO_CODES_H
+
+namespace __sanitizer {
+
+#define errno_ENOMEM 12
+#define errno_EBUSY 16
+#define errno_EINVAL 22
+
+// Those might not present or their value differ on different platforms.
+extern const int errno_EOWNERDEAD;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ERRNO_CODES_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_file.cpp b/lib/tsan/sanitizer_common/sanitizer_file.cpp
new file mode 100644
index 0000000000..79930d7942
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_file.cpp
@@ -0,0 +1,215 @@
+//===-- sanitizer_file.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. It defines filesystem-related interfaces. This
+// is separate from sanitizer_common.cpp so that it's simpler to disable
+// all the filesystem support code for a port that doesn't use it.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if !SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+
+namespace __sanitizer {
+
+void CatastrophicErrorWrite(const char *buffer, uptr length) {
+ WriteToFile(kStderrFd, buffer, length);
+}
+
+StaticSpinMutex report_file_mu;
+ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
+
+void RawWrite(const char *buffer) {
+ report_file.Write(buffer, internal_strlen(buffer));
+}
+
+void ReportFile::ReopenIfNecessary() {
+ mu->CheckLocked();
+ if (fd == kStdoutFd || fd == kStderrFd) return;
+
+ uptr pid = internal_getpid();
+ // If in tracer, use the parent's file.
+ if (pid == stoptheworld_tracer_pid)
+ pid = stoptheworld_tracer_ppid;
+ if (fd != kInvalidFd) {
+ // If the report file is already opened by the current process,
+ // do nothing. Otherwise the report file was opened by the parent
+ // process, close it now.
+ if (fd_pid == pid)
+ return;
+ else
+ CloseFile(fd);
+ }
+
+ const char *exe_name = GetProcessName();
+ if (common_flags()->log_exe_name && exe_name) {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
+ exe_name, pid);
+ } else {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
+ }
+ fd = OpenFile(full_path, WrOnly);
+ if (fd == kInvalidFd) {
+ const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+ WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+ WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
+ Die();
+ }
+ fd_pid = pid;
+}
+
+void ReportFile::SetReportPath(const char *path) {
+ if (!path)
+ return;
+ uptr len = internal_strlen(path);
+ if (len > sizeof(path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+ Die();
+ }
+
+ SpinMutexLock l(mu);
+ if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
+ CloseFile(fd);
+ fd = kInvalidFd;
+ if (internal_strcmp(path, "stdout") == 0) {
+ fd = kStdoutFd;
+ } else if (internal_strcmp(path, "stderr") == 0) {
+ fd = kStderrFd;
+ } else {
+ internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+ }
+}
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len, error_t *errno_p) {
+ *buff = nullptr;
+ *buff_size = 0;
+ *read_len = 0;
+ if (!max_len)
+ return true;
+ uptr PageSize = GetPageSizeCached();
+ uptr kMinFileLen = Min(PageSize, max_len);
+
+ // The files we usually open are not seekable, so try different buffer sizes.
+ for (uptr size = kMinFileLen;; size = Min(size * 2, max_len)) {
+ UnmapOrDie(*buff, *buff_size);
+ *buff = (char*)MmapOrDie(size, __func__);
+ *buff_size = size;
+ fd_t fd = OpenFile(file_name, RdOnly, errno_p);
+ if (fd == kInvalidFd) {
+ UnmapOrDie(*buff, *buff_size);
+ return false;
+ }
+ *read_len = 0;
+ // Read up to one page at a time.
+ bool reached_eof = false;
+ while (*read_len < size) {
+ uptr just_read;
+ if (!ReadFromFile(fd, *buff + *read_len, size - *read_len, &just_read,
+ errno_p)) {
+ UnmapOrDie(*buff, *buff_size);
+ CloseFile(fd);
+ return false;
+ }
+ *read_len += just_read;
+ if (just_read == 0 || *read_len == max_len) {
+ reached_eof = true;
+ break;
+ }
+ }
+ CloseFile(fd);
+ if (reached_eof) // We've read the whole file.
+ break;
+ }
+ return true;
+}
+
+bool ReadFileToVector(const char *file_name,
+ InternalMmapVectorNoCtor<char> *buff, uptr max_len,
+ error_t *errno_p) {
+ buff->clear();
+ if (!max_len)
+ return true;
+ uptr PageSize = GetPageSizeCached();
+ fd_t fd = OpenFile(file_name, RdOnly, errno_p);
+ if (fd == kInvalidFd)
+ return false;
+ uptr read_len = 0;
+ while (read_len < max_len) {
+ if (read_len >= buff->size())
+ buff->resize(Min(Max(PageSize, read_len * 2), max_len));
+ CHECK_LT(read_len, buff->size());
+ CHECK_LE(buff->size(), max_len);
+ uptr just_read;
+ if (!ReadFromFile(fd, buff->data() + read_len, buff->size() - read_len,
+ &just_read, errno_p)) {
+ CloseFile(fd);
+ return false;
+ }
+ read_len += just_read;
+ if (!just_read)
+ break;
+ }
+ CloseFile(fd);
+ buff->resize(read_len);
+ return true;
+}
+
+static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
+
+char *FindPathToBinary(const char *name) {
+ if (FileExists(name)) {
+ return internal_strdup(name);
+ }
+
+ const char *path = GetEnv("PATH");
+ if (!path)
+ return nullptr;
+ uptr name_len = internal_strlen(name);
+ InternalMmapVector<char> buffer(kMaxPathLength);
+ const char *beg = path;
+ while (true) {
+ const char *end = internal_strchrnul(beg, kPathSeparator);
+ uptr prefix_len = end - beg;
+ if (prefix_len + name_len + 2 <= kMaxPathLength) {
+ internal_memcpy(buffer.data(), beg, prefix_len);
+ buffer[prefix_len] = '/';
+ internal_memcpy(&buffer[prefix_len + 1], name, name_len);
+ buffer[prefix_len + 1 + name_len] = '\0';
+ if (FileExists(buffer.data()))
+ return internal_strdup(buffer.data());
+ }
+ if (*end == '\0') break;
+ beg = end + 1;
+ }
+ return nullptr;
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+extern "C" {
+void __sanitizer_set_report_path(const char *path) {
+ report_file.SetReportPath(path);
+}
+
+void __sanitizer_set_report_fd(void *fd) {
+ report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
+ report_file.fd_pid = internal_getpid();
+}
+} // extern "C"
+
+#endif // !SANITIZER_FUCHSIA
diff --git a/lib/tsan/sanitizer_common/sanitizer_file.h b/lib/tsan/sanitizer_common/sanitizer_file.h
new file mode 100644
index 0000000000..26681f0493
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_file.h
@@ -0,0 +1,106 @@
+//===-- sanitizer_file.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+// It declares filesystem-related interfaces. This is separate from
+// sanitizer_common.h so that it's simpler to disable all the filesystem
+// support code for a port that doesn't use it.
+//
+//===---------------------------------------------------------------------===//
+#ifndef SANITIZER_FILE_H
+#define SANITIZER_FILE_H
+
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+struct ReportFile {
+ void Write(const char *buffer, uptr length);
+ bool SupportsColors();
+ void SetReportPath(const char *path);
+
+ // Don't use fields directly. They are only declared public to allow
+ // aggregate initialization.
+
+ // Protects fields below.
+ StaticSpinMutex *mu;
+ // Opened file descriptor. Defaults to stderr. It may be equal to
+ // kInvalidFd, in which case new file will be opened when necessary.
+ fd_t fd;
+ // Path prefix of report file, set via __sanitizer_set_report_path.
+ char path_prefix[kMaxPathLength];
+ // Full path to report, obtained as <path_prefix>.PID
+ char full_path[kMaxPathLength];
+ // PID of the process that opened fd. If a fork() occurs,
+ // the PID of child will be different from fd_pid.
+ uptr fd_pid;
+
+ private:
+ void ReopenIfNecessary();
+};
+extern ReportFile report_file;
+
+enum FileAccessMode {
+ RdOnly,
+ WrOnly,
+ RdWr
+};
+
+// Returns kInvalidFd on error.
+fd_t OpenFile(const char *filename, FileAccessMode mode,
+ error_t *errno_p = nullptr);
+void CloseFile(fd_t);
+
+// Return true on success, false on error.
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
+ uptr *bytes_read = nullptr, error_t *error_p = nullptr);
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
+ uptr *bytes_written = nullptr, error_t *error_p = nullptr);
+
+// Scoped file handle closer.
+struct FileCloser {
+ explicit FileCloser(fd_t fd) : fd(fd) {}
+ ~FileCloser() { CloseFile(fd); }
+ fd_t fd;
+};
+
+bool SupportsColoredOutput(fd_t fd);
+
+// OS
+const char *GetPwd();
+bool FileExists(const char *filename);
+char *FindPathToBinary(const char *name);
+bool IsPathSeparator(const char c);
+bool IsAbsolutePath(const char *path);
+// Starts a subprocess and returs its pid.
+// If *_fd parameters are not kInvalidFd their corresponding input/output
+// streams will be redirect to the file. The files will always be closed
+// in parent process even in case of an error.
+// The child process will close all fds after STDERR_FILENO
+// before passing control to a program.
+pid_t StartSubprocess(const char *filename, const char *const argv[],
+ const char *const envp[], fd_t stdin_fd = kInvalidFd,
+ fd_t stdout_fd = kInvalidFd, fd_t stderr_fd = kInvalidFd);
+// Checks if specified process is still running
+bool IsProcessRunning(pid_t pid);
+// Waits for the process to finish and returns its exit code.
+// Returns -1 in case of an error.
+int WaitForProcess(pid_t pid);
+
+// Maps given file to virtual memory, and returns pointer to it
+// (or NULL if mapping fails). Stores the size of mmaped region
+// in '*buff_size'.
+void *MapFileToMemory(const char *file_name, uptr *buff_size);
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FILE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_flag_parser.cpp b/lib/tsan/sanitizer_common/sanitizer_flag_parser.cpp
new file mode 100644
index 0000000000..9e274268bf
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_flag_parser.cpp
@@ -0,0 +1,191 @@
+//===-- sanitizer_flag_parser.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flag_parser.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_flag_parser.h"
+
+namespace __sanitizer {
+
+LowLevelAllocator FlagParser::Alloc;
+
+class UnknownFlags {
+ static const int kMaxUnknownFlags = 20;
+ const char *unknown_flags_[kMaxUnknownFlags];
+ int n_unknown_flags_;
+
+ public:
+ void Add(const char *name) {
+ CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
+ unknown_flags_[n_unknown_flags_++] = name;
+ }
+
+ void Report() {
+ if (!n_unknown_flags_) return;
+ Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
+ for (int i = 0; i < n_unknown_flags_; ++i)
+ Printf(" %s\n", unknown_flags_[i]);
+ n_unknown_flags_ = 0;
+ }
+};
+
+UnknownFlags unknown_flags;
+
+void ReportUnrecognizedFlags() {
+ unknown_flags.Report();
+}
+
+char *FlagParser::ll_strndup(const char *s, uptr n) {
+ uptr len = internal_strnlen(s, n);
+ char *s2 = (char*)Alloc.Allocate(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+void FlagParser::PrintFlagDescriptions() {
+ char buffer[128];
+ buffer[sizeof(buffer) - 1] = '\0';
+ Printf("Available flags for %s:\n", SanitizerToolName);
+ for (int i = 0; i < n_flags_; ++i) {
+ bool truncated = !(flags_[i].handler->Format(buffer, sizeof(buffer)));
+ CHECK_EQ(buffer[sizeof(buffer) - 1], '\0');
+ const char *truncation_str = truncated ? " Truncated" : "";
+ Printf("\t%s\n\t\t- %s (Current Value%s: %s)\n", flags_[i].name,
+ flags_[i].desc, truncation_str, buffer);
+ }
+}
+
+void FlagParser::fatal_error(const char *err) {
+ Printf("%s: ERROR: %s\n", SanitizerToolName, err);
+ Die();
+}
+
+bool FlagParser::is_space(char c) {
+ return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
+ c == '\r';
+}
+
+void FlagParser::skip_whitespace() {
+ while (is_space(buf_[pos_])) ++pos_;
+}
+
+void FlagParser::parse_flag(const char *env_option_name) {
+ uptr name_start = pos_;
+ while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != '=') {
+ if (env_option_name) {
+ Printf("%s: ERROR: expected '=' in %s\n", SanitizerToolName,
+ env_option_name);
+ Die();
+ } else {
+ fatal_error("expected '='");
+ }
+ }
+ char *name = ll_strndup(buf_ + name_start, pos_ - name_start);
+
+ uptr value_start = ++pos_;
+ char *value;
+ if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
+ char quote = buf_[pos_++];
+ while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
+ if (buf_[pos_] == 0) fatal_error("unterminated string");
+ value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
+ ++pos_; // consume the closing quote
+ } else {
+ while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
+ fatal_error("expected separator or eol");
+ value = ll_strndup(buf_ + value_start, pos_ - value_start);
+ }
+
+ bool res = run_handler(name, value);
+ if (!res) fatal_error("Flag parsing failed.");
+}
+
+void FlagParser::parse_flags(const char *env_option_name) {
+ while (true) {
+ skip_whitespace();
+ if (buf_[pos_] == 0) break;
+ parse_flag(env_option_name);
+ }
+
+ // Do a sanity check for certain flags.
+ if (common_flags_dont_use.malloc_context_size < 1)
+ common_flags_dont_use.malloc_context_size = 1;
+}
+
+void FlagParser::ParseStringFromEnv(const char *env_name) {
+ const char *env = GetEnv(env_name);
+ VPrintf(1, "%s: %s\n", env_name, env ? env : "<empty>");
+ ParseString(env, env_name);
+}
+
+void FlagParser::ParseString(const char *s, const char *env_option_name) {
+ if (!s) return;
+ // Backup current parser state to allow nested ParseString() calls.
+ const char *old_buf_ = buf_;
+ uptr old_pos_ = pos_;
+ buf_ = s;
+ pos_ = 0;
+
+ parse_flags(env_option_name);
+
+ buf_ = old_buf_;
+ pos_ = old_pos_;
+}
+
+bool FlagParser::ParseFile(const char *path, bool ignore_missing) {
+ static const uptr kMaxIncludeSize = 1 << 15;
+ char *data;
+ uptr data_mapped_size;
+ error_t err;
+ uptr len;
+ if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len,
+ Max(kMaxIncludeSize, GetPageSizeCached()), &err)) {
+ if (ignore_missing)
+ return true;
+ Printf("Failed to read options from '%s': error %d\n", path, err);
+ return false;
+ }
+ ParseString(data, path);
+ UnmapOrDie(data, data_mapped_size);
+ return true;
+}
+
+bool FlagParser::run_handler(const char *name, const char *value) {
+ for (int i = 0; i < n_flags_; ++i) {
+ if (internal_strcmp(name, flags_[i].name) == 0)
+ return flags_[i].handler->Parse(value);
+ }
+ // Unrecognized flag. This is not a fatal error, we may print a warning later.
+ unknown_flags.Add(name);
+ return true;
+}
+
+void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc) {
+ CHECK_LT(n_flags_, kMaxFlags);
+ flags_[n_flags_].name = name;
+ flags_[n_flags_].desc = desc;
+ flags_[n_flags_].handler = handler;
+ ++n_flags_;
+}
+
+FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
+ flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_flag_parser.h b/lib/tsan/sanitizer_common/sanitizer_flag_parser.h
new file mode 100644
index 0000000000..fac5dff346
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_flag_parser.h
@@ -0,0 +1,204 @@
+//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAG_REGISTRY_H
+#define SANITIZER_FLAG_REGISTRY_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class FlagHandlerBase {
+ public:
+ virtual bool Parse(const char *value) { return false; }
+ // Write the C string representation of the current value (truncated to fit)
+ // into the buffer of size `size`. Returns false if truncation occurred and
+ // returns true otherwise.
+ virtual bool Format(char *buffer, uptr size) {
+ if (size > 0)
+ buffer[0] = '\0';
+ return false;
+ }
+
+ protected:
+ ~FlagHandlerBase() {}
+
+ inline bool FormatString(char *buffer, uptr size, const char *str_to_use) {
+ uptr num_symbols_should_write =
+ internal_snprintf(buffer, size, "%s", str_to_use);
+ return num_symbols_should_write < size;
+ }
+};
+
+template <typename T>
+class FlagHandler : public FlagHandlerBase {
+ T *t_;
+
+ public:
+ explicit FlagHandler(T *t) : t_(t) {}
+ bool Parse(const char *value) final;
+ bool Format(char *buffer, uptr size) final;
+};
+
+inline bool ParseBool(const char *value, bool *b) {
+ if (internal_strcmp(value, "0") == 0 ||
+ internal_strcmp(value, "no") == 0 ||
+ internal_strcmp(value, "false") == 0) {
+ *b = false;
+ return true;
+ }
+ if (internal_strcmp(value, "1") == 0 ||
+ internal_strcmp(value, "yes") == 0 ||
+ internal_strcmp(value, "true") == 0) {
+ *b = true;
+ return true;
+ }
+ return false;
+}
+
+template <>
+inline bool FlagHandler<bool>::Parse(const char *value) {
+ if (ParseBool(value, t_)) return true;
+ Printf("ERROR: Invalid value for bool option: '%s'\n", value);
+ return false;
+}
+
+template <>
+inline bool FlagHandler<bool>::Format(char *buffer, uptr size) {
+ return FormatString(buffer, size, *t_ ? "true" : "false");
+}
+
+template <>
+inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
+ bool b;
+ if (ParseBool(value, &b)) {
+ *t_ = b ? kHandleSignalYes : kHandleSignalNo;
+ return true;
+ }
+ if (internal_strcmp(value, "2") == 0 ||
+ internal_strcmp(value, "exclusive") == 0) {
+ *t_ = kHandleSignalExclusive;
+ return true;
+ }
+ Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
+ return false;
+}
+
+template <>
+inline bool FlagHandler<HandleSignalMode>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%d", *t_);
+ return num_symbols_should_write < size;
+}
+
+template <>
+inline bool FlagHandler<const char *>::Parse(const char *value) {
+ *t_ = value;
+ return true;
+}
+
+template <>
+inline bool FlagHandler<const char *>::Format(char *buffer, uptr size) {
+ return FormatString(buffer, size, *t_);
+}
+
+template <>
+inline bool FlagHandler<int>::Parse(const char *value) {
+ const char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
+ return ok;
+}
+
+template <>
+inline bool FlagHandler<int>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%d", *t_);
+ return num_symbols_should_write < size;
+}
+
+template <>
+inline bool FlagHandler<uptr>::Parse(const char *value) {
+ const char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
+ return ok;
+}
+
+template <>
+inline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%p", *t_);
+ return num_symbols_should_write < size;
+}
+
+template <>
+inline bool FlagHandler<s64>::Parse(const char *value) {
+ const char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for s64 option: '%s'\n", value);
+ return ok;
+}
+
+template <>
+inline bool FlagHandler<s64>::Format(char *buffer, uptr size) {
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "%lld", *t_);
+ return num_symbols_should_write < size;
+}
+
+class FlagParser {
+ static const int kMaxFlags = 200;
+ struct Flag {
+ const char *name;
+ const char *desc;
+ FlagHandlerBase *handler;
+ } *flags_;
+ int n_flags_;
+
+ const char *buf_;
+ uptr pos_;
+
+ public:
+ FlagParser();
+ void RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc);
+ void ParseString(const char *s, const char *env_name = 0);
+ void ParseStringFromEnv(const char *env_name);
+ bool ParseFile(const char *path, bool ignore_missing);
+ void PrintFlagDescriptions();
+
+ static LowLevelAllocator Alloc;
+
+ private:
+ void fatal_error(const char *err);
+ bool is_space(char c);
+ void skip_whitespace();
+ void parse_flags(const char *env_option_name);
+ void parse_flag(const char *env_option_name);
+ bool run_handler(const char *name, const char *value);
+ char *ll_strndup(const char *s, uptr n);
+};
+
+template <typename T>
+static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
+ T *var) {
+ FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var);
+ parser->RegisterHandler(name, fh, desc);
+}
+
+void ReportUnrecognizedFlags();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAG_REGISTRY_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_flags.cpp b/lib/tsan/sanitizer_common/sanitizer_flags.cpp
new file mode 100644
index 0000000000..684ee1e0b9
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_flags.cpp
@@ -0,0 +1,129 @@
+//===-- sanitizer_flags.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flags.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_flag_parser.h"
+
+namespace __sanitizer {
+
+CommonFlags common_flags_dont_use;
+
+void CommonFlags::SetDefaults() {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+}
+
+void CommonFlags::CopyFrom(const CommonFlags &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+}
+
+// Copy the string from "s" to "out", making the following substitutions:
+// %b = binary basename
+// %p = pid
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
+ char *out_end = out + out_size;
+ while (*s && out < out_end - 1) {
+ if (s[0] != '%') {
+ *out++ = *s++;
+ continue;
+ }
+ switch (s[1]) {
+ case 'b': {
+ const char *base = GetProcessName();
+ CHECK(base);
+ while (*base && out < out_end - 1)
+ *out++ = *base++;
+ s += 2; // skip "%b"
+ break;
+ }
+ case 'p': {
+ int pid = internal_getpid();
+ char buf[32];
+ char *buf_pos = buf + 32;
+ do {
+ *--buf_pos = (pid % 10) + '0';
+ pid /= 10;
+ } while (pid);
+ while (buf_pos < buf + 32 && out < out_end - 1)
+ *out++ = *buf_pos++;
+ s += 2; // skip "%p"
+ break;
+ }
+ default:
+ *out++ = *s++;
+ break;
+ }
+ }
+ CHECK(out < out_end - 1);
+ *out = '\0';
+}
+
+class FlagHandlerInclude : public FlagHandlerBase {
+ FlagParser *parser_;
+ bool ignore_missing_;
+ const char *original_path_;
+
+ public:
+ explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)
+ : parser_(parser), ignore_missing_(ignore_missing), original_path_("") {}
+ bool Parse(const char *value) final {
+ original_path_ = value;
+ if (internal_strchr(value, '%')) {
+ char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
+ SubstituteForFlagValue(value, buf, kMaxPathLength);
+ bool res = parser_->ParseFile(buf, ignore_missing_);
+ UnmapOrDie(buf, kMaxPathLength);
+ return res;
+ }
+ return parser_->ParseFile(value, ignore_missing_);
+ }
+ bool Format(char *buffer, uptr size) {
+ // Note `original_path_` isn't actually what's parsed due to `%`
+ // substitutions. Printing the substituted path would require holding onto
+ // mmap'ed memory.
+ return FormatString(buffer, size, original_path_);
+ }
+};
+
+void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
+ FlagHandlerInclude *fh_include = new (FlagParser::Alloc)
+ FlagHandlerInclude(parser, /*ignore_missing*/ false);
+ parser->RegisterHandler("include", fh_include,
+ "read more options from the given file");
+ FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc)
+ FlagHandlerInclude(parser, /*ignore_missing*/ true);
+ parser->RegisterHandler(
+ "include_if_exists", fh_include_if_exists,
+ "read more options from the given file (if it exists)");
+}
+
+void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &cf->Name);
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+ RegisterIncludeFlags(parser, cf);
+}
+
+void InitializeCommonFlags(CommonFlags *cf) {
+ // need to record coverage to generate coverage report.
+ cf->coverage |= cf->html_cov_report;
+ SetVerbosity(cf->verbosity);
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_flags.h b/lib/tsan/sanitizer_common/sanitizer_flags.h
new file mode 100644
index 0000000000..8f5e987da3
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_flags.h
@@ -0,0 +1,67 @@
+//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAGS_H
+#define SANITIZER_FLAGS_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+enum HandleSignalMode {
+ kHandleSignalNo,
+ kHandleSignalYes,
+ kHandleSignalExclusive,
+};
+
+struct CommonFlags {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+ void SetDefaults();
+ void CopyFrom(const CommonFlags &other);
+};
+
+// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
+extern CommonFlags common_flags_dont_use;
+inline const CommonFlags *common_flags() {
+ return &common_flags_dont_use;
+}
+
+inline void SetCommonFlagsDefaults() {
+ common_flags_dont_use.SetDefaults();
+}
+
+// This function can only be used to setup tool-specific overrides for
+// CommonFlags defaults. Generally, it should only be used right after
+// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
+// only during the flags initialization (i.e. before they are used for
+// the first time).
+inline void OverrideCommonFlags(const CommonFlags &cf) {
+ common_flags_dont_use.CopyFrom(cf);
+}
+
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size);
+
+class FlagParser;
+void RegisterCommonFlags(FlagParser *parser,
+ CommonFlags *cf = &common_flags_dont_use);
+void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
+
+// Should be called after parsing all flags. Sets up common flag values
+// and perform initializations common to all sanitizers (e.g. setting
+// verbosity).
+void InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use);
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAGS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_flags.inc b/lib/tsan/sanitizer_common/sanitizer_flags.inc
new file mode 100644
index 0000000000..065258a5a6
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_flags.inc
@@ -0,0 +1,250 @@
+//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes common flags available in all sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef COMMON_FLAG
+#error "Define COMMON_FLAG prior to including this file!"
+#endif
+
+// COMMON_FLAG(Type, Name, DefaultValue, Description)
+// Supported types: bool, const char *, int, uptr.
+// Default value must be a compile-time constant.
+// Description must be a string literal.
+
+COMMON_FLAG(
+ bool, symbolize, true,
+ "If set, use the online symbolizer from common sanitizer runtime to turn "
+ "virtual addresses to file/line locations.")
+COMMON_FLAG(
+ const char *, external_symbolizer_path, nullptr,
+ "Path to external symbolizer. If empty, the tool will search $PATH for "
+ "the symbolizer.")
+COMMON_FLAG(
+ bool, allow_addr2line, false,
+ "If set, allows online symbolizer to run addr2line binary to symbolize "
+ "stack traces (addr2line will only be used if llvm-symbolizer binary is "
+ "unavailable.")
+COMMON_FLAG(const char *, strip_path_prefix, "",
+ "Strips this prefix from file paths in error reports.")
+COMMON_FLAG(bool, fast_unwind_on_check, false,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "internal CHECK failures.")
+COMMON_FLAG(bool, fast_unwind_on_fatal, false,
+ "If available, use the fast frame-pointer-based unwinder on fatal "
+ "errors.")
+COMMON_FLAG(bool, fast_unwind_on_malloc, true,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "malloc/free.")
+COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
+COMMON_FLAG(int, malloc_context_size, 1,
+ "Max number of stack frames kept for each allocation/deallocation.")
+COMMON_FLAG(
+ const char *, log_path, "stderr",
+ "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
+ "\"stderr\". The default is \"stderr\".")
+COMMON_FLAG(
+ bool, log_exe_name, false,
+ "Mention name of executable when reporting error and "
+ "append executable name to logs (as in \"log_path.exe_name.pid\").")
+COMMON_FLAG(
+ bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ "Write all sanitizer output to syslog in addition to other means of "
+ "logging.")
+COMMON_FLAG(
+ int, verbosity, 0,
+ "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
+COMMON_FLAG(bool, strip_env, 1,
+ "Whether to remove the sanitizer from DYLD_INSERT_LIBRARIES to "
+ "avoid passing it to children. Default is true.")
+COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
+COMMON_FLAG(
+ bool, leak_check_at_exit, true,
+ "Invoke leak checking in an atexit handler. Has no effect if "
+ "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
+ "handler has a chance to run.")
+COMMON_FLAG(bool, allocator_may_return_null, false,
+ "If false, the allocator will crash instead of returning 0 on "
+ "out-of-memory.")
+COMMON_FLAG(bool, print_summary, true,
+ "If false, disable printing error summaries in addition to error "
+ "reports.")
+COMMON_FLAG(int, print_module_map, 0,
+ "OS X only (0 - don't print, 1 - print only once before process "
+ "exits, 2 - print after each report).")
+COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
+#define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \
+ "Controls custom tool's " #signal " handler (0 - do not registers the " \
+ "handler, 1 - register the handler and allow user to set own, " \
+ "2 - registers the handler and block user from changing it). "
+COMMON_FLAG(HandleSignalMode, handle_segv, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGSEGV))
+COMMON_FLAG(HandleSignalMode, handle_sigbus, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGBUS))
+COMMON_FLAG(HandleSignalMode, handle_abort, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGABRT))
+COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGILL))
+COMMON_FLAG(HandleSignalMode, handle_sigtrap, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGTRAP))
+COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))
+#undef COMMON_FLAG_HANDLE_SIGNAL_HELP
+COMMON_FLAG(bool, allow_user_segv_handler, true,
+ "Deprecated. True has no effect, use handle_sigbus=1. If false, "
+ "handle_*=1 will be upgraded to handle_*=2.")
+COMMON_FLAG(bool, use_sigaltstack, true,
+ "If set, uses alternate stack for signal handling.")
+COMMON_FLAG(bool, detect_deadlocks, true,
+ "If set, deadlock detection is enabled.")
+COMMON_FLAG(
+ uptr, clear_shadow_mmap_threshold, 64 * 1024,
+ "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
+ "memset(). This is the threshold size in bytes.")
+COMMON_FLAG(const char *, color, "auto",
+ "Colorize reports: (always|never|auto).")
+COMMON_FLAG(
+ bool, legacy_pthread_cond, false,
+ "Enables support for dynamic libraries linked with libpthread 2.2.5.")
+COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
+COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
+COMMON_FLAG(uptr, mmap_limit_mb, 0,
+ "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
+ "not a user-facing flag, used mosly for testing the tools")
+COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
+ "Hard RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS and aborts the process if the"
+ " limit is reached")
+COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
+ "Soft RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS. If the limit is reached"
+ " all subsequent malloc/new calls will fail or return NULL"
+ " (depending on the value of allocator_may_return_null)"
+ " until the RSS goes below the soft limit."
+ " This limit does not affect memory allocations other than"
+ " malloc/new.")
+COMMON_FLAG(uptr, max_allocation_size_mb, 0,
+ "If non-zero, malloc/new calls larger than this size will return "
+ "nullptr (or crash if allocator_may_return_null=false).")
+COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms,
+ ((bool)SANITIZER_FUCHSIA || (bool)SANITIZER_WINDOWS) ? -1 : 5000,
+ "Only affects a 64-bit allocator. If set, tries to release unused "
+ "memory to the OS, but not more often than this interval (in "
+ "milliseconds). Negative values mean do not attempt to release "
+ "memory to the OS.\n")
+COMMON_FLAG(bool, can_use_proc_maps_statm, true,
+ "If false, do not attempt to read /proc/maps/statm."
+ " Mostly useful for testing sanitizers.")
+COMMON_FLAG(
+ bool, coverage, false,
+ "If set, coverage information will be dumped at program shutdown (if the "
+ "coverage instrumentation was enabled at compile time).")
+COMMON_FLAG(const char *, coverage_dir, ".",
+ "Target directory for coverage dumps. Defaults to the current "
+ "directory.")
+COMMON_FLAG(bool, full_address_space, false,
+ "Sanitize complete address space; "
+ "by default kernel area on 32-bit platforms will not be sanitized")
+COMMON_FLAG(bool, print_suppressions, true,
+ "Print matched suppressions at exit.")
+COMMON_FLAG(
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,
+ "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid"
+ " dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ " default and for sanitizers that don't reserve lots of virtual memory.")
+COMMON_FLAG(bool, use_madv_dontdump, true,
+ "If set, instructs kernel to not store the (huge) shadow "
+ "in core file.")
+COMMON_FLAG(bool, symbolize_inline_frames, true,
+ "Print inlined frames in stacktraces. Defaults to true.")
+COMMON_FLAG(bool, symbolize_vs_style, false,
+ "Print file locations in Visual Studio style (e.g: "
+ " file(10,42): ...")
+COMMON_FLAG(int, dedup_token_length, 0,
+ "If positive, after printing a stack trace also print a short "
+ "string token based on this number of frames that will simplify "
+ "deduplication of the reports. "
+ "Example: 'DEDUP_TOKEN: foo-bar-main'. Default is 0.")
+COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
+ "Format string used to render stack frames. "
+ "See sanitizer_stacktrace_printer.h for the format description. "
+ "Use DEFAULT to get default format.")
+COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
+ "If true, the shadow is not allowed to use huge pages. ")
+COMMON_FLAG(bool, strict_string_checks, false,
+ "If set check that string arguments are properly null-terminated")
+COMMON_FLAG(bool, intercept_strstr, true,
+ "If set, uses custom wrappers for strstr and strcasestr functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strspn, true,
+ "If set, uses custom wrappers for strspn and strcspn function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strtok, true,
+ "If set, uses a custom wrapper for the strtok function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strpbrk, true,
+ "If set, uses custom wrappers for strpbrk function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strlen, true,
+ "If set, uses custom wrappers for strlen and strnlen functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strndup, true,
+ "If set, uses custom wrappers for strndup functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strchr, true,
+ "If set, uses custom wrappers for strchr, strchrnul, and strrchr "
+ "functions to find more errors.")
+COMMON_FLAG(bool, intercept_memcmp, true,
+ "If set, uses custom wrappers for memcmp function "
+ "to find more errors.")
+COMMON_FLAG(bool, strict_memcmp, true,
+ "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+ "comparing p1 and p2.")
+COMMON_FLAG(bool, intercept_memmem, true,
+ "If set, uses a wrapper for memmem() to find more errors.")
+COMMON_FLAG(bool, intercept_intrin, true,
+ "If set, uses custom wrappers for memset/memcpy/memmove "
+ "intrinsics to find more errors.")
+COMMON_FLAG(bool, intercept_stat, true,
+ "If set, uses custom wrappers for *stat functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_send, true,
+ "If set, uses custom wrappers for send* functions "
+ "to find more errors.")
+COMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,
+ "If set, decorate sanitizer mappings in /proc/self/maps with "
+ "user-readable names")
+COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
+ "found an error")
+COMMON_FLAG(
+ bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ "If set, the tool calls abort() instead of _exit() after printing the "
+ "error report.")
+COMMON_FLAG(bool, suppress_equal_pcs, true,
+ "Deduplicate multiple reports for single source location in "
+ "halt_on_error=false mode (asan only).")
+COMMON_FLAG(bool, print_cmdline, false, "Print command line on crash "
+ "(asan only).")
+COMMON_FLAG(bool, html_cov_report, false, "Generate html coverage report.")
+COMMON_FLAG(const char *, sancov_path, "sancov", "Sancov tool location.")
+COMMON_FLAG(bool, dump_instruction_bytes, false,
+ "If true, dump 16 bytes starting at the instruction that caused SEGV")
+COMMON_FLAG(bool, dump_registers, true,
+ "If true, dump values of CPU registers when SEGV happens. Only "
+ "available on OS X for now.")
+COMMON_FLAG(bool, detect_write_exec, false,
+ "If true, triggers warning when writable-executable pages requests "
+ "are being made")
+COMMON_FLAG(bool, test_only_emulate_no_memorymap, false,
+ "TEST ONLY fail to read memory mappings to emulate sanitized "
+ "\"init\"")
diff --git a/lib/tsan/sanitizer_common/sanitizer_freebsd.h b/lib/tsan/sanitizer_common/sanitizer_freebsd.h
new file mode 100644
index 0000000000..82b227eab6
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_freebsd.h
@@ -0,0 +1,137 @@
+//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime. It contains FreeBSD-specific
+// definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FREEBSD_H
+#define SANITIZER_FREEBSD_H
+
+#include "sanitizer_internal_defs.h"
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+#include <osreldate.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#include <link.h>
+#include <sys/param.h>
+#include <ucontext.h>
+
+namespace __sanitizer {
+
+typedef unsigned long long __xuint64_t;
+
+typedef __int32_t __xregister_t;
+
+typedef struct __xmcontext {
+ __xregister_t mc_onstack;
+ __xregister_t mc_gs;
+ __xregister_t mc_fs;
+ __xregister_t mc_es;
+ __xregister_t mc_ds;
+ __xregister_t mc_edi;
+ __xregister_t mc_esi;
+ __xregister_t mc_ebp;
+ __xregister_t mc_isp;
+ __xregister_t mc_ebx;
+ __xregister_t mc_edx;
+ __xregister_t mc_ecx;
+ __xregister_t mc_eax;
+ __xregister_t mc_trapno;
+ __xregister_t mc_err;
+ __xregister_t mc_eip;
+ __xregister_t mc_cs;
+ __xregister_t mc_eflags;
+ __xregister_t mc_esp;
+ __xregister_t mc_ss;
+
+ int mc_len;
+ int mc_fpformat;
+ int mc_ownedfp;
+ __xregister_t mc_flags;
+
+ int mc_fpstate[128] __aligned(16);
+ __xregister_t mc_fsbase;
+ __xregister_t mc_gsbase;
+ __xregister_t mc_xfpustate;
+ __xregister_t mc_xfpustate_len;
+
+ int mc_spare2[4];
+} xmcontext_t;
+
+typedef struct __xucontext {
+ sigset_t uc_sigmask;
+ xmcontext_t uc_mcontext;
+
+ struct __ucontext *uc_link;
+ stack_t uc_stack;
+ int uc_flags;
+ int __spare__[4];
+} xucontext_t;
+
+struct xkinfo_vmentry {
+ int kve_structsize;
+ int kve_type;
+ __xuint64_t kve_start;
+ __xuint64_t kve_end;
+ __xuint64_t kve_offset;
+ __xuint64_t kve_vn_fileid;
+ __uint32_t kve_vn_fsid;
+ int kve_flags;
+ int kve_resident;
+ int kve_private_resident;
+ int kve_protection;
+ int kve_ref_count;
+ int kve_shadow_count;
+ int kve_vn_type;
+ __xuint64_t kve_vn_size;
+ __uint32_t kve_vn_rdev;
+ __uint16_t kve_vn_mode;
+ __uint16_t kve_status;
+ int _kve_ispare[12];
+ char kve_path[PATH_MAX];
+};
+
+typedef struct {
+ __uint32_t p_type;
+ __uint32_t p_offset;
+ __uint32_t p_vaddr;
+ __uint32_t p_paddr;
+ __uint32_t p_filesz;
+ __uint32_t p_memsz;
+ __uint32_t p_flags;
+ __uint32_t p_align;
+} XElf32_Phdr;
+
+struct xdl_phdr_info {
+ Elf_Addr dlpi_addr;
+ const char *dlpi_name;
+ const XElf32_Phdr *dlpi_phdr;
+ Elf_Half dlpi_phnum;
+ unsigned long long int dlpi_adds;
+ unsigned long long int dlpi_subs;
+ size_t dlpi_tls_modid;
+ void *dlpi_tls_data;
+};
+
+typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info *, size_t,
+ void *);
+typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void *);
+
+#define xdl_iterate_phdr(callback, param) \
+ (((xdl_iterate_phdr_t *)dl_iterate_phdr)((callback), (param)))
+
+} // namespace __sanitizer
+
+#endif // __FreeBSD_version <= 902001
+#endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+
+#endif // SANITIZER_FREEBSD_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_fuchsia.cpp b/lib/tsan/sanitizer_common/sanitizer_fuchsia.cpp
new file mode 100644
index 0000000000..6d1ad79467
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_fuchsia.cpp
@@ -0,0 +1,531 @@
+//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and other sanitizer
+// run-time libraries and implements Fuchsia-specific functions from
+// sanitizer_common.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+
+#include <limits.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <zircon/errors.h>
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+namespace __sanitizer {
+
+void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }
+
+uptr internal_sched_yield() {
+ zx_status_t status = _zx_nanosleep(0);
+ CHECK_EQ(status, ZX_OK);
+ return 0; // Why doesn't this return void?
+}
+
+static void internal_nanosleep(zx_time_t ns) {
+ zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
+ CHECK_EQ(status, ZX_OK);
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ internal_nanosleep(ZX_SEC(seconds));
+ return 0;
+}
+
+u64 NanoTime() {
+ zx_time_t time;
+ zx_status_t status = _zx_clock_get(ZX_CLOCK_UTC, &time);
+ CHECK_EQ(status, ZX_OK);
+ return time;
+}
+
+u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }
+
+uptr internal_getpid() {
+ zx_info_handle_basic_t info;
+ zx_status_t status =
+ _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
+ sizeof(info), NULL, NULL);
+ CHECK_EQ(status, ZX_OK);
+ uptr pid = static_cast<uptr>(info.koid);
+ CHECK_EQ(pid, info.koid);
+ return pid;
+}
+
+int internal_dlinfo(void *handle, int request, void *p) {
+ UNIMPLEMENTED();
+}
+
+uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
+
+tid_t GetTid() { return GetThreadSelf(); }
+
+void Abort() { abort(); }
+
+int Atexit(void (*function)(void)) { return atexit(function); }
+
+void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+
+void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
+
+void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
+ pthread_attr_t attr;
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ void *base;
+ size_t size;
+ CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
+ CHECK_EQ(pthread_attr_destroy(&attr), 0);
+
+ *stack_bottom = reinterpret_cast<uptr>(base);
+ *stack_top = *stack_bottom + size;
+}
+
+void InitializePlatformEarly() {}
+void MaybeReexec() {}
+void CheckASLR() {}
+void CheckMPROTECT() {}
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void DisableCoreDumperIfNecessary() {}
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
+void SetAlternateSignalStack() {}
+void UnsetAlternateSignalStack() {}
+void InitTlsSize() {}
+
+void PrintModuleMap() {}
+
+bool SignalContext::IsStackOverflow() const { return false; }
+void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
+const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
+
+enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
+
+BlockingMutex::BlockingMutex() {
+ // NOTE! It's important that this use internal_memset, because plain
+ // memset might be intercepted (e.g., actually be __asan_memset).
+ // Defining this so the compiler initializes each field, e.g.:
+ // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
+ // might result in the compiler generating a call to memset, which would
+ // have the same problem.
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+ zx_status_t status =
+ _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
+ ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
+ if (status != ZX_ERR_BAD_STATE) // Normal race.
+ CHECK_EQ(status, ZX_OK);
+ }
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping) {
+ zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
+ CHECK_EQ(status, ZX_OK);
+ }
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+
+uptr GetPageSize() { return PAGE_SIZE; }
+
+uptr GetMmapGranularity() { return PAGE_SIZE; }
+
+sanitizer_shadow_bounds_t ShadowBounds;
+
+uptr GetMaxUserVirtualAddress() {
+ ShadowBounds = __sanitizer_shadow_bounds();
+ return ShadowBounds.memory_limit - 1;
+}
+
+uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
+
+static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
+ bool raw_report, bool die_for_nomem) {
+ size = RoundUpTo(size, PAGE_SIZE);
+
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
+ raw_report);
+ return nullptr;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
+ internal_strlen(mem_type));
+
+ // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
+ uintptr_t addr;
+ status =
+ _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+ vmo, 0, size, &addr);
+ _zx_handle_close(vmo);
+
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
+ raw_report);
+ return nullptr;
+ }
+
+ IncreaseTotalMmap(size);
+
+ return reinterpret_cast<void *>(addr);
+}
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ return DoAnonymousMmapOrDie(size, mem_type, false, false);
+}
+
+uptr ReservedAddressRange::Init(uptr init_size, const char *name,
+ uptr fixed_addr) {
+ init_size = RoundUpTo(init_size, PAGE_SIZE);
+ DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
+ uintptr_t base;
+ zx_handle_t vmar;
+ zx_status_t status =
+ _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
+ 0, init_size, &vmar, &base);
+ if (status != ZX_OK)
+ ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
+ base_ = reinterpret_cast<void *>(base);
+ size_ = init_size;
+ name_ = name;
+ os_handle_ = vmar;
+
+ return reinterpret_cast<uptr>(base_);
+}
+
+static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
+ void *base, const char *name, bool die_for_nomem) {
+ uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
+ map_size = RoundUpTo(map_size, PAGE_SIZE);
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
+ return 0;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
+ DCHECK_GE(base + size_, map_size + offset);
+ uintptr_t addr;
+
+ status =
+ _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
+ offset, vmo, 0, map_size, &addr);
+ _zx_handle_close(vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
+ ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
+ }
+ return 0;
+ }
+ IncreaseTotalMmap(map_size);
+ return addr;
+}
+
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
+ const char *name) {
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name_, false);
+}
+
+uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
+ const char *name) {
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name_, true);
+}
+
+void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
+ if (!addr || !size) return;
+ size = RoundUpTo(size, PAGE_SIZE);
+
+ zx_status_t status =
+ _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
+ if (status != ZX_OK) {
+ Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+
+ DecreaseTotalMmap(size);
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ CHECK_LE(size, size_);
+ const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
+ if (addr == reinterpret_cast<uptr>(base_)) {
+ if (size == size_) {
+ // Destroying the vmar effectively unmaps the whole mapping.
+ _zx_vmar_destroy(vmar);
+ _zx_handle_close(vmar);
+ os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
+ DecreaseTotalMmap(size);
+ return;
+ }
+ } else {
+ CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
+ }
+ // Partial unmapping does not affect the fact that the initial range is still
+ // reserved, and the resulting unmapped memory can't be reused.
+ UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
+}
+
+// This should never be called.
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ UNIMPLEMENTED();
+}
+
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK_GE(size, PAGE_SIZE);
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
+ return nullptr;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
+ internal_strlen(mem_type));
+
+ // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
+
+ // Map a larger size to get a chunk of address space big enough that
+ // it surely contains an aligned region of the requested size. Then
+ // overwrite the aligned middle portion with a mapping from the
+ // beginning of the VMO, and unmap the excess before and after.
+ size_t map_size = size + alignment;
+ uintptr_t addr;
+ status =
+ _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+ vmo, 0, map_size, &addr);
+ if (status == ZX_OK) {
+ uintptr_t map_addr = addr;
+ uintptr_t map_end = map_addr + map_size;
+ addr = RoundUpTo(map_addr, alignment);
+ uintptr_t end = addr + size;
+ if (addr != map_addr) {
+ zx_info_vmar_t info;
+ status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
+ sizeof(info), NULL, NULL);
+ if (status == ZX_OK) {
+ uintptr_t new_addr;
+ status = _zx_vmar_map(
+ _zx_vmar_root_self(),
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
+ addr - info.base, vmo, 0, size, &new_addr);
+ if (status == ZX_OK) CHECK_EQ(new_addr, addr);
+ }
+ }
+ if (status == ZX_OK && addr != map_addr)
+ status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
+ if (status == ZX_OK && end != map_end)
+ status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
+ }
+ _zx_handle_close(vmo);
+
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
+ return nullptr;
+ }
+
+ IncreaseTotalMmap(size);
+
+ return reinterpret_cast<void *>(addr);
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
+}
+
+// This is used on the shadow mapping, which cannot be changed.
+// Zircon doesn't have anything like MADV_DONTNEED.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
+
+void DumpProcessMap() {
+ // TODO(mcgrathr): write it
+ return;
+}
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ // TODO(mcgrathr): Figure out a better way.
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status == ZX_OK) {
+ status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
+ _zx_handle_close(vmo);
+ }
+ return status == ZX_OK;
+}
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len, error_t *errno_p) {
+ zx_handle_t vmo;
+ zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
+ if (status == ZX_OK) {
+ uint64_t vmo_size;
+ status = _zx_vmo_get_size(vmo, &vmo_size);
+ if (status == ZX_OK) {
+ if (vmo_size < max_len) max_len = vmo_size;
+ size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
+ uintptr_t addr;
+ status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
+ map_size, &addr);
+ if (status == ZX_OK) {
+ *buff = reinterpret_cast<char *>(addr);
+ *buff_size = map_size;
+ *read_len = max_len;
+ }
+ }
+ _zx_handle_close(vmo);
+ }
+ if (status != ZX_OK && errno_p) *errno_p = status;
+ return status == ZX_OK;
+}
+
+void RawWrite(const char *buffer) {
+ constexpr size_t size = 128;
+ static _Thread_local char line[size];
+ static _Thread_local size_t lastLineEnd = 0;
+ static _Thread_local size_t cur = 0;
+
+ while (*buffer) {
+ if (cur >= size) {
+ if (lastLineEnd == 0)
+ lastLineEnd = size;
+ __sanitizer_log_write(line, lastLineEnd);
+ internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
+ cur = cur - lastLineEnd;
+ lastLineEnd = 0;
+ }
+ if (*buffer == '\n')
+ lastLineEnd = cur + 1;
+ line[cur++] = *buffer++;
+ }
+ // Flush all complete lines before returning.
+ if (lastLineEnd != 0) {
+ __sanitizer_log_write(line, lastLineEnd);
+ internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
+ cur = cur - lastLineEnd;
+ lastLineEnd = 0;
+ }
+}
+
+void CatastrophicErrorWrite(const char *buffer, uptr length) {
+ __sanitizer_log_write(buffer, length);
+}
+
+char **StoredArgv;
+char **StoredEnviron;
+
+char **GetArgv() { return StoredArgv; }
+char **GetEnviron() { return StoredEnviron; }
+
+const char *GetEnv(const char *name) {
+ if (StoredEnviron) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = StoredEnviron; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return nullptr;
+}
+
+uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
+ const char *argv0 = "<UNKNOWN>";
+ if (StoredArgv && StoredArgv[0]) {
+ argv0 = StoredArgv[0];
+ }
+ internal_strncpy(buf, argv0, buf_len);
+ return internal_strlen(buf);
+}
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+uptr MainThreadStackBase, MainThreadStackSize;
+
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
+ _zx_cprng_draw(buffer, length);
+ return true;
+}
+
+u32 GetNumberOfCPUs() {
+ return zx_system_get_num_cpus();
+}
+
+uptr GetRSS() { UNIMPLEMENTED(); }
+
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+extern "C" {
+void __sanitizer_startup_hook(int argc, char **argv, char **envp,
+ void *stack_base, size_t stack_size) {
+ __sanitizer::StoredArgv = argv;
+ __sanitizer::StoredEnviron = envp;
+ __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
+ __sanitizer::MainThreadStackSize = stack_size;
+}
+
+void __sanitizer_set_report_path(const char *path) {
+ // Handle the initialization code in each sanitizer, but no other calls.
+ // This setting is never consulted on Fuchsia.
+ DCHECK_EQ(path, common_flags()->log_path);
+}
+
+void __sanitizer_set_report_fd(void *fd) {
+ UNREACHABLE("not available on Fuchsia");
+}
+} // extern "C"
+
+#endif // SANITIZER_FUCHSIA
diff --git a/lib/tsan/sanitizer_common/sanitizer_fuchsia.h b/lib/tsan/sanitizer_common/sanitizer_fuchsia.h
new file mode 100644
index 0000000000..96f9cde7ef
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_fuchsia.h
@@ -0,0 +1,36 @@
+//===-- sanitizer_fuchsia.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// Fuchsia-specific sanitizer support.
+//
+//===---------------------------------------------------------------------===//
+#ifndef SANITIZER_FUCHSIA_H
+#define SANITIZER_FUCHSIA_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls/object.h>
+
+namespace __sanitizer {
+
+extern uptr MainThreadStackBase, MainThreadStackSize;
+extern sanitizer_shadow_bounds_t ShadowBounds;
+
+struct MemoryMappingLayoutData {
+ InternalMmapVector<zx_info_maps_t> data;
+ size_t current; // Current index into the vector.
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
+#endif // SANITIZER_FUCHSIA_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_getauxval.h b/lib/tsan/sanitizer_common/sanitizer_getauxval.h
new file mode 100644
index 0000000000..86ad3a5e2c
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_getauxval.h
@@ -0,0 +1,59 @@
+//===-- sanitizer_getauxval.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common getauxval() guards and definitions.
+// getauxval() is not defined until glibc version 2.16, or until API level 21
+// for Android.
+// Implement the getauxval() compat function for NetBSD.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_GETAUXVAL_H
+#define SANITIZER_GETAUXVAL_H
+
+#include "sanitizer_platform.h"
+#include "sanitizer_glibc_version.h"
+
+#if SANITIZER_LINUX || SANITIZER_FUCHSIA
+
+# if __GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
+ SANITIZER_FUCHSIA
+# define SANITIZER_USE_GETAUXVAL 1
+# else
+# define SANITIZER_USE_GETAUXVAL 0
+# endif
+
+# if SANITIZER_USE_GETAUXVAL
+# include <sys/auxv.h>
+# else
+// The weak getauxval definition allows to check for the function at runtime.
+// This is useful for Android, when compiled at a lower API level yet running
+// on a more recent platform that offers the function.
+extern "C" SANITIZER_WEAK_ATTRIBUTE unsigned long getauxval(unsigned long type);
+# endif
+
+#elif SANITIZER_NETBSD
+
+#define SANITIZER_USE_GETAUXVAL 1
+
+#include <dlfcn.h>
+#include <elf.h>
+
+static inline decltype(AuxInfo::a_v) getauxval(decltype(AuxInfo::a_type) type) {
+ for (const AuxInfo *aux = (const AuxInfo *)_dlauxinfo();
+ aux->a_type != AT_NULL; ++aux) {
+ if (type == aux->a_type)
+ return aux->a_v;
+ }
+
+ return 0;
+}
+
+#endif
+
+#endif // SANITIZER_GETAUXVAL_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_glibc_version.h b/lib/tsan/sanitizer_common/sanitizer_glibc_version.h
new file mode 100644
index 0000000000..47175f20aa
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_glibc_version.h
@@ -0,0 +1,26 @@
+//===-- sanitizer_glibc_version.h -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_GLIBC_VERSION_H
+#define SANITIZER_GLIBC_VERSION_H
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_FUCHSIA
+#include <features.h>
+#endif
+
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_hash.h b/lib/tsan/sanitizer_common/sanitizer_hash.h
new file mode 100644
index 0000000000..3d97dcc5d2
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_hash.h
@@ -0,0 +1,43 @@
+//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a simple hash function.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_HASH_H
+#define SANITIZER_HASH_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+class MurMur2HashBuilder {
+ static const u32 m = 0x5bd1e995;
+ static const u32 seed = 0x9747b28c;
+ static const u32 r = 24;
+ u32 h;
+
+ public:
+ explicit MurMur2HashBuilder(u32 init = 0) { h = seed ^ init; }
+ void add(u32 k) {
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ u32 get() {
+ u32 x = h;
+ x ^= x >> 13;
+ x *= m;
+ x ^= x >> 15;
+ return x;
+ }
+};
+} //namespace __sanitizer
+
+#endif // SANITIZER_HASH_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc b/lib/tsan/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
new file mode 100644
index 0000000000..576807ea3a
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
@@ -0,0 +1,1535 @@
+//===-- sanitizer_interceptors_ioctl_netbsd.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Ioctl handling in common sanitizer interceptors.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_flags.h"
+
+struct ioctl_desc {
+ unsigned req;
+ // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
+ // same thing.
+ // XXX: The declarations below may use WRITE instead of READWRITE, unless
+ // explicitly noted.
+ enum { NONE, READ, WRITE, READWRITE, CUSTOM } type : 3;
+ unsigned size : 29;
+ const char *name;
+};
+
+const unsigned ioctl_table_max = 1238;
+static ioctl_desc ioctl_table[ioctl_table_max];
+static unsigned ioctl_table_size = 0;
+
+// This can not be declared as a global, because references to struct_*_sz
+// require a global initializer. And this table must be available before global
+// initializers are run.
+static void ioctl_table_fill() {
+#define _(rq, tp, sz) \
+ if (IOCTL_##rq != IOCTL_NOT_PRESENT) { \
+ CHECK(ioctl_table_size < ioctl_table_max); \
+ ioctl_table[ioctl_table_size].req = IOCTL_##rq; \
+ ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \
+ ioctl_table[ioctl_table_size].size = sz; \
+ ioctl_table[ioctl_table_size].name = #rq; \
+ ++ioctl_table_size; \
+ }
+
+ /* Entries from file: altq/altq_afmap.h */
+ _(AFM_ADDFMAP, READWRITE, struct_atm_flowmap_sz);
+ _(AFM_DELFMAP, READWRITE, struct_atm_flowmap_sz);
+ _(AFM_CLEANFMAP, READWRITE, struct_atm_flowmap_sz);
+ _(AFM_GETFMAP, READWRITE, struct_atm_flowmap_sz);
+ /* Entries from file: altq/altq.h */
+ _(ALTQGTYPE, READWRITE, struct_altqreq_sz);
+ _(ALTQTBRSET, READ, struct_tbrreq_sz);
+ _(ALTQTBRGET, READWRITE, struct_tbrreq_sz);
+ /* Entries from file: altq/altq_blue.h */
+ _(BLUE_IF_ATTACH, READ, struct_blue_interface_sz);
+ _(BLUE_DISABLE, READ, struct_blue_interface_sz);
+ _(BLUE_CONFIG, READWRITE, struct_blue_conf_sz);
+ _(BLUE_GETSTATS, READWRITE, struct_blue_stats_sz);
+ /* Entries from file: altq/altq_cbq.h */
+ _(CBQ_ENABLE, READ, struct_cbq_interface_sz);
+ _(CBQ_ADD_CLASS, READWRITE, struct_cbq_add_class_sz);
+ _(CBQ_DEL_CLASS, READ, struct_cbq_delete_class_sz);
+ _(CBQ_MODIFY_CLASS, READWRITE, struct_cbq_modify_class_sz);
+ _(CBQ_DEL_FILTER, READ, struct_cbq_delete_filter_sz);
+ _(CBQ_GETSTATS, READWRITE, struct_cbq_getstats_sz);
+ /* Entries from file: altq/altq_cdnr.h */
+ _(CDNR_IF_DETACH, READ, struct_cdnr_interface_sz);
+ _(CDNR_ADD_FILTER, READWRITE, struct_cdnr_add_filter_sz);
+ _(CDNR_GETSTATS, READWRITE, struct_cdnr_get_stats_sz);
+ _(CDNR_ADD_ELEM, READWRITE, struct_cdnr_add_element_sz);
+ _(CDNR_DEL_ELEM, READ, struct_cdnr_delete_element_sz);
+ _(CDNR_ADD_TBM, READWRITE, struct_cdnr_add_tbmeter_sz);
+ _(CDNR_MOD_TBM, READ, struct_cdnr_modify_tbmeter_sz);
+ _(CDNR_TBM_STATS, READWRITE, struct_cdnr_tbmeter_stats_sz);
+ _(CDNR_ADD_TCM, READWRITE, struct_cdnr_add_trtcm_sz);
+ _(CDNR_MOD_TCM, READWRITE, struct_cdnr_modify_trtcm_sz);
+ _(CDNR_TCM_STATS, READWRITE, struct_cdnr_tcm_stats_sz);
+ _(CDNR_ADD_TSW, READWRITE, struct_cdnr_add_tswtcm_sz);
+ _(CDNR_MOD_TSW, READWRITE, struct_cdnr_modify_tswtcm_sz);
+ /* Entries from file: altq/altq_fifoq.h */
+ _(FIFOQ_CONFIG, READWRITE, struct_fifoq_conf_sz);
+ _(FIFOQ_GETSTATS, READWRITE, struct_fifoq_getstats_sz);
+ /* Entries from file: altq/altq_hfsc.h */
+ _(HFSC_CLEAR_HIERARCHY, READ, struct_hfsc_interface_sz);
+ _(HFSC_ADD_CLASS, READWRITE, struct_hfsc_add_class_sz);
+ _(HFSC_GETSTATS, READWRITE, struct_hfsc_class_stats_sz);
+ /* Entries from file: altq/altq_jobs.h */
+ _(JOBS_IF_ATTACH, READ, struct_jobs_attach_sz);
+ _(JOBS_IF_DETACH, READ, struct_jobs_interface_sz);
+ _(JOBS_ENABLE, READ, struct_jobs_interface_sz);
+ _(JOBS_DISABLE, READ, struct_jobs_interface_sz);
+ _(JOBS_CLEAR, READ, struct_jobs_interface_sz);
+ _(JOBS_ADD_CLASS, READWRITE, struct_jobs_add_class_sz);
+ _(JOBS_MOD_CLASS, READ, struct_jobs_modify_class_sz);
+ /* Entries from file: altq/altq_priq.h */
+ _(PRIQ_IF_ATTACH, READ, struct_priq_interface_sz);
+ _(PRIQ_CLEAR, READ, struct_priq_interface_sz);
+ _(PRIQ_ADD_CLASS, READWRITE, struct_priq_add_class_sz);
+ _(PRIQ_DEL_CLASS, READ, struct_priq_delete_class_sz);
+ _(PRIQ_MOD_CLASS, READ, struct_priq_modify_class_sz);
+ _(PRIQ_ADD_FILTER, READWRITE, struct_priq_add_filter_sz);
+ _(PRIQ_DEL_FILTER, READ, struct_priq_delete_filter_sz);
+ _(PRIQ_GETSTATS, READWRITE, struct_priq_class_stats_sz);
+ /* Entries from file: altq/altq_red.h */
+ _(RED_CONFIG, READWRITE, struct_red_conf_sz);
+ _(RED_GETSTATS, READWRITE, struct_red_stats_sz);
+ _(RED_SETDEFAULTS, READ, struct_redparams_sz);
+ /* Entries from file: altq/altq_rio.h */
+ _(RIO_CONFIG, READWRITE, struct_rio_conf_sz);
+ _(RIO_GETSTATS, READWRITE, struct_rio_stats_sz);
+ _(RIO_SETDEFAULTS, READ, struct_redparams_sz);
+ /* Entries from file: altq/altq_wfq.h */
+ _(WFQ_CONFIG, READWRITE, struct_wfq_conf_sz);
+ _(WFQ_GET_QID, READWRITE, struct_wfq_getqid_sz);
+ _(WFQ_SET_WEIGHT, READWRITE, struct_wfq_setweight_sz);
+ /* Entries from file: crypto/cryptodev.h */
+ _(CRIOGET, READWRITE, sizeof(u32));
+ _(CIOCFSESSION, READ, sizeof(u32));
+ _(CIOCKEY, READWRITE, struct_crypt_kop_sz);
+ _(CIOCNFKEYM, READWRITE, struct_crypt_mkop_sz);
+ _(CIOCNFSESSION, READ, struct_crypt_sfop_sz);
+ _(CIOCNCRYPTRETM, READWRITE, struct_cryptret_sz);
+ _(CIOCNCRYPTRET, READWRITE, struct_crypt_result_sz);
+ _(CIOCGSESSION, READWRITE, struct_session_op_sz);
+ _(CIOCNGSESSION, READWRITE, struct_crypt_sgop_sz);
+ _(CIOCCRYPT, READWRITE, struct_crypt_op_sz);
+ _(CIOCNCRYPTM, READWRITE, struct_crypt_mop_sz);
+ _(CIOCASYMFEAT, WRITE, sizeof(u32));
+ /* Entries from file: dev/apm/apmio.h */
+ _(APM_IOC_REJECT, READ, struct_apm_event_info_sz);
+ _(OAPM_IOC_GETPOWER, WRITE, struct_apm_power_info_sz);
+ _(APM_IOC_GETPOWER, READWRITE, struct_apm_power_info_sz);
+ _(APM_IOC_NEXTEVENT, WRITE, struct_apm_event_info_sz);
+ _(APM_IOC_DEV_CTL, READ, struct_apm_ctl_sz);
+ /* Entries from file: dev/dm/netbsd-dm.h */
+ _(NETBSD_DM_IOCTL, READWRITE, struct_plistref_sz);
+ /* Entries from file: dev/dmover/dmover_io.h */
+ _(DMIO_SETFUNC, READ, struct_dmio_setfunc_sz);
+ /* Entries from file: dev/dtv/dtvio_demux.h */
+ _(DMX_START, NONE, 0);
+ _(DMX_STOP, NONE, 0);
+ _(DMX_SET_FILTER, READ, struct_dmx_sct_filter_params_sz);
+ _(DMX_SET_PES_FILTER, READ, struct_dmx_pes_filter_params_sz);
+ _(DMX_SET_BUFFER_SIZE, NONE, 0);
+ _(DMX_GET_STC, READWRITE, struct_dmx_stc_sz);
+ _(DMX_ADD_PID, READ, sizeof(u16));
+ _(DMX_REMOVE_PID, READ, sizeof(u16));
+ _(DMX_GET_CAPS, WRITE, struct_dmx_caps_sz);
+ _(DMX_SET_SOURCE, READ, enum_dmx_source_sz);
+ /* Entries from file: dev/dtv/dtvio_frontend.h */
+ _(FE_READ_STATUS, WRITE, enum_fe_status_sz);
+ _(FE_READ_BER, WRITE, sizeof(u32));
+ _(FE_READ_SNR, WRITE, sizeof(u16));
+ _(FE_READ_SIGNAL_STRENGTH, WRITE, sizeof(u16));
+ _(FE_READ_UNCORRECTED_BLOCKS, WRITE, sizeof(u32));
+ _(FE_SET_FRONTEND, READWRITE, struct_dvb_frontend_parameters_sz);
+ _(FE_GET_FRONTEND, WRITE, struct_dvb_frontend_parameters_sz);
+ _(FE_GET_EVENT, WRITE, struct_dvb_frontend_event_sz);
+ _(FE_GET_INFO, WRITE, struct_dvb_frontend_info_sz);
+ _(FE_DISEQC_RESET_OVERLOAD, NONE, 0);
+ _(FE_DISEQC_SEND_MASTER_CMD, READ, struct_dvb_diseqc_master_cmd_sz);
+ _(FE_DISEQC_RECV_SLAVE_REPLY, WRITE, struct_dvb_diseqc_slave_reply_sz);
+ _(FE_DISEQC_SEND_BURST, READ, enum_fe_sec_mini_cmd_sz);
+ _(FE_SET_TONE, READ, enum_fe_sec_tone_mode_sz);
+ _(FE_SET_VOLTAGE, READ, enum_fe_sec_voltage_sz);
+ _(FE_ENABLE_HIGH_LNB_VOLTAGE, READ, sizeof(int));
+ _(FE_SET_FRONTEND_TUNE_MODE, READ, sizeof(unsigned int));
+ _(FE_DISHNETWORK_SEND_LEGACY_CMD, READ, sizeof(unsigned long));
+ /* Entries from file: dev/hdaudio/hdaudioio.h */
+ _(HDAUDIO_FGRP_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_GETCONFIG, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_SETCONFIG, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_WIDGET_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_CODEC_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_AFG_WIDGET_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_AFG_CODEC_INFO, READWRITE, struct_plistref_sz);
+ /* Entries from file: dev/hdmicec/hdmicecio.h */
+ _(CEC_GET_PHYS_ADDR, WRITE, sizeof(u16));
+ _(CEC_GET_LOG_ADDRS, WRITE, sizeof(u16));
+ _(CEC_SET_LOG_ADDRS, READ, sizeof(u16));
+ _(CEC_GET_VENDOR_ID, WRITE, sizeof(u32));
+ /* Entries from file: dev/hpc/hpcfbio.h */
+ _(HPCFBIO_GCONF, READWRITE, struct_hpcfb_fbconf_sz);
+ _(HPCFBIO_SCONF, READ, struct_hpcfb_fbconf_sz);
+ _(HPCFBIO_GDSPCONF, READWRITE, struct_hpcfb_dspconf_sz);
+ _(HPCFBIO_SDSPCONF, READ, struct_hpcfb_dspconf_sz);
+ _(HPCFBIO_GOP, WRITE, struct_hpcfb_dsp_op_sz);
+ _(HPCFBIO_SOP, READWRITE, struct_hpcfb_dsp_op_sz);
+ /* Entries from file: dev/i2o/iopio.h */
+ _(IOPIOCPT, READWRITE, struct_ioppt_sz);
+ _(IOPIOCGLCT, READWRITE, struct_iovec_sz);
+ _(IOPIOCGSTATUS, READWRITE, struct_iovec_sz);
+ _(IOPIOCRECONFIG, NONE, 0);
+ _(IOPIOCGTIDMAP, READWRITE, struct_iovec_sz);
+ /* Entries from file: dev/ic/athioctl.h */
+ _(SIOCGATHSTATS, READWRITE, struct_ifreq_sz);
+ _(SIOCGATHDIAG, READWRITE, struct_ath_diag_sz);
+ /* Entries from file: dev/ic/bt8xx.h */
+ _(METEORCAPTUR, READ, sizeof(int));
+ _(METEORCAPFRM, READ, struct_meteor_capframe_sz);
+ _(METEORSETGEO, READ, struct_meteor_geomet_sz);
+ _(METEORGETGEO, WRITE, struct_meteor_geomet_sz);
+ _(METEORSTATUS, WRITE, sizeof(unsigned short));
+ _(METEORSHUE, READ, sizeof(signed char));
+ _(METEORGHUE, WRITE, sizeof(signed char));
+ _(METEORSFMT, READ, sizeof(unsigned int));
+ _(METEORGFMT, WRITE, sizeof(unsigned int));
+ _(METEORSINPUT, READ, sizeof(unsigned int));
+ _(METEORGINPUT, WRITE, sizeof(unsigned int));
+ _(METEORSCHCV, READ, sizeof(unsigned char));
+ _(METEORGCHCV, WRITE, sizeof(unsigned char));
+ _(METEORSCOUNT, READ, struct_meteor_counts_sz);
+ _(METEORGCOUNT, WRITE, struct_meteor_counts_sz);
+ _(METEORSFPS, READ, sizeof(unsigned short));
+ _(METEORGFPS, WRITE, sizeof(unsigned short));
+ _(METEORSSIGNAL, READ, sizeof(unsigned int));
+ _(METEORGSIGNAL, WRITE, sizeof(unsigned int));
+ _(METEORSVIDEO, READ, struct_meteor_video_sz);
+ _(METEORGVIDEO, WRITE, struct_meteor_video_sz);
+ _(METEORSBRIG, READ, sizeof(unsigned char));
+ _(METEORGBRIG, WRITE, sizeof(unsigned char));
+ _(METEORSCSAT, READ, sizeof(unsigned char));
+ _(METEORGCSAT, WRITE, sizeof(unsigned char));
+ _(METEORSCONT, READ, sizeof(unsigned char));
+ _(METEORGCONT, WRITE, sizeof(unsigned char));
+ _(METEORSHWS, READ, sizeof(unsigned char));
+ _(METEORGHWS, WRITE, sizeof(unsigned char));
+ _(METEORSVWS, READ, sizeof(unsigned char));
+ _(METEORGVWS, WRITE, sizeof(unsigned char));
+ _(METEORSTS, READ, sizeof(unsigned char));
+ _(METEORGTS, WRITE, sizeof(unsigned char));
+ _(TVTUNER_SETCHNL, READ, sizeof(unsigned int));
+ _(TVTUNER_GETCHNL, WRITE, sizeof(unsigned int));
+ _(TVTUNER_SETTYPE, READ, sizeof(unsigned int));
+ _(TVTUNER_GETTYPE, WRITE, sizeof(unsigned int));
+ _(TVTUNER_GETSTATUS, WRITE, sizeof(unsigned int));
+ _(TVTUNER_SETFREQ, READ, sizeof(unsigned int));
+ _(TVTUNER_GETFREQ, WRITE, sizeof(unsigned int));
+ _(TVTUNER_SETAFC, READ, sizeof(int));
+ _(TVTUNER_GETAFC, WRITE, sizeof(int));
+ _(RADIO_SETMODE, READ, sizeof(unsigned int));
+ _(RADIO_GETMODE, WRITE, sizeof(unsigned char));
+ _(RADIO_SETFREQ, READ, sizeof(unsigned int));
+ _(RADIO_GETFREQ, WRITE, sizeof(unsigned int));
+ _(METEORSACTPIXFMT, READ, sizeof(int));
+ _(METEORGACTPIXFMT, WRITE, sizeof(int));
+ _(METEORGSUPPIXFMT, READWRITE, struct_meteor_pixfmt_sz);
+ _(TVTUNER_GETCHNLSET, READWRITE, struct_bktr_chnlset_sz);
+ _(REMOTE_GETKEY, WRITE, struct_bktr_remote_sz);
+ /* Entries from file: dev/ic/icp_ioctl.h */
+ _(GDT_IOCTL_GENERAL, READWRITE, struct_gdt_ucmd_sz);
+ _(GDT_IOCTL_DRVERS, WRITE, sizeof(int));
+ _(GDT_IOCTL_CTRTYPE, READWRITE, struct_gdt_ctrt_sz);
+ _(GDT_IOCTL_OSVERS, WRITE, struct_gdt_osv_sz);
+ _(GDT_IOCTL_CTRCNT, WRITE, sizeof(int));
+ _(GDT_IOCTL_EVENT, READWRITE, struct_gdt_event_sz);
+ _(GDT_IOCTL_STATIST, WRITE, struct_gdt_statist_sz);
+ _(GDT_IOCTL_RESCAN, READWRITE, struct_gdt_rescan_sz);
+ /* Entries from file: dev/ic/isp_ioctl.h */
+ _(ISP_SDBLEV, READWRITE, sizeof(int));
+ _(ISP_RESETHBA, NONE, 0);
+ _(ISP_RESCAN, NONE, 0);
+ _(ISP_SETROLE, READWRITE, sizeof(int));
+ _(ISP_GETROLE, WRITE, sizeof(int));
+ _(ISP_GET_STATS, WRITE, struct_isp_stats_sz);
+ _(ISP_CLR_STATS, NONE, 0);
+ _(ISP_FC_LIP, NONE, 0);
+ _(ISP_FC_GETDINFO, READWRITE, struct_isp_fc_device_sz);
+ _(ISP_GET_FW_CRASH_DUMP, NONE, 0);
+ _(ISP_FORCE_CRASH_DUMP, NONE, 0);
+ _(ISP_FC_GETHINFO, READWRITE, struct_isp_hba_device_sz);
+ _(ISP_TSK_MGMT, READWRITE, struct_isp_fc_tsk_mgmt_sz);
+ _(ISP_FC_GETDLIST, NONE, 0);
+ /* Entries from file: dev/ic/mlxio.h */
+ _(MLXD_STATUS, WRITE, sizeof(int));
+ _(MLXD_CHECKASYNC, WRITE, sizeof(int));
+ _(MLXD_DETACH, READ, sizeof(int));
+ _(MLX_RESCAN_DRIVES, NONE, 0);
+ _(MLX_PAUSE_CHANNEL, READ, struct_mlx_pause_sz);
+ _(MLX_COMMAND, READWRITE, struct_mlx_usercommand_sz);
+ _(MLX_REBUILDASYNC, READWRITE, struct_mlx_rebuild_request_sz);
+ _(MLX_REBUILDSTAT, WRITE, struct_mlx_rebuild_status_sz);
+ _(MLX_GET_SYSDRIVE, READWRITE, sizeof(int));
+ _(MLX_GET_CINFO, WRITE, struct_mlx_cinfo_sz);
+ /* Entries from file: dev/ic/nvmeio.h */
+ _(NVME_PASSTHROUGH_CMD, READWRITE, struct_nvme_pt_command_sz);
+ /* Entries from file: dev/ic/qemufwcfgio.h */
+ _(FWCFGIO_SET_INDEX, READ, sizeof(u16));
+ /* Entries from file: dev/ir/irdaio.h */
+ _(IRDA_RESET_PARAMS, NONE, 0);
+ _(IRDA_SET_PARAMS, READ, struct_irda_params_sz);
+ _(IRDA_GET_SPEEDMASK, WRITE, sizeof(unsigned int));
+ _(IRDA_GET_TURNAROUNDMASK, WRITE, sizeof(unsigned int));
+ _(IRFRAMETTY_GET_DEVICE, WRITE, sizeof(unsigned int));
+ _(IRFRAMETTY_GET_DONGLE, WRITE, sizeof(unsigned int));
+ _(IRFRAMETTY_SET_DONGLE, READ, sizeof(unsigned int));
+ /* Entries from file: dev/isa/isvio.h */
+ _(ISV_CMD, READWRITE, struct_isv_cmd_sz);
+ /* Entries from file: dev/isa/wtreg.h */
+ _(WTQICMD, NONE, 0);
+ /* Entries from file: dev/iscsi/iscsi_ioctl.h */
+ _(ISCSI_GET_VERSION, READWRITE, struct_iscsi_get_version_parameters_sz);
+ _(ISCSI_LOGIN, READWRITE, struct_iscsi_login_parameters_sz);
+ _(ISCSI_LOGOUT, READWRITE, struct_iscsi_logout_parameters_sz);
+ _(ISCSI_ADD_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz);
+ _(ISCSI_RESTORE_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz);
+ _(ISCSI_REMOVE_CONNECTION, READWRITE, struct_iscsi_remove_parameters_sz);
+ _(ISCSI_CONNECTION_STATUS, READWRITE, struct_iscsi_conn_status_parameters_sz);
+ _(ISCSI_SEND_TARGETS, READWRITE, struct_iscsi_send_targets_parameters_sz);
+ _(ISCSI_SET_NODE_NAME, READWRITE, struct_iscsi_set_node_name_parameters_sz);
+ _(ISCSI_IO_COMMAND, READWRITE, struct_iscsi_iocommand_parameters_sz);
+ _(ISCSI_REGISTER_EVENT, READWRITE, struct_iscsi_register_event_parameters_sz);
+ _(ISCSI_DEREGISTER_EVENT, READWRITE,
+ struct_iscsi_register_event_parameters_sz);
+ _(ISCSI_WAIT_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz);
+ _(ISCSI_POLL_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz);
+ /* Entries from file: dev/ofw/openfirmio.h */
+ _(OFIOCGET, READWRITE, struct_ofiocdesc_sz);
+ _(OFIOCSET, READ, struct_ofiocdesc_sz);
+ _(OFIOCNEXTPROP, READWRITE, struct_ofiocdesc_sz);
+ _(OFIOCGETOPTNODE, WRITE, sizeof(int));
+ _(OFIOCGETNEXT, READWRITE, sizeof(int));
+ _(OFIOCGETCHILD, READWRITE, sizeof(int));
+ _(OFIOCFINDDEVICE, READWRITE, struct_ofiocdesc_sz);
+ /* Entries from file: dev/pci/amrio.h */
+ _(AMR_IO_VERSION, WRITE, sizeof(int));
+ _(AMR_IO_COMMAND, READWRITE, struct_amr_user_ioctl_sz);
+ /* Entries from file: dev/pci/mlyio.h */
+ _(MLYIO_COMMAND, READWRITE, struct_mly_user_command_sz);
+ _(MLYIO_HEALTH, READ, struct_mly_user_health_sz);
+ /* Entries from file: dev/pci/pciio.h */
+ _(PCI_IOC_CFGREAD, READWRITE, struct_pciio_cfgreg_sz);
+ _(PCI_IOC_CFGWRITE, READ, struct_pciio_cfgreg_sz);
+ _(PCI_IOC_BDF_CFGREAD, READWRITE, struct_pciio_bdf_cfgreg_sz);
+ _(PCI_IOC_BDF_CFGWRITE, READ, struct_pciio_bdf_cfgreg_sz);
+ _(PCI_IOC_BUSINFO, WRITE, struct_pciio_businfo_sz);
+ _(PCI_IOC_DRVNAME, READWRITE, struct_pciio_drvname_sz);
+ _(PCI_IOC_DRVNAMEONBUS, READWRITE, struct_pciio_drvnameonbus_sz);
+ /* Entries from file: dev/pci/tweio.h */
+ _(TWEIO_COMMAND, READWRITE, struct_twe_usercommand_sz);
+ _(TWEIO_STATS, READWRITE, union_twe_statrequest_sz);
+ _(TWEIO_AEN_POLL, WRITE, sizeof(int));
+ _(TWEIO_AEN_WAIT, WRITE, sizeof(int));
+ _(TWEIO_SET_PARAM, READ, struct_twe_paramcommand_sz);
+ _(TWEIO_GET_PARAM, READ, struct_twe_paramcommand_sz);
+ _(TWEIO_RESET, NONE, 0);
+ _(TWEIO_ADD_UNIT, READ, struct_twe_drivecommand_sz);
+ _(TWEIO_DEL_UNIT, READ, struct_twe_drivecommand_sz);
+ /* Entries from file: dev/pcmcia/if_cnwioctl.h */
+ _(SIOCSCNWDOMAIN, READ, struct_ifreq_sz);
+ _(SIOCGCNWDOMAIN, READWRITE, struct_ifreq_sz);
+ _(SIOCSCNWKEY, READWRITE, struct_ifreq_sz);
+ _(SIOCGCNWSTATUS, READWRITE, struct_cnwstatus_sz);
+ _(SIOCGCNWSTATS, READWRITE, struct_cnwistats_sz);
+ _(SIOCGCNWTRAIL, READWRITE, struct_cnwitrail_sz);
+ /* Entries from file: dev/pcmcia/if_rayreg.h */
+ _(SIOCGRAYSIGLEV, READWRITE, struct_ifreq_sz);
+ /* Entries from file: dev/raidframe/raidframeio.h */
+ _(RAIDFRAME_SHUTDOWN, NONE, 0);
+ _(RAIDFRAME_TUR, READ, sizeof(u64));
+ _(RAIDFRAME_FAIL_DISK, READ, struct_rf_recon_req_sz);
+ _(RAIDFRAME_CHECK_RECON_STATUS, READWRITE, sizeof(int));
+ _(RAIDFRAME_REWRITEPARITY, NONE, 0);
+ _(RAIDFRAME_COPYBACK, NONE, 0);
+ _(RAIDFRAME_SPARET_WAIT, WRITE, struct_RF_SparetWait_sz);
+ _(RAIDFRAME_SEND_SPARET, READ, sizeof(uptr));
+ _(RAIDFRAME_ABORT_SPARET_WAIT, NONE, 0);
+ _(RAIDFRAME_START_ATRACE, NONE, 0);
+ _(RAIDFRAME_STOP_ATRACE, NONE, 0);
+ _(RAIDFRAME_GET_SIZE, WRITE, sizeof(int));
+ _(RAIDFRAME_RESET_ACCTOTALS, NONE, 0);
+ _(RAIDFRAME_KEEP_ACCTOTALS, READ, sizeof(int));
+ _(RAIDFRAME_GET_COMPONENT_LABEL, READWRITE, struct_RF_ComponentLabel_sz);
+ _(RAIDFRAME_SET_COMPONENT_LABEL, READ, struct_RF_ComponentLabel_sz);
+ _(RAIDFRAME_INIT_LABELS, READ, struct_RF_ComponentLabel_sz);
+ _(RAIDFRAME_ADD_HOT_SPARE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_REMOVE_HOT_SPARE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_REBUILD_IN_PLACE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_CHECK_PARITY, READWRITE, sizeof(int));
+ _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS, READWRITE, sizeof(int));
+ _(RAIDFRAME_CHECK_COPYBACK_STATUS, READWRITE, sizeof(int));
+ _(RAIDFRAME_SET_AUTOCONFIG, READWRITE, sizeof(int));
+ _(RAIDFRAME_SET_ROOT, READWRITE, sizeof(int));
+ _(RAIDFRAME_DELETE_COMPONENT, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_INCORPORATE_HOT_SPARE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_CHECK_RECON_STATUS_EXT, READWRITE, struct_RF_ProgressInfo_sz);
+ _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT, READWRITE,
+ struct_RF_ProgressInfo_sz);
+ _(RAIDFRAME_CHECK_COPYBACK_STATUS_EXT, READWRITE, struct_RF_ProgressInfo_sz);
+ _(RAIDFRAME_PARITYMAP_STATUS, WRITE, struct_rf_pmstat_sz);
+ _(RAIDFRAME_PARITYMAP_GET_DISABLE, WRITE, sizeof(int));
+ _(RAIDFRAME_PARITYMAP_SET_DISABLE, READ, sizeof(int));
+ _(RAIDFRAME_PARITYMAP_SET_PARAMS, READ, struct_rf_pmparams_sz);
+ _(RAIDFRAME_SET_LAST_UNIT, READ, sizeof(int));
+ _(RAIDFRAME_GET_INFO, READWRITE, sizeof(uptr));
+ _(RAIDFRAME_CONFIGURE, READ, sizeof(uptr));
+ /* Entries from file: dev/sbus/mbppio.h */
+ _(MBPPIOCSPARAM, READ, struct_mbpp_param_sz);
+ _(MBPPIOCGPARAM, WRITE, struct_mbpp_param_sz);
+ _(MBPPIOCGSTAT, WRITE, sizeof(int));
+ /* Entries from file: dev/scsipi/ses.h */
+ _(SESIOC_GETNOBJ, NONE, 0);
+ _(SESIOC_GETOBJMAP, NONE, 0);
+ _(SESIOC_GETENCSTAT, NONE, 0);
+ _(SESIOC_SETENCSTAT, NONE, 0);
+ _(SESIOC_GETOBJSTAT, NONE, 0);
+ _(SESIOC_SETOBJSTAT, NONE, 0);
+ _(SESIOC_GETTEXT, NONE, 0);
+ _(SESIOC_INIT, NONE, 0);
+ /* Entries from file: dev/sun/disklabel.h */
+ _(SUN_DKIOCGGEOM, WRITE, struct_sun_dkgeom_sz);
+ _(SUN_DKIOCINFO, WRITE, struct_sun_dkctlr_sz);
+ _(SUN_DKIOCGPART, WRITE, struct_sun_dkpart_sz);
+ /* Entries from file: dev/sun/fbio.h */
+ _(FBIOGTYPE, WRITE, struct_fbtype_sz);
+ _(FBIOPUTCMAP, READ, struct_fbcmap_sz);
+ _(FBIOGETCMAP, READ, struct_fbcmap_sz);
+ _(FBIOGATTR, WRITE, struct_fbgattr_sz);
+ _(FBIOSVIDEO, READ, sizeof(int));
+ _(FBIOGVIDEO, WRITE, sizeof(int));
+ _(FBIOSCURSOR, READ, struct_fbcursor_sz);
+ _(FBIOGCURSOR, READWRITE, struct_fbcursor_sz);
+ _(FBIOSCURPOS, READ, struct_fbcurpos_sz);
+ _(FBIOGCURPOS, READ, struct_fbcurpos_sz);
+ _(FBIOGCURMAX, WRITE, struct_fbcurpos_sz);
+ /* Entries from file: dev/sun/kbio.h */
+ _(KIOCTRANS, READ, sizeof(int));
+ _(KIOCSETKEY, READWRITE, struct_okiockey_sz);
+ _(KIOCGETKEY, READWRITE, struct_okiockey_sz);
+ _(KIOCGTRANS, WRITE, sizeof(int));
+ _(KIOCCMD, READ, sizeof(int));
+ _(KIOCTYPE, WRITE, sizeof(int));
+ _(KIOCSDIRECT, READ, sizeof(int));
+ _(KIOCSKEY, READ, struct_kiockeymap_sz);
+ _(KIOCGKEY, READWRITE, struct_kiockeymap_sz);
+ _(KIOCSLED, READ, sizeof(char));
+ _(KIOCGLED, WRITE, sizeof(char));
+ _(KIOCLAYOUT, WRITE, sizeof(int));
+ /* Entries from file: dev/sun/vuid_event.h */
+ _(VUIDSFORMAT, READ, sizeof(int));
+ _(VUIDGFORMAT, WRITE, sizeof(int));
+ /* Entries from file: dev/tc/sticio.h */
+ _(STICIO_GXINFO, WRITE, struct_stic_xinfo_sz);
+ _(STICIO_RESET, NONE, 0);
+ _(STICIO_STARTQ, NONE, 0);
+ _(STICIO_STOPQ, NONE, 0);
+ /* Entries from file: dev/usb/ukyopon.h */
+ _(UKYOPON_IDENTIFY, WRITE, struct_ukyopon_identify_sz);
+ /* Entries from file: dev/usb/usb.h */
+ _(USB_REQUEST, READWRITE, struct_usb_ctl_request_sz);
+ _(USB_SETDEBUG, READ, sizeof(int));
+ _(USB_DISCOVER, NONE, 0);
+ _(USB_DEVICEINFO, READWRITE, struct_usb_device_info_sz);
+ _(USB_DEVICEINFO_OLD, READWRITE, struct_usb_device_info_old_sz);
+ _(USB_DEVICESTATS, WRITE, struct_usb_device_stats_sz);
+ _(USB_GET_REPORT_DESC, WRITE, struct_usb_ctl_report_desc_sz);
+ _(USB_SET_IMMED, READ, sizeof(int));
+ _(USB_GET_REPORT, READWRITE, struct_usb_ctl_report_sz);
+ _(USB_SET_REPORT, READ, struct_usb_ctl_report_sz);
+ _(USB_GET_REPORT_ID, WRITE, sizeof(int));
+ _(USB_GET_CONFIG, WRITE, sizeof(int));
+ _(USB_SET_CONFIG, READ, sizeof(int));
+ _(USB_GET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz);
+ _(USB_SET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz);
+ _(USB_GET_NO_ALT, READWRITE, struct_usb_alt_interface_sz);
+ _(USB_GET_DEVICE_DESC, WRITE, struct_usb_device_descriptor_sz);
+ _(USB_GET_CONFIG_DESC, READWRITE, struct_usb_config_desc_sz);
+ _(USB_GET_INTERFACE_DESC, READWRITE, struct_usb_interface_desc_sz);
+ _(USB_GET_ENDPOINT_DESC, READWRITE, struct_usb_endpoint_desc_sz);
+ _(USB_GET_FULL_DESC, READWRITE, struct_usb_full_desc_sz);
+ _(USB_GET_STRING_DESC, READWRITE, struct_usb_string_desc_sz);
+ _(USB_DO_REQUEST, READWRITE, struct_usb_ctl_request_sz);
+ _(USB_GET_DEVICEINFO, WRITE, struct_usb_device_info_sz);
+ _(USB_GET_DEVICEINFO_OLD, WRITE, struct_usb_device_info_old_sz);
+ _(USB_SET_SHORT_XFER, READ, sizeof(int));
+ _(USB_SET_TIMEOUT, READ, sizeof(int));
+ _(USB_SET_BULK_RA, READ, sizeof(int));
+ _(USB_SET_BULK_WB, READ, sizeof(int));
+ _(USB_SET_BULK_RA_OPT, READ, struct_usb_bulk_ra_wb_opt_sz);
+ _(USB_SET_BULK_WB_OPT, READ, struct_usb_bulk_ra_wb_opt_sz);
+ _(USB_GET_CM_OVER_DATA, WRITE, sizeof(int));
+ _(USB_SET_CM_OVER_DATA, READ, sizeof(int));
+ /* Entries from file: dev/usb/utoppy.h */
+ _(UTOPPYIOTURBO, READ, sizeof(int));
+ _(UTOPPYIOREBOOT, NONE, 0);
+ _(UTOPPYIOSTATS, WRITE, struct_utoppy_stats_sz);
+ _(UTOPPYIORENAME, READ, struct_utoppy_rename_sz);
+ _(UTOPPYIOMKDIR, READ, sizeof(uptr));
+ _(UTOPPYIODELETE, READ, sizeof(uptr));
+ _(UTOPPYIOREADDIR, READ, sizeof(uptr));
+ _(UTOPPYIOREADFILE, READ, struct_utoppy_readfile_sz);
+ _(UTOPPYIOWRITEFILE, READ, struct_utoppy_writefile_sz);
+ /* Entries from file: dev/vme/xio.h */
+ _(DIOSXDCMD, READWRITE, struct_xd_iocmd_sz);
+ /* Entries from file: dev/wscons/wsdisplay_usl_io.h */
+ _(VT_OPENQRY, WRITE, sizeof(int));
+ _(VT_SETMODE, READ, struct_vt_mode_sz);
+ _(VT_GETMODE, WRITE, struct_vt_mode_sz);
+ _(VT_RELDISP, NONE, 0);
+ _(VT_ACTIVATE, NONE, 0);
+ _(VT_WAITACTIVE, NONE, 0);
+ _(VT_GETACTIVE, WRITE, sizeof(int));
+ _(VT_GETSTATE, WRITE, struct_vt_stat_sz);
+ _(KDGETKBENT, READWRITE, struct_kbentry_sz);
+ _(KDGKBMODE, WRITE, sizeof(int));
+ _(KDSKBMODE, NONE, 0);
+ _(KDMKTONE, NONE, 0);
+ _(KDSETMODE, NONE, 0);
+ _(KDENABIO, NONE, 0);
+ _(KDDISABIO, NONE, 0);
+ _(KDGKBTYPE, WRITE, sizeof(char));
+ _(KDGETLED, WRITE, sizeof(int));
+ _(KDSETLED, NONE, 0);
+ _(KDSETRAD, NONE, 0);
+ _(VGAPCVTID, READWRITE, struct_pcvtid_sz);
+ _(CONS_GETVERS, WRITE, sizeof(int));
+ /* Entries from file: dev/wscons/wsconsio.h */
+ _(WSKBDIO_GTYPE, WRITE, sizeof(unsigned int));
+ _(WSKBDIO_BELL, NONE, 0);
+ _(WSKBDIO_COMPLEXBELL, READ, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_SETBELL, READ, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_GETBELL, WRITE, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_SETDEFAULTBELL, READ, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_GETDEFAULTBELL, WRITE, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_SETKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_GETKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_SETDEFAULTKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_GETDEFAULTKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_SETLEDS, READ, sizeof(int));
+ _(WSKBDIO_GETLEDS, WRITE, sizeof(int));
+ _(WSKBDIO_GETMAP, READWRITE, struct_wskbd_map_data_sz);
+ _(WSKBDIO_SETMAP, READ, struct_wskbd_map_data_sz);
+ _(WSKBDIO_GETENCODING, WRITE, sizeof(int));
+ _(WSKBDIO_SETENCODING, READ, sizeof(int));
+ _(WSKBDIO_SETMODE, READ, sizeof(int));
+ _(WSKBDIO_GETMODE, WRITE, sizeof(int));
+ _(WSKBDIO_SETKEYCLICK, READ, sizeof(int));
+ _(WSKBDIO_GETKEYCLICK, WRITE, sizeof(int));
+ _(WSKBDIO_GETSCROLL, WRITE, struct_wskbd_scroll_data_sz);
+ _(WSKBDIO_SETSCROLL, READ, struct_wskbd_scroll_data_sz);
+ _(WSKBDIO_SETVERSION, READ, sizeof(int));
+ _(WSMOUSEIO_GTYPE, WRITE, sizeof(unsigned int));
+ _(WSMOUSEIO_SRES, READ, sizeof(unsigned int));
+ _(WSMOUSEIO_SSCALE, READ, sizeof(unsigned int));
+ _(WSMOUSEIO_SRATE, READ, sizeof(unsigned int));
+ _(WSMOUSEIO_SCALIBCOORDS, READ, struct_wsmouse_calibcoords_sz);
+ _(WSMOUSEIO_GCALIBCOORDS, WRITE, struct_wsmouse_calibcoords_sz);
+ _(WSMOUSEIO_GETID, READWRITE, struct_wsmouse_id_sz);
+ _(WSMOUSEIO_GETREPEAT, WRITE, struct_wsmouse_repeat_sz);
+ _(WSMOUSEIO_SETREPEAT, READ, struct_wsmouse_repeat_sz);
+ _(WSMOUSEIO_SETVERSION, READ, sizeof(int));
+ _(WSDISPLAYIO_GTYPE, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_GINFO, WRITE, struct_wsdisplay_fbinfo_sz);
+ _(WSDISPLAYIO_GETCMAP, READ, struct_wsdisplay_cmap_sz);
+ _(WSDISPLAYIO_PUTCMAP, READ, struct_wsdisplay_cmap_sz);
+ _(WSDISPLAYIO_GVIDEO, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_SVIDEO, READ, sizeof(unsigned int));
+ _(WSDISPLAYIO_GCURPOS, WRITE, struct_wsdisplay_curpos_sz);
+ _(WSDISPLAYIO_SCURPOS, READ, struct_wsdisplay_curpos_sz);
+ _(WSDISPLAYIO_GCURMAX, WRITE, struct_wsdisplay_curpos_sz);
+ _(WSDISPLAYIO_GCURSOR, READWRITE, struct_wsdisplay_cursor_sz);
+ _(WSDISPLAYIO_SCURSOR, READ, struct_wsdisplay_cursor_sz);
+ _(WSDISPLAYIO_GMODE, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_SMODE, READ, sizeof(unsigned int));
+ _(WSDISPLAYIO_LDFONT, READ, struct_wsdisplay_font_sz);
+ _(WSDISPLAYIO_ADDSCREEN, READ, struct_wsdisplay_addscreendata_sz);
+ _(WSDISPLAYIO_DELSCREEN, READ, struct_wsdisplay_delscreendata_sz);
+ _(WSDISPLAYIO_SFONT, READ, struct_wsdisplay_usefontdata_sz);
+ _(_O_WSDISPLAYIO_SETKEYBOARD, READWRITE, struct_wsdisplay_kbddata_sz);
+ _(WSDISPLAYIO_GETPARAM, READWRITE, struct_wsdisplay_param_sz);
+ _(WSDISPLAYIO_SETPARAM, READWRITE, struct_wsdisplay_param_sz);
+ _(WSDISPLAYIO_GETACTIVESCREEN, WRITE, sizeof(int));
+ _(WSDISPLAYIO_GETWSCHAR, READWRITE, struct_wsdisplay_char_sz);
+ _(WSDISPLAYIO_PUTWSCHAR, READWRITE, struct_wsdisplay_char_sz);
+ _(WSDISPLAYIO_DGSCROLL, WRITE, struct_wsdisplay_scroll_data_sz);
+ _(WSDISPLAYIO_DSSCROLL, READ, struct_wsdisplay_scroll_data_sz);
+ _(WSDISPLAYIO_GMSGATTRS, WRITE, struct_wsdisplay_msgattrs_sz);
+ _(WSDISPLAYIO_SMSGATTRS, READ, struct_wsdisplay_msgattrs_sz);
+ _(WSDISPLAYIO_GBORDER, WRITE, sizeof(int));
+ _(WSDISPLAYIO_SBORDER, READ, sizeof(int));
+ _(WSDISPLAYIO_SSPLASH, READ, sizeof(int));
+ _(WSDISPLAYIO_SPROGRESS, READ, sizeof(int));
+ _(WSDISPLAYIO_LINEBYTES, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_SETVERSION, READ, sizeof(int));
+ _(WSMUXIO_ADD_DEVICE, READ, struct_wsmux_device_sz);
+ _(WSMUXIO_REMOVE_DEVICE, READ, struct_wsmux_device_sz);
+ _(WSMUXIO_LIST_DEVICES, READWRITE, struct_wsmux_device_list_sz);
+ _(WSMUXIO_INJECTEVENT, READ, struct_wscons_event_sz);
+ _(WSDISPLAYIO_GET_BUSID, WRITE, struct_wsdisplayio_bus_id_sz);
+ _(WSDISPLAYIO_GET_EDID, READWRITE, struct_wsdisplayio_edid_info_sz);
+ _(WSDISPLAYIO_SET_POLLING, READ, sizeof(int));
+ _(WSDISPLAYIO_GET_FBINFO, READWRITE, struct_wsdisplayio_fbinfo_sz);
+ _(WSDISPLAYIO_DOBLIT, READWRITE, struct_wsdisplayio_blit_sz);
+ _(WSDISPLAYIO_WAITBLIT, READWRITE, struct_wsdisplayio_blit_sz);
+ /* Entries from file: dev/biovar.h */
+ _(BIOCLOCATE, READWRITE, struct_bio_locate_sz);
+ _(BIOCINQ, READWRITE, struct_bioc_inq_sz);
+ _(BIOCDISK_NOVOL, READWRITE, struct_bioc_disk_sz);
+ _(BIOCDISK, READWRITE, struct_bioc_disk_sz);
+ _(BIOCVOL, READWRITE, struct_bioc_vol_sz);
+ _(BIOCALARM, READWRITE, struct_bioc_alarm_sz);
+ _(BIOCBLINK, READWRITE, struct_bioc_blink_sz);
+ _(BIOCSETSTATE, READWRITE, struct_bioc_setstate_sz);
+ _(BIOCVOLOPS, READWRITE, struct_bioc_volops_sz);
+ /* Entries from file: dev/md.h */
+ _(MD_GETCONF, WRITE, struct_md_conf_sz);
+ _(MD_SETCONF, READ, struct_md_conf_sz);
+ /* Entries from file: dev/ccdvar.h */
+ _(CCDIOCSET, READWRITE, struct_ccd_ioctl_sz);
+ _(CCDIOCCLR, READ, struct_ccd_ioctl_sz);
+ /* Entries from file: dev/cgdvar.h */
+ _(CGDIOCSET, READWRITE, struct_cgd_ioctl_sz);
+ _(CGDIOCCLR, READ, struct_cgd_ioctl_sz);
+ _(CGDIOCGET, READWRITE, struct_cgd_user_sz);
+ /* Entries from file: dev/fssvar.h */
+ _(FSSIOCSET, READ, struct_fss_set_sz);
+ _(FSSIOCGET, WRITE, struct_fss_get_sz);
+ _(FSSIOCCLR, NONE, 0);
+ _(FSSIOFSET, READ, sizeof(int));
+ _(FSSIOFGET, WRITE, sizeof(int));
+ /* Entries from file: dev/bluetooth/btdev.h */
+ _(BTDEV_ATTACH, READ, struct_plistref_sz);
+ _(BTDEV_DETACH, READ, struct_plistref_sz);
+ /* Entries from file: dev/bluetooth/btsco.h */
+ _(BTSCO_GETINFO, WRITE, struct_btsco_info_sz);
+ /* Entries from file: dev/kttcpio.h */
+ _(KTTCP_IO_SEND, READWRITE, struct_kttcp_io_args_sz);
+ _(KTTCP_IO_RECV, READWRITE, struct_kttcp_io_args_sz);
+ /* Entries from file: dev/lockstat.h */
+ _(IOC_LOCKSTAT_GVERSION, WRITE, sizeof(int));
+ _(IOC_LOCKSTAT_ENABLE, READ, struct_lsenable_sz);
+ _(IOC_LOCKSTAT_DISABLE, WRITE, struct_lsdisable_sz);
+ /* Entries from file: dev/vndvar.h */
+ _(VNDIOCSET, READWRITE, struct_vnd_ioctl_sz);
+ _(VNDIOCCLR, READ, struct_vnd_ioctl_sz);
+ _(VNDIOCGET, READWRITE, struct_vnd_user_sz);
+ /* Entries from file: dev/spkrio.h */
+ _(SPKRTONE, READ, struct_tone_sz);
+ _(SPKRTUNE, NONE, 0);
+ _(SPKRGETVOL, WRITE, sizeof(unsigned int));
+ _(SPKRSETVOL, READ, sizeof(unsigned int));
+#if defined(__x86_64__)
+ /* Entries from file: dev/nvmm/nvmm_ioctl.h */
+ _(NVMM_IOC_CAPABILITY, WRITE, struct_nvmm_ioc_capability_sz);
+ _(NVMM_IOC_MACHINE_CREATE, READWRITE, struct_nvmm_ioc_machine_create_sz);
+ _(NVMM_IOC_MACHINE_DESTROY, READ, struct_nvmm_ioc_machine_destroy_sz);
+ _(NVMM_IOC_MACHINE_CONFIGURE, READ, struct_nvmm_ioc_machine_configure_sz);
+ _(NVMM_IOC_VCPU_CREATE, READ, struct_nvmm_ioc_vcpu_create_sz);
+ _(NVMM_IOC_VCPU_DESTROY, READ, struct_nvmm_ioc_vcpu_destroy_sz);
+ _(NVMM_IOC_VCPU_CONFIGURE, READ, struct_nvmm_ioc_vcpu_configure_sz);
+ _(NVMM_IOC_VCPU_SETSTATE, READ, struct_nvmm_ioc_vcpu_setstate_sz);
+ _(NVMM_IOC_VCPU_GETSTATE, READ, struct_nvmm_ioc_vcpu_getstate_sz);
+ _(NVMM_IOC_VCPU_INJECT, READ, struct_nvmm_ioc_vcpu_inject_sz);
+ _(NVMM_IOC_VCPU_RUN, READWRITE, struct_nvmm_ioc_vcpu_run_sz);
+ _(NVMM_IOC_GPA_MAP, READ, struct_nvmm_ioc_gpa_map_sz);
+ _(NVMM_IOC_GPA_UNMAP, READ, struct_nvmm_ioc_gpa_unmap_sz);
+ _(NVMM_IOC_HVA_MAP, READ, struct_nvmm_ioc_hva_map_sz);
+ _(NVMM_IOC_HVA_UNMAP, READ, struct_nvmm_ioc_hva_unmap_sz);
+ _(NVMM_IOC_CTL, READ, struct_nvmm_ioc_ctl_sz);
+#endif
+ /* Entries from file: dev/spi/spi_io.h */
+ _(SPI_IOCTL_CONFIGURE, READ, struct_spi_ioctl_configure_sz);
+ _(SPI_IOCTL_TRANSFER, READ, struct_spi_ioctl_transfer_sz);
+ /* Entries from file: fs/autofs/autofs_ioctl.h */
+ _(AUTOFSREQUEST, WRITE, struct_autofs_daemon_request_sz);
+ _(AUTOFSDONE, READ, struct_autofs_daemon_done_sz);
+ /* Entries from file: net/bpf.h */
+ _(BIOCGBLEN, WRITE, sizeof(unsigned int));
+ _(BIOCSBLEN, READWRITE, sizeof(unsigned int));
+ _(BIOCSETF, READ, struct_bpf_program_sz);
+ _(BIOCFLUSH, NONE, 0);
+ _(BIOCPROMISC, NONE, 0);
+ _(BIOCGDLT, WRITE, sizeof(unsigned int));
+ _(BIOCGETIF, WRITE, struct_ifreq_sz);
+ _(BIOCSETIF, READ, struct_ifreq_sz);
+ _(BIOCGSTATS, WRITE, struct_bpf_stat_sz);
+ _(BIOCGSTATSOLD, WRITE, struct_bpf_stat_old_sz);
+ _(BIOCIMMEDIATE, READ, sizeof(unsigned int));
+ _(BIOCVERSION, WRITE, struct_bpf_version_sz);
+ _(BIOCSTCPF, READ, struct_bpf_program_sz);
+ _(BIOCSUDPF, READ, struct_bpf_program_sz);
+ _(BIOCGHDRCMPLT, WRITE, sizeof(unsigned int));
+ _(BIOCSHDRCMPLT, READ, sizeof(unsigned int));
+ _(BIOCSDLT, READ, sizeof(unsigned int));
+ _(BIOCGDLTLIST, READWRITE, struct_bpf_dltlist_sz);
+ _(BIOCGDIRECTION, WRITE, sizeof(unsigned int));
+ _(BIOCSDIRECTION, READ, sizeof(unsigned int));
+ _(BIOCSRTIMEOUT, READ, struct_timeval_sz);
+ _(BIOCGRTIMEOUT, WRITE, struct_timeval_sz);
+ _(BIOCGFEEDBACK, WRITE, sizeof(unsigned int));
+ _(BIOCSFEEDBACK, READ, sizeof(unsigned int));
+ /* Entries from file: net/if_gre.h */
+ _(GRESADDRS, READ, struct_ifreq_sz);
+ _(GRESADDRD, READ, struct_ifreq_sz);
+ _(GREGADDRS, READWRITE, struct_ifreq_sz);
+ _(GREGADDRD, READWRITE, struct_ifreq_sz);
+ _(GRESPROTO, READ, struct_ifreq_sz);
+ _(GREGPROTO, READWRITE, struct_ifreq_sz);
+ _(GRESSOCK, READ, struct_ifreq_sz);
+ _(GREDSOCK, READ, struct_ifreq_sz);
+ /* Entries from file: net/if_ppp.h */
+ _(PPPIOCGRAWIN, WRITE, struct_ppp_rawin_sz);
+ _(PPPIOCGFLAGS, WRITE, sizeof(int));
+ _(PPPIOCSFLAGS, READ, sizeof(int));
+ _(PPPIOCGASYNCMAP, WRITE, sizeof(int));
+ _(PPPIOCSASYNCMAP, READ, sizeof(int));
+ _(PPPIOCGUNIT, WRITE, sizeof(int));
+ _(PPPIOCGRASYNCMAP, WRITE, sizeof(int));
+ _(PPPIOCSRASYNCMAP, READ, sizeof(int));
+ _(PPPIOCGMRU, WRITE, sizeof(int));
+ _(PPPIOCSMRU, READ, sizeof(int));
+ _(PPPIOCSMAXCID, READ, sizeof(int));
+ _(PPPIOCGXASYNCMAP, WRITE, (8 * sizeof(u32)));
+ _(PPPIOCSXASYNCMAP, READ, (8 * sizeof(u32)));
+ _(PPPIOCXFERUNIT, NONE, 0);
+ _(PPPIOCSCOMPRESS, READ, struct_ppp_option_data_sz);
+ _(PPPIOCGNPMODE, READWRITE, struct_npioctl_sz);
+ _(PPPIOCSNPMODE, READ, struct_npioctl_sz);
+ _(PPPIOCGIDLE, WRITE, struct_ppp_idle_sz);
+ _(PPPIOCGMTU, WRITE, sizeof(int));
+ _(PPPIOCSMTU, READ, sizeof(int));
+ _(SIOCGPPPSTATS, READWRITE, struct_ifpppstatsreq_sz);
+ _(SIOCGPPPCSTATS, READWRITE, struct_ifpppcstatsreq_sz);
+ /* Entries from file: net/npf.h */
+ _(IOC_NPF_VERSION, WRITE, sizeof(int));
+ _(IOC_NPF_SWITCH, READ, sizeof(int));
+ _(IOC_NPF_LOAD, READWRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_TABLE, READ, struct_npf_ioctl_table_sz);
+ _(IOC_NPF_STATS, READ, sizeof(uptr));
+ _(IOC_NPF_SAVE, WRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_RULE, READWRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_CONN_LOOKUP, READWRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_TABLE_REPLACE, READWRITE, struct_nvlist_ref_sz);
+ /* Entries from file: net/if_pppoe.h */
+ _(PPPOESETPARMS, READ, struct_pppoediscparms_sz);
+ _(PPPOEGETPARMS, READWRITE, struct_pppoediscparms_sz);
+ _(PPPOEGETSESSION, READWRITE, struct_pppoeconnectionstate_sz);
+ /* Entries from file: net/if_sppp.h */
+ _(SPPPGETAUTHCFG, READWRITE, struct_spppauthcfg_sz);
+ _(SPPPSETAUTHCFG, READ, struct_spppauthcfg_sz);
+ _(SPPPGETLCPCFG, READWRITE, struct_sppplcpcfg_sz);
+ _(SPPPSETLCPCFG, READ, struct_sppplcpcfg_sz);
+ _(SPPPGETSTATUS, READWRITE, struct_spppstatus_sz);
+ _(SPPPGETSTATUSNCP, READWRITE, struct_spppstatusncp_sz);
+ _(SPPPGETIDLETO, READWRITE, struct_spppidletimeout_sz);
+ _(SPPPSETIDLETO, READ, struct_spppidletimeout_sz);
+ _(SPPPGETAUTHFAILURES, READWRITE, struct_spppauthfailurestats_sz);
+ _(SPPPSETAUTHFAILURE, READ, struct_spppauthfailuresettings_sz);
+ _(SPPPSETDNSOPTS, READ, struct_spppdnssettings_sz);
+ _(SPPPGETDNSOPTS, READWRITE, struct_spppdnssettings_sz);
+ _(SPPPGETDNSADDRS, READWRITE, struct_spppdnsaddrs_sz);
+ _(SPPPSETKEEPALIVE, READ, struct_spppkeepalivesettings_sz);
+ _(SPPPGETKEEPALIVE, READWRITE, struct_spppkeepalivesettings_sz);
+ /* Entries from file: net/if_srt.h */
+ _(SRT_GETNRT, WRITE, sizeof(unsigned int));
+ _(SRT_GETRT, READWRITE, struct_srt_rt_sz);
+ _(SRT_SETRT, READ, struct_srt_rt_sz);
+ _(SRT_DELRT, READ, sizeof(unsigned int));
+ _(SRT_SFLAGS, READ, sizeof(unsigned int));
+ _(SRT_GFLAGS, WRITE, sizeof(unsigned int));
+ _(SRT_SGFLAGS, READWRITE, sizeof(unsigned int));
+ _(SRT_DEBUG, READ, sizeof(uptr));
+ /* Entries from file: net/if_tap.h */
+ _(TAPGIFNAME, WRITE, struct_ifreq_sz);
+ /* Entries from file: net/if_tun.h */
+ _(TUNSDEBUG, READ, sizeof(int));
+ _(TUNGDEBUG, WRITE, sizeof(int));
+ _(TUNSIFMODE, READ, sizeof(int));
+ _(TUNSIFHEAD, READ, sizeof(int));
+ _(TUNGIFHEAD, WRITE, sizeof(int));
+ /* Entries from file: net/pfvar.h */
+ _(DIOCSTART, NONE, 0);
+ _(DIOCSTOP, NONE, 0);
+ _(DIOCADDRULE, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCGETRULES, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCGETRULE, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCSETLCK, READWRITE, sizeof(u32));
+ _(DIOCCLRSTATES, READWRITE, struct_pfioc_state_kill_sz);
+ _(DIOCGETSTATE, READWRITE, struct_pfioc_state_sz);
+ _(DIOCSETSTATUSIF, READWRITE, struct_pfioc_if_sz);
+ _(DIOCGETSTATUS, READWRITE, struct_pf_status_sz);
+ _(DIOCCLRSTATUS, NONE, 0);
+ _(DIOCNATLOOK, READWRITE, struct_pfioc_natlook_sz);
+ _(DIOCSETDEBUG, READWRITE, sizeof(u32));
+ _(DIOCGETSTATES, READWRITE, struct_pfioc_states_sz);
+ _(DIOCCHANGERULE, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCSETTIMEOUT, READWRITE, struct_pfioc_tm_sz);
+ _(DIOCGETTIMEOUT, READWRITE, struct_pfioc_tm_sz);
+ _(DIOCADDSTATE, READWRITE, struct_pfioc_state_sz);
+ _(DIOCCLRRULECTRS, NONE, 0);
+ _(DIOCGETLIMIT, READWRITE, struct_pfioc_limit_sz);
+ _(DIOCSETLIMIT, READWRITE, struct_pfioc_limit_sz);
+ _(DIOCKILLSTATES, READWRITE, struct_pfioc_state_kill_sz);
+ _(DIOCSTARTALTQ, NONE, 0);
+ _(DIOCSTOPALTQ, NONE, 0);
+ _(DIOCADDALTQ, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCGETALTQS, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCGETALTQ, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCCHANGEALTQ, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCGETQSTATS, READWRITE, struct_pfioc_qstats_sz);
+ _(DIOCBEGINADDRS, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCADDADDR, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCGETADDRS, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCGETADDR, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCCHANGEADDR, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCADDSTATES, READWRITE, struct_pfioc_states_sz);
+ _(DIOCGETRULESETS, READWRITE, struct_pfioc_ruleset_sz);
+ _(DIOCGETRULESET, READWRITE, struct_pfioc_ruleset_sz);
+ _(DIOCRCLRTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRADDTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRDELTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETTSTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRCLRTSTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRCLRADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRADDADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRDELADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRSETADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETASTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRCLRASTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRTSTADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRSETTFLAGS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRINADEFINE, READWRITE, struct_pfioc_table_sz);
+ _(DIOCOSFPFLUSH, NONE, 0);
+ _(DIOCOSFPADD, READWRITE, struct_pf_osfp_ioctl_sz);
+ _(DIOCOSFPGET, READWRITE, struct_pf_osfp_ioctl_sz);
+ _(DIOCXBEGIN, READWRITE, struct_pfioc_trans_sz);
+ _(DIOCXCOMMIT, READWRITE, struct_pfioc_trans_sz);
+ _(DIOCXROLLBACK, READWRITE, struct_pfioc_trans_sz);
+ _(DIOCGETSRCNODES, READWRITE, struct_pfioc_src_nodes_sz);
+ _(DIOCCLRSRCNODES, NONE, 0);
+ _(DIOCSETHOSTID, READWRITE, sizeof(u32));
+ _(DIOCIGETIFACES, READWRITE, struct_pfioc_iface_sz);
+ _(DIOCSETIFFLAG, READWRITE, struct_pfioc_iface_sz);
+ _(DIOCCLRIFFLAG, READWRITE, struct_pfioc_iface_sz);
+ _(DIOCKILLSRCNODES, READWRITE, struct_pfioc_src_node_kill_sz);
+ /* Entries from file: netbt/hci.h */
+ _(SIOCGBTINFO, READWRITE, struct_btreq_sz);
+ _(SIOCGBTINFOA, READWRITE, struct_btreq_sz);
+ _(SIOCNBTINFO, READWRITE, struct_btreq_sz);
+ _(SIOCSBTFLAGS, READWRITE, struct_btreq_sz);
+ _(SIOCSBTPOLICY, READWRITE, struct_btreq_sz);
+ _(SIOCSBTPTYPE, READWRITE, struct_btreq_sz);
+ _(SIOCGBTSTATS, READWRITE, struct_btreq_sz);
+ _(SIOCZBTSTATS, READWRITE, struct_btreq_sz);
+ _(SIOCBTDUMP, READ, struct_btreq_sz);
+ _(SIOCSBTSCOMTU, READWRITE, struct_btreq_sz);
+ _(SIOCGBTFEAT, READWRITE, struct_btreq_sz);
+ /* Entries from file: netinet/ip_nat.h */
+ _(SIOCADNAT, READ, struct_ipfobj_sz);
+ _(SIOCRMNAT, READ, struct_ipfobj_sz);
+ _(SIOCGNATS, READWRITE, struct_ipfobj_sz);
+ _(SIOCGNATL, READWRITE, struct_ipfobj_sz);
+ _(SIOCPURGENAT, READWRITE, struct_ipfobj_sz);
+ /* Entries from file: netinet/sctp_uio.h */
+ _(SIOCCONNECTX, READWRITE, struct_sctp_connectx_addrs_sz);
+ _(SIOCCONNECTXDEL, READWRITE, struct_sctp_connectx_addrs_sz);
+ /* Entries from file: netinet6/in6_var.h */
+ _(SIOCSIFINFO_FLAGS, READWRITE, struct_in6_ndireq_sz);
+ _(SIOCAADDRCTL_POLICY, READ, struct_in6_addrpolicy_sz);
+ _(SIOCDADDRCTL_POLICY, READ, struct_in6_addrpolicy_sz);
+ /* Entries from file: netsmb/smb_dev.h */
+ _(SMBIOC_OPENSESSION, READ, struct_smbioc_ossn_sz);
+ _(SMBIOC_OPENSHARE, READ, struct_smbioc_oshare_sz);
+ _(SMBIOC_REQUEST, READWRITE, struct_smbioc_rq_sz);
+ _(SMBIOC_SETFLAGS, READ, struct_smbioc_flags_sz);
+ _(SMBIOC_LOOKUP, READ, struct_smbioc_lookup_sz);
+ _(SMBIOC_READ, READWRITE, struct_smbioc_rw_sz);
+ _(SMBIOC_WRITE, READWRITE, struct_smbioc_rw_sz);
+ /* Entries from file: sys/agpio.h */
+ _(AGPIOC_INFO, WRITE, struct__agp_info_sz);
+ _(AGPIOC_ACQUIRE, NONE, 0);
+ _(AGPIOC_RELEASE, NONE, 0);
+ _(AGPIOC_SETUP, READ, struct__agp_setup_sz);
+ _(AGPIOC_ALLOCATE, READWRITE, struct__agp_allocate_sz);
+ _(AGPIOC_DEALLOCATE, READ, sizeof(int));
+ _(AGPIOC_BIND, READ, struct__agp_bind_sz);
+ _(AGPIOC_UNBIND, READ, struct__agp_unbind_sz);
+ /* Entries from file: sys/audioio.h */
+ _(AUDIO_GETINFO, WRITE, struct_audio_info_sz);
+ _(AUDIO_SETINFO, READWRITE, struct_audio_info_sz);
+ _(AUDIO_DRAIN, NONE, 0);
+ _(AUDIO_FLUSH, NONE, 0);
+ _(AUDIO_WSEEK, WRITE, sizeof(unsigned long));
+ _(AUDIO_RERROR, WRITE, sizeof(int));
+ _(AUDIO_GETDEV, WRITE, struct_audio_device_sz);
+ _(AUDIO_GETENC, READWRITE, struct_audio_encoding_sz);
+ _(AUDIO_GETFD, WRITE, sizeof(int));
+ _(AUDIO_SETFD, READWRITE, sizeof(int));
+ _(AUDIO_PERROR, WRITE, sizeof(int));
+ _(AUDIO_GETIOFFS, WRITE, struct_audio_offset_sz);
+ _(AUDIO_GETOOFFS, WRITE, struct_audio_offset_sz);
+ _(AUDIO_GETPROPS, WRITE, sizeof(int));
+ _(AUDIO_GETBUFINFO, WRITE, struct_audio_info_sz);
+ _(AUDIO_SETCHAN, READ, sizeof(int));
+ _(AUDIO_GETCHAN, WRITE, sizeof(int));
+ _(AUDIO_QUERYFORMAT, READWRITE, struct_audio_format_query_sz);
+ _(AUDIO_GETFORMAT, WRITE, struct_audio_info_sz);
+ _(AUDIO_SETFORMAT, READ, struct_audio_info_sz);
+ _(AUDIO_MIXER_READ, READWRITE, struct_mixer_ctrl_sz);
+ _(AUDIO_MIXER_WRITE, READWRITE, struct_mixer_ctrl_sz);
+ _(AUDIO_MIXER_DEVINFO, READWRITE, struct_mixer_devinfo_sz);
+ /* Entries from file: sys/ataio.h */
+ _(ATAIOCCOMMAND, READWRITE, struct_atareq_sz);
+ _(ATABUSIOSCAN, READ, struct_atabusioscan_args_sz);
+ _(ATABUSIORESET, NONE, 0);
+ _(ATABUSIODETACH, READ, struct_atabusiodetach_args_sz);
+ /* Entries from file: sys/cdio.h */
+ _(CDIOCPLAYTRACKS, READ, struct_ioc_play_track_sz);
+ _(CDIOCPLAYBLOCKS, READ, struct_ioc_play_blocks_sz);
+ _(CDIOCREADSUBCHANNEL, READWRITE, struct_ioc_read_subchannel_sz);
+ _(CDIOREADTOCHEADER, WRITE, struct_ioc_toc_header_sz);
+ _(CDIOREADTOCENTRIES, READWRITE, struct_ioc_read_toc_entry_sz);
+ _(CDIOREADMSADDR, READWRITE, sizeof(int));
+ _(CDIOCSETPATCH, READ, struct_ioc_patch_sz);
+ _(CDIOCGETVOL, WRITE, struct_ioc_vol_sz);
+ _(CDIOCSETVOL, READ, struct_ioc_vol_sz);
+ _(CDIOCSETMONO, NONE, 0);
+ _(CDIOCSETSTEREO, NONE, 0);
+ _(CDIOCSETMUTE, NONE, 0);
+ _(CDIOCSETLEFT, NONE, 0);
+ _(CDIOCSETRIGHT, NONE, 0);
+ _(CDIOCSETDEBUG, NONE, 0);
+ _(CDIOCCLRDEBUG, NONE, 0);
+ _(CDIOCPAUSE, NONE, 0);
+ _(CDIOCRESUME, NONE, 0);
+ _(CDIOCRESET, NONE, 0);
+ _(CDIOCSTART, NONE, 0);
+ _(CDIOCSTOP, NONE, 0);
+ _(CDIOCEJECT, NONE, 0);
+ _(CDIOCALLOW, NONE, 0);
+ _(CDIOCPREVENT, NONE, 0);
+ _(CDIOCCLOSE, NONE, 0);
+ _(CDIOCPLAYMSF, READ, struct_ioc_play_msf_sz);
+ _(CDIOCLOADUNLOAD, READ, struct_ioc_load_unload_sz);
+ /* Entries from file: sys/chio.h */
+ _(CHIOMOVE, READ, struct_changer_move_request_sz);
+ _(CHIOEXCHANGE, READ, struct_changer_exchange_request_sz);
+ _(CHIOPOSITION, READ, struct_changer_position_request_sz);
+ _(CHIOSPICKER, READ, sizeof(int));
+ _(CHIOGPARAMS, WRITE, struct_changer_params_sz);
+ _(CHIOIELEM, NONE, 0);
+ _(OCHIOGSTATUS, READ, struct_ochanger_element_status_request_sz);
+ _(CHIOGSTATUS, READ, struct_changer_element_status_request_sz);
+ _(CHIOSVOLTAG, READ, struct_changer_set_voltag_request_sz);
+ /* Entries from file: sys/clockctl.h */
+ _(CLOCKCTL_SETTIMEOFDAY, READ, struct_clockctl_settimeofday_sz);
+ _(CLOCKCTL_ADJTIME, READWRITE, struct_clockctl_adjtime_sz);
+ _(CLOCKCTL_CLOCK_SETTIME, READ, struct_clockctl_clock_settime_sz);
+ _(CLOCKCTL_NTP_ADJTIME, READWRITE, struct_clockctl_ntp_adjtime_sz);
+ /* Entries from file: sys/cpuio.h */
+ _(IOC_CPU_SETSTATE, READ, struct_cpustate_sz);
+ _(IOC_CPU_GETSTATE, READWRITE, struct_cpustate_sz);
+ _(IOC_CPU_GETCOUNT, WRITE, sizeof(int));
+ _(IOC_CPU_MAPID, READWRITE, sizeof(int));
+ _(IOC_CPU_UCODE_GET_VERSION, READWRITE, struct_cpu_ucode_version_sz);
+ _(IOC_CPU_UCODE_APPLY, READ, struct_cpu_ucode_sz);
+ /* Entries from file: sys/dkio.h */
+ _(DIOCGDINFO, WRITE, struct_disklabel_sz);
+ _(DIOCSDINFO, READ, struct_disklabel_sz);
+ _(DIOCWDINFO, READ, 0);
+ _(DIOCRFORMAT, READWRITE, struct_format_op_sz);
+ _(DIOCWFORMAT, READWRITE, struct_format_op_sz);
+ _(DIOCSSTEP, READ, sizeof(int));
+ _(DIOCSRETRIES, READ, sizeof(int));
+ _(DIOCKLABEL, READ, sizeof(int));
+ _(DIOCWLABEL, READ, sizeof(int));
+ _(DIOCSBAD, READ, struct_dkbad_sz);
+ _(DIOCEJECT, READ, sizeof(int));
+ _(ODIOCEJECT, NONE, 0);
+ _(DIOCLOCK, READ, sizeof(int));
+ _(DIOCGDEFLABEL, WRITE, struct_disklabel_sz);
+ _(DIOCCLRLABEL, NONE, 0);
+ _(DIOCGCACHE, WRITE, sizeof(int));
+ _(DIOCSCACHE, READ, sizeof(int));
+ _(DIOCCACHESYNC, READ, sizeof(int));
+ _(DIOCBSLIST, READWRITE, struct_disk_badsecinfo_sz);
+ _(DIOCBSFLUSH, NONE, 0);
+ _(DIOCAWEDGE, READWRITE, struct_dkwedge_info_sz);
+ _(DIOCGWEDGEINFO, WRITE, struct_dkwedge_info_sz);
+ _(DIOCDWEDGE, READ, struct_dkwedge_info_sz);
+ _(DIOCLWEDGES, READWRITE, struct_dkwedge_list_sz);
+ _(DIOCGSTRATEGY, WRITE, struct_disk_strategy_sz);
+ _(DIOCSSTRATEGY, READ, struct_disk_strategy_sz);
+ _(DIOCGDISKINFO, WRITE, struct_plistref_sz);
+ _(DIOCTUR, WRITE, sizeof(int));
+ _(DIOCMWEDGES, WRITE, sizeof(int));
+ _(DIOCGSECTORSIZE, WRITE, sizeof(unsigned int));
+ _(DIOCGMEDIASIZE, WRITE, sizeof(uptr));
+ _(DIOCRMWEDGES, WRITE, sizeof(int));
+ /* Entries from file: sys/drvctlio.h */
+ _(DRVDETACHDEV, READ, struct_devdetachargs_sz);
+ _(DRVRESCANBUS, READ, struct_devrescanargs_sz);
+ _(DRVCTLCOMMAND, READWRITE, struct_plistref_sz);
+ _(DRVRESUMEDEV, READ, struct_devpmargs_sz);
+ _(DRVLISTDEV, READWRITE, struct_devlistargs_sz);
+ _(DRVGETEVENT, WRITE, struct_plistref_sz);
+ _(DRVSUSPENDDEV, READ, struct_devpmargs_sz);
+ /* Entries from file: sys/dvdio.h */
+ _(DVD_READ_STRUCT, READWRITE, union_dvd_struct_sz);
+ _(DVD_WRITE_STRUCT, READWRITE, union_dvd_struct_sz);
+ _(DVD_AUTH, READWRITE, union_dvd_authinfo_sz);
+ /* Entries from file: sys/envsys.h */
+ _(ENVSYS_GETDICTIONARY, READWRITE, struct_plistref_sz);
+ _(ENVSYS_SETDICTIONARY, READWRITE, struct_plistref_sz);
+ _(ENVSYS_REMOVEPROPS, READWRITE, struct_plistref_sz);
+ _(ENVSYS_GTREDATA, READWRITE, struct_envsys_tre_data_sz);
+ _(ENVSYS_GTREINFO, READWRITE, struct_envsys_basic_info_sz);
+ /* Entries from file: sys/event.h */
+ _(KFILTER_BYFILTER, READWRITE, struct_kfilter_mapping_sz);
+ _(KFILTER_BYNAME, READWRITE, struct_kfilter_mapping_sz);
+ /* Entries from file: sys/fdio.h */
+ _(FDIOCGETOPTS, WRITE, 0);
+ _(FDIOCSETOPTS, READ, sizeof(int));
+ _(FDIOCSETFORMAT, READ, struct_fdformat_parms_sz);
+ _(FDIOCGETFORMAT, WRITE, struct_fdformat_parms_sz);
+ _(FDIOCFORMAT_TRACK, READ, struct_fdformat_cmd_sz);
+ /* Entries from file: sys/filio.h */
+ _(FIOCLEX, NONE, 0);
+ _(FIONCLEX, NONE, 0);
+ _(FIOSEEKDATA, READWRITE, sizeof(uptr));
+ _(FIOSEEKHOLE, READWRITE, sizeof(uptr));
+ _(FIONREAD, WRITE, sizeof(int));
+ _(FIONBIO, READ, sizeof(int));
+ _(FIOASYNC, READ, sizeof(int));
+ _(FIOSETOWN, READ, sizeof(int));
+ _(FIOGETOWN, WRITE, sizeof(int));
+ _(OFIOGETBMAP, READWRITE, sizeof(u32));
+ _(FIOGETBMAP, READWRITE, sizeof(u64));
+ _(FIONWRITE, WRITE, sizeof(int));
+ _(FIONSPACE, WRITE, sizeof(int));
+ /* Entries from file: sys/gpio.h */
+ _(GPIOINFO, WRITE, struct_gpio_info_sz);
+ _(GPIOSET, READWRITE, struct_gpio_set_sz);
+ _(GPIOUNSET, READWRITE, struct_gpio_set_sz);
+ _(GPIOREAD, READWRITE, struct_gpio_req_sz);
+ _(GPIOWRITE, READWRITE, struct_gpio_req_sz);
+ _(GPIOTOGGLE, READWRITE, struct_gpio_req_sz);
+ _(GPIOATTACH, READWRITE, struct_gpio_attach_sz);
+ /* Entries from file: sys/ioctl.h */
+ _(PTIOCNETBSD, READ, struct_ioctl_pt_sz);
+ _(PTIOCSUNOS, READ, struct_ioctl_pt_sz);
+ _(PTIOCLINUX, READ, struct_ioctl_pt_sz);
+ _(PTIOCFREEBSD, READ, struct_ioctl_pt_sz);
+ _(PTIOCULTRIX, READ, struct_ioctl_pt_sz);
+ /* Entries from file: sys/ioctl_compat.h */
+ _(TIOCHPCL, NONE, 0);
+ _(TIOCGETP, WRITE, struct_sgttyb_sz);
+ _(TIOCSETP, READ, struct_sgttyb_sz);
+ _(TIOCSETN, READ, 0);
+ _(TIOCSETC, READ, struct_tchars_sz);
+ _(TIOCGETC, WRITE, struct_tchars_sz);
+ _(TIOCLBIS, READ, sizeof(int));
+ _(TIOCLBIC, READ, sizeof(int));
+ _(TIOCLSET, READ, sizeof(int));
+ _(TIOCLGET, WRITE, sizeof(int));
+ _(TIOCSLTC, READ, struct_ltchars_sz);
+ _(TIOCGLTC, WRITE, struct_ltchars_sz);
+ _(OTIOCCONS, NONE, 0);
+ /* Entries from file: sys/joystick.h */
+ _(JOY_SETTIMEOUT, READ, sizeof(int));
+ _(JOY_GETTIMEOUT, WRITE, sizeof(int));
+ _(JOY_SET_X_OFFSET, READ, sizeof(int));
+ _(JOY_SET_Y_OFFSET, READ, sizeof(int));
+ _(JOY_GET_Y_OFFSET, WRITE, sizeof(int));
+ /* Entries from file: sys/ksyms.h */
+ _(OKIOCGSYMBOL, READ, struct_ksyms_ogsymbol_sz);
+ _(OKIOCGVALUE, READ, struct_ksyms_ogsymbol_sz);
+ _(KIOCGSIZE, WRITE, sizeof(int));
+ _(KIOCGVALUE, READWRITE, struct_ksyms_gvalue_sz);
+ _(KIOCGSYMBOL, READWRITE, struct_ksyms_gsymbol_sz);
+ /* Entries from file: sys/lua.h */
+ _(LUAINFO, READWRITE, struct_lua_info_sz);
+ _(LUACREATE, READWRITE, struct_lua_create_sz);
+ _(LUADESTROY, READWRITE, struct_lua_create_sz);
+ _(LUAREQUIRE, READWRITE, struct_lua_require_sz);
+ _(LUALOAD, READWRITE, struct_lua_load_sz);
+ /* Entries from file: sys/midiio.h */
+ _(MIDI_PRETIME, READWRITE, sizeof(int));
+ _(MIDI_MPUMODE, READWRITE, sizeof(int));
+ _(MIDI_MPUCMD, READWRITE, struct_mpu_command_rec_sz);
+ _(SEQUENCER_RESET, NONE, 0);
+ _(SEQUENCER_SYNC, NONE, 0);
+ _(SEQUENCER_INFO, READWRITE, struct_synth_info_sz);
+ _(SEQUENCER_CTRLRATE, READWRITE, sizeof(int));
+ _(SEQUENCER_GETOUTCOUNT, WRITE, sizeof(int));
+ _(SEQUENCER_GETINCOUNT, WRITE, sizeof(int));
+ _(SEQUENCER_RESETSAMPLES, READ, sizeof(int));
+ _(SEQUENCER_NRSYNTHS, WRITE, sizeof(int));
+ _(SEQUENCER_NRMIDIS, WRITE, sizeof(int));
+ _(SEQUENCER_THRESHOLD, READ, sizeof(int));
+ _(SEQUENCER_MEMAVL, READWRITE, sizeof(int));
+ _(SEQUENCER_PANIC, NONE, 0);
+ _(SEQUENCER_OUTOFBAND, READ, struct_seq_event_rec_sz);
+ _(SEQUENCER_GETTIME, WRITE, sizeof(int));
+ _(SEQUENCER_TMR_TIMEBASE, READWRITE, sizeof(int));
+ _(SEQUENCER_TMR_START, NONE, 0);
+ _(SEQUENCER_TMR_STOP, NONE, 0);
+ _(SEQUENCER_TMR_CONTINUE, NONE, 0);
+ _(SEQUENCER_TMR_TEMPO, READWRITE, sizeof(int));
+ _(SEQUENCER_TMR_SOURCE, READWRITE, sizeof(int));
+ _(SEQUENCER_TMR_METRONOME, READ, sizeof(int));
+ _(SEQUENCER_TMR_SELECT, READ, sizeof(int));
+ /* Entries from file: sys/mtio.h */
+ _(MTIOCTOP, READ, struct_mtop_sz);
+ _(MTIOCGET, WRITE, struct_mtget_sz);
+ _(MTIOCIEOT, NONE, 0);
+ _(MTIOCEEOT, NONE, 0);
+ _(MTIOCRDSPOS, WRITE, sizeof(u32));
+ _(MTIOCRDHPOS, WRITE, sizeof(u32));
+ _(MTIOCSLOCATE, READ, sizeof(u32));
+ _(MTIOCHLOCATE, READ, sizeof(u32));
+ /* Entries from file: sys/power.h */
+ _(POWER_EVENT_RECVDICT, READWRITE, struct_plistref_sz);
+ _(POWER_IOC_GET_TYPE, WRITE, struct_power_type_sz);
+ /* Entries from file: sys/radioio.h */
+ _(RIOCGINFO, WRITE, struct_radio_info_sz);
+ _(RIOCSINFO, READWRITE, struct_radio_info_sz);
+ _(RIOCSSRCH, READ, sizeof(int));
+ /* Entries from file: sys/rndio.h */
+ _(RNDGETENTCNT, WRITE, sizeof(u32));
+ _(RNDGETSRCNUM, READWRITE, struct_rndstat_sz);
+ _(RNDGETSRCNAME, READWRITE, struct_rndstat_name_sz);
+ _(RNDCTL, READ, struct_rndctl_sz);
+ _(RNDADDDATA, READ, struct_rnddata_sz);
+ _(RNDGETPOOLSTAT, WRITE, struct_rndpoolstat_sz);
+ _(RNDGETESTNUM, READWRITE, struct_rndstat_est_sz);
+ _(RNDGETESTNAME, READWRITE, struct_rndstat_est_name_sz);
+ /* Entries from file: sys/scanio.h */
+ _(SCIOCGET, WRITE, struct_scan_io_sz);
+ _(SCIOCSET, READ, struct_scan_io_sz);
+ _(SCIOCRESTART, NONE, 0);
+ /* Entries from file: sys/scsiio.h */
+ _(SCIOCCOMMAND, READWRITE, struct_scsireq_sz);
+ _(SCIOCDEBUG, READ, sizeof(int));
+ _(SCIOCIDENTIFY, WRITE, struct_scsi_addr_sz);
+ _(OSCIOCIDENTIFY, WRITE, struct_oscsi_addr_sz);
+ _(SCIOCDECONFIG, NONE, 0);
+ _(SCIOCRECONFIG, NONE, 0);
+ _(SCIOCRESET, NONE, 0);
+ _(SCBUSIOSCAN, READ, struct_scbusioscan_args_sz);
+ _(SCBUSIORESET, NONE, 0);
+ _(SCBUSIODETACH, READ, struct_scbusiodetach_args_sz);
+ _(SCBUSACCEL, READ, struct_scbusaccel_args_sz);
+ /* Entries from file: sys/sockio.h */
+ _(SIOCSHIWAT, READ, sizeof(int));
+ _(SIOCGHIWAT, WRITE, sizeof(int));
+ _(SIOCSLOWAT, READ, sizeof(int));
+ _(SIOCGLOWAT, WRITE, sizeof(int));
+ _(SIOCATMARK, WRITE, sizeof(int));
+ _(SIOCSPGRP, READ, sizeof(int));
+ _(SIOCGPGRP, WRITE, sizeof(int));
+ _(SIOCPEELOFF, READWRITE, sizeof(int));
+ _(SIOCADDRT, READ, struct_ortentry_sz);
+ _(SIOCDELRT, READ, struct_ortentry_sz);
+ _(SIOCSIFADDR, READ, struct_ifreq_sz);
+ _(SIOCGIFADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
+ _(SIOCGIFDSTADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
+ _(SIOCGIFFLAGS, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFBRDADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
+ _(SIOCGIFCONF, READWRITE, struct_ifconf_sz);
+ _(SIOCGIFNETMASK, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
+ _(SIOCGIFMETRIC, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
+ _(SIOCDIFADDR, READ, struct_ifreq_sz);
+ _(SIOCAIFADDR, READ, struct_ifaliasreq_sz);
+ _(SIOCGIFALIAS, READWRITE, struct_ifaliasreq_sz);
+ _(SIOCGIFAFLAG_IN, READWRITE, struct_ifreq_sz);
+ _(SIOCALIFADDR, READ, struct_if_laddrreq_sz);
+ _(SIOCGLIFADDR, READWRITE, struct_if_laddrreq_sz);
+ _(SIOCDLIFADDR, READ, struct_if_laddrreq_sz);
+ _(SIOCSIFADDRPREF, READ, struct_if_addrprefreq_sz);
+ _(SIOCGIFADDRPREF, READWRITE, struct_if_addrprefreq_sz);
+ _(SIOCADDMULTI, READ, struct_ifreq_sz);
+ _(SIOCDELMULTI, READ, struct_ifreq_sz);
+ _(SIOCGETVIFCNT, READWRITE, struct_sioc_vif_req_sz);
+ _(SIOCGETSGCNT, READWRITE, struct_sioc_sg_req_sz);
+ _(SIOCSIFMEDIA, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFMEDIA, READWRITE, struct_ifmediareq_sz);
+ _(SIOCSIFGENERIC, READ, struct_ifreq_sz);
+ _(SIOCGIFGENERIC, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFPHYADDR, READ, struct_ifaliasreq_sz);
+ _(SIOCGIFPSRCADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFPDSTADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCDIFPHYADDR, READ, struct_ifreq_sz);
+ _(SIOCSLIFPHYADDR, READ, struct_if_laddrreq_sz);
+ _(SIOCGLIFPHYADDR, READWRITE, struct_if_laddrreq_sz);
+ _(SIOCSIFMTU, READ, struct_ifreq_sz);
+ _(SIOCGIFMTU, READWRITE, struct_ifreq_sz);
+ _(SIOCSDRVSPEC, READ, struct_ifdrv_sz);
+ _(SIOCGDRVSPEC, READWRITE, struct_ifdrv_sz);
+ _(SIOCIFCREATE, READ, struct_ifreq_sz);
+ _(SIOCIFDESTROY, READ, struct_ifreq_sz);
+ _(SIOCIFGCLONERS, READWRITE, struct_if_clonereq_sz);
+ _(SIOCGIFDLT, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFCAP, READWRITE, struct_ifcapreq_sz);
+ _(SIOCSIFCAP, READ, struct_ifcapreq_sz);
+ _(SIOCSVH, READWRITE, struct_ifreq_sz);
+ _(SIOCGVH, READWRITE, struct_ifreq_sz);
+ _(SIOCINITIFADDR, READWRITE, struct_ifaddr_sz);
+ _(SIOCGIFDATA, READWRITE, struct_ifdatareq_sz);
+ _(SIOCZIFDATA, READWRITE, struct_ifdatareq_sz);
+ _(SIOCGLINKSTR, READWRITE, struct_ifdrv_sz);
+ _(SIOCSLINKSTR, READ, struct_ifdrv_sz);
+ _(SIOCGETHERCAP, READWRITE, struct_eccapreq_sz);
+ _(SIOCGIFINDEX, READWRITE, struct_ifreq_sz);
+ _(SIOCSETHERCAP, READ, struct_eccapreq_sz);
+ _(SIOCSIFDESCR, READ, struct_ifreq_sz);
+ _(SIOCGIFDESCR, READWRITE, struct_ifreq_sz);
+ _(SIOCGUMBINFO, READWRITE, struct_ifreq_sz);
+ _(SIOCSUMBPARAM, READ, struct_ifreq_sz);
+ _(SIOCGUMBPARAM, READWRITE, struct_ifreq_sz);
+ _(SIOCSETPFSYNC, READ, struct_ifreq_sz);
+ _(SIOCGETPFSYNC, READWRITE, struct_ifreq_sz);
+ /* Entries from file: sys/timepps.h */
+ _(PPS_IOC_CREATE, NONE, 0);
+ _(PPS_IOC_DESTROY, NONE, 0);
+ _(PPS_IOC_SETPARAMS, READ, struct_pps_params_sz);
+ _(PPS_IOC_GETPARAMS, WRITE, struct_pps_params_sz);
+ _(PPS_IOC_GETCAP, WRITE, sizeof(int));
+ _(PPS_IOC_FETCH, READWRITE, struct_pps_info_sz);
+ _(PPS_IOC_KCBIND, READ, sizeof(int));
+ /* Entries from file: sys/ttycom.h */
+ _(TIOCEXCL, NONE, 0);
+ _(TIOCNXCL, NONE, 0);
+ _(TIOCFLUSH, READ, sizeof(int));
+ _(TIOCGETA, WRITE, struct_termios_sz);
+ _(TIOCSETA, READ, struct_termios_sz);
+ _(TIOCSETAW, READ, 0);
+ _(TIOCSETAF, READ, 0);
+ _(TIOCGETD, WRITE, sizeof(int));
+ _(TIOCSETD, READ, sizeof(int));
+ _(TIOCGLINED, WRITE, (32 * sizeof(char)));
+ _(TIOCSLINED, READ, (32 * sizeof(char)));
+ _(TIOCSBRK, NONE, 0);
+ _(TIOCCBRK, NONE, 0);
+ _(TIOCSDTR, NONE, 0);
+ _(TIOCCDTR, NONE, 0);
+ _(TIOCGPGRP, WRITE, sizeof(int));
+ _(TIOCSPGRP, READ, sizeof(int));
+ _(TIOCOUTQ, WRITE, sizeof(int));
+ _(TIOCSTI, READ, sizeof(char));
+ _(TIOCNOTTY, NONE, 0);
+ _(TIOCPKT, READ, sizeof(int));
+ _(TIOCSTOP, NONE, 0);
+ _(TIOCSTART, NONE, 0);
+ _(TIOCMSET, READ, sizeof(int));
+ _(TIOCMBIS, READ, sizeof(int));
+ _(TIOCMBIC, READ, sizeof(int));
+ _(TIOCMGET, WRITE, sizeof(int));
+ _(TIOCREMOTE, READ, sizeof(int));
+ _(TIOCGWINSZ, WRITE, struct_winsize_sz);
+ _(TIOCSWINSZ, READ, struct_winsize_sz);
+ _(TIOCUCNTL, READ, sizeof(int));
+ _(TIOCSTAT, READ, sizeof(int));
+ _(TIOCGSID, WRITE, sizeof(int));
+ _(TIOCCONS, READ, sizeof(int));
+ _(TIOCSCTTY, NONE, 0);
+ _(TIOCEXT, READ, sizeof(int));
+ _(TIOCSIG, NONE, 0);
+ _(TIOCDRAIN, NONE, 0);
+ _(TIOCGFLAGS, WRITE, sizeof(int));
+ _(TIOCSFLAGS, READ, sizeof(int));
+ _(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);
+ _(TIOCRCVFRAME, READ, sizeof(uptr));
+ _(TIOCXMTFRAME, READ, sizeof(uptr));
+ _(TIOCPTMGET, WRITE, struct_ptmget_sz);
+ _(TIOCGRANTPT, NONE, 0);
+ _(TIOCPTSNAME, WRITE, struct_ptmget_sz);
+ _(TIOCSQSIZE, READ, sizeof(int));
+ _(TIOCGQSIZE, WRITE, sizeof(int));
+ /* Entries from file: sys/verified_exec.h */
+ _(VERIEXEC_LOAD, READ, struct_plistref_sz);
+ _(VERIEXEC_TABLESIZE, READ, struct_plistref_sz);
+ _(VERIEXEC_DELETE, READ, struct_plistref_sz);
+ _(VERIEXEC_QUERY, READWRITE, struct_plistref_sz);
+ _(VERIEXEC_DUMP, WRITE, struct_plistref_sz);
+ _(VERIEXEC_FLUSH, NONE, 0);
+ /* Entries from file: sys/videoio.h */
+ _(VIDIOC_QUERYCAP, WRITE, struct_v4l2_capability_sz);
+ _(VIDIOC_RESERVED, NONE, 0);
+ _(VIDIOC_ENUM_FMT, READWRITE, struct_v4l2_fmtdesc_sz);
+ _(VIDIOC_G_FMT, READWRITE, struct_v4l2_format_sz);
+ _(VIDIOC_S_FMT, READWRITE, struct_v4l2_format_sz);
+ _(VIDIOC_REQBUFS, READWRITE, struct_v4l2_requestbuffers_sz);
+ _(VIDIOC_QUERYBUF, READWRITE, struct_v4l2_buffer_sz);
+ _(VIDIOC_G_FBUF, WRITE, struct_v4l2_framebuffer_sz);
+ _(VIDIOC_S_FBUF, READ, struct_v4l2_framebuffer_sz);
+ _(VIDIOC_OVERLAY, READ, sizeof(int));
+ _(VIDIOC_QBUF, READWRITE, struct_v4l2_buffer_sz);
+ _(VIDIOC_DQBUF, READWRITE, struct_v4l2_buffer_sz);
+ _(VIDIOC_STREAMON, READ, sizeof(int));
+ _(VIDIOC_STREAMOFF, READ, sizeof(int));
+ _(VIDIOC_G_PARM, READWRITE, struct_v4l2_streamparm_sz);
+ _(VIDIOC_S_PARM, READWRITE, struct_v4l2_streamparm_sz);
+ _(VIDIOC_G_STD, WRITE, sizeof(u64));
+ _(VIDIOC_S_STD, READ, sizeof(u64));
+ _(VIDIOC_ENUMSTD, READWRITE, struct_v4l2_standard_sz);
+ _(VIDIOC_ENUMINPUT, READWRITE, struct_v4l2_input_sz);
+ _(VIDIOC_G_CTRL, READWRITE, struct_v4l2_control_sz);
+ _(VIDIOC_S_CTRL, READWRITE, struct_v4l2_control_sz);
+ _(VIDIOC_G_TUNER, READWRITE, struct_v4l2_tuner_sz);
+ _(VIDIOC_S_TUNER, READ, struct_v4l2_tuner_sz);
+ _(VIDIOC_G_AUDIO, WRITE, struct_v4l2_audio_sz);
+ _(VIDIOC_S_AUDIO, READ, struct_v4l2_audio_sz);
+ _(VIDIOC_QUERYCTRL, READWRITE, struct_v4l2_queryctrl_sz);
+ _(VIDIOC_QUERYMENU, READWRITE, struct_v4l2_querymenu_sz);
+ _(VIDIOC_G_INPUT, WRITE, sizeof(int));
+ _(VIDIOC_S_INPUT, READWRITE, sizeof(int));
+ _(VIDIOC_G_OUTPUT, WRITE, sizeof(int));
+ _(VIDIOC_S_OUTPUT, READWRITE, sizeof(int));
+ _(VIDIOC_ENUMOUTPUT, READWRITE, struct_v4l2_output_sz);
+ _(VIDIOC_G_AUDOUT, WRITE, struct_v4l2_audioout_sz);
+ _(VIDIOC_S_AUDOUT, READ, struct_v4l2_audioout_sz);
+ _(VIDIOC_G_MODULATOR, READWRITE, struct_v4l2_modulator_sz);
+ _(VIDIOC_S_MODULATOR, READ, struct_v4l2_modulator_sz);
+ _(VIDIOC_G_FREQUENCY, READWRITE, struct_v4l2_frequency_sz);
+ _(VIDIOC_S_FREQUENCY, READ, struct_v4l2_frequency_sz);
+ _(VIDIOC_CROPCAP, READWRITE, struct_v4l2_cropcap_sz);
+ _(VIDIOC_G_CROP, READWRITE, struct_v4l2_crop_sz);
+ _(VIDIOC_S_CROP, READ, struct_v4l2_crop_sz);
+ _(VIDIOC_G_JPEGCOMP, WRITE, struct_v4l2_jpegcompression_sz);
+ _(VIDIOC_S_JPEGCOMP, READ, struct_v4l2_jpegcompression_sz);
+ _(VIDIOC_QUERYSTD, WRITE, sizeof(u64));
+ _(VIDIOC_TRY_FMT, READWRITE, struct_v4l2_format_sz);
+ _(VIDIOC_ENUMAUDIO, READWRITE, struct_v4l2_audio_sz);
+ _(VIDIOC_ENUMAUDOUT, READWRITE, struct_v4l2_audioout_sz);
+ _(VIDIOC_G_PRIORITY, WRITE, enum_v4l2_priority_sz);
+ _(VIDIOC_S_PRIORITY, READ, enum_v4l2_priority_sz);
+ _(VIDIOC_ENUM_FRAMESIZES, READWRITE, struct_v4l2_frmsizeenum_sz);
+ _(VIDIOC_ENUM_FRAMEINTERVALS, READWRITE, struct_v4l2_frmivalenum_sz);
+ /* Entries from file: sys/wdog.h */
+ _(WDOGIOC_GMODE, READWRITE, struct_wdog_mode_sz);
+ _(WDOGIOC_SMODE, READ, struct_wdog_mode_sz);
+ _(WDOGIOC_WHICH, WRITE, struct_wdog_mode_sz);
+ _(WDOGIOC_TICKLE, NONE, 0);
+ _(WDOGIOC_GTICKLER, WRITE, sizeof(int));
+ _(WDOGIOC_GWDOGS, READWRITE, struct_wdog_conf_sz);
+ /* Entries from file: sys/kcov.h */
+ _(KCOV_IOC_SETBUFSIZE, READ, sizeof(u64));
+ _(KCOV_IOC_ENABLE, READ, sizeof(int));
+ _(KCOV_IOC_DISABLE, NONE, 0);
+ /* Entries from file: sys/ipmi.h */
+ _(IPMICTL_RECEIVE_MSG_TRUNC, READWRITE, struct_ipmi_recv_sz);
+ _(IPMICTL_RECEIVE_MSG, READWRITE, struct_ipmi_recv_sz);
+ _(IPMICTL_SEND_COMMAND, READ, struct_ipmi_req_sz);
+ _(IPMICTL_REGISTER_FOR_CMD, READ, struct_ipmi_cmdspec_sz);
+ _(IPMICTL_UNREGISTER_FOR_CMD, READ, struct_ipmi_cmdspec_sz);
+ _(IPMICTL_SET_GETS_EVENTS_CMD, READ, sizeof(int));
+ _(IPMICTL_SET_MY_ADDRESS_CMD, READ, sizeof(unsigned int));
+ _(IPMICTL_GET_MY_ADDRESS_CMD, WRITE, sizeof(unsigned int));
+ _(IPMICTL_SET_MY_LUN_CMD, READ, sizeof(unsigned int));
+ _(IPMICTL_GET_MY_LUN_CMD, WRITE, sizeof(unsigned int));
+ /* Entries from file: soundcard.h */
+ _(SNDCTL_DSP_RESET, NONE, 0);
+ _(SNDCTL_DSP_SYNC, NONE, 0);
+ _(SNDCTL_DSP_SPEED, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_RATE, WRITE, sizeof(int));
+ _(SNDCTL_DSP_STEREO, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_GETBLKSIZE, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_SETFMT, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_BITS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_CHANNELS, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));
+ _(SOUND_PCM_WRITE_FILTER, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));
+ _(SNDCTL_DSP_POST, NONE, 0);
+ _(SNDCTL_DSP_SUBDIVIDE, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_SETFRAGMENT, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);
+ _(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);
+ _(SNDCTL_DSP_NONBLOCK, NONE, 0);
+ _(SNDCTL_DSP_GETCAPS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_GETTRIGGER, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SETTRIGGER, READ, sizeof(int));
+ _(SNDCTL_DSP_GETIPTR, WRITE, struct_count_info_sz);
+ _(SNDCTL_DSP_GETOPTR, WRITE, struct_count_info_sz);
+ _(SNDCTL_DSP_MAPINBUF, WRITE, struct_buffmem_desc_sz);
+ _(SNDCTL_DSP_MAPOUTBUF, WRITE, struct_buffmem_desc_sz);
+ _(SNDCTL_DSP_SETSYNCRO, NONE, 0);
+ _(SNDCTL_DSP_SETDUPLEX, NONE, 0);
+ _(SNDCTL_DSP_PROFILE, READ, sizeof(int));
+ _(SNDCTL_DSP_GETODELAY, WRITE, sizeof(int));
+ _(SOUND_MIXER_INFO, WRITE, struct_mixer_info_sz);
+ _(SOUND_OLD_MIXER_INFO, WRITE, struct__old_mixer_info_sz);
+ _(OSS_GETVERSION, WRITE, sizeof(int));
+ _(SNDCTL_SYSINFO, WRITE, struct_oss_sysinfo_sz);
+ _(SNDCTL_AUDIOINFO, READWRITE, struct_oss_audioinfo_sz);
+ _(SNDCTL_ENGINEINFO, READWRITE, struct_oss_audioinfo_sz);
+ _(SNDCTL_DSP_GETPLAYVOL, WRITE, sizeof(unsigned int));
+ _(SNDCTL_DSP_SETPLAYVOL, READ, sizeof(unsigned int));
+ _(SNDCTL_DSP_GETRECVOL, WRITE, sizeof(unsigned int));
+ _(SNDCTL_DSP_SETRECVOL, READ, sizeof(unsigned int));
+ _(SNDCTL_DSP_SKIP, NONE, 0);
+ _(SNDCTL_DSP_SILENCE, NONE, 0);
+ /* Entries from file: dev/filemon/filemon.h (compat <= 9.99.26) */
+ _(FILEMON_SET_FD, READWRITE, sizeof(int));
+ _(FILEMON_SET_PID, READWRITE, sizeof(int));
+ /* Entries from file: dev/usb/urio.h (compat <= 9.99.43) */
+ _(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz);
+ _(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz);
+#undef _
+} // NOLINT
+
+static bool ioctl_initialized = false;
+
+struct ioctl_desc_compare {
+ bool operator()(const ioctl_desc &left, const ioctl_desc &right) const {
+ return left.req < right.req;
+ }
+};
+
+static void ioctl_init() {
+ ioctl_table_fill();
+ Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare());
+
+ bool bad = false;
+ for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {
+ if (ioctl_table[i].req >= ioctl_table[i + 1].req) {
+ Printf("Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\n",
+ ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,
+ ioctl_table[i + 1].name);
+ bad = true;
+ }
+ }
+
+ if (bad)
+ Die();
+
+ ioctl_initialized = true;
+}
+
+static const ioctl_desc *ioctl_table_lookup(unsigned req) {
+ int left = 0;
+ int right = ioctl_table_size;
+ while (left < right) {
+ int mid = (left + right) / 2;
+ if (ioctl_table[mid].req < req)
+ left = mid + 1;
+ else
+ right = mid;
+ }
+ if (left == right && ioctl_table[left].req == req)
+ return ioctl_table + left;
+ else
+ return nullptr;
+}
+
+static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
+ CHECK(desc);
+ desc->req = req;
+ desc->name = "<DECODED_IOCTL>";
+ desc->size = IOC_SIZE(req);
+ // Sanity check.
+ if (desc->size > 0xFFFF)
+ return false;
+ unsigned dir = IOC_DIR(req);
+ switch (dir) {
+ case IOC_NONE:
+ desc->type = ioctl_desc::NONE;
+ break;
+ case IOC_READ | IOC_WRITE:
+ desc->type = ioctl_desc::READWRITE;
+ break;
+ case IOC_READ:
+ desc->type = ioctl_desc::WRITE;
+ break;
+ case IOC_WRITE:
+ desc->type = ioctl_desc::READ;
+ break;
+ default:
+ return false;
+ }
+ // Size can be 0 iff type is NONE.
+ if ((desc->type == IOC_NONE) != (desc->size == 0))
+ return false;
+ // Sanity check.
+ if (IOC_TYPE(req) == 0)
+ return false;
+ return true;
+}
+
+static const ioctl_desc *ioctl_lookup(unsigned req) {
+ const ioctl_desc *desc = ioctl_table_lookup(req);
+ if (desc)
+ return desc;
+
+ // Try stripping access size from the request id.
+ desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
+ // Sanity check: requests that encode access size are either read or write and
+ // have size of 0 in the table.
+ if (desc && desc->size == 0 &&
+ (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
+ desc->type == ioctl_desc::READ))
+ return desc;
+ return nullptr;
+}
+
+static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char *)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
+ }
+}
+
+static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
+ // FIXME: add verbose output
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
+ }
+}
+
+#endif // SANITIZER_NETBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_interface_internal.h b/lib/tsan/sanitizer_common/sanitizer_interface_internal.h
new file mode 100644
index 0000000000..be8023e9e1
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_interface_internal.h
@@ -0,0 +1,118 @@
+//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+//
+// This header declares the sanitizer runtime interface functions.
+// The runtime library has to define these functions so the instrumented program
+// could call them.
+//
+// See also include/sanitizer/common_interface_defs.h
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_INTERFACE_INTERNAL_H
+#define SANITIZER_INTERFACE_INTERNAL_H
+
+#include "sanitizer_internal_defs.h"
+
+extern "C" {
+ // Tell the tools to write their reports to "path.<pid>" instead of stderr.
+ // The special values are "stdout" and "stderr".
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_set_report_path(const char *path);
+ // Tell the tools to write their reports to the provided file descriptor
+ // (casted to void *).
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_set_report_fd(void *fd);
+
+ typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
+ // Notify the tools that the sandbox is going to be turned on.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+
+ // This function is called by the tool when it has just finished reporting
+ // an error. 'error_summary' is a one-line string that summarizes
+ // the error message. This function can be overridden by the client.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_report_error_summary(const char *error_summary);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
+
+ // Returns 1 on the first call, then returns 0 thereafter. Called by the tool
+ // to ensure only one report is printed when multiple errors occur
+ // simultaneously.
+ SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_annotate_contiguous_container(const void *beg,
+ const void *end,
+ const void *old_mid,
+ const void *new_mid);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const void *__sanitizer_contiguous_container_find_bad_address(
+ const void *beg, const void *mid, const void *end);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_get_module_and_offset_for_pc(
+ __sanitizer::uptr pc, char *module_path,
+ __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp1();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp2();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp1();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp2();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_switch();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_gep();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_indir();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
+ __sanitizer::u32*);
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_8bit_counters_init();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_cov_bool_flag_init();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_cov_pcs_init();
+} // extern "C"
+
+#endif // SANITIZER_INTERFACE_INTERNAL_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_internal_defs.h b/lib/tsan/sanitizer_common/sanitizer_internal_defs.h
new file mode 100644
index 0000000000..d0ffc79b06
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_internal_defs.h
@@ -0,0 +1,459 @@
+//===-- sanitizer_internal_defs.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+// It contains macro used in run-time libraries code.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_DEFS_H
+#define SANITIZER_DEFS_H
+
+#include "sanitizer_platform.h"
+
+#ifndef SANITIZER_DEBUG
+# define SANITIZER_DEBUG 0
+#endif
+
+#define SANITIZER_STRINGIFY_(S) #S
+#define SANITIZER_STRINGIFY(S) SANITIZER_STRINGIFY_(S)
+
+// Only use SANITIZER_*ATTRIBUTE* before the function return type!
+#if SANITIZER_WINDOWS
+#if SANITIZER_IMPORT_INTERFACE
+# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllimport)
+#else
+# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
+#endif
+# define SANITIZER_WEAK_ATTRIBUTE
+#elif SANITIZER_GO
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#else
+# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
+#endif
+
+// TLS is handled differently on different platforms
+#if SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_FREEBSD || SANITIZER_OPENBSD
+# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
+ __attribute__((tls_model("initial-exec"))) thread_local
+#else
+# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
+#endif
+
+//--------------------------- WEAK FUNCTIONS ---------------------------------//
+// When working with weak functions, to simplify the code and make it more
+// portable, when possible define a default implementation using this macro:
+//
+// SANITIZER_INTERFACE_WEAK_DEF(<return_type>, <name>, <parameter list>)
+//
+// For example:
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }
+//
+#if SANITIZER_WINDOWS
+#include "sanitizer_win_defs.h"
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_EXPORT_DEF(ReturnType, Name, __VA_ARGS__)
+#else
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE \
+ ReturnType Name(__VA_ARGS__)
+#endif
+
+// SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that
+// will evaluate to a null pointer when not defined.
+#ifndef SANITIZER_SUPPORTS_WEAK_HOOKS
+#if (SANITIZER_LINUX || SANITIZER_SOLARIS) && !SANITIZER_GO
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
+// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined
+// weak symbols. Mac OS X 10.9/Darwin 13 is the first release only supported
+// by Xcode >= 4.5.
+#elif SANITIZER_MAC && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
+#else
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
+#endif
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+// For some weak hooks that will be called very often and we want to avoid the
+// overhead of executing the default implementation when it is not necessary,
+// we can use the flag SANITIZER_SUPPORTS_WEAK_HOOKS to only define the default
+// implementation for platforms that doesn't support weak symbols. For example:
+//
+// #if !SANITIZER_SUPPORT_WEAK_HOOKS
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) {
+// return a > b;
+// }
+// #endif
+//
+// And then use it as: if (compare_hook) compare_hook(a, b);
+//----------------------------------------------------------------------------//
+
+
+// We can use .preinit_array section on Linux to call sanitizer initialization
+// functions very early in the process startup (unless PIC macro is defined).
+//
+// On FreeBSD, .preinit_array functions are called with rtld_bind_lock writer
+// lock held. It will lead to dead lock if unresolved PLT functions (which helds
+// rtld_bind_lock reader lock) are called inside .preinit_array functions.
+//
+// FIXME: do we have anything like this on Mac?
+#ifndef SANITIZER_CAN_USE_PREINIT_ARRAY
+#if ((SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_OPENBSD || \
+ SANITIZER_FUCHSIA || SANITIZER_NETBSD) && !defined(PIC)
+#define SANITIZER_CAN_USE_PREINIT_ARRAY 1
+// Before Solaris 11.4, .preinit_array is fully supported only with GNU ld.
+// FIXME: Check for those conditions.
+#elif SANITIZER_SOLARIS && !defined(PIC)
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 1
+#else
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 0
+#endif
+#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
+
+// GCC does not understand __has_feature
+#if !defined(__has_feature)
+# define __has_feature(x) 0
+#endif
+
+// Older GCCs do not understand __has_attribute.
+#if !defined(__has_attribute)
+# define __has_attribute(x) 0
+#endif
+
+// For portability reasons we do not include stddef.h, stdint.h or any other
+// system header, but we do need some basic types that are not defined
+// in a portable way by the language itself.
+namespace __sanitizer {
+
+#if defined(_WIN64)
+// 64-bit Windows uses LLP64 data model.
+typedef unsigned long long uptr;
+typedef signed long long sptr;
+#else
+typedef unsigned long uptr;
+typedef signed long sptr;
+#endif // defined(_WIN64)
+#if defined(__x86_64__)
+// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
+// 64-bit pointer to unwind stack frame.
+typedef unsigned long long uhwptr;
+#else
+typedef uptr uhwptr;
+#endif
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef signed char s8;
+typedef signed short s16;
+typedef signed int s32;
+typedef signed long long s64;
+#if SANITIZER_WINDOWS
+// On Windows, files are HANDLE, which is a synonim of void*.
+// Use void* to avoid including <windows.h> everywhere.
+typedef void* fd_t;
+typedef unsigned error_t;
+#else
+typedef int fd_t;
+typedef int error_t;
+#endif
+#if SANITIZER_SOLARIS && !defined(_LP64)
+typedef long pid_t;
+#else
+typedef int pid_t;
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_MAC || \
+ (SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
+ (SANITIZER_LINUX && defined(__x86_64__))
+typedef u64 OFF_T;
+#else
+typedef uptr OFF_T;
+#endif
+typedef u64 OFF64_T;
+
+#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
+typedef uptr operator_new_size_type;
+#else
+# if SANITIZER_OPENBSD || defined(__s390__) && !defined(__s390x__)
+// Special case: 31-bit s390 has unsigned long as size_t.
+typedef unsigned long operator_new_size_type;
+# else
+typedef u32 operator_new_size_type;
+# endif
+#endif
+
+typedef u64 tid_t;
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers to avoid portability issues.
+
+// Common defs.
+#ifndef INLINE
+#define INLINE inline
+#endif
+#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#define SANITIZER_WEAK_DEFAULT_IMPL \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
+#define SANITIZER_WEAK_CXX_DEFAULT_IMPL \
+ extern "C++" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
+
+// Platform-specific defs.
+#if defined(_MSC_VER)
+# define ALWAYS_INLINE __forceinline
+// FIXME(timurrrr): do we need this on Windows?
+# define ALIAS(x)
+# define ALIGNED(x) __declspec(align(x))
+# define FORMAT(f, a)
+# define NOINLINE __declspec(noinline)
+# define NORETURN __declspec(noreturn)
+# define THREADLOCAL __declspec(thread)
+# define LIKELY(x) (x)
+# define UNLIKELY(x) (x)
+# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ (void)0
+# define WARN_UNUSED_RESULT
+#else // _MSC_VER
+# define ALWAYS_INLINE inline __attribute__((always_inline))
+# define ALIAS(x) __attribute__((alias(x)))
+// Please only use the ALIGNED macro before the type.
+// Using ALIGNED after the variable declaration is not portable!
+# define ALIGNED(x) __attribute__((aligned(x)))
+# define FORMAT(f, a) __attribute__((format(printf, f, a)))
+# define NOINLINE __attribute__((noinline))
+# define NORETURN __attribute__((noreturn))
+# define THREADLOCAL __thread
+# define LIKELY(x) __builtin_expect(!!(x), 1)
+# define UNLIKELY(x) __builtin_expect(!!(x), 0)
+# if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(x) generates prefetchnt0 on x86
+# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
+# else
+# define PREFETCH(x) __builtin_prefetch(x)
+# endif
+# define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#endif // _MSC_VER
+
+#if !defined(_MSC_VER) || defined(__clang__)
+# define UNUSED __attribute__((unused))
+# define USED __attribute__((used))
+#else
+# define UNUSED
+# define USED
+#endif
+
+#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900)
+# define NOEXCEPT noexcept
+#else
+# define NOEXCEPT throw()
+#endif
+
+// Unaligned versions of basic types.
+typedef ALIGNED(1) u16 uu16;
+typedef ALIGNED(1) u32 uu32;
+typedef ALIGNED(1) u64 uu64;
+typedef ALIGNED(1) s16 us16;
+typedef ALIGNED(1) s32 us32;
+typedef ALIGNED(1) s64 us64;
+
+#if SANITIZER_WINDOWS
+} // namespace __sanitizer
+typedef unsigned long DWORD;
+namespace __sanitizer {
+typedef DWORD thread_return_t;
+# define THREAD_CALLING_CONV __stdcall
+#else // _WIN32
+typedef void* thread_return_t;
+# define THREAD_CALLING_CONV
+#endif // _WIN32
+typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
+
+// NOTE: Functions below must be defined in each run-time.
+void NORETURN Die();
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2);
+
+// Check macro
+#define RAW_CHECK_MSG(expr, msg) do { \
+ if (UNLIKELY(!(expr))) { \
+ RawWrite(msg); \
+ Die(); \
+ } \
+} while (0)
+
+#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
+
+#define CHECK_IMPL(c1, op, c2) \
+ do { \
+ __sanitizer::u64 v1 = (__sanitizer::u64)(c1); \
+ __sanitizer::u64 v2 = (__sanitizer::u64)(c2); \
+ if (UNLIKELY(!(v1 op v2))) \
+ __sanitizer::CheckFailed(__FILE__, __LINE__, \
+ "(" #c1 ") " #op " (" #c2 ")", v1, v2); \
+ } while (false) \
+/**/
+
+#define CHECK(a) CHECK_IMPL((a), !=, 0)
+#define CHECK_EQ(a, b) CHECK_IMPL((a), ==, (b))
+#define CHECK_NE(a, b) CHECK_IMPL((a), !=, (b))
+#define CHECK_LT(a, b) CHECK_IMPL((a), <, (b))
+#define CHECK_LE(a, b) CHECK_IMPL((a), <=, (b))
+#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
+#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
+
+#if SANITIZER_DEBUG
+#define DCHECK(a) CHECK(a)
+#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+#define DCHECK_NE(a, b) CHECK_NE(a, b)
+#define DCHECK_LT(a, b) CHECK_LT(a, b)
+#define DCHECK_LE(a, b) CHECK_LE(a, b)
+#define DCHECK_GT(a, b) CHECK_GT(a, b)
+#define DCHECK_GE(a, b) CHECK_GE(a, b)
+#else
+#define DCHECK(a)
+#define DCHECK_EQ(a, b)
+#define DCHECK_NE(a, b)
+#define DCHECK_LT(a, b)
+#define DCHECK_LE(a, b)
+#define DCHECK_GT(a, b)
+#define DCHECK_GE(a, b)
+#endif
+
+#define UNREACHABLE(msg) do { \
+ CHECK(0 && msg); \
+ Die(); \
+} while (0)
+
+#define UNIMPLEMENTED() UNREACHABLE("unimplemented")
+
+#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
+
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
+
+#define IMPL_PASTE(a, b) a##b
+#define IMPL_COMPILER_ASSERT(pred, line) \
+ typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1]
+
+// Limits for integral types. We have to redefine it in case we don't
+// have stdint.h (like in Visual Studio 9).
+#undef __INT64_C
+#undef __UINT64_C
+#if SANITIZER_WORDSIZE == 64
+# define __INT64_C(c) c ## L
+# define __UINT64_C(c) c ## UL
+#else
+# define __INT64_C(c) c ## LL
+# define __UINT64_C(c) c ## ULL
+#endif // SANITIZER_WORDSIZE == 64
+#undef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#undef INT32_MAX
+#define INT32_MAX (2147483647)
+#undef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#undef INT64_MIN
+#define INT64_MIN (-__INT64_C(9223372036854775807)-1)
+#undef INT64_MAX
+#define INT64_MAX (__INT64_C(9223372036854775807))
+#undef UINT64_MAX
+#define UINT64_MAX (__UINT64_C(18446744073709551615))
+#undef UINTPTR_MAX
+#if SANITIZER_WORDSIZE == 64
+# define UINTPTR_MAX (18446744073709551615UL)
+#else
+# define UINTPTR_MAX (4294967295U)
+#endif // SANITIZER_WORDSIZE == 64
+
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+
+#if !defined(_MSC_VER) || defined(__clang__)
+#if SANITIZER_S390_31
+#define GET_CALLER_PC() \
+ (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
+#endif
+#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
+inline void Trap() {
+ __builtin_trap();
+}
+#else
+extern "C" void* _ReturnAddress(void);
+extern "C" void* _AddressOfReturnAddress(void);
+# pragma intrinsic(_ReturnAddress)
+# pragma intrinsic(_AddressOfReturnAddress)
+#define GET_CALLER_PC() (__sanitizer::uptr) _ReturnAddress()
+// CaptureStackBackTrace doesn't need to know BP on Windows.
+#define GET_CURRENT_FRAME() \
+ (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
+
+extern "C" void __ud2(void);
+# pragma intrinsic(__ud2)
+inline void Trap() {
+ __ud2();
+}
+#endif
+
+#define HANDLE_EINTR(res, f) \
+ { \
+ int rverrno; \
+ do { \
+ res = (f); \
+ } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
+ }
+
+// Forces the compiler to generate a frame pointer in the function.
+#define ENABLE_FRAME_POINTER \
+ do { \
+ volatile __sanitizer::uptr enable_fp; \
+ enable_fp = GET_CURRENT_FRAME(); \
+ (void)enable_fp; \
+ } while (0)
+
+} // namespace __sanitizer
+
+namespace __asan {
+using namespace __sanitizer;
+}
+namespace __dsan {
+using namespace __sanitizer;
+}
+namespace __dfsan {
+using namespace __sanitizer;
+}
+namespace __lsan {
+using namespace __sanitizer;
+}
+namespace __msan {
+using namespace __sanitizer;
+}
+namespace __hwasan {
+using namespace __sanitizer;
+}
+namespace __tsan {
+using namespace __sanitizer;
+}
+namespace __scudo {
+using namespace __sanitizer;
+}
+namespace __ubsan {
+using namespace __sanitizer;
+}
+namespace __xray {
+using namespace __sanitizer;
+}
+namespace __interception {
+using namespace __sanitizer;
+}
+namespace __hwasan {
+using namespace __sanitizer;
+}
+
+#endif // SANITIZER_DEFS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_lfstack.h b/lib/tsan/sanitizer_common/sanitizer_lfstack.h
new file mode 100644
index 0000000000..af2ca55ec3
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_lfstack.h
@@ -0,0 +1,72 @@
+//===-- sanitizer_lfstack.h -=-----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Lock-free stack.
+// Uses 32/17 bits as ABA-counter on 32/64-bit platforms.
+// The memory passed to Push() must not be ever munmap'ed.
+// The type T must contain T *next field.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LFSTACK_H
+#define SANITIZER_LFSTACK_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+template<typename T>
+struct LFStack {
+ void Clear() {
+ atomic_store(&head_, 0, memory_order_relaxed);
+ }
+
+ bool Empty() const {
+ return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
+ }
+
+ void Push(T *p) {
+ u64 cmp = atomic_load(&head_, memory_order_relaxed);
+ for (;;) {
+ u64 cnt = (cmp & kCounterMask) + kCounterInc;
+ u64 xch = (u64)(uptr)p | cnt;
+ p->next = (T*)(uptr)(cmp & kPtrMask);
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_release))
+ break;
+ }
+ }
+
+ T *Pop() {
+ u64 cmp = atomic_load(&head_, memory_order_acquire);
+ for (;;) {
+ T *cur = (T*)(uptr)(cmp & kPtrMask);
+ if (!cur)
+ return nullptr;
+ T *nxt = cur->next;
+ u64 cnt = (cmp & kCounterMask);
+ u64 xch = (u64)(uptr)nxt | cnt;
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_acquire))
+ return cur;
+ }
+ }
+
+ // private:
+ static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
+ static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
+ static const u64 kCounterMask = ~kPtrMask;
+ static const u64 kCounterInc = kPtrMask + 1;
+
+ atomic_uint64_t head_;
+};
+} // namespace __sanitizer
+
+#endif // SANITIZER_LFSTACK_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_libc.cpp b/lib/tsan/sanitizer_common/sanitizer_libc.cpp
new file mode 100644
index 0000000000..4bc04b4868
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_libc.cpp
@@ -0,0 +1,280 @@
+//===-- sanitizer_libc.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. See sanitizer_libc.h for details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+s64 internal_atoll(const char *nptr) {
+ return internal_simple_strtoll(nptr, nullptr, 10);
+}
+
+void *internal_memchr(const void *s, int c, uptr n) {
+ const char *t = (const char *)s;
+ for (uptr i = 0; i < n; ++i, ++t)
+ if (*t == c)
+ return reinterpret_cast<void *>(const_cast<char *>(t));
+ return nullptr;
+}
+
+void *internal_memrchr(const void *s, int c, uptr n) {
+ const char *t = (const char *)s;
+ void *res = nullptr;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
+ }
+ return res;
+}
+
+int internal_memcmp(const void* s1, const void* s2, uptr n) {
+ const char *t1 = (const char *)s1;
+ const char *t2 = (const char *)s2;
+ for (uptr i = 0; i < n; ++i, ++t1, ++t2)
+ if (*t1 != *t2)
+ return *t1 < *t2 ? -1 : 1;
+ return 0;
+}
+
+void *internal_memcpy(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ const char *s = (const char *)src;
+ for (uptr i = 0; i < n; ++i)
+ d[i] = s[i];
+ return dest;
+}
+
+void *internal_memmove(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ const char *s = (const char *)src;
+ sptr i, signed_n = (sptr)n;
+ CHECK_GE(signed_n, 0);
+ if (d < s) {
+ for (i = 0; i < signed_n; ++i)
+ d[i] = s[i];
+ } else {
+ if (d > s && signed_n > 0) {
+ for (i = signed_n - 1; i >= 0; --i) {
+ d[i] = s[i];
+ }
+ }
+ }
+ return dest;
+}
+
+void *internal_memset(void* s, int c, uptr n) {
+ // Optimize for the most performance-critical case:
+ if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {
+ u64 *p = reinterpret_cast<u64*>(s);
+ u64 *e = p + n / 8;
+ u64 v = c;
+ v |= v << 8;
+ v |= v << 16;
+ v |= v << 32;
+ for (; p < e; p += 2)
+ p[0] = p[1] = v;
+ return s;
+ }
+ // The next line prevents Clang from making a call to memset() instead of the
+ // loop below.
+ // FIXME: building the runtime with -ffreestanding is a better idea. However
+ // there currently are linktime problems due to PR12396.
+ char volatile *t = (char*)s;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ *t = c;
+ }
+ return s;
+}
+
+uptr internal_strcspn(const char *s, const char *reject) {
+ uptr i;
+ for (i = 0; s[i]; i++) {
+ if (internal_strchr(reject, s[i]))
+ return i;
+ }
+ return i;
+}
+
+char* internal_strdup(const char *s) {
+ uptr len = internal_strlen(s);
+ char *s2 = (char*)InternalAlloc(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+int internal_strcmp(const char *s1, const char *s2) {
+ while (true) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+int internal_strncmp(const char *s1, const char *s2, uptr n) {
+ for (uptr i = 0; i < n; i++) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+char* internal_strchr(const char *s, int c) {
+ while (true) {
+ if (*s == (char)c)
+ return const_cast<char *>(s);
+ if (*s == 0)
+ return nullptr;
+ s++;
+ }
+}
+
+char *internal_strchrnul(const char *s, int c) {
+ char *res = internal_strchr(s, c);
+ if (!res)
+ res = const_cast<char *>(s) + internal_strlen(s);
+ return res;
+}
+
+char *internal_strrchr(const char *s, int c) {
+ const char *res = nullptr;
+ for (uptr i = 0; s[i]; i++) {
+ if (s[i] == c) res = s + i;
+ }
+ return const_cast<char *>(res);
+}
+
+uptr internal_strlen(const char *s) {
+ uptr i = 0;
+ while (s[i]) i++;
+ return i;
+}
+
+uptr internal_strlcat(char *dst, const char *src, uptr maxlen) {
+ const uptr srclen = internal_strlen(src);
+ const uptr dstlen = internal_strnlen(dst, maxlen);
+ if (dstlen == maxlen) return maxlen + srclen;
+ if (srclen < maxlen - dstlen) {
+ internal_memmove(dst + dstlen, src, srclen + 1);
+ } else {
+ internal_memmove(dst + dstlen, src, maxlen - dstlen - 1);
+ dst[maxlen - 1] = '\0';
+ }
+ return dstlen + srclen;
+}
+
+char *internal_strncat(char *dst, const char *src, uptr n) {
+ uptr len = internal_strlen(dst);
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[len + i] = src[i];
+ dst[len + i] = 0;
+ return dst;
+}
+
+uptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {
+ const uptr srclen = internal_strlen(src);
+ if (srclen < maxlen) {
+ internal_memmove(dst, src, srclen + 1);
+ } else if (maxlen != 0) {
+ internal_memmove(dst, src, maxlen - 1);
+ dst[maxlen - 1] = '\0';
+ }
+ return srclen;
+}
+
+char *internal_strncpy(char *dst, const char *src, uptr n) {
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[i] = src[i];
+ internal_memset(dst + i, '\0', n - i);
+ return dst;
+}
+
+uptr internal_strnlen(const char *s, uptr maxlen) {
+ uptr i = 0;
+ while (i < maxlen && s[i]) i++;
+ return i;
+}
+
+char *internal_strstr(const char *haystack, const char *needle) {
+ // This is O(N^2), but we are not using it in hot places.
+ uptr len1 = internal_strlen(haystack);
+ uptr len2 = internal_strlen(needle);
+ if (len1 < len2) return nullptr;
+ for (uptr pos = 0; pos <= len1 - len2; pos++) {
+ if (internal_memcmp(haystack + pos, needle, len2) == 0)
+ return const_cast<char *>(haystack) + pos;
+ }
+ return nullptr;
+}
+
+s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {
+ CHECK_EQ(base, 10);
+ while (IsSpace(*nptr)) nptr++;
+ int sgn = 1;
+ u64 res = 0;
+ bool have_digits = false;
+ char *old_nptr = const_cast<char *>(nptr);
+ if (*nptr == '+') {
+ sgn = 1;
+ nptr++;
+ } else if (*nptr == '-') {
+ sgn = -1;
+ nptr++;
+ }
+ while (IsDigit(*nptr)) {
+ res = (res <= UINT64_MAX / 10) ? res * 10 : UINT64_MAX;
+ int digit = ((*nptr) - '0');
+ res = (res <= UINT64_MAX - digit) ? res + digit : UINT64_MAX;
+ have_digits = true;
+ nptr++;
+ }
+ if (endptr) {
+ *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr;
+ }
+ if (sgn > 0) {
+ return (s64)(Min((u64)INT64_MAX, res));
+ } else {
+ return (res > INT64_MAX) ? INT64_MIN : ((s64)res * -1);
+ }
+}
+
+bool mem_is_zero(const char *beg, uptr size) {
+ CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
+ const char *end = beg + size;
+ uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
+ uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
+ uptr all = 0;
+ // Prologue.
+ for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
+ all |= *mem;
+ // Aligned loop.
+ for (; aligned_beg < aligned_end; aligned_beg++)
+ all |= *aligned_beg;
+ // Epilogue.
+ if ((char *)aligned_end >= beg) {
+ for (const char *mem = (char *)aligned_end; mem < end; mem++) all |= *mem;
+ }
+ return all == 0;
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_libc.h b/lib/tsan/sanitizer_common/sanitizer_libc.h
new file mode 100644
index 0000000000..ec0a6ded00
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_libc.h
@@ -0,0 +1,85 @@
+//===-- sanitizer_libc.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// These tools can not use some of the libc functions directly because those
+// functions are intercepted. Instead, we implement a tiny subset of libc here.
+// FIXME: Some of functions declared in this file are in fact POSIX, not libc.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LIBC_H
+#define SANITIZER_LIBC_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers from sanitizer runtime.
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// internal_X() is a custom implementation of X() for use in RTL.
+
+// String functions
+s64 internal_atoll(const char *nptr);
+void *internal_memchr(const void *s, int c, uptr n);
+void *internal_memrchr(const void *s, int c, uptr n);
+int internal_memcmp(const void* s1, const void* s2, uptr n);
+void *internal_memcpy(void *dest, const void *src, uptr n);
+void *internal_memmove(void *dest, const void *src, uptr n);
+// Should not be used in performance-critical places.
+void *internal_memset(void *s, int c, uptr n);
+char* internal_strchr(const char *s, int c);
+char *internal_strchrnul(const char *s, int c);
+int internal_strcmp(const char *s1, const char *s2);
+uptr internal_strcspn(const char *s, const char *reject);
+char *internal_strdup(const char *s);
+uptr internal_strlen(const char *s);
+uptr internal_strlcat(char *dst, const char *src, uptr maxlen);
+char *internal_strncat(char *dst, const char *src, uptr n);
+int internal_strncmp(const char *s1, const char *s2, uptr n);
+uptr internal_strlcpy(char *dst, const char *src, uptr maxlen);
+char *internal_strncpy(char *dst, const char *src, uptr n);
+uptr internal_strnlen(const char *s, uptr maxlen);
+char *internal_strrchr(const char *s, int c);
+char *internal_strstr(const char *haystack, const char *needle);
+// Works only for base=10 and doesn't set errno.
+s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);
+int internal_snprintf(char *buffer, uptr length, const char *format, ...);
+
+// Return true if all bytes in [mem, mem+size) are zero.
+// Optimized for the case when the result is true.
+bool mem_is_zero(const char *mem, uptr size);
+
+// I/O
+// Define these as macros so we can use them in linker initialized global
+// structs without dynamic initialization.
+#define kInvalidFd ((fd_t)-1)
+#define kStdinFd ((fd_t)0)
+#define kStdoutFd ((fd_t)1)
+#define kStderrFd ((fd_t)2)
+
+uptr internal_ftruncate(fd_t fd, uptr size);
+
+// OS
+void NORETURN internal__exit(int exitcode);
+unsigned int internal_sleep(unsigned int seconds);
+
+uptr internal_getpid();
+uptr internal_getppid();
+
+int internal_dlinfo(void *handle, int request, void *p);
+
+// Threading
+uptr internal_sched_yield();
+
+// Error handling
+bool internal_iserror(uptr retval, int *rverrno = nullptr);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIBC_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_libignore.cpp b/lib/tsan/sanitizer_common/sanitizer_libignore.cpp
new file mode 100644
index 0000000000..eb9bb76501
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_libignore.cpp
@@ -0,0 +1,129 @@
+//===-- sanitizer_libignore.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
+ SANITIZER_NETBSD || SANITIZER_OPENBSD
+
+#include "sanitizer_libignore.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+LibIgnore::LibIgnore(LinkerInitialized) {
+}
+
+void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
+ BlockingMutexLock lock(&mutex_);
+ if (count_ >= kMaxLibs) {
+ Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+ kMaxLibs);
+ Die();
+ }
+ Lib *lib = &libs_[count_++];
+ lib->templ = internal_strdup(name_templ);
+ lib->name = nullptr;
+ lib->real_name = nullptr;
+ lib->loaded = false;
+}
+
+void LibIgnore::OnLibraryLoaded(const char *name) {
+ BlockingMutexLock lock(&mutex_);
+ // Try to match suppressions with symlink target.
+ InternalScopedString buf(kMaxPathLength);
+ if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
+ buf[0]) {
+ for (uptr i = 0; i < count_; i++) {
+ Lib *lib = &libs_[i];
+ if (!lib->loaded && (!lib->real_name) &&
+ TemplateMatch(lib->templ, name))
+ lib->real_name = internal_strdup(buf.data());
+ }
+ }
+
+ // Scan suppressions list and find newly loaded and unloaded libraries.
+ ListOfModules modules;
+ modules.init();
+ for (uptr i = 0; i < count_; i++) {
+ Lib *lib = &libs_[i];
+ bool loaded = false;
+ for (const auto &mod : modules) {
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (!TemplateMatch(lib->templ, mod.full_name()) &&
+ !(lib->real_name &&
+ internal_strcmp(lib->real_name, mod.full_name()) == 0))
+ continue;
+ if (loaded) {
+ Report("%s: called_from_lib suppression '%s' is matched against"
+ " 2 libraries: '%s' and '%s'\n",
+ SanitizerToolName, lib->templ, lib->name, mod.full_name());
+ Die();
+ }
+ loaded = true;
+ if (lib->loaded)
+ continue;
+ VReport(1,
+ "Matched called_from_lib suppression '%s' against library"
+ " '%s'\n",
+ lib->templ, mod.full_name());
+ lib->loaded = true;
+ lib->name = internal_strdup(mod.full_name());
+ const uptr idx =
+ atomic_load(&ignored_ranges_count_, memory_order_relaxed);
+ CHECK_LT(idx, ARRAY_SIZE(ignored_code_ranges_));
+ ignored_code_ranges_[idx].begin = range.beg;
+ ignored_code_ranges_[idx].end = range.end;
+ atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
+ break;
+ }
+ }
+ if (lib->loaded && !loaded) {
+ Report("%s: library '%s' that was matched against called_from_lib"
+ " suppression '%s' is unloaded\n",
+ SanitizerToolName, lib->name, lib->templ);
+ Die();
+ }
+ }
+
+ // Track instrumented ranges.
+ if (track_instrumented_libs_) {
+ for (const auto &mod : modules) {
+ if (!mod.instrumented())
+ continue;
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
+ continue;
+ VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
+ range.beg, range.end, mod.full_name());
+ const uptr idx =
+ atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
+ CHECK_LT(idx, ARRAY_SIZE(instrumented_code_ranges_));
+ instrumented_code_ranges_[idx].begin = range.beg;
+ instrumented_code_ranges_[idx].end = range.end;
+ atomic_store(&instrumented_ranges_count_, idx + 1,
+ memory_order_release);
+ }
+ }
+ }
+}
+
+void LibIgnore::OnLibraryUnloaded() {
+ OnLibraryLoaded(nullptr);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
+ // SANITIZER_NETBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_libignore.h b/lib/tsan/sanitizer_common/sanitizer_libignore.h
new file mode 100644
index 0000000000..256f685979
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_libignore.h
@@ -0,0 +1,115 @@
+//===-- sanitizer_libignore.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// LibIgnore allows to ignore all interceptors called from a particular set
+// of dynamic libraries. LibIgnore can be initialized with several templates
+// of names of libraries to be ignored. It finds code ranges for the libraries;
+// and checks whether the provided PC value belongs to the code ranges.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LIBIGNORE_H
+#define SANITIZER_LIBIGNORE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+class LibIgnore {
+ public:
+ explicit LibIgnore(LinkerInitialized);
+
+ // Must be called during initialization.
+ void AddIgnoredLibrary(const char *name_templ);
+ void IgnoreNoninstrumentedModules(bool enable) {
+ track_instrumented_libs_ = enable;
+ }
+
+ // Must be called after a new dynamic library is loaded.
+ void OnLibraryLoaded(const char *name);
+
+ // Must be called after a dynamic library is unloaded.
+ void OnLibraryUnloaded();
+
+ // Checks whether the provided PC belongs to one of the ignored libraries or
+ // the PC should be ignored because it belongs to an non-instrumented module
+ // (when ignore_noninstrumented_modules=1). Also returns true via
+ // "pc_in_ignored_lib" if the PC is in an ignored library, false otherwise.
+ bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
+
+ // Checks whether the provided PC belongs to an instrumented module.
+ bool IsPcInstrumented(uptr pc) const;
+
+ private:
+ struct Lib {
+ char *templ;
+ char *name;
+ char *real_name; // target of symlink
+ bool loaded;
+ };
+
+ struct LibCodeRange {
+ uptr begin;
+ uptr end;
+ };
+
+ inline bool IsInRange(uptr pc, const LibCodeRange &range) const {
+ return (pc >= range.begin && pc < range.end);
+ }
+
+ static const uptr kMaxIgnoredRanges = 128;
+ static const uptr kMaxInstrumentedRanges = 1024;
+ static const uptr kMaxLibs = 1024;
+
+ // Hot part:
+ atomic_uintptr_t ignored_ranges_count_;
+ LibCodeRange ignored_code_ranges_[kMaxIgnoredRanges];
+
+ atomic_uintptr_t instrumented_ranges_count_;
+ LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];
+
+ // Cold part:
+ BlockingMutex mutex_;
+ uptr count_;
+ Lib libs_[kMaxLibs];
+ bool track_instrumented_libs_;
+
+ // Disallow copying of LibIgnore objects.
+ LibIgnore(const LibIgnore&); // not implemented
+ void operator = (const LibIgnore&); // not implemented
+};
+
+inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
+ const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
+ for (uptr i = 0; i < n; i++) {
+ if (IsInRange(pc, ignored_code_ranges_[i])) {
+ *pc_in_ignored_lib = true;
+ return true;
+ }
+ }
+ *pc_in_ignored_lib = false;
+ if (track_instrumented_libs_ && !IsPcInstrumented(pc))
+ return true;
+ return false;
+}
+
+inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
+ const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
+ for (uptr i = 0; i < n; i++) {
+ if (IsInRange(pc, instrumented_code_ranges_[i]))
+ return true;
+ }
+ return false;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIBIGNORE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_linux.cpp b/lib/tsan/sanitizer_common/sanitizer_linux.cpp
new file mode 100644
index 0000000000..470f4b70f0
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_linux.cpp
@@ -0,0 +1,2269 @@
+//===-- sanitizer_linux.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_getauxval.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#if SANITIZER_LINUX && !SANITIZER_GO
+#include <asm/param.h>
+#endif
+
+// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
+// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
+// access stat from asm/stat.h, without conflicting with definition in
+// sys/stat.h, we use this trick.
+#if defined(__mips64)
+#include <asm/unistd.h>
+#include <sys/types.h>
+#define stat kernel_stat
+#include <asm/stat.h>
+#undef stat
+#endif
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <link.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#if !SANITIZER_SOLARIS
+#include <sys/ptrace.h>
+#endif
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#if !SANITIZER_OPENBSD
+#include <ucontext.h>
+#endif
+#if SANITIZER_OPENBSD
+#include <sys/futex.h>
+#include <sys/sysctl.h>
+#endif
+#include <unistd.h>
+
+#if SANITIZER_LINUX
+#include <sys/utsname.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <sys/personality.h>
+#endif
+
+#if SANITIZER_FREEBSD
+#include <sys/exec.h>
+#include <sys/sysctl.h>
+#include <machine/atomic.h>
+extern "C" {
+// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
+// FreeBSD 9.2 and 10.0.
+#include <sys/umtx.h>
+}
+#include <sys/thr.h>
+#endif // SANITIZER_FREEBSD
+
+#if SANITIZER_NETBSD
+#include <limits.h> // For NAME_MAX
+#include <sys/sysctl.h>
+#include <sys/exec.h>
+extern struct ps_strings *__ps_strings;
+#endif // SANITIZER_NETBSD
+
+#if SANITIZER_SOLARIS
+#include <stdlib.h>
+#include <thread.h>
+#define environ _environ
+#endif
+
+extern char **environ;
+
+#if SANITIZER_LINUX
+// <linux/time.h>
+struct kernel_timeval {
+ long tv_sec;
+ long tv_usec;
+};
+
+// <linux/futex.h> is broken on some linux distributions.
+const int FUTEX_WAIT = 0;
+const int FUTEX_WAKE = 1;
+const int FUTEX_PRIVATE_FLAG = 128;
+const int FUTEX_WAIT_PRIVATE = FUTEX_WAIT | FUTEX_PRIVATE_FLAG;
+const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
+#endif // SANITIZER_LINUX
+
+// Are we using 32-bit or 64-bit Linux syscalls?
+// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
+// but it still needs to use 64-bit syscalls.
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+ SANITIZER_WORDSIZE == 64)
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
+#else
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
+#endif
+
+// Note : FreeBSD had implemented both
+// Linux and OpenBSD apis, available from
+// future 12.x version most likely
+#if SANITIZER_LINUX && defined(__NR_getrandom)
+# if !defined(GRND_NONBLOCK)
+# define GRND_NONBLOCK 1
+# endif
+# define SANITIZER_USE_GETRANDOM 1
+#else
+# define SANITIZER_USE_GETRANDOM 0
+#endif // SANITIZER_LINUX && defined(__NR_getrandom)
+
+#if SANITIZER_OPENBSD
+# define SANITIZER_USE_GETENTROPY 1
+#else
+# if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000
+# define SANITIZER_USE_GETENTROPY 1
+# else
+# define SANITIZER_USE_GETENTROPY 0
+# endif
+#endif // SANITIZER_USE_GETENTROPY
+
+namespace __sanitizer {
+
+#if SANITIZER_LINUX && defined(__x86_64__)
+#include "sanitizer_syscall_linux_x86_64.inc"
+#elif SANITIZER_LINUX && defined(__aarch64__)
+#include "sanitizer_syscall_linux_aarch64.inc"
+#elif SANITIZER_LINUX && defined(__arm__)
+#include "sanitizer_syscall_linux_arm.inc"
+#else
+#include "sanitizer_syscall_generic.inc"
+#endif
+
+// --------------- sanitizer_libc.h
+#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#if !SANITIZER_S390 && !SANITIZER_OPENBSD
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ u64 offset) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
+ offset);
+#else
+ // mmap2 specifies file offset in 4096-byte units.
+ CHECK(IsAligned(offset, 4096));
+ return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
+ offset / 4096);
+#endif
+}
+#endif // !SANITIZER_S390 && !SANITIZER_OPENBSD
+
+#if !SANITIZER_OPENBSD
+uptr internal_munmap(void *addr, uptr length) {
+ return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
+}
+#endif
+
+uptr internal_close(fd_t fd) {
+ return internal_syscall(SYSCALL(close), fd);
+}
+
+uptr internal_open(const char *filename, int flags) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags);
+#endif
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
+ mode);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
+#endif
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ sptr res;
+ HANDLE_EINTR(res,
+ (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf, count));
+ return res;
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ sptr res;
+ HANDLE_EINTR(res,
+ (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf, count));
+ return res;
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ sptr res;
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,
+ (OFF_T)size));
+ return res;
+}
+
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && SANITIZER_LINUX
+static void stat64_to_stat(struct stat64 *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = in->st_dev;
+ out->st_ino = in->st_ino;
+ out->st_mode = in->st_mode;
+ out->st_nlink = in->st_nlink;
+ out->st_uid = in->st_uid;
+ out->st_gid = in->st_gid;
+ out->st_rdev = in->st_rdev;
+ out->st_size = in->st_size;
+ out->st_blksize = in->st_blksize;
+ out->st_blocks = in->st_blocks;
+ out->st_atime = in->st_atime;
+ out->st_mtime = in->st_mtime;
+ out->st_ctime = in->st_ctime;
+}
+#endif
+
+#if defined(__mips64)
+// Undefine compatibility macros from <sys/stat.h>
+// so that they would not clash with the kernel_stat
+// st_[a|m|c]time fields
+#undef st_atime
+#undef st_mtime
+#undef st_ctime
+#if defined(SANITIZER_ANDROID)
+// Bionic sys/stat.h defines additional macros
+// for compatibility with the old NDKs and
+// they clash with the kernel_stat structure
+// st_[a|m|c]time_nsec fields.
+#undef st_atime_nsec
+#undef st_mtime_nsec
+#undef st_ctime_nsec
+#endif
+static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = in->st_dev;
+ out->st_ino = in->st_ino;
+ out->st_mode = in->st_mode;
+ out->st_nlink = in->st_nlink;
+ out->st_uid = in->st_uid;
+ out->st_gid = in->st_gid;
+ out->st_rdev = in->st_rdev;
+ out->st_size = in->st_size;
+ out->st_blksize = in->st_blksize;
+ out->st_blocks = in->st_blocks;
+#if defined(__USE_MISC) || \
+ defined(__USE_XOPEN2K8) || \
+ defined(SANITIZER_ANDROID)
+ out->st_atim.tv_sec = in->st_atime;
+ out->st_atim.tv_nsec = in->st_atime_nsec;
+ out->st_mtim.tv_sec = in->st_mtime;
+ out->st_mtim.tv_nsec = in->st_mtime_nsec;
+ out->st_ctim.tv_sec = in->st_ctime;
+ out->st_ctim.tv_nsec = in->st_ctime_nsec;
+#else
+ out->st_atime = in->st_atime;
+ out->st_atimensec = in->st_atime_nsec;
+ out->st_mtime = in->st_mtime;
+ out->st_mtimensec = in->st_mtime_nsec;
+ out->st_ctime = in->st_ctime;
+ out->st_atimensec = in->st_ctime_nsec;
+#endif
+}
+#endif
+
+uptr internal_stat(const char *path, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
+ 0);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if defined(__mips64)
+ // For mips64, stat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(stat), path, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
+ return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
+# endif
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(stat64), path, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
+ AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
+ AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
+ // For mips64, lstat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
+ return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
+# endif
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD || \
+ SANITIZER_LINUX_USES_64BIT_SYSCALLS
+#if SANITIZER_MIPS64 && !SANITIZER_OPENBSD
+ // For mips64, fstat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
+ return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
+# endif
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+uptr internal_dup(int oldfd) {
+ return internal_syscall(SYSCALL(dup), oldfd);
+}
+
+uptr internal_dup2(int oldfd, int newfd) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
+#else
+ return internal_syscall(SYSCALL(dup2), oldfd, newfd);
+#endif
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
+ bufsize);
+#elif SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
+ bufsize);
+#else
+ return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
+#endif
+}
+
+uptr internal_unlink(const char *path) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
+#else
+ return internal_syscall(SYSCALL(unlink), (uptr)path);
+#endif
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+#if defined(__riscv)
+ return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+ (uptr)newpath, 0);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+ (uptr)newpath);
+#else
+ return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
+#endif
+}
+
+uptr internal_sched_yield() {
+ return internal_syscall(SYSCALL(sched_yield));
+}
+
+void internal__exit(int exitcode) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ internal_syscall(SYSCALL(exit), exitcode);
+#else
+ internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
+ Die(); // Unreachable.
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ struct timespec ts;
+ ts.tv_sec = seconds;
+ ts.tv_nsec = 0;
+ int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts);
+ if (res) return ts.tv_sec;
+ return 0;
+}
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
+ (uptr)envp);
+}
+#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+
+// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+ if (ShouldMockFailureToOpen(filename))
+ return false;
+ struct stat st;
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
+#else
+ if (internal_stat(filename, &st))
+#endif
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+#if !SANITIZER_NETBSD
+tid_t GetTid() {
+#if SANITIZER_FREEBSD
+ long Tid;
+ thr_self(&Tid);
+ return Tid;
+#elif SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(getthrid));
+#elif SANITIZER_SOLARIS
+ return thr_self();
+#else
+ return internal_syscall(SYSCALL(gettid));
+#endif
+}
+
+int TgKill(pid_t pid, tid_t tid, int sig) {
+#if SANITIZER_LINUX
+ return internal_syscall(SYSCALL(tgkill), pid, tid, sig);
+#elif SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(thr_kill2), pid, tid, sig);
+#elif SANITIZER_OPENBSD
+ (void)pid;
+ return internal_syscall(SYSCALL(thrkill), tid, sig, nullptr);
+#elif SANITIZER_SOLARIS
+ (void)pid;
+ return thr_kill(tid, sig);
+#endif
+}
+#endif
+
+#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+u64 NanoTime() {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ timeval tv;
+#else
+ kernel_timeval tv;
+#endif
+ internal_memset(&tv, 0, sizeof(tv));
+ internal_syscall(SYSCALL(gettimeofday), &tv, 0);
+ return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
+}
+
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
+ return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);
+}
+#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+
+// Like getenv, but reads env directly from /proc (on Linux) or parses the
+// 'environ' array (on some others) and does not use libc. This function
+// should be called first inside __asan_init.
+const char *GetEnv(const char *name) {
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || \
+ SANITIZER_SOLARIS
+ if (::environ != 0) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = ::environ; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return 0; // Not found.
+#elif SANITIZER_LINUX
+ static char *environ;
+ static uptr len;
+ static bool inited;
+ if (!inited) {
+ inited = true;
+ uptr environ_size;
+ if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len))
+ environ = nullptr;
+ }
+ if (!environ || len == 0) return nullptr;
+ uptr namelen = internal_strlen(name);
+ const char *p = environ;
+ while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
+ // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
+ const char* endp =
+ (char*)internal_memchr(p, '\0', len - (p - environ));
+ if (!endp) // this entry isn't NUL terminated
+ return nullptr;
+ else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
+ return p + namelen + 1; // point after =
+ p = endp + 1;
+ }
+ return nullptr; // Not found.
+#else
+#error "Unsupported platform"
+#endif
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_OPENBSD && \
+ !SANITIZER_GO
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
+}
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && \
+ !SANITIZER_OPENBSD
+static void ReadNullSepFileToArray(const char *path, char ***arr,
+ int arr_size) {
+ char *buff;
+ uptr buff_size;
+ uptr buff_len;
+ *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
+ if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) {
+ (*arr)[0] = nullptr;
+ return;
+ }
+ (*arr)[0] = buff;
+ int count, i;
+ for (count = 1, i = 1; ; i++) {
+ if (buff[i] == 0) {
+ if (buff[i+1] == 0) break;
+ (*arr)[count] = &buff[i+1];
+ CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
+ count++;
+ }
+ }
+ (*arr)[count] = nullptr;
+}
+#endif
+
+#if !SANITIZER_OPENBSD
+static void GetArgsAndEnv(char ***argv, char ***envp) {
+#if SANITIZER_FREEBSD
+ // On FreeBSD, retrieving the argument and environment arrays is done via the
+ // kern.ps_strings sysctl, which returns a pointer to a structure containing
+ // this information. See also <sys/exec.h>.
+ ps_strings *pss;
+ uptr sz = sizeof(pss);
+ if (internal_sysctlbyname("kern.ps_strings", &pss, &sz, NULL, 0) == -1) {
+ Printf("sysctl kern.ps_strings failed\n");
+ Die();
+ }
+ *argv = pss->ps_argvstr;
+ *envp = pss->ps_envstr;
+#elif SANITIZER_NETBSD
+ *argv = __ps_strings->ps_argvstr;
+ *envp = __ps_strings->ps_envstr;
+#else // SANITIZER_FREEBSD
+#if !SANITIZER_GO
+ if (&__libc_stack_end) {
+ uptr* stack_end = (uptr*)__libc_stack_end;
+ // Normally argc can be obtained from *stack_end, however, on ARM glibc's
+ // _start clobbers it:
+ // https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/arm/start.S;hb=refs/heads/release/2.31/master#l75
+ // Do not special-case ARM and infer argc from argv everywhere.
+ int argc = 0;
+ while (stack_end[argc + 1]) argc++;
+ *argv = (char**)(stack_end + 1);
+ *envp = (char**)(stack_end + argc + 2);
+ } else {
+#endif // !SANITIZER_GO
+ static const int kMaxArgv = 2000, kMaxEnvp = 2000;
+ ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
+ ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
+#if !SANITIZER_GO
+ }
+#endif // !SANITIZER_GO
+#endif // SANITIZER_FREEBSD
+}
+
+char **GetArgv() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return argv;
+}
+
+char **GetEnviron() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return envp;
+}
+
+#endif // !SANITIZER_OPENBSD
+
+#if !SANITIZER_SOLARIS
+enum MutexState {
+ MtxUnlocked = 0,
+ MtxLocked = 1,
+ MtxSleeping = 2
+};
+
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
+#elif SANITIZER_NETBSD
+ sched_yield(); /* No userspace futex-like synchronization */
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
+ 0, 0, 0);
+#endif
+ }
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
+#elif SANITIZER_NETBSD
+ /* No userspace futex-like synchronization */
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
+#endif
+ }
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+#endif // !SANITIZER_SOLARIS
+
+// ----------------- sanitizer_linux.h
+// The actual size of this structure is specified by d_reclen.
+// Note that getdents64 uses a different structure format. We only provide the
+// 32-bit syscall here.
+#if SANITIZER_NETBSD
+// Not used
+#elif SANITIZER_OPENBSD
+// struct dirent is different for Linux and us. At this moment, we use only
+// d_fileno (Linux call this d_ino), d_reclen, and d_name.
+struct linux_dirent {
+ u64 d_ino; // d_fileno
+ u16 d_reclen;
+ u16 d_namlen; // not used
+ u8 d_type; // not used
+ char d_name[NAME_MAX + 1];
+};
+#else
+struct linux_dirent {
+#if SANITIZER_X32 || defined(__aarch64__)
+ u64 d_ino;
+ u64 d_off;
+#else
+ unsigned long d_ino;
+ unsigned long d_off;
+#endif
+ unsigned short d_reclen;
+#ifdef __aarch64__
+ unsigned char d_type;
+#endif
+ char d_name[256];
+};
+#endif
+
+#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+// Syscall wrappers.
+uptr internal_ptrace(int request, int pid, void *addr, void *data) {
+ return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
+ (uptr)data);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
+ 0 /* rusage */);
+}
+
+uptr internal_getpid() {
+ return internal_syscall(SYSCALL(getpid));
+}
+
+uptr internal_getppid() {
+ return internal_syscall(SYSCALL(getppid));
+}
+
+int internal_dlinfo(void *handle, int request, void *p) {
+#if SANITIZER_FREEBSD
+ return dlinfo(handle, request, p);
+#else
+ UNIMPLEMENTED();
+#endif
+}
+
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
+#else
+ return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
+#endif
+}
+
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
+ return internal_syscall(SYSCALL(lseek), fd, offset, whence);
+}
+
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
+ return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
+}
+#endif
+
+uptr internal_sigaltstack(const void *ss, void *oss) {
+ return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
+}
+
+int internal_fork() {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
+#else
+ return internal_syscall(SYSCALL(fork));
+#endif
+}
+
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen) {
+#if SANITIZER_OPENBSD
+ return sysctl(name, namelen, oldp, (size_t *)oldlenp, (void *)newp,
+ (size_t)newlen);
+#else
+ return internal_syscall(SYSCALL(__sysctl), name, namelen, oldp,
+ (size_t *)oldlenp, newp, (size_t)newlen);
+#endif
+}
+
+#if SANITIZER_FREEBSD
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ static decltype(sysctlbyname) *real = nullptr;
+ if (!real)
+ real = (decltype(sysctlbyname) *)dlsym(RTLD_NEXT, "sysctlbyname");
+ CHECK(real);
+ return real(sname, oldp, (size_t *)oldlenp, newp, (size_t)newlen);
+}
+#endif
+#endif
+
+#if SANITIZER_LINUX
+#define SA_RESTORER 0x04000000
+// Doesn't set sa_restorer if the caller did not set it, so use with caution
+//(see below).
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
+ __sanitizer_kernel_sigaction_t k_act, k_oldact;
+ internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act;
+ __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;
+ if (u_act) {
+ k_act.handler = u_act->handler;
+ k_act.sigaction = u_act->sigaction;
+ internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ // Without SA_RESTORER kernel ignores the calls (probably returns EINVAL).
+ k_act.sa_flags = u_act->sa_flags | SA_RESTORER;
+ // FIXME: most often sa_restorer is unset, however the kernel requires it
+ // to point to a valid signal restorer that calls the rt_sigreturn syscall.
+ // If sa_restorer passed to the kernel is NULL, the program may crash upon
+ // signal delivery or fail to unwind the stack in the signal handler.
+ // libc implementation of sigaction() passes its own restorer to
+ // rt_sigaction, so we need to do the same (we'll need to reimplement the
+ // restorers; for x86_64 the restorer address can be obtained from
+ // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact).
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+ k_act.sa_restorer = u_act->sa_restorer;
+#endif
+ }
+
+ uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
+ (uptr)(u_act ? &k_act : nullptr),
+ (uptr)(u_oldact ? &k_oldact : nullptr),
+ (uptr)sizeof(__sanitizer_kernel_sigset_t));
+
+ if ((result == 0) && u_oldact) {
+ u_oldact->handler = k_oldact.handler;
+ u_oldact->sigaction = k_oldact.sigaction;
+ internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ u_oldact->sa_flags = k_oldact.sa_flags;
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+ u_oldact->sa_restorer = k_oldact.sa_restorer;
+#endif
+ }
+ return result;
+}
+#endif // SANITIZER_LINUX
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
+#else
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
+ return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)k_set,
+ (uptr)k_oldset, sizeof(__sanitizer_kernel_sigset_t));
+#endif
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) {
+ internal_memset(set, 0xff, sizeof(*set));
+}
+
+void internal_sigemptyset(__sanitizer_sigset_t *set) {
+ internal_memset(set, 0, sizeof(*set));
+}
+
+#if SANITIZER_LINUX
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
+ signum -= 1;
+ CHECK_GE(signum, 0);
+ CHECK_LT(signum, sizeof(*set) * 8);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ k_set->sig[idx] &= ~(1 << bit);
+}
+
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
+ signum -= 1;
+ CHECK_GE(signum, 0);
+ CHECK_LT(signum, sizeof(*set) * 8);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ return k_set->sig[idx] & (1 << bit);
+}
+#elif SANITIZER_FREEBSD
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
+ sigset_t *rset = reinterpret_cast<sigset_t *>(set);
+ sigdelset(rset, signum);
+}
+
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
+ sigset_t *rset = reinterpret_cast<sigset_t *>(set);
+ return sigismember(rset, signum);
+}
+#endif
+#endif // !SANITIZER_SOLARIS
+
+#if !SANITIZER_NETBSD
+// ThreadLister implementation.
+ThreadLister::ThreadLister(pid_t pid) : pid_(pid), buffer_(4096) {
+ char task_directory_path[80];
+ internal_snprintf(task_directory_path, sizeof(task_directory_path),
+ "/proc/%d/task/", pid);
+ descriptor_ = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY);
+ if (internal_iserror(descriptor_)) {
+ Report("Can't open /proc/%d/task for reading.\n", pid);
+ }
+}
+
+ThreadLister::Result ThreadLister::ListThreads(
+ InternalMmapVector<tid_t> *threads) {
+ if (internal_iserror(descriptor_))
+ return Error;
+ internal_lseek(descriptor_, 0, SEEK_SET);
+ threads->clear();
+
+ Result result = Ok;
+ for (bool first_read = true;; first_read = false) {
+ // Resize to max capacity if it was downsized by IsAlive.
+ buffer_.resize(buffer_.capacity());
+ CHECK_GE(buffer_.size(), 4096);
+ uptr read = internal_getdents(
+ descriptor_, (struct linux_dirent *)buffer_.data(), buffer_.size());
+ if (!read)
+ return result;
+ if (internal_iserror(read)) {
+ Report("Can't read directory entries from /proc/%d/task.\n", pid_);
+ return Error;
+ }
+
+ for (uptr begin = (uptr)buffer_.data(), end = begin + read; begin < end;) {
+ struct linux_dirent *entry = (struct linux_dirent *)begin;
+ begin += entry->d_reclen;
+ if (entry->d_ino == 1) {
+ // Inode 1 is for bad blocks and also can be a reason for early return.
+ // Should be emitted if kernel tried to output terminating thread.
+ // See proc_task_readdir implementation in Linux.
+ result = Incomplete;
+ }
+ if (entry->d_ino && *entry->d_name >= '0' && *entry->d_name <= '9')
+ threads->push_back(internal_atoll(entry->d_name));
+ }
+
+ // Now we are going to detect short-read or early EOF. In such cases Linux
+ // can return inconsistent list with missing alive threads.
+ // Code will just remember that the list can be incomplete but it will
+ // continue reads to return as much as possible.
+ if (!first_read) {
+ // The first one was a short-read by definition.
+ result = Incomplete;
+ } else if (read > buffer_.size() - 1024) {
+ // Read was close to the buffer size. So double the size and assume the
+ // worst.
+ buffer_.resize(buffer_.size() * 2);
+ result = Incomplete;
+ } else if (!threads->empty() && !IsAlive(threads->back())) {
+ // Maybe Linux early returned from read on terminated thread (!pid_alive)
+ // and failed to restore read position.
+ // See next_tid and proc_task_instantiate in Linux.
+ result = Incomplete;
+ }
+ }
+}
+
+bool ThreadLister::IsAlive(int tid) {
+ // /proc/%d/task/%d/status uses same call to detect alive threads as
+ // proc_task_readdir. See task_state implementation in Linux.
+ char path[80];
+ internal_snprintf(path, sizeof(path), "/proc/%d/task/%d/status", pid_, tid);
+ if (!ReadFileToVector(path, &buffer_) || buffer_.empty())
+ return false;
+ buffer_.push_back(0);
+ static const char kPrefix[] = "\nPPid:";
+ const char *field = internal_strstr(buffer_.data(), kPrefix);
+ if (!field)
+ return false;
+ field += internal_strlen(kPrefix);
+ return (int)internal_atoll(field) != 0;
+}
+
+ThreadLister::~ThreadLister() {
+ if (!internal_iserror(descriptor_))
+ internal_close(descriptor_);
+}
+#endif
+
+#if SANITIZER_WORDSIZE == 32
+// Take care of unusable kernel area in top gigabyte.
+static uptr GetKernelAreaSize() {
+#if SANITIZER_LINUX && !SANITIZER_X32
+ const uptr gbyte = 1UL << 30;
+
+ // Firstly check if there are writable segments
+ // mapped to top gigabyte (e.g. stack).
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ if (proc_maps.Error())
+ return 0;
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;
+ }
+
+#if !SANITIZER_ANDROID
+ // Even if nothing is mapped, top Gb may still be accessible
+ // if we are running on 64-bit kernel.
+ // Uname may report misleading results if personality type
+ // is modified (e.g. under schroot) so check this as well.
+ struct utsname uname_info;
+ int pers = personality(0xffffffffUL);
+ if (!(pers & PER_MASK) && internal_uname(&uname_info) == 0 &&
+ internal_strstr(uname_info.machine, "64"))
+ return 0;
+#endif // SANITIZER_ANDROID
+
+ // Top gigabyte is reserved for kernel.
+ return gbyte;
+#else
+ return 0;
+#endif // SANITIZER_LINUX && !SANITIZER_X32
+}
+#endif // SANITIZER_WORDSIZE == 32
+
+uptr GetMaxVirtualAddress() {
+#if (SANITIZER_NETBSD || SANITIZER_OPENBSD) && defined(__x86_64__)
+ return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
+#elif SANITIZER_WORDSIZE == 64
+# if defined(__powerpc64__) || defined(__aarch64__)
+ // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
+ // We somehow need to figure out which one we are using now and choose
+ // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
+ // Note that with 'ulimit -s unlimited' the stack is moved away from the top
+ // of the address space, so simply checking the stack address is not enough.
+ // This should (does) work for both PowerPC64 Endian modes.
+ // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+ return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
+# elif defined(__mips64)
+ return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
+# elif defined(__s390x__)
+ return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
+#elif defined(__sparc__)
+ return ~(uptr)0;
+# else
+ return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+# endif
+#else // SANITIZER_WORDSIZE == 32
+# if defined(__s390__)
+ return (1ULL << 31) - 1; // 0x7fffffff;
+# else
+ return (1ULL << 32) - 1; // 0xffffffff;
+# endif
+#endif // SANITIZER_WORDSIZE
+}
+
+uptr GetMaxUserVirtualAddress() {
+ uptr addr = GetMaxVirtualAddress();
+#if SANITIZER_WORDSIZE == 32 && !defined(__s390__)
+ if (!common_flags()->full_address_space)
+ addr -= GetKernelAreaSize();
+ CHECK_LT(reinterpret_cast<uptr>(&addr), addr);
+#endif
+ return addr;
+}
+
+#if !SANITIZER_ANDROID
+uptr GetPageSize() {
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) && \
+ defined(EXEC_PAGESIZE)
+ return EXEC_PAGESIZE;
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
+// Use sysctl as sysconf can trigger interceptors internally.
+ int pz = 0;
+ uptr pzl = sizeof(pz);
+ int mib[2] = {CTL_HW, HW_PAGESIZE};
+ int rv = internal_sysctl(mib, 2, &pz, &pzl, nullptr, 0);
+ CHECK_EQ(rv, 0);
+ return (uptr)pz;
+#elif SANITIZER_USE_GETAUXVAL
+ return getauxval(AT_PAGESZ);
+#else
+ return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
+#endif
+}
+#endif // !SANITIZER_ANDROID
+
+#if !SANITIZER_OPENBSD
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+#if SANITIZER_SOLARIS
+ const char *default_module_name = getexecname();
+ CHECK_NE(default_module_name, NULL);
+ return internal_snprintf(buf, buf_len, "%s", default_module_name);
+#else
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD
+ const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
+#else
+ const int Mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
+#endif
+ const char *default_module_name = "kern.proc.pathname";
+ uptr Size = buf_len;
+ bool IsErr =
+ (internal_sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);
+ int readlink_error = IsErr ? errno : 0;
+ uptr module_name_len = Size;
+#else
+ const char *default_module_name = "/proc/self/exe";
+ uptr module_name_len = internal_readlink(
+ default_module_name, buf, buf_len);
+ int readlink_error;
+ bool IsErr = internal_iserror(module_name_len, &readlink_error);
+#endif // SANITIZER_SOLARIS
+ if (IsErr) {
+ // We can't read binary name for some reason, assume it's unknown.
+ Report("WARNING: reading executable name failed with errno %d, "
+ "some stack frames may not be symbolized\n", readlink_error);
+ module_name_len = internal_snprintf(buf, buf_len, "%s",
+ default_module_name);
+ CHECK_LT(module_name_len, buf_len);
+ }
+ return module_name_len;
+#endif
+}
+#endif // !SANITIZER_OPENBSD
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+#if SANITIZER_LINUX
+ char *tmpbuf;
+ uptr tmpsize;
+ uptr tmplen;
+ if (ReadFileToBuffer("/proc/self/cmdline", &tmpbuf, &tmpsize, &tmplen,
+ 1024 * 1024)) {
+ internal_strncpy(buf, tmpbuf, buf_len);
+ UnmapOrDie(tmpbuf, tmpsize);
+ return internal_strlen(buf);
+ }
+#endif
+ return ReadBinaryName(buf, buf_len);
+}
+
+// Match full names of the form /path/to/base_name{-,.}*
+bool LibraryNameIs(const char *full_name, const char *base_name) {
+ const char *name = full_name;
+ // Strip path.
+ while (*name != '\0') name++;
+ while (name > full_name && *name != '/') name--;
+ if (*name == '/') name++;
+ uptr base_name_length = internal_strlen(base_name);
+ if (internal_strncmp(name, base_name, base_name_length)) return false;
+ return (name[base_name_length] == '-' || name[base_name_length] == '.');
+}
+
+#if !SANITIZER_ANDROID
+// Call cb for each region mapped by map.
+void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
+ CHECK_NE(map, nullptr);
+#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+ typedef ElfW(Phdr) Elf_Phdr;
+ typedef ElfW(Ehdr) Elf_Ehdr;
+#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+ char *base = (char *)map->l_addr;
+ Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
+ char *phdrs = base + ehdr->e_phoff;
+ char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize;
+
+ // Find the segment with the minimum base so we can "relocate" the p_vaddr
+ // fields. Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC
+ // objects have a non-zero base.
+ uptr preferred_base = (uptr)-1;
+ for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
+ Elf_Phdr *phdr = (Elf_Phdr *)iter;
+ if (phdr->p_type == PT_LOAD && preferred_base > (uptr)phdr->p_vaddr)
+ preferred_base = (uptr)phdr->p_vaddr;
+ }
+
+ // Compute the delta from the real base to get a relocation delta.
+ sptr delta = (uptr)base - preferred_base;
+ // Now we can figure out what the loader really mapped.
+ for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
+ Elf_Phdr *phdr = (Elf_Phdr *)iter;
+ if (phdr->p_type == PT_LOAD) {
+ uptr seg_start = phdr->p_vaddr + delta;
+ uptr seg_end = seg_start + phdr->p_memsz;
+ // None of these values are aligned. We consider the ragged edges of the
+ // load command as defined, since they are mapped from the file.
+ seg_start = RoundDownTo(seg_start, GetPageSizeCached());
+ seg_end = RoundUpTo(seg_end, GetPageSizeCached());
+ cb((void *)seg_start, seg_end - seg_start);
+ }
+ }
+}
+#endif
+
+#if defined(__x86_64__) && SANITIZER_LINUX
+// We cannot use glibc's clone wrapper, because it messes with the child
+// task's TLS. It writes the PID and TID of the child task to its thread
+// descriptor, but in our case the child task shares the thread descriptor with
+// the parent (because we don't know how to allocate a new thread
+// descriptor to keep glibc happy). So the stock version of clone(), when
+// used with CLONE_VM, would end up corrupting the parent's thread descriptor.
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+ register void *r8 __asm__("r8") = newtls;
+ register int *r10 __asm__("r10") = child_tidptr;
+ __asm__ __volatile__(
+ /* %rax = syscall(%rax = SYSCALL(clone),
+ * %rdi = flags,
+ * %rsi = child_stack,
+ * %rdx = parent_tidptr,
+ * %r8 = new_tls,
+ * %r10 = child_tidptr)
+ */
+ "syscall\n"
+
+ /* if (%rax != 0)
+ * return;
+ */
+ "testq %%rax,%%rax\n"
+ "jnz 1f\n"
+
+ /* In the child. Terminate unwind chain. */
+ // XXX: We should also terminate the CFI unwind chain
+ // here. Unfortunately clang 3.2 doesn't support the
+ // necessary CFI directives, so we skip that part.
+ "xorq %%rbp,%%rbp\n"
+
+ /* Call "fn(arg)". */
+ "popq %%rax\n"
+ "popq %%rdi\n"
+ "call *%%rax\n"
+
+ /* Call _exit(%rax). */
+ "movq %%rax,%%rdi\n"
+ "movq %2,%%rax\n"
+ "syscall\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=a" (res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
+ "S"(child_stack),
+ "D"(flags),
+ "d"(parent_tidptr),
+ "r"(r8),
+ "r"(r10)
+ : "memory", "r11", "rcx");
+ return res;
+}
+#elif defined(__mips__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+ register void *a3 __asm__("$7") = newtls;
+ register int *a4 __asm__("$8") = child_tidptr;
+ // We don't have proper CFI directives here because it requires alot of code
+ // for very marginal benefits.
+ __asm__ __volatile__(
+ /* $v0 = syscall($v0 = __NR_clone,
+ * $a0 = flags,
+ * $a1 = child_stack,
+ * $a2 = parent_tidptr,
+ * $a3 = new_tls,
+ * $a4 = child_tidptr)
+ */
+ ".cprestore 16;\n"
+ "move $4,%1;\n"
+ "move $5,%2;\n"
+ "move $6,%3;\n"
+ "move $7,%4;\n"
+ /* Store the fifth argument on stack
+ * if we are using 32-bit abi.
+ */
+#if SANITIZER_WORDSIZE == 32
+ "lw %5,16($29);\n"
+#else
+ "move $8,%5;\n"
+#endif
+ "li $2,%6;\n"
+ "syscall;\n"
+
+ /* if ($v0 != 0)
+ * return;
+ */
+ "bnez $2,1f;\n"
+
+ /* Call "fn(arg)". */
+#if SANITIZER_WORDSIZE == 32
+#ifdef __BIG_ENDIAN__
+ "lw $25,4($29);\n"
+ "lw $4,12($29);\n"
+#else
+ "lw $25,0($29);\n"
+ "lw $4,8($29);\n"
+#endif
+#else
+ "ld $25,0($29);\n"
+ "ld $4,8($29);\n"
+#endif
+ "jal $25;\n"
+
+ /* Call _exit($v0). */
+ "move $4,$2;\n"
+ "li $2,%7;\n"
+ "syscall;\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r" (res)
+ : "r"(flags),
+ "r"(child_stack),
+ "r"(parent_tidptr),
+ "r"(a3),
+ "r"(a4),
+ "i"(__NR_clone),
+ "i"(__NR_exit)
+ : "memory", "$29" );
+ return res;
+}
+#elif defined(__aarch64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+ register int (*__fn)(void *) __asm__("x0") = fn;
+ register void *__stack __asm__("x1") = child_stack;
+ register int __flags __asm__("x2") = flags;
+ register void *__arg __asm__("x3") = arg;
+ register int *__ptid __asm__("x4") = parent_tidptr;
+ register void *__tls __asm__("x5") = newtls;
+ register int *__ctid __asm__("x6") = child_tidptr;
+
+ __asm__ __volatile__(
+ "mov x0,x2\n" /* flags */
+ "mov x2,x4\n" /* ptid */
+ "mov x3,x5\n" /* tls */
+ "mov x4,x6\n" /* ctid */
+ "mov x8,%9\n" /* clone */
+
+ "svc 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp x0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldp x1, x0, [sp], #16\n"
+ "blr x1\n"
+
+ /* Call _exit(%r0). */
+ "mov x8, %10\n"
+ "svc 0x0\n"
+ "1:\n"
+
+ : "=r" (res)
+ : "i"(-EINVAL),
+ "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid),
+ "i"(__NR_clone), "i"(__NR_exit)
+ : "x30", "memory");
+ return res;
+}
+#elif defined(__powerpc64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+// Stack frame structure.
+#if SANITIZER_PPC64V1
+// Back chain == 0 (SP + 112)
+// Frame (112 bytes):
+// Parameter save area (SP + 48), 8 doublewords
+// TOC save area (SP + 40)
+// Link editor doubleword (SP + 32)
+// Compiler doubleword (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 112
+# define FRAME_TOC_SAVE_OFFSET 40
+#elif SANITIZER_PPC64V2
+// Back chain == 0 (SP + 32)
+// Frame (32 bytes):
+// TOC save area (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 32
+# define FRAME_TOC_SAVE_OFFSET 24
+#else
+# error "Unsupported PPC64 ABI"
+#endif
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+
+ register int (*__fn)(void *) __asm__("r3") = fn;
+ register void *__cstack __asm__("r4") = child_stack;
+ register int __flags __asm__("r5") = flags;
+ register void *__arg __asm__("r6") = arg;
+ register int *__ptidptr __asm__("r7") = parent_tidptr;
+ register void *__newtls __asm__("r8") = newtls;
+ register int *__ctidptr __asm__("r9") = child_tidptr;
+
+ __asm__ __volatile__(
+ /* fn and arg are saved across the syscall */
+ "mr 28, %5\n\t"
+ "mr 27, %8\n\t"
+
+ /* syscall
+ r0 == __NR_clone
+ r3 == flags
+ r4 == child_stack
+ r5 == parent_tidptr
+ r6 == newtls
+ r7 == child_tidptr */
+ "mr 3, %7\n\t"
+ "mr 5, %9\n\t"
+ "mr 6, %10\n\t"
+ "mr 7, %11\n\t"
+ "li 0, %3\n\t"
+ "sc\n\t"
+
+ /* Test if syscall was successful */
+ "cmpdi cr1, 3, 0\n\t"
+ "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
+ "bne- cr1, 1f\n\t"
+
+ /* Set up stack frame */
+ "li 29, 0\n\t"
+ "stdu 29, -8(1)\n\t"
+ "stdu 1, -%12(1)\n\t"
+ /* Do the function call */
+ "std 2, %13(1)\n\t"
+#if SANITIZER_PPC64V1
+ "ld 0, 0(28)\n\t"
+ "ld 2, 8(28)\n\t"
+ "mtctr 0\n\t"
+#elif SANITIZER_PPC64V2
+ "mr 12, 28\n\t"
+ "mtctr 12\n\t"
+#else
+# error "Unsupported PPC64 ABI"
+#endif
+ "mr 3, 27\n\t"
+ "bctrl\n\t"
+ "ld 2, %13(1)\n\t"
+
+ /* Call _exit(r3) */
+ "li 0, %4\n\t"
+ "sc\n\t"
+
+ /* Return to parent */
+ "1:\n\t"
+ "mr %0, 3\n\t"
+ : "=r" (res)
+ : "0" (-1),
+ "i" (EINVAL),
+ "i" (__NR_clone),
+ "i" (__NR_exit),
+ "r" (__fn),
+ "r" (__cstack),
+ "r" (__flags),
+ "r" (__arg),
+ "r" (__ptidptr),
+ "r" (__newtls),
+ "r" (__ctidptr),
+ "i" (FRAME_SIZE),
+ "i" (FRAME_TOC_SAVE_OFFSET)
+ : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
+ return res;
+}
+#elif defined(__i386__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 7 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)flags;
+ ((unsigned int *)child_stack)[1] = (uptr)0;
+ ((unsigned int *)child_stack)[2] = (uptr)fn;
+ ((unsigned int *)child_stack)[3] = (uptr)arg;
+ __asm__ __volatile__(
+ /* %eax = syscall(%eax = SYSCALL(clone),
+ * %ebx = flags,
+ * %ecx = child_stack,
+ * %edx = parent_tidptr,
+ * %esi = new_tls,
+ * %edi = child_tidptr)
+ */
+
+ /* Obtain flags */
+ "movl (%%ecx), %%ebx\n"
+ /* Do the system call */
+ "pushl %%ebx\n"
+ "pushl %%esi\n"
+ "pushl %%edi\n"
+ /* Remember the flag value. */
+ "movl %%ebx, (%%ecx)\n"
+ "int $0x80\n"
+ "popl %%edi\n"
+ "popl %%esi\n"
+ "popl %%ebx\n"
+
+ /* if (%eax != 0)
+ * return;
+ */
+
+ "test %%eax,%%eax\n"
+ "jnz 1f\n"
+
+ /* terminate the stack frame */
+ "xorl %%ebp,%%ebp\n"
+ /* Call FN. */
+ "call *%%ebx\n"
+#ifdef PIC
+ "call here\n"
+ "here:\n"
+ "popl %%ebx\n"
+ "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
+#endif
+ /* Call exit */
+ "movl %%eax, %%ebx\n"
+ "movl %2, %%eax\n"
+ "int $0x80\n"
+ "1:\n"
+ : "=a" (res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
+ "c"(child_stack),
+ "d"(parent_tidptr),
+ "S"(newtls),
+ "D"(child_tidptr)
+ : "memory");
+ return res;
+}
+#elif defined(__arm__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ unsigned int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)fn;
+ ((unsigned int *)child_stack)[1] = (uptr)arg;
+ register int r0 __asm__("r0") = flags;
+ register void *r1 __asm__("r1") = child_stack;
+ register int *r2 __asm__("r2") = parent_tidptr;
+ register void *r3 __asm__("r3") = newtls;
+ register int *r4 __asm__("r4") = child_tidptr;
+ register int r7 __asm__("r7") = __NR_clone;
+
+#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)
+# define ARCH_HAS_BX
+#endif
+#if __ARM_ARCH > 4
+# define ARCH_HAS_BLX
+#endif
+
+#ifdef ARCH_HAS_BX
+# ifdef ARCH_HAS_BLX
+# define BLX(R) "blx " #R "\n"
+# else
+# define BLX(R) "mov lr, pc; bx " #R "\n"
+# endif
+#else
+# define BLX(R) "mov lr, pc; mov pc," #R "\n"
+#endif
+
+ __asm__ __volatile__(
+ /* %r0 = syscall(%r7 = SYSCALL(clone),
+ * %r0 = flags,
+ * %r1 = child_stack,
+ * %r2 = parent_tidptr,
+ * %r3 = new_tls,
+ * %r4 = child_tidptr)
+ */
+
+ /* Do the system call */
+ "swi 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp r0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldr r0, [sp, #4]\n"
+ "ldr ip, [sp], #8\n"
+ BLX(ip)
+ /* Call _exit(%r0). */
+ "mov r7, %7\n"
+ "swi 0x0\n"
+ "1:\n"
+ "mov %0, r0\n"
+ : "=r"(res)
+ : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7),
+ "i"(__NR_exit)
+ : "memory");
+ return res;
+}
+#endif // defined(__x86_64__) && SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+int internal_uname(struct utsname *buf) {
+ return internal_syscall(SYSCALL(uname), buf);
+}
+#endif
+
+#if SANITIZER_ANDROID
+#if __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
+static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ // Any name starting with "lib" indicates a bug in L where library base names
+ // are returned instead of paths.
+ if (info->dlpi_name && info->dlpi_name[0] == 'l' &&
+ info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') {
+ *(bool *)data = true;
+ return 1;
+ }
+ return 0;
+}
+
+static atomic_uint32_t android_api_level;
+
+static AndroidApiLevel AndroidDetectApiLevelStatic() {
+#if __ANDROID_API__ <= 19
+ return ANDROID_KITKAT;
+#elif __ANDROID_API__ <= 22
+ return ANDROID_LOLLIPOP_MR1;
+#else
+ return ANDROID_POST_LOLLIPOP;
+#endif
+}
+
+static AndroidApiLevel AndroidDetectApiLevel() {
+ if (!&dl_iterate_phdr)
+ return ANDROID_KITKAT; // K or lower
+ bool base_name_seen = false;
+ dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);
+ if (base_name_seen)
+ return ANDROID_LOLLIPOP_MR1; // L MR1
+ return ANDROID_POST_LOLLIPOP; // post-L
+ // Plain L (API level 21) is completely broken wrt ASan and not very
+ // interesting to detect.
+}
+
+extern "C" __attribute__((weak)) void* _DYNAMIC;
+
+AndroidApiLevel AndroidGetApiLevel() {
+ AndroidApiLevel level =
+ (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);
+ if (level) return level;
+ level = &_DYNAMIC == nullptr ? AndroidDetectApiLevelStatic()
+ : AndroidDetectApiLevel();
+ atomic_store(&android_api_level, level, memory_order_relaxed);
+ return level;
+}
+
+#endif
+
+static HandleSignalMode GetHandleSignalModeImpl(int signum) {
+ switch (signum) {
+ case SIGABRT:
+ return common_flags()->handle_abort;
+ case SIGILL:
+ return common_flags()->handle_sigill;
+ case SIGTRAP:
+ return common_flags()->handle_sigtrap;
+ case SIGFPE:
+ return common_flags()->handle_sigfpe;
+ case SIGSEGV:
+ return common_flags()->handle_segv;
+ case SIGBUS:
+ return common_flags()->handle_sigbus;
+ }
+ return kHandleSignalNo;
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ HandleSignalMode result = GetHandleSignalModeImpl(signum);
+ if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
+ return kHandleSignalExclusive;
+ return result;
+}
+
+#if !SANITIZER_GO
+void *internal_start_thread(void *(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
+ internal_sigfillset(&set);
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
+ // on any thread, setuid call hangs (see test/tsan/setuid.c).
+ internal_sigdelset(&set, 33);
+#endif
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ void *th;
+ real_pthread_create(&th, nullptr, func, arg);
+ internal_sigprocmask(SIG_SETMASK, &old, nullptr);
+ return th;
+}
+
+void internal_join_thread(void *th) {
+ real_pthread_join(th, nullptr);
+}
+#else
+void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
+
+void internal_join_thread(void *th) {}
+#endif
+
+#if defined(__aarch64__)
+// Android headers in the older NDK releases miss this definition.
+struct __sanitizer_esr_context {
+ struct _aarch64_ctx head;
+ uint64_t esr;
+};
+
+static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
+ static const u32 kEsrMagic = 0x45535201;
+ u8 *aux = ucontext->uc_mcontext.__reserved;
+ while (true) {
+ _aarch64_ctx *ctx = (_aarch64_ctx *)aux;
+ if (ctx->size == 0) break;
+ if (ctx->magic == kEsrMagic) {
+ *esr = ((__sanitizer_esr_context *)ctx)->esr;
+ return true;
+ }
+ aux += ctx->size;
+ }
+ return false;
+}
+#endif
+
+#if SANITIZER_OPENBSD
+using Context = sigcontext;
+#else
+using Context = ucontext_t;
+#endif
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
+ Context *ucontext = (Context *)context;
+#if defined(__x86_64__) || defined(__i386__)
+ static const uptr PF_WRITE = 1U << 1;
+#if SANITIZER_FREEBSD
+ uptr err = ucontext->uc_mcontext.mc_err;
+#elif SANITIZER_NETBSD
+ uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];
+#elif SANITIZER_OPENBSD
+ uptr err = ucontext->sc_err;
+#elif SANITIZER_SOLARIS && defined(__i386__)
+ const int Err = 13;
+ uptr err = ucontext->uc_mcontext.gregs[Err];
+#else
+ uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
+#endif // SANITIZER_FREEBSD
+ return err & PF_WRITE ? WRITE : READ;
+#elif defined(__mips__)
+ uint32_t *exception_source;
+ uint32_t faulty_instruction;
+ uint32_t op_code;
+
+ exception_source = (uint32_t *)ucontext->uc_mcontext.pc;
+ faulty_instruction = (uint32_t)(*exception_source);
+
+ op_code = (faulty_instruction >> 26) & 0x3f;
+
+ // FIXME: Add support for FPU, microMIPS, DSP, MSA memory instructions.
+ switch (op_code) {
+ case 0x28: // sb
+ case 0x29: // sh
+ case 0x2b: // sw
+ case 0x3f: // sd
+#if __mips_isa_rev < 6
+ case 0x2c: // sdl
+ case 0x2d: // sdr
+ case 0x2a: // swl
+ case 0x2e: // swr
+#endif
+ return SignalContext::WRITE;
+
+ case 0x20: // lb
+ case 0x24: // lbu
+ case 0x21: // lh
+ case 0x25: // lhu
+ case 0x23: // lw
+ case 0x27: // lwu
+ case 0x37: // ld
+#if __mips_isa_rev < 6
+ case 0x1a: // ldl
+ case 0x1b: // ldr
+ case 0x22: // lwl
+ case 0x26: // lwr
+#endif
+ return SignalContext::READ;
+#if __mips_isa_rev == 6
+ case 0x3b: // pcrel
+ op_code = (faulty_instruction >> 19) & 0x3;
+ switch (op_code) {
+ case 0x1: // lwpc
+ case 0x2: // lwupc
+ return SignalContext::READ;
+ }
+#endif
+ }
+ return SignalContext::UNKNOWN;
+#elif defined(__arm__)
+ static const uptr FSR_WRITE = 1U << 11;
+ uptr fsr = ucontext->uc_mcontext.error_code;
+ return fsr & FSR_WRITE ? WRITE : READ;
+#elif defined(__aarch64__)
+ static const u64 ESR_ELx_WNR = 1U << 6;
+ u64 esr;
+ if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
+ return esr & ESR_ELx_WNR ? WRITE : READ;
+#elif defined(__sparc__)
+ // Decode the instruction to determine the access type.
+ // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
+#if SANITIZER_SOLARIS
+ uptr pc = ucontext->uc_mcontext.gregs[REG_PC];
+#else
+ // Historical BSDism here.
+ struct sigcontext *scontext = (struct sigcontext *)context;
+#if defined(__arch64__)
+ uptr pc = scontext->sigc_regs.tpc;
+#else
+ uptr pc = scontext->si_regs.pc;
+#endif
+#endif
+ u32 instr = *(u32 *)pc;
+ return (instr >> 21) & 1 ? WRITE: READ;
+#elif defined(__riscv)
+ unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC];
+ unsigned faulty_instruction = *(uint16_t *)pc;
+
+#if defined(__riscv_compressed)
+ if ((faulty_instruction & 0x3) != 0x3) { // it's a compressed instruction
+ // set op_bits to the instruction bits [1, 0, 15, 14, 13]
+ unsigned op_bits =
+ ((faulty_instruction & 0x3) << 3) | (faulty_instruction >> 13);
+ unsigned rd = faulty_instruction & 0xF80; // bits 7-11, inclusive
+ switch (op_bits) {
+ case 0b10'010: // c.lwsp (rd != x0)
+#if __riscv_xlen == 64
+ case 0b10'011: // c.ldsp (rd != x0)
+#endif
+ return rd ? SignalContext::READ : SignalContext::UNKNOWN;
+ case 0b00'010: // c.lw
+#if __riscv_flen >= 32 && __riscv_xlen == 32
+ case 0b10'011: // c.flwsp
+#endif
+#if __riscv_flen >= 32 || __riscv_xlen == 64
+ case 0b00'011: // c.flw / c.ld
+#endif
+#if __riscv_flen == 64
+ case 0b00'001: // c.fld
+ case 0b10'001: // c.fldsp
+#endif
+ return SignalContext::READ;
+ case 0b00'110: // c.sw
+ case 0b10'110: // c.swsp
+#if __riscv_flen >= 32 || __riscv_xlen == 64
+ case 0b00'111: // c.fsw / c.sd
+ case 0b10'111: // c.fswsp / c.sdsp
+#endif
+#if __riscv_flen == 64
+ case 0b00'101: // c.fsd
+ case 0b10'101: // c.fsdsp
+#endif
+ return SignalContext::WRITE;
+ default:
+ return SignalContext::UNKNOWN;
+ }
+ }
+#endif
+
+ unsigned opcode = faulty_instruction & 0x7f; // lower 7 bits
+ unsigned funct3 = (faulty_instruction >> 12) & 0x7; // bits 12-14, inclusive
+ switch (opcode) {
+ case 0b0000011: // loads
+ switch (funct3) {
+ case 0b000: // lb
+ case 0b001: // lh
+ case 0b010: // lw
+#if __riscv_xlen == 64
+ case 0b011: // ld
+#endif
+ case 0b100: // lbu
+ case 0b101: // lhu
+ return SignalContext::READ;
+ default:
+ return SignalContext::UNKNOWN;
+ }
+ case 0b0100011: // stores
+ switch (funct3) {
+ case 0b000: // sb
+ case 0b001: // sh
+ case 0b010: // sw
+#if __riscv_xlen == 64
+ case 0b011: // sd
+#endif
+ return SignalContext::WRITE;
+ default:
+ return SignalContext::UNKNOWN;
+ }
+#if __riscv_flen >= 32
+ case 0b0000111: // floating-point loads
+ switch (funct3) {
+ case 0b010: // flw
+#if __riscv_flen == 64
+ case 0b011: // fld
+#endif
+ return SignalContext::READ;
+ default:
+ return SignalContext::UNKNOWN;
+ }
+ case 0b0100111: // floating-point stores
+ switch (funct3) {
+ case 0b010: // fsw
+#if __riscv_flen == 64
+ case 0b011: // fsd
+#endif
+ return SignalContext::WRITE;
+ default:
+ return SignalContext::UNKNOWN;
+ }
+#endif
+ default:
+ return SignalContext::UNKNOWN;
+ }
+#else
+ (void)ucontext;
+ return UNKNOWN; // FIXME: Implement.
+#endif
+}
+
+bool SignalContext::IsTrueFaultingAddress() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ // SIGSEGV signals without a true fault address have si_code set to 128.
+ return si->si_signo == SIGSEGV && si->si_code != 128;
+}
+
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
+static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+#if SANITIZER_NETBSD
+ // This covers all NetBSD architectures
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = _UC_MACHINE_PC(ucontext);
+ *bp = _UC_MACHINE_FP(ucontext);
+ *sp = _UC_MACHINE_SP(ucontext);
+#elif defined(__arm__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.arm_pc;
+ *bp = ucontext->uc_mcontext.arm_fp;
+ *sp = ucontext->uc_mcontext.arm_sp;
+#elif defined(__aarch64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.regs[29];
+ *sp = ucontext->uc_mcontext.sp;
+#elif defined(__hppa__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.sc_iaoq[0];
+ /* GCC uses %r3 whenever a frame pointer is needed. */
+ *bp = ucontext->uc_mcontext.sc_gr[3];
+ *sp = ucontext->uc_mcontext.sc_gr[30];
+#elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_rip;
+ *bp = ucontext->uc_mcontext.mc_rbp;
+ *sp = ucontext->uc_mcontext.mc_rsp;
+#elif SANITIZER_OPENBSD
+ sigcontext *ucontext = (sigcontext *)context;
+ *pc = ucontext->sc_rip;
+ *bp = ucontext->sc_rbp;
+ *sp = ucontext->sc_rsp;
+# else
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_RIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_RBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_RSP];
+# endif
+#elif defined(__i386__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_eip;
+ *bp = ucontext->uc_mcontext.mc_ebp;
+ *sp = ucontext->uc_mcontext.mc_esp;
+#elif SANITIZER_OPENBSD
+ sigcontext *ucontext = (sigcontext *)context;
+ *pc = ucontext->sc_eip;
+ *bp = ucontext->sc_ebp;
+ *sp = ucontext->sc_esp;
+# else
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if SANITIZER_SOLARIS
+ /* Use the numeric values: the symbolic ones are undefined by llvm
+ include/llvm/Support/Solaris.h. */
+# ifndef REG_EIP
+# define REG_EIP 14 // REG_PC
+# endif
+# ifndef REG_EBP
+# define REG_EBP 6 // REG_FP
+# endif
+# ifndef REG_UESP
+# define REG_UESP 17 // REG_SP
+# endif
+# endif
+ *pc = ucontext->uc_mcontext.gregs[REG_EIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_EBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_UESP];
+# endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.regs->nip;
+ *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
+ // The powerpc{,64}-linux ABIs do not specify r31 as the frame
+ // pointer, but GCC always uses r31 when we need a frame pointer.
+ *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
+#elif defined(__sparc__)
+#if defined(__arch64__) || defined(__sparcv9)
+#define STACK_BIAS 2047
+#else
+#define STACK_BIAS 0
+# endif
+# if SANITIZER_SOLARIS
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_PC];
+ *sp = ucontext->uc_mcontext.gregs[REG_O6] + STACK_BIAS;
+#else
+ // Historical BSDism here.
+ struct sigcontext *scontext = (struct sigcontext *)context;
+#if defined(__arch64__)
+ *pc = scontext->sigc_regs.tpc;
+ *sp = scontext->sigc_regs.u_regs[14] + STACK_BIAS;
+#else
+ *pc = scontext->si_regs.pc;
+ *sp = scontext->si_regs.u_regs[14];
+#endif
+# endif
+ *bp = (uptr)((uhwptr *)*sp)[14] + STACK_BIAS;
+#elif defined(__mips__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.gregs[30];
+ *sp = ucontext->uc_mcontext.gregs[29];
+#elif defined(__s390__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if defined(__s390x__)
+ *pc = ucontext->uc_mcontext.psw.addr;
+# else
+ *pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff;
+# endif
+ *bp = ucontext->uc_mcontext.gregs[11];
+ *sp = ucontext->uc_mcontext.gregs[15];
+#elif defined(__riscv)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.__gregs[REG_PC];
+ *bp = ucontext->uc_mcontext.__gregs[REG_S0];
+ *sp = ucontext->uc_mcontext.__gregs[REG_SP];
+#else
+# error "Unsupported arch"
+#endif
+}
+
+void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
+
+void InitializePlatformEarly() {
+ // Do nothing.
+}
+
+void MaybeReexec() {
+ // No need to re-exec on Linux.
+}
+
+void CheckASLR() {
+#if SANITIZER_NETBSD
+ int mib[3];
+ int paxflags;
+ uptr len = sizeof(paxflags);
+
+ mib[0] = CTL_PROC;
+ mib[1] = internal_getpid();
+ mib[2] = PROC_PID_PAXFLAGS;
+
+ if (UNLIKELY(internal_sysctl(mib, 3, &paxflags, &len, NULL, 0) == -1)) {
+ Printf("sysctl failed\n");
+ Die();
+ }
+
+ if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_ASLR)) {
+ Printf("This sanitizer is not compatible with enabled ASLR.\n"
+ "To disable ASLR, please run \"paxctl +a %s\" and try again.\n",
+ GetArgv()[0]);
+ Die();
+ }
+#elif SANITIZER_PPC64V2
+ // Disable ASLR for Linux PPC64LE.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1, "WARNING: Program is being run with address space layout "
+ "randomization (ASLR) enabled which prevents the thread and "
+ "memory sanitizers from working on powerpc64le.\n"
+ "ASLR will be disabled and the program re-executed.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ ReExec();
+ }
+#elif SANITIZER_FREEBSD
+ int aslr_pie;
+ uptr len = sizeof(aslr_pie);
+#if SANITIZER_WORDSIZE == 64
+ if (UNLIKELY(internal_sysctlbyname("kern.elf64.aslr.pie_enable",
+ &aslr_pie, &len, NULL, 0) == -1)) {
+ // We're making things less 'dramatic' here since
+ // the OID is not necessarily guaranteed to be here
+ // just yet regarding FreeBSD release
+ return;
+ }
+
+ if (aslr_pie > 0) {
+ Printf("This sanitizer is not compatible with enabled ASLR "
+ "and binaries compiled with PIE\n");
+ Die();
+ }
+#endif
+ // there might be 32 bits compat for 64 bits
+ if (UNLIKELY(internal_sysctlbyname("kern.elf32.aslr.pie_enable",
+ &aslr_pie, &len, NULL, 0) == -1)) {
+ return;
+ }
+
+ if (aslr_pie > 0) {
+ Printf("This sanitizer is not compatible with enabled ASLR "
+ "and binaries compiled with PIE\n");
+ Die();
+ }
+#else
+ // Do nothing
+#endif
+}
+
+void CheckMPROTECT() {
+#if SANITIZER_NETBSD
+ int mib[3];
+ int paxflags;
+ uptr len = sizeof(paxflags);
+
+ mib[0] = CTL_PROC;
+ mib[1] = internal_getpid();
+ mib[2] = PROC_PID_PAXFLAGS;
+
+ if (UNLIKELY(internal_sysctl(mib, 3, &paxflags, &len, NULL, 0) == -1)) {
+ Printf("sysctl failed\n");
+ Die();
+ }
+
+ if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_MPROTECT)) {
+ Printf("This sanitizer is not compatible with enabled MPROTECT\n");
+ Die();
+ }
+#else
+ // Do nothing
+#endif
+}
+
+void PrintModuleMap() { }
+
+void CheckNoDeepBind(const char *filename, int flag) {
+#ifdef RTLD_DEEPBIND
+ if (flag & RTLD_DEEPBIND) {
+ Report(
+ "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
+ " which is incompatible with sanitizer runtime "
+ "(see https://github.com/google/sanitizers/issues/611 for details"
+ "). If you want to run %s library under sanitizers please remove "
+ "RTLD_DEEPBIND from dlopen flags.\n",
+ filename, filename);
+ Die();
+ }
+#endif
+}
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found,
+ uptr *max_occupied_addr) {
+ UNREACHABLE("FindAvailableMemoryRange is not available");
+ return 0;
+}
+
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ if (!buffer || !length || length > 256)
+ return false;
+#if SANITIZER_USE_GETENTROPY
+ uptr rnd = getentropy(buffer, length);
+ int rverrno = 0;
+ if (internal_iserror(rnd, &rverrno) && rverrno == EFAULT)
+ return false;
+ else if (rnd == 0)
+ return true;
+#endif // SANITIZER_USE_GETENTROPY
+
+#if SANITIZER_USE_GETRANDOM
+ static atomic_uint8_t skip_getrandom_syscall;
+ if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
+ // Up to 256 bytes, getrandom will not be interrupted.
+ uptr res = internal_syscall(SYSCALL(getrandom), buffer, length,
+ blocking ? 0 : GRND_NONBLOCK);
+ int rverrno = 0;
+ if (internal_iserror(res, &rverrno) && rverrno == ENOSYS)
+ atomic_store_relaxed(&skip_getrandom_syscall, 1);
+ else if (res == length)
+ return true;
+ }
+#endif // SANITIZER_USE_GETRANDOM
+ // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+ // blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
+ uptr fd = internal_open("/dev/urandom", O_RDONLY);
+ if (internal_iserror(fd))
+ return false;
+ uptr res = internal_read(fd, buffer, length);
+ if (internal_iserror(res))
+ return false;
+ internal_close(fd);
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_linux.h b/lib/tsan/sanitizer_common/sanitizer_linux.h
new file mode 100644
index 0000000000..c162d1ca5d
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_linux.h
@@ -0,0 +1,162 @@
+//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Linux-specific syscall wrappers and classes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LINUX_H
+#define SANITIZER_LINUX_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_freebsd.h"
+#include "sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_platform_limits_openbsd.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_platform_limits_solaris.h"
+#include "sanitizer_posix.h"
+
+struct link_map; // Opaque type returned by dlopen().
+struct utsname;
+
+namespace __sanitizer {
+// Dirent structure for getdents(). Note that this structure is different from
+// the one in <dirent.h>, which is used by readdir().
+struct linux_dirent;
+
+struct ProcSelfMapsBuff {
+ char *data;
+ uptr mmaped_size;
+ uptr len;
+};
+
+struct MemoryMappingLayoutData {
+ ProcSelfMapsBuff proc_self_maps;
+ const char *current;
+};
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
+
+// Syscall wrappers.
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
+uptr internal_sigaltstack(const void* ss, void* oss);
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset);
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
+
+// Linux-only syscalls.
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
+// (like the process-wide error reporting SEGV handler) must use
+// internal_sigaction instead.
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
+ || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
+ || defined(__arm__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr);
+#endif
+int internal_uname(struct utsname *buf);
+#elif SANITIZER_FREEBSD
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#elif SANITIZER_NETBSD
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg);
+#endif // SANITIZER_LINUX
+
+// This class reads thread IDs from /proc/<pid>/task using only syscalls.
+class ThreadLister {
+ public:
+ explicit ThreadLister(pid_t pid);
+ ~ThreadLister();
+ enum Result {
+ Error,
+ Incomplete,
+ Ok,
+ };
+ Result ListThreads(InternalMmapVector<tid_t> *threads);
+
+ private:
+ bool IsAlive(int tid);
+
+ pid_t pid_;
+ int descriptor_ = -1;
+ InternalMmapVector<char> buffer_;
+};
+
+// Exposed for testing.
+uptr ThreadDescriptorSize();
+uptr ThreadSelf();
+uptr ThreadSelfOffset();
+
+// Matches a library's file name against a base name (stripping path and version
+// information).
+bool LibraryNameIs(const char *full_name, const char *base_name);
+
+// Call cb for each region mapped by map.
+void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
+
+// Releases memory pages entirely within the [beg, end] address range.
+// The pages no longer count toward RSS; reads are guaranteed to return 0.
+// Requires (but does not verify!) that pages are MAP_PRIVATE.
+INLINE void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
+ // man madvise on Linux promises zero-fill for anonymous private pages.
+ // Testing shows the same behaviour for private (but not anonymous) mappings
+ // of shm_open() files, as long as the underlying file is untouched.
+ CHECK(SANITIZER_LINUX);
+ ReleaseMemoryPagesToOS(beg, end);
+}
+
+#if SANITIZER_ANDROID
+
+#if defined(__aarch64__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; })
+#elif defined(__arm__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; })
+#elif defined(__mips__)
+// On mips32r1, this goes via a kernel illegal instruction trap that's
+// optimized for v1.
+# define __get_tls() \
+ ({ register void** __v asm("v1"); \
+ __asm__(".set push\n" \
+ ".set mips32r2\n" \
+ "rdhwr %0,$29\n" \
+ ".set pop\n" : "=r"(__v)); \
+ __v; })
+#elif defined(__i386__)
+# define __get_tls() \
+ ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
+#elif defined(__x86_64__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for sanitizers starting
+// with Q, given that Android currently doesn't support ELF TLS. It is used to
+// store sanitizer thread specific data.
+static const int TLS_SLOT_SANITIZER = 6;
+
+ALWAYS_INLINE uptr *get_android_tls_ptr() {
+ return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
+}
+
+#endif // SANITIZER_ANDROID
+
+} // namespace __sanitizer
+
+#endif
+#endif // SANITIZER_LINUX_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_linux_s390.cpp b/lib/tsan/sanitizer_common/sanitizer_linux_s390.cpp
new file mode 100644
index 0000000000..bb2f5b5f9f
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_linux_s390.cpp
@@ -0,0 +1,222 @@
+//===-- sanitizer_linux_s390.cpp ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements s390-linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX && SANITIZER_S390
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+
+namespace __sanitizer {
+
+// --------------- sanitizer_libc.h
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ u64 offset) {
+ struct s390_mmap_params {
+ unsigned long addr;
+ unsigned long length;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+ } params = {
+ (unsigned long)addr,
+ (unsigned long)length,
+ (unsigned long)prot,
+ (unsigned long)flags,
+ (unsigned long)fd,
+# ifdef __s390x__
+ (unsigned long)offset,
+# else
+ (unsigned long)(offset / 4096),
+# endif
+ };
+# ifdef __s390x__
+ return syscall(__NR_mmap, &params);
+# else
+ return syscall(__NR_mmap2, &params);
+# endif
+}
+
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ // Minimum frame size.
+#ifdef __s390x__
+ child_stack = (char *)child_stack - 160;
+#else
+ child_stack = (char *)child_stack - 96;
+#endif
+ // Terminate unwind chain.
+ ((unsigned long *)child_stack)[0] = 0;
+ // And pass parameters.
+ ((unsigned long *)child_stack)[1] = (uptr)fn;
+ ((unsigned long *)child_stack)[2] = (uptr)arg;
+ register long res __asm__("r2");
+ register void *__cstack __asm__("r2") = child_stack;
+ register int __flags __asm__("r3") = flags;
+ register int * __ptidptr __asm__("r4") = parent_tidptr;
+ register int * __ctidptr __asm__("r5") = child_tidptr;
+ register void * __newtls __asm__("r6") = newtls;
+
+ __asm__ __volatile__(
+ /* Clone. */
+ "svc %1\n"
+
+ /* if (%r2 != 0)
+ * return;
+ */
+#ifdef __s390x__
+ "cghi %%r2, 0\n"
+#else
+ "chi %%r2, 0\n"
+#endif
+ "jne 1f\n"
+
+ /* Call "fn(arg)". */
+#ifdef __s390x__
+ "lmg %%r1, %%r2, 8(%%r15)\n"
+#else
+ "lm %%r1, %%r2, 4(%%r15)\n"
+#endif
+ "basr %%r14, %%r1\n"
+
+ /* Call _exit(%r2). */
+ "svc %2\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r" (res)
+ : "i"(__NR_clone), "i"(__NR_exit),
+ "r"(__cstack),
+ "r"(__flags),
+ "r"(__ptidptr),
+ "r"(__ctidptr),
+ "r"(__newtls)
+ : "memory", "cc");
+ return res;
+}
+
+#if SANITIZER_S390_64
+static bool FixedCVE_2016_2143() {
+ // Try to determine if the running kernel has a fix for CVE-2016-2143,
+ // return false if in doubt (better safe than sorry). Distros may want to
+ // adjust this for their own kernels.
+ struct utsname buf;
+ unsigned int major, minor, patch = 0;
+ // This should never fail, but just in case...
+ if (internal_uname(&buf))
+ return false;
+ const char *ptr = buf.release;
+ major = internal_simple_strtoll(ptr, &ptr, 10);
+ // At least first 2 should be matched.
+ if (ptr[0] != '.')
+ return false;
+ minor = internal_simple_strtoll(ptr+1, &ptr, 10);
+ // Third is optional.
+ if (ptr[0] == '.')
+ patch = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (major < 3) {
+ if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
+ internal_strstr(ptr, ".el6")) {
+ // Check RHEL6
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 657) // 2.6.32-657.el6 or later
+ return true;
+ if (r1 == 642 && ptr[0] == '.') {
+ int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
+ return true;
+ }
+ }
+ // <3.0 is bad.
+ return false;
+ } else if (major == 3) {
+ // 3.2.79+ is OK.
+ if (minor == 2 && patch >= 79)
+ return true;
+ // 3.12.58+ is OK.
+ if (minor == 12 && patch >= 58)
+ return true;
+ if (minor == 10 && patch == 0 && ptr[0] == '-' &&
+ internal_strstr(ptr, ".el7")) {
+ // Check RHEL7
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 426) // 3.10.0-426.el7 or later
+ return true;
+ if (r1 == 327 && ptr[0] == '.') {
+ int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
+ return true;
+ }
+ }
+ // Otherwise, bad.
+ return false;
+ } else if (major == 4) {
+ // 4.1.21+ is OK.
+ if (minor == 1 && patch >= 21)
+ return true;
+ // 4.4.6+ is OK.
+ if (minor == 4 && patch >= 6)
+ return true;
+ if (minor == 4 && patch == 0 && ptr[0] == '-' &&
+ internal_strstr(buf.version, "Ubuntu")) {
+ // Check Ubuntu 16.04
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 13) // 4.4.0-13 or later
+ return true;
+ }
+ // Otherwise, OK if 4.5+.
+ return minor >= 5;
+ } else {
+ // Linux 5 and up are fine.
+ return true;
+ }
+}
+
+void AvoidCVE_2016_2143() {
+ // Older kernels are affected by CVE-2016-2143 - they will crash hard
+ // if someone uses 4-level page tables (ie. virtual addresses >= 4TB)
+ // and fork() in the same process. Unfortunately, sanitizers tend to
+ // require such addresses. Since this is very likely to crash the whole
+ // machine (sanitizers themselves use fork() for llvm-symbolizer, for one),
+ // abort the process at initialization instead.
+ if (FixedCVE_2016_2143())
+ return;
+ if (GetEnv("SANITIZER_IGNORE_CVE_2016_2143"))
+ return;
+ Report(
+ "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using ASan,\n"
+ "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
+ "machine, or worse.\n"
+ "\n"
+ "If you are certain your kernel is not vulnerable (you have compiled it\n"
+ "yourself, or are using an unrecognized distribution kernel), you can\n"
+ "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
+ "with any value.\n");
+ Die();
+}
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX && SANITIZER_S390
diff --git a/lib/tsan/sanitizer_common/sanitizer_list.h b/lib/tsan/sanitizer_common/sanitizer_list.h
new file mode 100644
index 0000000000..f0b925945e
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_list.h
@@ -0,0 +1,166 @@
+//===-- sanitizer_list.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains implementation of a list class to be used by
+// ThreadSanitizer, etc run-times.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LIST_H
+#define SANITIZER_LIST_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// Intrusive singly-linked list with size(), push_back(), push_front()
+// pop_front(), append_front() and append_back().
+// This class should be a POD (so that it can be put into TLS)
+// and an object with all zero fields should represent a valid empty list.
+// This class does not have a CTOR, so clear() should be called on all
+// non-zero-initialized objects before using.
+template<class Item>
+struct IntrusiveList {
+ friend class Iterator;
+
+ void clear() {
+ first_ = last_ = nullptr;
+ size_ = 0;
+ }
+
+ bool empty() const { return size_ == 0; }
+ uptr size() const { return size_; }
+
+ void push_back(Item *x) {
+ if (empty()) {
+ x->next = nullptr;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = nullptr;
+ last_->next = x;
+ last_ = x;
+ size_++;
+ }
+ }
+
+ void push_front(Item *x) {
+ if (empty()) {
+ x->next = nullptr;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = first_;
+ first_ = x;
+ size_++;
+ }
+ }
+
+ void pop_front() {
+ CHECK(!empty());
+ first_ = first_->next;
+ if (!first_)
+ last_ = nullptr;
+ size_--;
+ }
+
+ void extract(Item *prev, Item *x) {
+ CHECK(!empty());
+ CHECK_NE(prev, nullptr);
+ CHECK_NE(x, nullptr);
+ CHECK_EQ(prev->next, x);
+ prev->next = x->next;
+ if (last_ == x)
+ last_ = prev;
+ size_--;
+ }
+
+ Item *front() { return first_; }
+ const Item *front() const { return first_; }
+ Item *back() { return last_; }
+ const Item *back() const { return last_; }
+
+ void append_front(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (l->empty())
+ return;
+ if (empty()) {
+ *this = *l;
+ } else if (!l->empty()) {
+ l->last_->next = first_;
+ first_ = l->first_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void append_back(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (l->empty())
+ return;
+ if (empty()) {
+ *this = *l;
+ } else {
+ last_->next = l->first_;
+ last_ = l->last_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void CheckConsistency() {
+ if (size_ == 0) {
+ CHECK_EQ(first_, 0);
+ CHECK_EQ(last_, 0);
+ } else {
+ uptr count = 0;
+ for (Item *i = first_; ; i = i->next) {
+ count++;
+ if (i == last_) break;
+ }
+ CHECK_EQ(size(), count);
+ CHECK_EQ(last_->next, 0);
+ }
+ }
+
+ template<class ItemTy>
+ class IteratorBase {
+ public:
+ explicit IteratorBase(ItemTy *current) : current_(current) {}
+ IteratorBase &operator++() {
+ current_ = current_->next;
+ return *this;
+ }
+ bool operator!=(IteratorBase other) const {
+ return current_ != other.current_;
+ }
+ ItemTy &operator*() {
+ return *current_;
+ }
+ private:
+ ItemTy *current_;
+ };
+
+ typedef IteratorBase<Item> Iterator;
+ typedef IteratorBase<const Item> ConstIterator;
+
+ Iterator begin() { return Iterator(first_); }
+ Iterator end() { return Iterator(0); }
+
+ ConstIterator begin() const { return ConstIterator(first_); }
+ ConstIterator end() const { return ConstIterator(0); }
+
+// private, don't use directly.
+ uptr size_;
+ Item *first_;
+ Item *last_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIST_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_local_address_space_view.h b/lib/tsan/sanitizer_common/sanitizer_local_address_space_view.h
new file mode 100644
index 0000000000..5d1b5264b5
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_local_address_space_view.h
@@ -0,0 +1,76 @@
+//===-- sanitizer_local_address_space_view.h --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// `LocalAddressSpaceView` provides the local (i.e. target and current address
+// space are the same) implementation of the `AddressSpaveView` interface which
+// provides a simple interface to load memory from another process (i.e.
+// out-of-process)
+//
+// The `AddressSpaceView` interface requires that the type can be used as a
+// template parameter to objects that wish to be able to operate in an
+// out-of-process manner. In normal usage, objects are in-process and are thus
+// instantiated with the `LocalAddressSpaceView` type. This type is used to
+// load any pointers in instance methods. This implementation is effectively
+// a no-op. When an object is to be used in an out-of-process manner it is
+// instansiated with the `RemoteAddressSpaceView` type.
+//
+// By making `AddressSpaceView` a template parameter of an object, it can
+// change its implementation at compile time which has no run time overhead.
+// This also allows unifying in-process and out-of-process code which avoids
+// code duplication.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LOCAL_ADDRES_SPACE_VIEW_H
+#define SANITIZER_LOCAL_ADDRES_SPACE_VIEW_H
+
+namespace __sanitizer {
+struct LocalAddressSpaceView {
+ // Load memory `sizeof(T) * num_elements` bytes of memory from the target
+ // process (always local for this implementation) starting at address
+ // `target_address`. The local copy of this memory is returned as a pointer.
+ // The caller should not write to this memory. The behaviour when doing so is
+ // undefined. Callers should use `LoadWritable()` to get access to memory
+ // that is writable.
+ //
+ // The lifetime of loaded memory is implementation defined.
+ template <typename T>
+ static const T *Load(const T *target_address, uptr num_elements = 1) {
+ // The target address space is the local address space so
+ // nothing needs to be copied. Just return the pointer.
+ return target_address;
+ }
+
+ // Load memory `sizeof(T) * num_elements` bytes of memory from the target
+ // process (always local for this implementation) starting at address
+ // `target_address`. The local copy of this memory is returned as a pointer.
+ // The memory returned may be written to.
+ //
+ // Writes made to the returned memory will be visible in the memory returned
+ // by subsequent `Load()` or `LoadWritable()` calls provided the
+ // `target_address` parameter is the same. It is not guaranteed that the
+ // memory returned by previous calls to `Load()` will contain any performed
+ // writes. If two or more overlapping regions of memory are loaded via
+ // separate calls to `LoadWritable()`, it is implementation defined whether
+ // writes made to the region returned by one call are visible in the regions
+ // returned by other calls.
+ //
+ // Given the above it is recommended to load the largest possible object
+ // that requires modification (e.g. a class) rather than individual fields
+ // from a class to avoid issues with overlapping writable regions.
+ //
+ // The lifetime of loaded memory is implementation defined.
+ template <typename T>
+ static T *LoadWritable(T *target_address, uptr num_elements = 1) {
+ // The target address space is the local address space so
+ // nothing needs to be copied. Just return the pointer.
+ return target_address;
+ }
+};
+} // namespace __sanitizer
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_mac.cpp b/lib/tsan/sanitizer_common/sanitizer_mac.cpp
new file mode 100644
index 0000000000..7a3dfbcc27
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_mac.cpp
@@ -0,0 +1,1228 @@
+//===-- sanitizer_mac.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements OSX-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_mac.h"
+#include "interception/interception.h"
+
+// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
+// the clients will most certainly use 64-bit ones as well.
+#ifndef _DARWIN_USE_64_BIT_INODE
+#define _DARWIN_USE_64_BIT_INODE 1
+#endif
+#include <stdio.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_ptrauth.h"
+
+#if !SANITIZER_IOS
+#include <crt_externs.h> // for _NSGetEnviron
+#else
+extern char **environ;
+#endif
+
+#if defined(__has_include) && __has_include(<os/trace.h>)
+#define SANITIZER_OS_TRACE 1
+#include <os/trace.h>
+#else
+#define SANITIZER_OS_TRACE 0
+#endif
+
+#if !SANITIZER_IOS
+#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
+#else
+extern "C" {
+ extern char ***_NSGetArgv(void);
+}
+#endif
+
+#include <asl.h>
+#include <dlfcn.h> // for dladdr()
+#include <errno.h>
+#include <fcntl.h>
+#include <libkern/OSAtomic.h>
+#include <mach-o/dyld.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach/vm_statistics.h>
+#include <malloc/malloc.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <spawn.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <util.h>
+
+// From <crt_externs.h>, but we don't have that file on iOS.
+extern "C" {
+ extern char ***_NSGetArgv(void);
+ extern char ***_NSGetEnviron(void);
+}
+
+// From <mach/mach_vm.h>, but we don't have that file on iOS.
+extern "C" {
+ extern kern_return_t mach_vm_region_recurse(
+ vm_map_t target_task,
+ mach_vm_address_t *address,
+ mach_vm_size_t *size,
+ natural_t *nesting_depth,
+ vm_region_recurse_info_t info,
+ mach_msg_type_number_t *infoCnt);
+}
+
+namespace __sanitizer {
+
+#include "sanitizer_syscall_generic.inc"
+
+// Direct syscalls, don't call libmalloc hooks (but not available on 10.6).
+extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,
+ off_t off) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
+
+// ---------------------- sanitizer_libc.h
+
+// From <mach/vm_statistics.h>, but not on older OSs.
+#ifndef VM_MEMORY_SANITIZER
+#define VM_MEMORY_SANITIZER 99
+#endif
+
+// XNU on Darwin provides a mmap flag that optimizes allocation/deallocation of
+// giant memory regions (i.e. shadow memory regions).
+#define kXnuFastMmapFd 0x4
+static size_t kXnuFastMmapThreshold = 2 << 30; // 2 GB
+static bool use_xnu_fast_mmap = false;
+
+uptr internal_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, u64 offset) {
+ if (fd == -1) {
+ fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER);
+ if (length >= kXnuFastMmapThreshold) {
+ if (use_xnu_fast_mmap) fd |= kXnuFastMmapFd;
+ }
+ }
+ if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
+ return (uptr)mmap(addr, length, prot, flags, fd, offset);
+}
+
+uptr internal_munmap(void *addr, uptr length) {
+ if (&__munmap) return __munmap(addr, length);
+ return munmap(addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return mprotect(addr, length, prot);
+}
+
+uptr internal_close(fd_t fd) {
+ return close(fd);
+}
+
+uptr internal_open(const char *filename, int flags) {
+ return open(filename, flags);
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ return open(filename, flags, mode);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ return read(fd, buf, count);
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ return write(fd, buf, count);
+}
+
+uptr internal_stat(const char *path, void *buf) {
+ return stat(path, (struct stat *)buf);
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+ return lstat(path, (struct stat *)buf);
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+ return fstat(fd, (struct stat *)buf);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+uptr internal_dup(int oldfd) {
+ return dup(oldfd);
+}
+
+uptr internal_dup2(int oldfd, int newfd) {
+ return dup2(oldfd, newfd);
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ return readlink(path, buf, bufsize);
+}
+
+uptr internal_unlink(const char *path) {
+ return unlink(path);
+}
+
+uptr internal_sched_yield() {
+ return sched_yield();
+}
+
+void internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ return sleep(seconds);
+}
+
+uptr internal_getpid() {
+ return getpid();
+}
+
+int internal_dlinfo(void *handle, int request, void *p) {
+ UNIMPLEMENTED();
+}
+
+int internal_sigaction(int signum, const void *act, void *oldact) {
+ return sigaction(signum,
+ (const struct sigaction *)act, (struct sigaction *)oldact);
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); }
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ // Don't use sigprocmask here, because it affects all threads.
+ return pthread_sigmask(how, set, oldset);
+}
+
+// Doesn't call pthread_atfork() handlers (but not available on 10.6).
+extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE;
+
+int internal_fork() {
+ if (&__fork)
+ return __fork();
+ return fork();
+}
+
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen) {
+ return sysctl(const_cast<int *>(name), namelen, oldp, (size_t *)oldlenp,
+ const_cast<void *>(newp), (size_t)newlen);
+}
+
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ return sysctlbyname(sname, oldp, (size_t *)oldlenp, const_cast<void *>(newp),
+ (size_t)newlen);
+}
+
+static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
+ pid_t *pid) {
+ fd_t master_fd = kInvalidFd;
+ fd_t slave_fd = kInvalidFd;
+
+ auto fd_closer = at_scope_exit([&] {
+ internal_close(master_fd);
+ internal_close(slave_fd);
+ });
+
+ // We need a new pseudoterminal to avoid buffering problems. The 'atos' tool
+ // in particular detects when it's talking to a pipe and forgets to flush the
+ // output stream after sending a response.
+ master_fd = posix_openpt(O_RDWR);
+ if (master_fd == kInvalidFd) return kInvalidFd;
+
+ int res = grantpt(master_fd) || unlockpt(master_fd);
+ if (res != 0) return kInvalidFd;
+
+ // Use TIOCPTYGNAME instead of ptsname() to avoid threading problems.
+ char slave_pty_name[128];
+ res = ioctl(master_fd, TIOCPTYGNAME, slave_pty_name);
+ if (res == -1) return kInvalidFd;
+
+ slave_fd = internal_open(slave_pty_name, O_RDWR);
+ if (slave_fd == kInvalidFd) return kInvalidFd;
+
+ // File descriptor actions
+ posix_spawn_file_actions_t acts;
+ res = posix_spawn_file_actions_init(&acts);
+ if (res != 0) return kInvalidFd;
+
+ auto acts_cleanup = at_scope_exit([&] {
+ posix_spawn_file_actions_destroy(&acts);
+ });
+
+ res = posix_spawn_file_actions_adddup2(&acts, slave_fd, STDIN_FILENO) ||
+ posix_spawn_file_actions_adddup2(&acts, slave_fd, STDOUT_FILENO) ||
+ posix_spawn_file_actions_addclose(&acts, slave_fd);
+ if (res != 0) return kInvalidFd;
+
+ // Spawn attributes
+ posix_spawnattr_t attrs;
+ res = posix_spawnattr_init(&attrs);
+ if (res != 0) return kInvalidFd;
+
+ auto attrs_cleanup = at_scope_exit([&] {
+ posix_spawnattr_destroy(&attrs);
+ });
+
+ // In the spawned process, close all file descriptors that are not explicitly
+ // described by the file actions object. This is Darwin-specific extension.
+ res = posix_spawnattr_setflags(&attrs, POSIX_SPAWN_CLOEXEC_DEFAULT);
+ if (res != 0) return kInvalidFd;
+
+ // posix_spawn
+ char **argv_casted = const_cast<char **>(argv);
+ char **envp_casted = const_cast<char **>(envp);
+ res = posix_spawn(pid, argv[0], &acts, &attrs, argv_casted, envp_casted);
+ if (res != 0) return kInvalidFd;
+
+ // Disable echo in the new terminal, disable CR.
+ struct termios termflags;
+ tcgetattr(master_fd, &termflags);
+ termflags.c_oflag &= ~ONLCR;
+ termflags.c_lflag &= ~ECHO;
+ tcsetattr(master_fd, TCSANOW, &termflags);
+
+ // On success, do not close master_fd on scope exit.
+ fd_t fd = master_fd;
+ master_fd = kInvalidFd;
+
+ return fd;
+}
+
+fd_t internal_spawn(const char *argv[], const char *envp[], pid_t *pid) {
+ // The client program may close its stdin and/or stdout and/or stderr thus
+ // allowing open/posix_openpt to reuse file descriptors 0, 1 or 2. In this
+ // case the communication is broken if either the parent or the child tries to
+ // close or duplicate these descriptors. We temporarily reserve these
+ // descriptors here to prevent this.
+ fd_t low_fds[3];
+ size_t count = 0;
+
+ for (; count < 3; count++) {
+ low_fds[count] = posix_openpt(O_RDWR);
+ if (low_fds[count] >= STDERR_FILENO)
+ break;
+ }
+
+ fd_t fd = internal_spawn_impl(argv, envp, pid);
+
+ for (; count > 0; count--) {
+ internal_close(low_fds[count]);
+ }
+
+ return fd;
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ return rename(oldpath, newpath);
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ return ftruncate(fd, size);
+}
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ return execve(filename, argv, envp);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ return waitpid(pid, status, options);
+}
+
+// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+ if (ShouldMockFailureToOpen(filename))
+ return false;
+ struct stat st;
+ if (stat(filename, &st))
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+tid_t GetTid() {
+ tid_t tid;
+ pthread_threadid_np(nullptr, &tid);
+ return tid;
+}
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ uptr stacksize = pthread_get_stacksize_np(pthread_self());
+ // pthread_get_stacksize_np() returns an incorrect stack size for the main
+ // thread on Mavericks. See
+ // https://github.com/google/sanitizers/issues/261
+ if ((GetMacosAlignedVersion() >= MacosVersion(10, 9)) && at_initialization &&
+ stacksize == (1 << 19)) {
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+ // Most often rl.rlim_cur will be the desired 8M.
+ if (rl.rlim_cur < kMaxThreadStackSize) {
+ stacksize = rl.rlim_cur;
+ } else {
+ stacksize = kMaxThreadStackSize;
+ }
+ }
+ void *stackaddr = pthread_get_stackaddr_np(pthread_self());
+ *stack_top = (uptr)stackaddr;
+ *stack_bottom = *stack_top - stacksize;
+}
+
+char **GetEnviron() {
+#if !SANITIZER_IOS
+ char ***env_ptr = _NSGetEnviron();
+ if (!env_ptr) {
+ Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
+ "called after libSystem_initializer().\n");
+ CHECK(env_ptr);
+ }
+ char **environ = *env_ptr;
+#endif
+ CHECK(environ);
+ return environ;
+}
+
+const char *GetEnv(const char *name) {
+ char **env = GetEnviron();
+ uptr name_len = internal_strlen(name);
+ while (*env != 0) {
+ uptr len = internal_strlen(*env);
+ if (len > name_len) {
+ const char *p = *env;
+ if (!internal_memcmp(p, name, name_len) &&
+ p[name_len] == '=') { // Match.
+ return *env + name_len + 1; // String starting after =.
+ }
+ }
+ env++;
+ }
+ return 0;
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ CHECK_LE(kMaxPathLength, buf_len);
+
+ // On OS X the executable path is saved to the stack by dyld. Reading it
+ // from there is much faster than calling dladdr, especially for large
+ // binaries with symbols.
+ InternalScopedString exe_path(kMaxPathLength);
+ uint32_t size = exe_path.size();
+ if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
+ realpath(exe_path.data(), buf) != 0) {
+ return internal_strlen(buf);
+ }
+ return 0;
+}
+
+uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+void CheckASLR() {
+ // Do nothing
+}
+
+void CheckMPROTECT() {
+ // Do nothing
+}
+
+uptr GetPageSize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+extern "C" unsigned malloc_num_zones;
+extern "C" malloc_zone_t **malloc_zones;
+malloc_zone_t sanitizer_zone;
+
+// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If
+// libmalloc tries to set up a different zone as malloc_zones[0], it will call
+// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and
+// make sure we are still the first (default) zone.
+void MprotectMallocZones(void *addr, int prot) {
+ if (addr == malloc_zones && prot == PROT_READ) {
+ if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {
+ for (unsigned i = 1; i < malloc_num_zones; i++) {
+ if (malloc_zones[i] == &sanitizer_zone) {
+ // Swap malloc_zones[0] and malloc_zones[i].
+ malloc_zones[i] = malloc_zones[0];
+ malloc_zones[0] = &sanitizer_zone;
+ break;
+ }
+ }
+ }
+ }
+}
+
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
+ CHECK_EQ(OS_SPINLOCK_INIT, 0);
+ CHECK_EQ(owner_, 0);
+ OSSpinLockLock((OSSpinLock*)&opaque_storage_);
+}
+
+void BlockingMutex::Unlock() {
+ OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
+}
+
+void BlockingMutex::CheckLocked() {
+ CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
+}
+
+u64 NanoTime() {
+ timeval tv;
+ internal_memset(&tv, 0, sizeof(tv));
+ gettimeofday(&tv, 0);
+ return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
+}
+
+// This needs to be called during initialization to avoid being racy.
+u64 MonotonicNanoTime() {
+ static mach_timebase_info_data_t timebase_info;
+ if (timebase_info.denom == 0) mach_timebase_info(&timebase_info);
+ return (mach_absolute_time() * timebase_info.numer) / timebase_info.denom;
+}
+
+uptr GetTlsSize() {
+ return 0;
+}
+
+void InitTlsSize() {
+}
+
+uptr TlsBaseAddr() {
+ uptr segbase = 0;
+#if defined(__x86_64__)
+ asm("movq %%gs:0,%0" : "=r"(segbase));
+#elif defined(__i386__)
+ asm("movl %%gs:0,%0" : "=r"(segbase));
+#endif
+ return segbase;
+}
+
+// The size of the tls on darwin does not appear to be well documented,
+// however the vm memory map suggests that it is 1024 uptrs in size,
+// with a size of 0x2000 bytes on x86_64 and 0x1000 bytes on i386.
+uptr TlsSize() {
+#if defined(__x86_64__) || defined(__i386__)
+ return 1024 * sizeof(uptr);
+#else
+ return 0;
+#endif
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+#if !SANITIZER_GO
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = TlsBaseAddr();
+ *tls_size = TlsSize();
+#else
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+#endif
+}
+
+void ListOfModules::init() {
+ clearOrInit();
+ MemoryMappingLayout memory_mapping(false);
+ memory_mapping.DumpListOfModules(&modules_);
+}
+
+void ListOfModules::fallbackInit() { clear(); }
+
+static HandleSignalMode GetHandleSignalModeImpl(int signum) {
+ switch (signum) {
+ case SIGABRT:
+ return common_flags()->handle_abort;
+ case SIGILL:
+ return common_flags()->handle_sigill;
+ case SIGTRAP:
+ return common_flags()->handle_sigtrap;
+ case SIGFPE:
+ return common_flags()->handle_sigfpe;
+ case SIGSEGV:
+ return common_flags()->handle_segv;
+ case SIGBUS:
+ return common_flags()->handle_sigbus;
+ }
+ return kHandleSignalNo;
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ // Handling fatal signals on watchOS and tvOS devices is disallowed.
+ if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
+ return kHandleSignalNo;
+ HandleSignalMode result = GetHandleSignalModeImpl(signum);
+ if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
+ return kHandleSignalExclusive;
+ return result;
+}
+
+// This corresponds to Triple::getMacOSXVersion() in the Clang driver.
+static MacosVersion GetMacosAlignedVersionInternal() {
+ u16 kernel_major = GetDarwinKernelVersion().major;
+ // Darwin 0-3 -> unsupported
+ // Darwin 4-19 -> macOS 10.x
+ // Darwin 20+ -> macOS 11+
+ CHECK_GE(kernel_major, 4);
+ u16 major, minor;
+ if (kernel_major < 20) {
+ major = 10;
+ minor = kernel_major - 4;
+ } else {
+ major = 11 + kernel_major - 20;
+ minor = 0;
+ }
+ return MacosVersion(major, minor);
+}
+
+static_assert(sizeof(MacosVersion) == sizeof(atomic_uint32_t::Type),
+ "MacosVersion cache size");
+static atomic_uint32_t cached_macos_version;
+
+MacosVersion GetMacosAlignedVersion() {
+ atomic_uint32_t::Type result =
+ atomic_load(&cached_macos_version, memory_order_acquire);
+ if (!result) {
+ MacosVersion version = GetMacosAlignedVersionInternal();
+ result = *reinterpret_cast<atomic_uint32_t::Type *>(&version);
+ atomic_store(&cached_macos_version, result, memory_order_release);
+ }
+ return *reinterpret_cast<MacosVersion *>(&result);
+}
+
+void ParseVersion(const char *vers, u16 *major, u16 *minor) {
+ // Format: <major>.<minor>.<patch>\0
+ CHECK_GE(internal_strlen(vers), 5);
+ const char *p = vers;
+ *major = internal_simple_strtoll(p, &p, /*base=*/10);
+ CHECK_EQ(*p, '.');
+ p += 1;
+ *minor = internal_simple_strtoll(p, &p, /*base=*/10);
+}
+
+DarwinKernelVersion GetDarwinKernelVersion() {
+ char buf[100];
+ size_t len = sizeof(buf);
+ int res = internal_sysctlbyname("kern.osrelease", buf, &len, nullptr, 0);
+ CHECK_EQ(res, 0);
+
+ u16 major, minor;
+ ParseVersion(buf, &major, &minor);
+
+ return DarwinKernelVersion(major, minor);
+}
+
+uptr GetRSS() {
+ struct task_basic_info info;
+ unsigned count = TASK_BASIC_INFO_COUNT;
+ kern_return_t result =
+ task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count);
+ if (UNLIKELY(result != KERN_SUCCESS)) {
+ Report("Cannot get task info. Error: %d\n", result);
+ Die();
+ }
+ return info.resident_size;
+}
+
+void *internal_start_thread(void *(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
+ internal_sigfillset(&set);
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ pthread_t th;
+ pthread_create(&th, 0, func, arg);
+ internal_sigprocmask(SIG_SETMASK, &old, 0);
+ return th;
+}
+
+void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
+
+#if !SANITIZER_GO
+static BlockingMutex syslog_lock(LINKER_INITIALIZED);
+#endif
+
+void WriteOneLineToSyslog(const char *s) {
+#if !SANITIZER_GO
+ syslog_lock.CheckLocked();
+ asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
+#endif
+}
+
+void LogMessageOnPrintf(const char *str) {
+ // Log all printf output to CrashLog.
+ if (common_flags()->abort_on_error)
+ CRAppendCrashLogMessage(str);
+}
+
+void LogFullErrorReport(const char *buffer) {
+#if !SANITIZER_GO
+ // Log with os_trace. This will make it into the crash log.
+#if SANITIZER_OS_TRACE
+ if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) {
+ // os_trace requires the message (format parameter) to be a string literal.
+ if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
+ sizeof("AddressSanitizer") - 1) == 0)
+ os_trace("Address Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer",
+ sizeof("UndefinedBehaviorSanitizer") - 1) == 0)
+ os_trace("Undefined Behavior Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer",
+ sizeof("ThreadSanitizer") - 1) == 0)
+ os_trace("Thread Sanitizer reported a failure.");
+ else
+ os_trace("Sanitizer tool reported a failure.");
+
+ if (common_flags()->log_to_syslog)
+ os_trace("Consult syslog for more information.");
+ }
+#endif
+
+ // Log to syslog.
+ // The logging on OS X may call pthread_create so we need the threading
+ // environment to be fully initialized. Also, this should never be called when
+ // holding the thread registry lock since that may result in a deadlock. If
+ // the reporting thread holds the thread registry mutex, and asl_log waits
+ // for GCD to dispatch a new thread, the process will deadlock, because the
+ // pthread_create wrapper needs to acquire the lock as well.
+ BlockingMutexLock l(&syslog_lock);
+ if (common_flags()->log_to_syslog)
+ WriteToSyslog(buffer);
+
+ // The report is added to CrashLog as part of logging all of Printf output.
+#endif
+}
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
+#if defined(__x86_64__) || defined(__i386__)
+ ucontext_t *ucontext = static_cast<ucontext_t*>(context);
+ return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
+#else
+ return UNKNOWN;
+#endif
+}
+
+bool SignalContext::IsTrueFaultingAddress() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ // "Real" SIGSEGV codes (e.g., SEGV_MAPERR, SEGV_MAPERR) are non-zero.
+ return si->si_signo == SIGSEGV && si->si_code != 0;
+}
+
+#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)
+ #define AARCH64_GET_REG(r) \
+ (uptr)ptrauth_strip( \
+ (void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
+#else
+ #define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r
+#endif
+
+static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if defined(__aarch64__)
+ *pc = AARCH64_GET_REG(pc);
+# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
+ *bp = AARCH64_GET_REG(fp);
+# else
+ *bp = AARCH64_GET_REG(lr);
+# endif
+ *sp = AARCH64_GET_REG(sp);
+# elif defined(__x86_64__)
+ *pc = ucontext->uc_mcontext->__ss.__rip;
+ *bp = ucontext->uc_mcontext->__ss.__rbp;
+ *sp = ucontext->uc_mcontext->__ss.__rsp;
+# elif defined(__arm__)
+ *pc = ucontext->uc_mcontext->__ss.__pc;
+ *bp = ucontext->uc_mcontext->__ss.__r[7];
+ *sp = ucontext->uc_mcontext->__ss.__sp;
+# elif defined(__i386__)
+ *pc = ucontext->uc_mcontext->__ss.__eip;
+ *bp = ucontext->uc_mcontext->__ss.__ebp;
+ *sp = ucontext->uc_mcontext->__ss.__esp;
+# else
+# error "Unknown architecture"
+# endif
+}
+
+void SignalContext::InitPcSpBp() {
+ addr = (uptr)ptrauth_strip((void *)addr, 0);
+ GetPcSpBp(context, &pc, &sp, &bp);
+}
+
+void InitializePlatformEarly() {
+ // Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
+ use_xnu_fast_mmap =
+#if defined(__x86_64__)
+ GetDarwinKernelVersion() >= DarwinKernelVersion(17, 5);
+#else
+ false;
+#endif
+}
+
+#if !SANITIZER_GO
+static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
+LowLevelAllocator allocator_for_env;
+
+// Change the value of the env var |name|, leaking the original value.
+// If |name_value| is NULL, the variable is deleted from the environment,
+// otherwise the corresponding "NAME=value" string is replaced with
+// |name_value|.
+void LeakyResetEnv(const char *name, const char *name_value) {
+ char **env = GetEnviron();
+ uptr name_len = internal_strlen(name);
+ while (*env != 0) {
+ uptr len = internal_strlen(*env);
+ if (len > name_len) {
+ const char *p = *env;
+ if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
+ // Match.
+ if (name_value) {
+ // Replace the old value with the new one.
+ *env = const_cast<char*>(name_value);
+ } else {
+ // Shift the subsequent pointers back.
+ char **del = env;
+ do {
+ del[0] = del[1];
+ } while (*del++);
+ }
+ }
+ }
+ env++;
+ }
+}
+
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool ReexecDisabled() {
+ return false;
+}
+
+extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
+static const double kMinDyldVersionWithAutoInterposition = 360.0;
+
+bool DyldNeedsEnvVariable() {
+ // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
+ // still may want use them on older systems. On older Darwin platforms, dyld
+ // doesn't export dyldVersionNumber symbol and we simply return true.
+ if (!&dyldVersionNumber) return true;
+ // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
+ // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
+ // GetMacosAlignedVersion() doesn't work for the simulator. Let's instead
+ // check `dyldVersionNumber`, which is exported by dyld, against a known
+ // version number from the first OS release where this appeared.
+ return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
+}
+
+void MaybeReexec() {
+ // FIXME: This should really live in some "InitializePlatform" method.
+ MonotonicNanoTime();
+
+ if (ReexecDisabled()) return;
+
+ // Make sure the dynamic runtime library is preloaded so that the
+ // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
+ // ourselves.
+ Dl_info info;
+ RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info));
+ char *dyld_insert_libraries =
+ const_cast<char*>(GetEnv(kDyldInsertLibraries));
+ uptr old_env_len = dyld_insert_libraries ?
+ internal_strlen(dyld_insert_libraries) : 0;
+ uptr fname_len = internal_strlen(info.dli_fname);
+ const char *dylib_name = StripModuleName(info.dli_fname);
+ uptr dylib_name_len = internal_strlen(dylib_name);
+
+ bool lib_is_in_env = dyld_insert_libraries &&
+ internal_strstr(dyld_insert_libraries, dylib_name);
+ if (DyldNeedsEnvVariable() && !lib_is_in_env) {
+ // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
+ // library.
+ InternalScopedString program_name(1024);
+ uint32_t buf_size = program_name.size();
+ _NSGetExecutablePath(program_name.data(), &buf_size);
+ char *new_env = const_cast<char*>(info.dli_fname);
+ if (dyld_insert_libraries) {
+ // Append the runtime dylib name to the existing value of
+ // DYLD_INSERT_LIBRARIES.
+ new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
+ internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
+ new_env[old_env_len] = ':';
+ // Copy fname_len and add a trailing zero.
+ internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
+ fname_len + 1);
+ // Ok to use setenv() since the wrappers don't depend on the value of
+ // asan_inited.
+ setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
+ } else {
+ // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
+ setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
+ }
+ VReport(1, "exec()-ing the program with\n");
+ VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
+ VReport(1, "to enable wrappers.\n");
+ execv(program_name.data(), *_NSGetArgv());
+
+ // We get here only if execv() failed.
+ Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
+ "which is required for the sanitizer to work. We tried to set the "
+ "environment variable and re-execute itself, but execv() failed, "
+ "possibly because of sandbox restrictions. Make sure to launch the "
+ "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
+ RAW_CHECK("execv failed" && 0);
+ }
+
+ // Verify that interceptors really work. We'll use dlsym to locate
+ // "pthread_create", if interceptors are working, it should really point to
+ // "wrap_pthread_create" within our own dylib.
+ Dl_info info_pthread_create;
+ void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create");
+ RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));
+ if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {
+ Report(
+ "ERROR: Interceptors are not working. This may be because %s is "
+ "loaded too late (e.g. via dlopen). Please launch the executable "
+ "with:\n%s=%s\n",
+ SanitizerToolName, kDyldInsertLibraries, info.dli_fname);
+ RAW_CHECK("interceptors not installed" && 0);
+ }
+
+ if (!lib_is_in_env)
+ return;
+
+ if (!common_flags()->strip_env)
+ return;
+
+ // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
+ // the dylib from the environment variable, because interceptors are installed
+ // and we don't want our children to inherit the variable.
+
+ uptr env_name_len = internal_strlen(kDyldInsertLibraries);
+ // Allocate memory to hold the previous env var name, its value, the '='
+ // sign and the '\0' char.
+ char *new_env = (char*)allocator_for_env.Allocate(
+ old_env_len + 2 + env_name_len);
+ RAW_CHECK(new_env);
+ internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
+ internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
+ new_env[env_name_len] = '=';
+ char *new_env_pos = new_env + env_name_len + 1;
+
+ // Iterate over colon-separated pieces of |dyld_insert_libraries|.
+ char *piece_start = dyld_insert_libraries;
+ char *piece_end = NULL;
+ char *old_env_end = dyld_insert_libraries + old_env_len;
+ do {
+ if (piece_start[0] == ':') piece_start++;
+ piece_end = internal_strchr(piece_start, ':');
+ if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
+ if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
+ uptr piece_len = piece_end - piece_start;
+
+ char *filename_start =
+ (char *)internal_memrchr(piece_start, '/', piece_len);
+ uptr filename_len = piece_len;
+ if (filename_start) {
+ filename_start += 1;
+ filename_len = piece_len - (filename_start - piece_start);
+ } else {
+ filename_start = piece_start;
+ }
+
+ // If the current piece isn't the runtime library name,
+ // append it to new_env.
+ if ((dylib_name_len != filename_len) ||
+ (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
+ if (new_env_pos != new_env + env_name_len + 1) {
+ new_env_pos[0] = ':';
+ new_env_pos++;
+ }
+ internal_strncpy(new_env_pos, piece_start, piece_len);
+ new_env_pos += piece_len;
+ }
+ // Move on to the next piece.
+ piece_start = piece_end;
+ } while (piece_start < old_env_end);
+
+ // Can't use setenv() here, because it requires the allocator to be
+ // initialized.
+ // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
+ // a separate function called after InitializeAllocator().
+ if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
+ LeakyResetEnv(kDyldInsertLibraries, new_env);
+}
+#endif // SANITIZER_GO
+
+char **GetArgv() {
+ return *_NSGetArgv();
+}
+
+#if SANITIZER_IOS
+// The task_vm_info struct is normally provided by the macOS SDK, but we need
+// fields only available in 10.12+. Declare the struct manually to be able to
+// build against older SDKs.
+struct __sanitizer_task_vm_info {
+ mach_vm_size_t virtual_size;
+ integer_t region_count;
+ integer_t page_size;
+ mach_vm_size_t resident_size;
+ mach_vm_size_t resident_size_peak;
+ mach_vm_size_t device;
+ mach_vm_size_t device_peak;
+ mach_vm_size_t internal;
+ mach_vm_size_t internal_peak;
+ mach_vm_size_t external;
+ mach_vm_size_t external_peak;
+ mach_vm_size_t reusable;
+ mach_vm_size_t reusable_peak;
+ mach_vm_size_t purgeable_volatile_pmap;
+ mach_vm_size_t purgeable_volatile_resident;
+ mach_vm_size_t purgeable_volatile_virtual;
+ mach_vm_size_t compressed;
+ mach_vm_size_t compressed_peak;
+ mach_vm_size_t compressed_lifetime;
+ mach_vm_size_t phys_footprint;
+ mach_vm_address_t min_address;
+ mach_vm_address_t max_address;
+};
+#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \
+ (sizeof(__sanitizer_task_vm_info) / sizeof(natural_t)))
+
+static uptr GetTaskInfoMaxAddress() {
+ __sanitizer_task_vm_info vm_info = {} /* zero initialize */;
+ mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT;
+ int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);
+ return err ? 0 : vm_info.max_address;
+}
+
+uptr GetMaxUserVirtualAddress() {
+ static uptr max_vm = GetTaskInfoMaxAddress();
+ if (max_vm != 0)
+ return max_vm - 1;
+
+ // xnu cannot provide vm address limit
+# if SANITIZER_WORDSIZE == 32
+ return 0xffe00000 - 1;
+# else
+ return 0x200000000 - 1;
+# endif
+}
+
+#else // !SANITIZER_IOS
+
+uptr GetMaxUserVirtualAddress() {
+# if SANITIZER_WORDSIZE == 64
+ return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+# else // SANITIZER_WORDSIZE == 32
+ static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize");
+ return (1ULL << 32) - 1; // 0xffffffff;
+# endif
+}
+#endif
+
+uptr GetMaxVirtualAddress() {
+ return GetMaxUserVirtualAddress();
+}
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found,
+ uptr *max_occupied_addr) {
+ typedef vm_region_submap_short_info_data_64_t RegionInfo;
+ enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
+ // Start searching for available memory region past PAGEZERO, which is
+ // 4KB on 32-bit and 4GB on 64-bit.
+ mach_vm_address_t start_address =
+ (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
+
+ mach_vm_address_t address = start_address;
+ mach_vm_address_t free_begin = start_address;
+ kern_return_t kr = KERN_SUCCESS;
+ if (largest_gap_found) *largest_gap_found = 0;
+ if (max_occupied_addr) *max_occupied_addr = 0;
+ while (kr == KERN_SUCCESS) {
+ mach_vm_size_t vmsize = 0;
+ natural_t depth = 0;
+ RegionInfo vminfo;
+ mach_msg_type_number_t count = kRegionInfoSize;
+ kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+ (vm_region_info_t)&vminfo, &count);
+ if (kr == KERN_INVALID_ADDRESS) {
+ // No more regions beyond "address", consider the gap at the end of VM.
+ address = GetMaxVirtualAddress() + 1;
+ vmsize = 0;
+ } else {
+ if (max_occupied_addr) *max_occupied_addr = address + vmsize;
+ }
+ if (free_begin != address) {
+ // We found a free region [free_begin..address-1].
+ uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
+ uptr gap_end = RoundDownTo((uptr)address, alignment);
+ uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
+ if (size < gap_size) {
+ return gap_start;
+ }
+
+ if (largest_gap_found && *largest_gap_found < gap_size) {
+ *largest_gap_found = gap_size;
+ }
+ }
+ // Move to the next region.
+ address += vmsize;
+ free_begin = address;
+ }
+
+ // We looked at all free regions and could not find one large enough.
+ return 0;
+}
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+void SignalContext::DumpAllRegisters(void *context) {
+ Report("Register values:\n");
+
+ ucontext_t *ucontext = (ucontext_t*)context;
+# define DUMPREG64(r) \
+ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREGA64(r) \
+ Printf(" %s = 0x%016llx ", #r, AARCH64_GET_REG(r));
+# define DUMPREG32(r) \
+ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG_(r) Printf(" "); DUMPREG(r);
+# define DUMPREG__(r) Printf(" "); DUMPREG(r);
+# define DUMPREG___(r) Printf(" "); DUMPREG(r);
+
+# if defined(__x86_64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n");
+ DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n");
+ DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n");
+ DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n");
+# elif defined(__i386__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n");
+ DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n");
+# elif defined(__aarch64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n");
+ DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n");
+ DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n");
+ DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n");
+ DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n");
+ DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n");
+ DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n");
+ DUMPREG(x[28]); DUMPREGA64(fp); DUMPREGA64(lr); DUMPREGA64(sp); Printf("\n");
+# elif defined(__arm__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n");
+ DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n");
+ DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n");
+ DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n");
+# else
+# error "Unknown architecture"
+# endif
+
+# undef DUMPREG64
+# undef DUMPREG32
+# undef DUMPREG_
+# undef DUMPREG__
+# undef DUMPREG___
+# undef DUMPREG
+}
+
+static inline bool CompareBaseAddress(const LoadedModule &a,
+ const LoadedModule &b) {
+ return a.base_address() < b.base_address();
+}
+
+void FormatUUID(char *out, uptr size, const u8 *uuid) {
+ internal_snprintf(out, size,
+ "<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-"
+ "%02X%02X%02X%02X%02X%02X>",
+ uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
+ uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+}
+
+void PrintModuleMap() {
+ Printf("Process module map:\n");
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules;
+ modules.reserve(128);
+ memory_mapping.DumpListOfModules(&modules);
+ Sort(modules.data(), modules.size(), CompareBaseAddress);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ char uuid_str[128];
+ FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
+ Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
+ modules[i].max_executable_address(), modules[i].full_name(),
+ ModuleArchToString(modules[i].arch()), uuid_str);
+ }
+ Printf("End of module map.\n");
+}
+
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
+
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ if (!buffer || !length || length > 256)
+ return false;
+ // arc4random never fails.
+ REAL(arc4random_buf)(buffer, length);
+ return true;
+}
+
+u32 GetNumberOfCPUs() {
+ return (u32)sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
diff --git a/lib/tsan/sanitizer_common/sanitizer_mac.h b/lib/tsan/sanitizer_common/sanitizer_mac.h
new file mode 100644
index 0000000000..90ecff4815
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_mac.h
@@ -0,0 +1,84 @@
+//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// provides definitions for OSX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_MAC_H
+#define SANITIZER_MAC_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_posix.h"
+
+namespace __sanitizer {
+
+struct MemoryMappingLayoutData {
+ int current_image;
+ u32 current_magic;
+ u32 current_filetype;
+ ModuleArch current_arch;
+ u8 current_uuid[kModuleUUIDSize];
+ int current_load_cmd_count;
+ const char *current_load_cmd_addr;
+ bool current_instrumented;
+};
+
+template <typename VersionType>
+struct VersionBase {
+ u16 major;
+ u16 minor;
+
+ VersionBase(u16 major, u16 minor) : major(major), minor(minor) {}
+
+ bool operator==(const VersionType &other) const {
+ return major == other.major && minor == other.minor;
+ }
+ bool operator>=(const VersionType &other) const {
+ return major > other.major ||
+ (major == other.major && minor >= other.minor);
+ }
+};
+
+struct MacosVersion : VersionBase<MacosVersion> {
+ MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
+};
+
+struct DarwinKernelVersion : VersionBase<DarwinKernelVersion> {
+ DarwinKernelVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
+};
+
+MacosVersion GetMacosAlignedVersion();
+DarwinKernelVersion GetDarwinKernelVersion();
+
+char **GetEnviron();
+
+void RestrictMemoryToMaxAddress(uptr max_address);
+
+} // namespace __sanitizer
+
+extern "C" {
+static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
+ {};
+static const char *__crashreporter_info__ __attribute__((__used__)) =
+ &__crashreporter_info_buff__[0];
+asm(".desc ___crashreporter_info__, 0x10");
+} // extern "C"
+
+namespace __sanitizer {
+static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+
+INLINE void CRAppendCrashLogMessage(const char *msg) {
+ BlockingMutexLock l(&crashreporter_info_mutex);
+ internal_strlcat(__crashreporter_info_buff__, msg,
+ sizeof(__crashreporter_info_buff__)); }
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+#endif // SANITIZER_MAC_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_malloc_mac.inc b/lib/tsan/sanitizer_common/sanitizer_malloc_mac.inc
new file mode 100644
index 0000000000..647bcdfe10
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_malloc_mac.inc
@@ -0,0 +1,420 @@
+//===-- sanitizer_malloc_mac.inc --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains Mac-specific malloc interceptors and a custom zone
+// implementation, which together replace the system allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if !SANITIZER_MAC
+#error "This file should only be compiled on Darwin."
+#endif
+
+#include <AvailabilityMacros.h>
+#include <CoreFoundation/CFBase.h>
+#include <dlfcn.h>
+#include <malloc/malloc.h>
+#include <sys/mman.h>
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_mac.h"
+
+// Similar code is used in Google Perftools,
+// https://github.com/gperftools/gperftools.
+
+namespace __sanitizer {
+
+extern malloc_zone_t sanitizer_zone;
+
+struct sanitizer_malloc_introspection_t : public malloc_introspection_t {
+ // IMPORTANT: Do not change the order, alignment, or types of these fields to
+ // maintain binary compatibility. You should only add fields to this struct.
+
+ // Used to track changes to the allocator that will affect
+ // zone enumeration.
+ u64 allocator_enumeration_version;
+ uptr allocator_ptr;
+ uptr allocator_size;
+};
+
+u64 GetMallocZoneAllocatorEnumerationVersion() {
+ // This represents the current allocator ABI version.
+ // This field should be incremented every time the Allocator
+ // ABI changes in a way that breaks allocator enumeration.
+ return 0;
+}
+
+} // namespace __sanitizer
+
+INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
+ vm_size_t start_size, unsigned zone_flags) {
+ COMMON_MALLOC_ENTER();
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ COMMON_MALLOC_MEMALIGN(page_size, allocated_size);
+ malloc_zone_t *new_zone = (malloc_zone_t *)p;
+ internal_memcpy(new_zone, &sanitizer_zone, sizeof(sanitizer_zone));
+ new_zone->zone_name = NULL; // The name will be changed anyway.
+ // Prevent the client app from overwriting the zone contents.
+ // Library functions that need to modify the zone will set PROT_WRITE on it.
+ // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
+ mprotect(new_zone, allocated_size, PROT_READ);
+ // We're explicitly *NOT* registering the zone.
+ return new_zone;
+}
+
+INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) {
+ COMMON_MALLOC_ENTER();
+ // We don't need to do anything here. We're not registering new zones, so we
+ // don't to unregister. Just un-mprotect and free() the zone.
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ mprotect(zone, allocated_size, PROT_READ | PROT_WRITE);
+ if (zone->zone_name) {
+ COMMON_MALLOC_FREE((void *)zone->zone_name);
+ }
+ COMMON_MALLOC_FREE(zone);
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
+ COMMON_MALLOC_ENTER();
+ return &sanitizer_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_zone_from_ptr, const void *ptr) {
+ COMMON_MALLOC_ENTER();
+ size_t size = sanitizer_zone.size(&sanitizer_zone, ptr);
+ if (size) { // Claimed by sanitizer zone?
+ return &sanitizer_zone;
+ }
+ return REAL(malloc_zone_from_ptr)(ptr);
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
+ // FIXME: ASan should support purgeable allocations.
+ // https://github.com/google/sanitizers/issues/139
+ COMMON_MALLOC_ENTER();
+ return &sanitizer_zone;
+}
+
+INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ COMMON_MALLOC_ENTER();
+}
+
+INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ COMMON_MALLOC_ENTER();
+ // Must return 0 if the contents were not purged since the last call to
+ // malloc_make_purgeable().
+ return 0;
+}
+
+INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
+ COMMON_MALLOC_ENTER();
+ // Allocate |sizeof(COMMON_MALLOC_ZONE_NAME "-") + internal_strlen(name)|
+ // bytes.
+ size_t buflen =
+ sizeof(COMMON_MALLOC_ZONE_NAME "-") + (name ? internal_strlen(name) : 0);
+ InternalScopedString new_name(buflen);
+ if (name && zone->introspect == sanitizer_zone.introspect) {
+ new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
+ name = new_name.data();
+ }
+
+ // Call the system malloc's implementation for both external and our zones,
+ // since that appropriately changes VM region protections on the zone.
+ REAL(malloc_set_zone_name)(zone, name);
+}
+
+INTERCEPTOR(void *, malloc, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MALLOC(size);
+ return p;
+}
+
+INTERCEPTOR(void, free, void *ptr) {
+ COMMON_MALLOC_ENTER();
+ if (!ptr) return;
+ COMMON_MALLOC_FREE(ptr);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_REALLOC(ptr, size);
+ return p;
+}
+
+INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_CALLOC(nmemb, size);
+ return p;
+}
+
+INTERCEPTOR(void *, valloc, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_VALLOC(size);
+ return p;
+}
+
+INTERCEPTOR(size_t, malloc_good_size, size_t size) {
+ COMMON_MALLOC_ENTER();
+ return sanitizer_zone.introspect->good_size(&sanitizer_zone, size);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
+ COMMON_MALLOC_ENTER();
+ CHECK(memptr);
+ COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size);
+ return res;
+}
+
+namespace {
+
+// TODO(glider): the __sanitizer_mz_* functions should be united with the Linux
+// wrappers, as they are basically copied from there.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+size_t __sanitizer_mz_size(malloc_zone_t* zone, const void* ptr) {
+ COMMON_MALLOC_SIZE(ptr);
+ return size;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MALLOC(size);
+ return p;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
+ if (UNLIKELY(!COMMON_MALLOC_SANITIZER_INITIALIZED)) {
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ const size_t kCallocPoolSize = 1024;
+ static uptr calloc_memory_for_dlsym[kCallocPoolSize];
+ static size_t allocated;
+ size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
+ void *mem = (void*)&calloc_memory_for_dlsym[allocated];
+ allocated += size_in_words;
+ CHECK(allocated < kCallocPoolSize);
+ return mem;
+ }
+ COMMON_MALLOC_CALLOC(nmemb, size);
+ return p;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_valloc(malloc_zone_t *zone, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_VALLOC(size);
+ return p;
+}
+
+// TODO(glider): the allocation callbacks need to be refactored.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
+ if (!ptr) return;
+ COMMON_MALLOC_FREE(ptr);
+}
+
+#define GET_ZONE_FOR_PTR(ptr) \
+ malloc_zone_t *zone_ptr = WRAP(malloc_zone_from_ptr)(ptr); \
+ const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_realloc(malloc_zone_t *zone, void *ptr, size_t new_size) {
+ if (!ptr) {
+ COMMON_MALLOC_MALLOC(new_size);
+ return p;
+ } else {
+ COMMON_MALLOC_SIZE(ptr);
+ if (size) {
+ COMMON_MALLOC_REALLOC(ptr, new_size);
+ return p;
+ } else {
+ // We can't recover from reallocating an unknown address, because
+ // this would require reading at most |new_size| bytes from
+ // potentially unaccessible memory.
+ GET_ZONE_FOR_PTR(ptr);
+ COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name);
+ return nullptr;
+ }
+ }
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_mz_destroy(malloc_zone_t* zone) {
+ // A no-op -- we will not be destroyed!
+ Report("__sanitizer_mz_destroy() called -- ignoring\n");
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MEMALIGN(align, size);
+ return p;
+}
+
+// This public API exists purely for testing purposes.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+malloc_zone_t* __sanitizer_mz_default_zone() {
+ return &sanitizer_zone;
+}
+
+// This function is currently unused, and we build with -Werror.
+#if 0
+void __sanitizer_mz_free_definite_size(
+ malloc_zone_t* zone, void *ptr, size_t size) {
+ // TODO(glider): check that |size| is valid.
+ UNIMPLEMENTED();
+}
+#endif
+
+#ifndef COMMON_MALLOC_HAS_ZONE_ENUMERATOR
+#error "COMMON_MALLOC_HAS_ZONE_ENUMERATOR must be defined"
+#endif
+static_assert((COMMON_MALLOC_HAS_ZONE_ENUMERATOR) == 0 ||
+ (COMMON_MALLOC_HAS_ZONE_ENUMERATOR) == 1,
+ "COMMON_MALLOC_HAS_ZONE_ENUMERATOR must be 0 or 1");
+
+#if COMMON_MALLOC_HAS_ZONE_ENUMERATOR
+// Forward declare and expect the implementation to provided by
+// includer.
+kern_return_t mi_enumerator(task_t task, void *, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder);
+#else
+// Provide stub implementation that fails.
+kern_return_t mi_enumerator(task_t task, void *, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder) {
+ // Not supported.
+ return KERN_FAILURE;
+}
+#endif
+
+#ifndef COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT
+#error "COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT must be defined"
+#endif
+static_assert((COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT) == 0 ||
+ (COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT) == 1,
+ "COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT must be 0 or 1");
+#if COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT
+// Forward declare and expect the implementation to provided by
+// includer.
+void mi_extra_init(
+ sanitizer_malloc_introspection_t *mi);
+#else
+void mi_extra_init(
+ sanitizer_malloc_introspection_t *mi) {
+ // Just zero initialize the fields.
+ mi->allocator_ptr = 0;
+ mi->allocator_size = 0;
+}
+#endif
+
+size_t mi_good_size(malloc_zone_t *zone, size_t size) {
+ // I think it's always safe to return size, but we maybe could do better.
+ return size;
+}
+
+boolean_t mi_check(malloc_zone_t *zone) {
+ UNIMPLEMENTED();
+}
+
+void mi_print(malloc_zone_t *zone, boolean_t verbose) {
+ UNIMPLEMENTED();
+}
+
+void mi_log(malloc_zone_t *zone, void *address) {
+ // I don't think we support anything like this
+}
+
+void mi_force_lock(malloc_zone_t *zone) {
+ COMMON_MALLOC_FORCE_LOCK();
+}
+
+void mi_force_unlock(malloc_zone_t *zone) {
+ COMMON_MALLOC_FORCE_UNLOCK();
+}
+
+void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
+ COMMON_MALLOC_FILL_STATS(zone, stats);
+}
+
+boolean_t mi_zone_locked(malloc_zone_t *zone) {
+ // UNIMPLEMENTED();
+ return false;
+}
+
+} // unnamed namespace
+
+namespace COMMON_MALLOC_NAMESPACE {
+
+void InitMallocZoneFields() {
+ static sanitizer_malloc_introspection_t sanitizer_zone_introspection;
+ // Ok to use internal_memset, these places are not performance-critical.
+ internal_memset(&sanitizer_zone_introspection, 0,
+ sizeof(sanitizer_zone_introspection));
+
+ sanitizer_zone_introspection.enumerator = &mi_enumerator;
+ sanitizer_zone_introspection.good_size = &mi_good_size;
+ sanitizer_zone_introspection.check = &mi_check;
+ sanitizer_zone_introspection.print = &mi_print;
+ sanitizer_zone_introspection.log = &mi_log;
+ sanitizer_zone_introspection.force_lock = &mi_force_lock;
+ sanitizer_zone_introspection.force_unlock = &mi_force_unlock;
+ sanitizer_zone_introspection.statistics = &mi_statistics;
+ sanitizer_zone_introspection.zone_locked = &mi_zone_locked;
+
+ // Set current allocator enumeration version.
+ sanitizer_zone_introspection.allocator_enumeration_version =
+ GetMallocZoneAllocatorEnumerationVersion();
+
+ // Perform any sanitizer specific initialization.
+ mi_extra_init(&sanitizer_zone_introspection);
+
+ internal_memset(&sanitizer_zone, 0, sizeof(malloc_zone_t));
+
+ // Use version 6 for OSX >= 10.6.
+ sanitizer_zone.version = 6;
+ sanitizer_zone.zone_name = COMMON_MALLOC_ZONE_NAME;
+ sanitizer_zone.size = &__sanitizer_mz_size;
+ sanitizer_zone.malloc = &__sanitizer_mz_malloc;
+ sanitizer_zone.calloc = &__sanitizer_mz_calloc;
+ sanitizer_zone.valloc = &__sanitizer_mz_valloc;
+ sanitizer_zone.free = &__sanitizer_mz_free;
+ sanitizer_zone.realloc = &__sanitizer_mz_realloc;
+ sanitizer_zone.destroy = &__sanitizer_mz_destroy;
+ sanitizer_zone.batch_malloc = 0;
+ sanitizer_zone.batch_free = 0;
+ sanitizer_zone.free_definite_size = 0;
+ sanitizer_zone.memalign = &__sanitizer_mz_memalign;
+ sanitizer_zone.introspect = &sanitizer_zone_introspection;
+}
+
+void ReplaceSystemMalloc() {
+ InitMallocZoneFields();
+
+ // Register the zone.
+ malloc_zone_register(&sanitizer_zone);
+}
+
+} // namespace COMMON_MALLOC_NAMESPACE
diff --git a/lib/tsan/sanitizer_common/sanitizer_mutex.h b/lib/tsan/sanitizer_common/sanitizer_mutex.h
new file mode 100644
index 0000000000..40a6591429
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_mutex.h
@@ -0,0 +1,223 @@
+//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_MUTEX_H
+#define SANITIZER_MUTEX_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+class StaticSpinMutex {
+ public:
+ void Init() {
+ atomic_store(&state_, 0, memory_order_relaxed);
+ }
+
+ void Lock() {
+ if (TryLock())
+ return;
+ LockSlow();
+ }
+
+ bool TryLock() {
+ return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
+ }
+
+ void Unlock() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
+
+ void CheckLocked() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
+ }
+
+ private:
+ atomic_uint8_t state_;
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ if (atomic_load(&state_, memory_order_relaxed) == 0
+ && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ return;
+ }
+ }
+};
+
+class SpinMutex : public StaticSpinMutex {
+ public:
+ SpinMutex() {
+ Init();
+ }
+
+ private:
+ SpinMutex(const SpinMutex&);
+ void operator=(const SpinMutex&);
+};
+
+class BlockingMutex {
+ public:
+ explicit constexpr BlockingMutex(LinkerInitialized)
+ : opaque_storage_ {0, }, owner_ {0} {}
+ BlockingMutex();
+ void Lock();
+ void Unlock();
+
+ // This function does not guarantee an explicit check that the calling thread
+ // is the thread which owns the mutex. This behavior, while more strictly
+ // correct, causes problems in cases like StopTheWorld, where a parent thread
+ // owns the mutex but a child checks that it is locked. Rather than
+ // maintaining complex state to work around those situations, the check only
+ // checks that the mutex is owned, and assumes callers to be generally
+ // well-behaved.
+ void CheckLocked();
+
+ private:
+ // Solaris mutex_t has a member that requires 64-bit alignment.
+ ALIGNED(8) uptr opaque_storage_[10];
+ uptr owner_; // for debugging
+};
+
+// Reader-writer spin mutex.
+class RWMutex {
+ public:
+ RWMutex() {
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+ }
+
+ ~RWMutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ void Lock() {
+ u32 cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ LockSlow();
+ }
+
+ void Unlock() {
+ u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ DCHECK_NE(prev & kWriteLock, 0);
+ (void)prev;
+ }
+
+ void ReadLock() {
+ u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ ReadLockSlow();
+ }
+
+ void ReadUnlock() {
+ u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+ (void)prev;
+ }
+
+ void CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ private:
+ atomic_uint32_t state_;
+
+ enum {
+ kUnlocked = 0,
+ kWriteLock = 1,
+ kReadLock = 2
+ };
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 cmp = atomic_load(&state_, memory_order_relaxed);
+ if (cmp == kUnlocked &&
+ atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ }
+ }
+
+ void NOINLINE ReadLockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ }
+ }
+
+ RWMutex(const RWMutex&);
+ void operator = (const RWMutex&);
+};
+
+template<typename MutexType>
+class GenericScopedLock {
+ public:
+ explicit GenericScopedLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->Lock();
+ }
+
+ ~GenericScopedLock() {
+ mu_->Unlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedLock(const GenericScopedLock&);
+ void operator=(const GenericScopedLock&);
+};
+
+template<typename MutexType>
+class GenericScopedReadLock {
+ public:
+ explicit GenericScopedReadLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->ReadLock();
+ }
+
+ ~GenericScopedReadLock() {
+ mu_->ReadUnlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedReadLock(const GenericScopedReadLock&);
+ void operator=(const GenericScopedReadLock&);
+};
+
+typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
+typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+typedef GenericScopedLock<RWMutex> RWMutexLock;
+typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MUTEX_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_netbsd.cpp b/lib/tsan/sanitizer_common/sanitizer_netbsd.cpp
new file mode 100644
index 0000000000..d9aff51d8a
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_netbsd.cpp
@@ -0,0 +1,343 @@
+//===-- sanitizer_netbsd.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between Sanitizer run-time libraries and implements
+// NetBSD-specific functions from sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_getauxval.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include <sys/exec.h>
+#include <sys/mman.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <link.h>
+#include <lwp.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+extern "C" void *__mmap(void *, size_t, int, int, int, int,
+ off_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __sysctl(const int *, unsigned int, void *, size_t *,
+ const void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_close(int) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_open(const char *, int, ...) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" ssize_t _sys_read(int, void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" ssize_t _sys_write(int, const void *,
+ size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __ftruncate(int, int, off_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" ssize_t _sys_readlink(const char *, char *,
+ size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_sched_yield() SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys___nanosleep50(const void *,
+ void *) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_execve(const char *, char *const[],
+ char *const[]) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" off_t __lseek(int, int, off_t, int) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __fork() SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys___sigprocmask14(int, const void *,
+ void *) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys___wait450(int wpid, int *, int,
+ void *) SANITIZER_WEAK_ATTRIBUTE;
+
+namespace __sanitizer {
+
+static void *GetRealLibcAddress(const char *symbol) {
+ void *real = dlsym(RTLD_NEXT, symbol);
+ if (!real)
+ real = dlsym(RTLD_DEFAULT, symbol);
+ if (!real) {
+ Printf("GetRealLibcAddress failed for symbol=%s", symbol);
+ Die();
+ }
+ return real;
+}
+
+#define _REAL(func, ...) real##_##func(__VA_ARGS__)
+#define DEFINE__REAL(ret_type, func, ...) \
+ static ret_type (*real_##func)(__VA_ARGS__) = NULL; \
+ if (!real_##func) { \
+ real_##func = (ret_type(*)(__VA_ARGS__))GetRealLibcAddress(#func); \
+ } \
+ CHECK(real_##func);
+
+// --------------- sanitizer_libc.h
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ u64 offset) {
+ CHECK(&__mmap);
+ return (uptr)__mmap(addr, length, prot, flags, fd, 0, offset);
+}
+
+uptr internal_munmap(void *addr, uptr length) {
+ DEFINE__REAL(int, munmap, void *a, uptr b);
+ return _REAL(munmap, addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ DEFINE__REAL(int, mprotect, void *a, uptr b, int c);
+ return _REAL(mprotect, addr, length, prot);
+}
+
+uptr internal_close(fd_t fd) {
+ CHECK(&_sys_close);
+ return _sys_close(fd);
+}
+
+uptr internal_open(const char *filename, int flags) {
+ CHECK(&_sys_open);
+ return _sys_open(filename, flags);
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ CHECK(&_sys_open);
+ return _sys_open(filename, flags, mode);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ sptr res;
+ CHECK(&_sys_read);
+ HANDLE_EINTR(res, (sptr)_sys_read(fd, buf, (size_t)count));
+ return res;
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ sptr res;
+ CHECK(&_sys_write);
+ HANDLE_EINTR(res, (sptr)_sys_write(fd, buf, count));
+ return res;
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ sptr res;
+ CHECK(&__ftruncate);
+ HANDLE_EINTR(res, __ftruncate(fd, 0, (s64)size));
+ return res;
+}
+
+uptr internal_stat(const char *path, void *buf) {
+ DEFINE__REAL(int, __stat50, const char *a, void *b);
+ return _REAL(__stat50, path, buf);
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+ DEFINE__REAL(int, __lstat50, const char *a, void *b);
+ return _REAL(__lstat50, path, buf);
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+ DEFINE__REAL(int, __fstat50, int a, void *b);
+ return _REAL(__fstat50, fd, buf);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+uptr internal_dup(int oldfd) {
+ DEFINE__REAL(int, dup, int a);
+ return _REAL(dup, oldfd);
+}
+
+uptr internal_dup2(int oldfd, int newfd) {
+ DEFINE__REAL(int, dup2, int a, int b);
+ return _REAL(dup2, oldfd, newfd);
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ CHECK(&_sys_readlink);
+ return (uptr)_sys_readlink(path, buf, bufsize);
+}
+
+uptr internal_unlink(const char *path) {
+ DEFINE__REAL(int, unlink, const char *a);
+ return _REAL(unlink, path);
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ DEFINE__REAL(int, rename, const char *a, const char *b);
+ return _REAL(rename, oldpath, newpath);
+}
+
+uptr internal_sched_yield() {
+ CHECK(&_sys_sched_yield);
+ return _sys_sched_yield();
+}
+
+void internal__exit(int exitcode) {
+ DEFINE__REAL(void, _exit, int a);
+ _REAL(_exit, exitcode);
+ Die(); // Unreachable.
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ struct timespec ts;
+ ts.tv_sec = seconds;
+ ts.tv_nsec = 0;
+ CHECK(&_sys___nanosleep50);
+ int res = _sys___nanosleep50(&ts, &ts);
+ if (res)
+ return ts.tv_sec;
+ return 0;
+}
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ CHECK(&_sys_execve);
+ return _sys_execve(filename, argv, envp);
+}
+
+tid_t GetTid() {
+ DEFINE__REAL(int, _lwp_self);
+ return _REAL(_lwp_self);
+}
+
+int TgKill(pid_t pid, tid_t tid, int sig) {
+ DEFINE__REAL(int, _lwp_kill, int a, int b);
+ (void)pid;
+ return _REAL(_lwp_kill, tid, sig);
+}
+
+u64 NanoTime() {
+ timeval tv;
+ DEFINE__REAL(int, __gettimeofday50, void *a, void *b);
+ internal_memset(&tv, 0, sizeof(tv));
+ _REAL(__gettimeofday50, &tv, 0);
+ return (u64)tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_usec * 1000;
+}
+
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
+ DEFINE__REAL(int, __clock_gettime50, __sanitizer_clockid_t a, void *b);
+ return _REAL(__clock_gettime50, clk_id, tp);
+}
+
+uptr internal_ptrace(int request, int pid, void *addr, int data) {
+ DEFINE__REAL(int, ptrace, int a, int b, void *c, int d);
+ return _REAL(ptrace, request, pid, addr, data);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ CHECK(&_sys___wait450);
+ return _sys___wait450(pid, status, options, 0 /* rusage */);
+}
+
+uptr internal_getpid() {
+ DEFINE__REAL(int, getpid);
+ return _REAL(getpid);
+}
+
+uptr internal_getppid() {
+ DEFINE__REAL(int, getppid);
+ return _REAL(getppid);
+}
+
+int internal_dlinfo(void *handle, int request, void *p) {
+ DEFINE__REAL(int, dlinfo, void *a, int b, void *c);
+ return _REAL(dlinfo, handle, request, p);
+}
+
+uptr internal_getdents(fd_t fd, void *dirp, unsigned int count) {
+ DEFINE__REAL(int, __getdents30, int a, void *b, size_t c);
+ return _REAL(__getdents30, fd, dirp, count);
+}
+
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
+ CHECK(&__lseek);
+ return __lseek(fd, 0, offset, whence);
+}
+
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
+ Printf("internal_prctl not implemented for NetBSD");
+ Die();
+ return 0;
+}
+
+uptr internal_sigaltstack(const void *ss, void *oss) {
+ DEFINE__REAL(int, __sigaltstack14, const void *a, void *b);
+ return _REAL(__sigaltstack14, ss, oss);
+}
+
+int internal_fork() {
+ CHECK(&__fork);
+ return __fork();
+}
+
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen) {
+ CHECK(&__sysctl);
+ return __sysctl(name, namelen, oldp, (size_t *)oldlenp, newp, (size_t)newlen);
+}
+
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ DEFINE__REAL(int, sysctlbyname, const char *a, void *b, size_t *c,
+ const void *d, size_t e);
+ return _REAL(sysctlbyname, sname, oldp, (size_t *)oldlenp, newp,
+ (size_t)newlen);
+}
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ CHECK(&_sys___sigprocmask14);
+ return _sys___sigprocmask14(how, set, oldset);
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) {
+ DEFINE__REAL(int, __sigfillset14, const void *a);
+ (void)_REAL(__sigfillset14, set);
+}
+
+void internal_sigemptyset(__sanitizer_sigset_t *set) {
+ DEFINE__REAL(int, __sigemptyset14, const void *a);
+ (void)_REAL(__sigemptyset14, set);
+}
+
+void internal_sigdelset(__sanitizer_sigset_t *set, int signo) {
+ DEFINE__REAL(int, __sigdelset14, const void *a, int b);
+ (void)_REAL(__sigdelset14, set, signo);
+}
+
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags,
+ void *arg) {
+ DEFINE__REAL(int, clone, int (*a)(void *b), void *c, int d, void *e);
+
+ return _REAL(clone, fn, child_stack, flags, arg);
+}
+
+} // namespace __sanitizer
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_openbsd.cpp b/lib/tsan/sanitizer_common/sanitizer_openbsd.cpp
new file mode 100644
index 0000000000..ed2d8edeb7
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_openbsd.cpp
@@ -0,0 +1,115 @@
+//===-- sanitizer_openbsd.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements Solaris-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_OPENBSD
+
+#include <stdio.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_procmaps.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/shm.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+extern char **environ;
+
+namespace __sanitizer {
+
+uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd,
+ u64 offset) {
+ return (uptr)mmap(addr, length, prot, flags, fd, offset);
+}
+
+uptr internal_munmap(void *addr, uptr length) { return munmap(addr, length); }
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return mprotect(addr, length, prot);
+}
+
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ Printf("internal_sysctlbyname not implemented for OpenBSD");
+ Die();
+ return 0;
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ // On OpenBSD we cannot get the full path
+ struct kinfo_proc kp;
+ uptr kl;
+ const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()};
+ if (internal_sysctl(Mib, ARRAY_SIZE(Mib), &kp, &kl, NULL, 0) != -1)
+ return internal_snprintf(buf,
+ (KI_MAXCOMLEN < buf_len ? KI_MAXCOMLEN : buf_len),
+ "%s", kp.p_comm);
+ return (uptr)0;
+}
+
+static void GetArgsAndEnv(char ***argv, char ***envp) {
+ uptr nargv;
+ uptr nenv;
+ int argvmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ARGV};
+ int envmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ENV};
+ if (internal_sysctl(argvmib, 4, NULL, &nargv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_NARGV failed\n");
+ Die();
+ }
+ if (internal_sysctl(envmib, 4, NULL, &nenv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_NENV failed\n");
+ Die();
+ }
+ if (internal_sysctl(argvmib, 4, &argv, &nargv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_ARGV failed\n");
+ Die();
+ }
+ if (internal_sysctl(envmib, 4, &envp, &nenv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_ENV failed\n");
+ Die();
+ }
+}
+
+char **GetArgv() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return argv;
+}
+
+char **GetEnviron() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return envp;
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_OPENBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_persistent_allocator.cpp b/lib/tsan/sanitizer_common/sanitizer_persistent_allocator.cpp
new file mode 100644
index 0000000000..1ca0375b8a
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_persistent_allocator.cpp
@@ -0,0 +1,18 @@
+//===-- sanitizer_persistent_allocator.cpp ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+PersistentAllocator thePersistentAllocator;
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_persistent_allocator.h b/lib/tsan/sanitizer_common/sanitizer_persistent_allocator.h
new file mode 100644
index 0000000000..de4fb6ebc3
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_persistent_allocator.h
@@ -0,0 +1,71 @@
+//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A fast memory allocator that does not support free() nor realloc().
+// All allocations are forever.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
+#define SANITIZER_PERSISTENT_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class PersistentAllocator {
+ public:
+ void *alloc(uptr size);
+
+ private:
+ void *tryAlloc(uptr size);
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator for Node's.
+ atomic_uintptr_t region_end;
+};
+
+inline void *PersistentAllocator::tryAlloc(uptr size) {
+ // Optimisic lock-free allocation, essentially try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+ uptr end = atomic_load(&region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + size > end) return nullptr;
+ if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+ memory_order_acquire))
+ return (void *)cmp;
+ }
+}
+
+inline void *PersistentAllocator::alloc(uptr size) {
+ // First, try to allocate optimisitically.
+ void *s = tryAlloc(size);
+ if (s) return s;
+ // If failed, lock, retry and alloc new superblock.
+ SpinMutexLock l(&mtx);
+ for (;;) {
+ s = tryAlloc(size);
+ if (s) return s;
+ atomic_store(&region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < size) allocsz = size;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ atomic_store(&region_end, mem + allocsz, memory_order_release);
+ atomic_store(&region_pos, mem, memory_order_release);
+ }
+}
+
+extern PersistentAllocator thePersistentAllocator;
+inline void *PersistentAlloc(uptr sz) {
+ return thePersistentAllocator.alloc(sz);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_placement_new.h b/lib/tsan/sanitizer_common/sanitizer_placement_new.h
new file mode 100644
index 0000000000..1ceb8b9092
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_placement_new.h
@@ -0,0 +1,24 @@
+//===-- sanitizer_placement_new.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//
+// The file provides 'placement new'.
+// Do not include it into header files, only into source files.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PLACEMENT_NEW_H
+#define SANITIZER_PLACEMENT_NEW_H
+
+#include "sanitizer_internal_defs.h"
+
+inline void *operator new(__sanitizer::operator_new_size_type sz, void *p) {
+ return p;
+}
+
+#endif // SANITIZER_PLACEMENT_NEW_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform.h b/lib/tsan/sanitizer_common/sanitizer_platform.h
new file mode 100644
index 0000000000..f0b1e04d1d
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform.h
@@ -0,0 +1,364 @@
+//===-- sanitizer_platform.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common platform macros.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_H
+#define SANITIZER_PLATFORM_H
+
+#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
+ !defined(__OpenBSD__) && !defined(__APPLE__) && !defined(_WIN32) && \
+ !defined(__Fuchsia__) && !defined(__rtems__) && \
+ !(defined(__sun__) && defined(__svr4__))
+# error "This operating system is not supported"
+#endif
+
+#if defined(__linux__)
+# define SANITIZER_LINUX 1
+#else
+# define SANITIZER_LINUX 0
+#endif
+
+#if defined(__FreeBSD__)
+# define SANITIZER_FREEBSD 1
+#else
+# define SANITIZER_FREEBSD 0
+#endif
+
+#if defined(__NetBSD__)
+# define SANITIZER_NETBSD 1
+#else
+# define SANITIZER_NETBSD 0
+#endif
+
+#if defined(__OpenBSD__)
+# define SANITIZER_OPENBSD 1
+#else
+# define SANITIZER_OPENBSD 0
+#endif
+
+#if defined(__sun__) && defined(__svr4__)
+# define SANITIZER_SOLARIS 1
+#else
+# define SANITIZER_SOLARIS 0
+#endif
+
+#if defined(__APPLE__)
+# define SANITIZER_MAC 1
+# include <TargetConditionals.h>
+# if TARGET_OS_IPHONE
+# define SANITIZER_IOS 1
+# else
+# define SANITIZER_IOS 0
+# endif
+# if TARGET_OS_SIMULATOR
+# define SANITIZER_IOSSIM 1
+# else
+# define SANITIZER_IOSSIM 0
+# endif
+#else
+# define SANITIZER_MAC 0
+# define SANITIZER_IOS 0
+# define SANITIZER_IOSSIM 0
+#endif
+
+#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
+# define SANITIZER_WATCHOS 1
+#else
+# define SANITIZER_WATCHOS 0
+#endif
+
+#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
+# define SANITIZER_TVOS 1
+#else
+# define SANITIZER_TVOS 0
+#endif
+
+#if defined(_WIN32)
+# define SANITIZER_WINDOWS 1
+#else
+# define SANITIZER_WINDOWS 0
+#endif
+
+#if defined(_WIN64)
+# define SANITIZER_WINDOWS64 1
+#else
+# define SANITIZER_WINDOWS64 0
+#endif
+
+#if defined(__ANDROID__)
+# define SANITIZER_ANDROID 1
+#else
+# define SANITIZER_ANDROID 0
+#endif
+
+#if defined(__Fuchsia__)
+# define SANITIZER_FUCHSIA 1
+#else
+# define SANITIZER_FUCHSIA 0
+#endif
+
+#if defined(__rtems__)
+# define SANITIZER_RTEMS 1
+#else
+# define SANITIZER_RTEMS 0
+#endif
+
+#define SANITIZER_POSIX \
+ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
+ SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_SOLARIS)
+
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
+
+#if SANITIZER_WORDSIZE == 64
+# define FIRST_32_SECOND_64(a, b) (b)
+#else
+# define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#if defined(__x86_64__) && !defined(_LP64)
+# define SANITIZER_X32 1
+#else
+# define SANITIZER_X32 0
+#endif
+
+#if defined(__i386__) || defined(_M_IX86)
+# define SANITIZER_I386 1
+#else
+# define SANITIZER_I386 0
+#endif
+
+#if defined(__mips__)
+# define SANITIZER_MIPS 1
+# if defined(__mips64)
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 1
+# else
+# define SANITIZER_MIPS32 1
+# define SANITIZER_MIPS64 0
+# endif
+#else
+# define SANITIZER_MIPS 0
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 0
+#endif
+
+#if defined(__s390__)
+# define SANITIZER_S390 1
+# if defined(__s390x__)
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 1
+# else
+# define SANITIZER_S390_31 1
+# define SANITIZER_S390_64 0
+# endif
+#else
+# define SANITIZER_S390 0
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 0
+#endif
+
+#if defined(__powerpc__)
+# define SANITIZER_PPC 1
+# if defined(__powerpc64__)
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 1
+// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
+// big-endian, and uses v1 ABI (known for its function descriptors),
+// while the new powerpc64le target is little-endian and uses v2.
+// In theory, you could convince gcc to compile for their evil twins
+// (eg. big-endian v2), but you won't find such combinations in the wild
+// (it'd require bootstrapping a whole system, which would be quite painful
+// - there's no target triple for that). LLVM doesn't support them either.
+# if _CALL_ELF == 2
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 1
+# else
+# define SANITIZER_PPC64V1 1
+# define SANITIZER_PPC64V2 0
+# endif
+# else
+# define SANITIZER_PPC32 1
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
+# endif
+#else
+# define SANITIZER_PPC 0
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
+#endif
+
+#if defined(__arm__)
+# define SANITIZER_ARM 1
+#else
+# define SANITIZER_ARM 0
+#endif
+
+#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
+# define SANITIZER_SOLARIS32 1
+#else
+# define SANITIZER_SOLARIS32 0
+#endif
+
+#if defined(__myriad2__)
+# define SANITIZER_MYRIAD2 1
+#else
+# define SANITIZER_MYRIAD2 0
+#endif
+
+// By default we allow to use SizeClassAllocator64 on 64-bit platform.
+// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
+// does not work well and we need to fallback to SizeClassAllocator32.
+// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
+// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
+#ifndef SANITIZER_CAN_USE_ALLOCATOR64
+# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
+# define SANITIZER_CAN_USE_ALLOCATOR64 1
+# elif defined(__mips64) || defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
+#endif
+
+// The range of addresses which can be returned my mmap.
+// FIXME: this value should be different on different platforms. Larger values
+// will still work but will consume more memory for TwoLevelByteMap.
+#if defined(__mips__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+#elif defined(__aarch64__)
+# if SANITIZER_MAC
+// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
+# endif
+#elif defined(__sparc__)
+#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
+#else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// Whether the addresses are sign-extended from the VMA range to the word.
+// The SPARC64 Linux port implements this to split the VMA space into two
+// non-contiguous halves with a huge hole in the middle.
+#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
+#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
+#else
+#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
+#endif
+
+// The AArch64 and RISC-V linux ports use the canonical syscall set as
+// mandated by the upstream linux community for all new ports. Other ports
+// may still use legacy syscalls.
+#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if (defined(__aarch64__) || defined(__riscv)) && SANITIZER_LINUX
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+# else
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+# endif
+#endif
+
+// udi16 syscalls can only be used when the following conditions are
+// met:
+// * target is one of arm32, x86-32, sparc32, sh or m68k
+// * libc version is libc5, glibc-2.0, glibc-2.1 or glibc-2.2 to 2.15
+// built against > linux-2.2 kernel headers
+// Since we don't want to include libc headers here, we check the
+// target only.
+#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
+#define SANITIZER_USES_UID16_SYSCALLS 1
+#else
+#define SANITIZER_USES_UID16_SYSCALLS 0
+#endif
+
+#if defined(__mips__)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
+#else
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+#endif
+
+/// \macro MSC_PREREQ
+/// \brief Is the compiler MSVC of at least the specified version?
+/// The common \param version values to check for are:
+/// * 1800: Microsoft Visual Studio 2013 / 12.0
+/// * 1900: Microsoft Visual Studio 2015 / 14.0
+#ifdef _MSC_VER
+# define MSC_PREREQ(version) (_MSC_VER >= (version))
+#else
+# define MSC_PREREQ(version) 0
+#endif
+
+#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)
+# define SANITIZER_NON_UNIQUE_TYPEINFO 0
+#else
+# define SANITIZER_NON_UNIQUE_TYPEINFO 1
+#endif
+
+// On linux, some architectures had an ABI transition from 64-bit long double
+// (ie. same as double) to 128-bit long double. On those, glibc symbols
+// involving long doubles come in two versions, and we need to pass the
+// correct one to dlvsym when intercepting them.
+#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
+#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
+#endif
+
+#if SANITIZER_GO == 0
+# define SANITIZER_GO 0
+#endif
+
+// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
+// pthread_exit() performs unwinding that leads to dlopen'ing libgcc_s.so.
+// dlopen mallocs "libgcc_s.so" string which confuses LSan, it fails to realize
+// that this allocation happens in dynamic linker and should be ignored.
+#if SANITIZER_PPC || defined(__thumb__)
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
+#else
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+# define SANITIZER_MADVISE_DONTNEED MADV_FREE
+#else
+# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
+#endif
+
+// Older gcc have issues aligning to a constexpr, and require an integer.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
+#if defined(__powerpc__) || defined(__powerpc64__)
+# define SANITIZER_CACHE_LINE_SIZE 128
+#else
+# define SANITIZER_CACHE_LINE_SIZE 64
+#endif
+
+// Enable offline markup symbolizer for Fuchsia and RTEMS.
+#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
+#define SANITIZER_SYMBOLIZER_MARKUP 1
+#else
+#define SANITIZER_SYMBOLIZER_MARKUP 0
+#endif
+
+// Enable ability to support sanitizer initialization that is
+// compatible with the sanitizer library being loaded via
+// `dlopen()`.
+#if SANITIZER_MAC
+#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
+#else
+#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
+#endif
+
+#endif // SANITIZER_PLATFORM_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_interceptors.h b/lib/tsan/sanitizer_common/sanitizer_platform_interceptors.h
new file mode 100644
index 0000000000..e28bb937ae
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_interceptors.h
@@ -0,0 +1,609 @@
+//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines macro telling whether sanitizer tools can/should intercept
+// given library functions on a given platform.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
+#define SANITIZER_PLATFORM_INTERCEPTORS_H
+
+#include "sanitizer_glibc_version.h"
+#include "sanitizer_internal_defs.h"
+
+#if SANITIZER_POSIX
+# define SI_POSIX 1
+#else
+# define SI_POSIX 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define SI_WINDOWS 0
+#else
+# define SI_WINDOWS 1
+#endif
+
+#if SI_WINDOWS && SI_POSIX
+# error "Windows is not POSIX!"
+#endif
+
+#if SI_POSIX
+# include "sanitizer_platform_limits_freebsd.h"
+# include "sanitizer_platform_limits_netbsd.h"
+# include "sanitizer_platform_limits_openbsd.h"
+# include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_platform_limits_solaris.h"
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define SI_LINUX_NOT_ANDROID 1
+#else
+# define SI_LINUX_NOT_ANDROID 0
+#endif
+
+#if SANITIZER_ANDROID
+# define SI_ANDROID 1
+#else
+# define SI_ANDROID 0
+#endif
+
+#if SANITIZER_FREEBSD
+# define SI_FREEBSD 1
+#else
+# define SI_FREEBSD 0
+#endif
+
+#if SANITIZER_NETBSD
+# define SI_NETBSD 1
+#else
+# define SI_NETBSD 0
+#endif
+
+#if SANITIZER_OPENBSD
+#define SI_OPENBSD 1
+#else
+#define SI_OPENBSD 0
+#endif
+
+#if SANITIZER_LINUX
+# define SI_LINUX 1
+#else
+# define SI_LINUX 0
+#endif
+
+#if SANITIZER_MAC
+# define SI_MAC 1
+# define SI_NOT_MAC 0
+#else
+# define SI_MAC 0
+# define SI_NOT_MAC 1
+#endif
+
+#if SANITIZER_IOS
+# define SI_IOS 1
+#else
+# define SI_IOS 0
+#endif
+
+#if SANITIZER_IOSSIM
+# define SI_IOSSIM 1
+#else
+# define SI_IOSSIM 0
+#endif
+
+#if SANITIZER_WATCHOS
+# define SI_WATCHOS 1
+#else
+# define SI_WATCHOS 0
+#endif
+
+#if SANITIZER_TVOS
+# define SI_TVOS 1
+#else
+# define SI_TVOS 0
+#endif
+
+#if SANITIZER_FUCHSIA
+# define SI_NOT_FUCHSIA 0
+#else
+# define SI_NOT_FUCHSIA 1
+#endif
+
+#if SANITIZER_RTEMS
+# define SI_NOT_RTEMS 0
+#else
+# define SI_NOT_RTEMS 1
+#endif
+
+#if SANITIZER_SOLARIS
+# define SI_SOLARIS 1
+#else
+# define SI_SOLARIS 0
+#endif
+
+#if SANITIZER_SOLARIS32
+# define SI_SOLARIS32 1
+#else
+# define SI_SOLARIS32 0
+#endif
+
+#if SANITIZER_POSIX && !SANITIZER_MAC
+# define SI_POSIX_NOT_MAC 1
+#else
+# define SI_POSIX_NOT_MAC 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_FREEBSD
+# define SI_LINUX_NOT_FREEBSD 1
+# else
+# define SI_LINUX_NOT_FREEBSD 0
+#endif
+
+#define SANITIZER_INTERCEPT_STRLEN SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRNLEN (SI_NOT_MAC && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_STRCMP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRSTR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCASESTR SI_POSIX
+#define SANITIZER_INTERCEPT_STRTOK SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCHRNUL SI_POSIX_NOT_MAC
+#define SANITIZER_INTERCEPT_STRRCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRSPN SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRPBRK SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_STRCASECMP SI_POSIX
+#define SANITIZER_INTERCEPT_MEMSET 1
+#define SANITIZER_INTERCEPT_MEMMOVE 1
+#define SANITIZER_INTERCEPT_MEMCPY 1
+#define SANITIZER_INTERCEPT_MEMCMP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_BCMP \
+ SANITIZER_INTERCEPT_MEMCMP && \
+ ((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX
+#define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
+# define SI_MAC_DEPLOYMENT_BELOW_10_7 1
+#else
+# define SI_MAC_DEPLOYMENT_BELOW_10_7 0
+#endif
+// memmem on Darwin doesn't exist on 10.6
+// FIXME: enable memmem on Windows.
+#define SANITIZER_INTERCEPT_MEMMEM (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_7)
+#define SANITIZER_INTERCEPT_MEMCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_MEMRCHR \
+ (SI_FREEBSD || SI_LINUX || SI_NETBSD || SI_OPENBSD)
+
+#define SANITIZER_INTERCEPT_READ SI_POSIX
+#define SANITIZER_INTERCEPT_PREAD SI_POSIX
+#define SANITIZER_INTERCEPT_WRITE SI_POSIX
+#define SANITIZER_INTERCEPT_PWRITE SI_POSIX
+
+#define SANITIZER_INTERCEPT_FREAD SI_POSIX
+#define SANITIZER_INTERCEPT_FWRITE SI_POSIX
+#define SANITIZER_INTERCEPT_FGETS SI_POSIX
+#define SANITIZER_INTERCEPT_FPUTS SI_POSIX
+#define SANITIZER_INTERCEPT_PUTS SI_POSIX
+
+#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+
+#define SANITIZER_INTERCEPT_READV SI_POSIX
+#define SANITIZER_INTERCEPT_WRITEV SI_POSIX
+
+#define SANITIZER_INTERCEPT_PREADV \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
+
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_POSIX
+#define SANITIZER_INTERCEPT_STRPTIME SI_POSIX
+
+#define SANITIZER_INTERCEPT_SCANF SI_POSIX
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
+
+#ifndef SANITIZER_INTERCEPT_PRINTF
+# define SANITIZER_INTERCEPT_PRINTF SI_POSIX
+# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
+# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
+#endif
+
+#define SANITIZER_INTERCEPT___PRINTF_CHK \
+ (SANITIZER_INTERCEPT_PRINTF && SI_LINUX_NOT_ANDROID)
+
+#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX
+
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETPWENT \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETGRENT_R \
+ (SI_FREEBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_GETPWENT_R \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETPWENT_R \
+ (SI_FREEBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SETPWENT \
+ (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
+#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
+#define SANITIZER_INTERCEPT_TIME SI_POSIX
+#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_GLOB64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WAIT SI_POSIX
+#define SANITIZER_INTERCEPT_INET SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_GETADDRINFO SI_POSIX
+#define SANITIZER_INTERCEPT_GETNAMEINFO SI_POSIX
+#define SANITIZER_INTERCEPT_GETSOCKNAME SI_POSIX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_POSIX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2 SI_POSIX && !SI_SOLARIS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R \
+ (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R \
+ (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R \
+ (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETHOSTENT_R \
+ (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
+#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
+#define SANITIZER_INTERCEPT_ACCEPT4 \
+ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
+#define SANITIZER_INTERCEPT_MODF SI_POSIX
+#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
+#define SANITIZER_INTERCEPT_SENDMSG SI_POSIX
+#define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX
+#define SANITIZER_INTERCEPT_SENDMMSG SI_LINUX
+#define SANITIZER_INTERCEPT_SYSMSG SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX
+#define SANITIZER_INTERCEPT_IOCTL SI_POSIX
+#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
+#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
+#define SANITIZER_INTERCEPT_READDIR SI_POSIX
+#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#if SI_LINUX_NOT_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
+#define SANITIZER_INTERCEPT_PTRACE 1
+#else
+#define SANITIZER_INTERCEPT_PTRACE 0
+#endif
+#define SANITIZER_INTERCEPT_SETLOCALE SI_POSIX
+#define SANITIZER_INTERCEPT_GETCWD SI_POSIX
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRTOIMAX SI_POSIX
+#define SANITIZER_INTERCEPT_MBSTOWCS SI_POSIX
+#define SANITIZER_INTERCEPT_MBSNRTOWCS \
+ (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCSTOMBS SI_POSIX
+#define SANITIZER_INTERCEPT_STRXFRM SI_POSIX
+#define SANITIZER_INTERCEPT___STRXFRM_L SI_LINUX
+#define SANITIZER_INTERCEPT_WCSXFRM SI_POSIX
+#define SANITIZER_INTERCEPT___WCSXFRM_L SI_LINUX
+#define SANITIZER_INTERCEPT_WCSNRTOMBS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCRTOMB \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCTOMB \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_REALPATH SI_POSIX
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME \
+ (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CONFSTR \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_STRERROR SI_POSIX
+#define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
+#define SANITIZER_INTERCEPT_POLL SI_POSIX
+#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_WORDEXP \
+ (SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
+#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_SIGSETOPS \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX
+#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX
+#define SANITIZER_INTERCEPT_BACKTRACE \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
+#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS \
+ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_STATFS64 \
+ (((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_STATVFS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_ETHER_HOST \
+ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_SHMCTL \
+ (((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
+ SI_NETBSD || SI_OPENBSD || SI_SOLARIS) // NOLINT
+#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
+ (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST \
+ (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \
+ (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED \
+ (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \
+ (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \
+ (SI_LINUX_NOT_ANDROID && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
+#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
+#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX
+#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX
+#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX
+#define SANITIZER_INTERCEPT_SINCOS SI_LINUX || SI_SOLARIS
+#define SANITIZER_INTERCEPT_REMQUO SI_POSIX
+#define SANITIZER_INTERCEPT_REMQUOL (SI_POSIX && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_LGAMMA SI_POSIX
+#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_RAND_R \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_ICONV \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_TIMES SI_POSIX
+
+// FIXME: getline seems to be available on OSX 10.7
+#define SANITIZER_INTERCEPT_GETLINE \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT__EXIT \
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
+#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
+#define SANITIZER_INTERCEPT_GETIFADDRS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
+#if SI_LINUX && defined(__arm__)
+#define SANITIZER_INTERCEPT_AEABI_MEM 1
+#else
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+#endif
+#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_BZERO SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FTIME \
+ (!SI_FREEBSD && !SI_NETBSD && !SI_OPENBSD && SI_POSIX)
+#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_TSEARCH \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FOPEN SI_POSIX
+#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \
+ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FFLUSH SI_POSIX
+#define SANITIZER_INTERCEPT_FCLOSE SI_POSIX
+
+#ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
+ SI_SOLARIS)
+#endif
+
+#define SANITIZER_INTERCEPT_GETPASS \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_MLOCKX SI_POSIX
+#define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SEM \
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
+#define SANITIZER_INTERCEPT_MINCORE \
+ (SI_LINUX || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
+#define SANITIZER_INTERCEPT_CTERMID \
+ (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPTOR_HOOKS \
+ (SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD)
+#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
+#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
+
+#define SANITIZER_INTERCEPT_STAT \
+ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX)
+#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
+#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_UTMP \
+ (SI_POSIX && !SI_MAC && !SI_FREEBSD && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_UTMPX \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
+
+#define SANITIZER_INTERCEPT_GETLOADAVG \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD)
+
+#define SANITIZER_INTERCEPT_MMAP SI_POSIX
+#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
+ SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MEMALIGN \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_PVALLOC \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
+ SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_CFREE \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
+ SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE \
+ (!SI_MAC && !SI_OPENBSD && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
+#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
+#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID
+
+#define SANITIZER_INTERCEPT_ACCT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_USER_FROM_UID SI_NETBSD
+#define SANITIZER_INTERCEPT_UID_FROM_USER SI_NETBSD
+#define SANITIZER_INTERCEPT_GROUP_FROM_GID SI_NETBSD
+#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD
+#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_GETGROUPLIST (SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_STRLCPY \
+ (SI_NETBSD || SI_FREEBSD || SI_OPENBSD || SI_MAC || SI_ANDROID)
+
+#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_READLINK SI_POSIX
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000
+# define SI_MAC_DEPLOYMENT_BELOW_10_10 1
+#else
+# define SI_MAC_DEPLOYMENT_BELOW_10_10 0
+#endif
+#define SANITIZER_INTERCEPT_READLINKAT \
+ (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_10)
+
+#define SANITIZER_INTERCEPT_DEVNAME (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
+#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_PROTOENT_R (SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
+#define SANITIZER_INTERCEPT_SETVBUF (SI_NETBSD || SI_FREEBSD || \
+ SI_LINUX || SI_MAC)
+#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_MI_VECTOR_HASH SI_NETBSD
+#define SANITIZER_INTERCEPT_GETVFSSTAT SI_NETBSD
+#define SANITIZER_INTERCEPT_REGEX (SI_NETBSD || SI_FREEBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_REGEXSUB SI_NETBSD
+#define SANITIZER_INTERCEPT_FTS (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_SYSCTL (SI_NETBSD || SI_FREEBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_ASYSCTL SI_NETBSD
+#define SANITIZER_INTERCEPT_SYSCTLGETMIBINFO SI_NETBSD
+#define SANITIZER_INTERCEPT_NL_LANGINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_MODCTL SI_NETBSD
+#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
+#define SANITIZER_INTERCEPT_STRTONUM (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FPARSELN SI_NETBSD
+#define SANITIZER_INTERCEPT_STATVFS1 SI_NETBSD
+#define SANITIZER_INTERCEPT_STRTOI SI_NETBSD
+#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
+#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
+#define SANITIZER_INTERCEPT_MD4 SI_NETBSD
+#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
+#define SANITIZER_INTERCEPT_MD5 SI_NETBSD
+#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_MD2 SI_NETBSD
+#define SANITIZER_INTERCEPT_SHA2 SI_NETBSD
+#define SANITIZER_INTERCEPT_CDB SI_NETBSD
+#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_POPEN SI_POSIX
+#define SANITIZER_INTERCEPT_POPENVE SI_NETBSD
+#define SANITIZER_INTERCEPT_PCLOSE SI_POSIX
+#define SANITIZER_INTERCEPT_FUNOPEN (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FUNOPEN2 SI_NETBSD
+#define SANITIZER_INTERCEPT_GETFSENT (SI_FREEBSD || SI_NETBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_ARC4RANDOM (SI_FREEBSD || SI_NETBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_FDEVNAME SI_FREEBSD
+#define SANITIZER_INTERCEPT_GETUSERSHELL (SI_POSIX && !SI_ANDROID)
+#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
+#define SANITIZER_INTERCEPT_CRYPT (SI_POSIX && !SI_ANDROID)
+#define SANITIZER_INTERCEPT_CRYPT_R (SI_LINUX && !SI_ANDROID)
+
+#define SANITIZER_INTERCEPT_GETRANDOM \
+ ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
+#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD
+#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
+#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
+#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
+#define SANITIZER_INTERCEPT_QSORT \
+ (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
+#define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID)
+// sigaltstack on i386 macOS cannot be intercepted due to setjmp()
+// calling it and assuming that it does not clobber registers.
+#define SANITIZER_INTERCEPT_SIGALTSTACK \
+ (SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
+#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
+#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
+
+#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
new file mode 100644
index 0000000000..dcc6c71c07
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -0,0 +1,531 @@
+//===-- sanitizer_platform_limits_freebsd.cpp -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific FreeBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD
+
+#include <sys/capsicum.h>
+#include <sys/consio.h>
+#include <sys/filio.h>
+#include <sys/ipc.h>
+#include <sys/kbio.h>
+#include <sys/link_elf.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/mqueue.h>
+#include <sys/msg.h>
+#include <sys/mtio.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/soundcard.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/times.h>
+#include <sys/timespec.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+#include <sys/utsname.h>
+//
+#include <arpa/inet.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/ppp_defs.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/ip_mroute.h>
+//
+#include <dirent.h>
+#include <dlfcn.h>
+#include <fstab.h>
+#include <fts.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <regex.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stringlist.h>
+#include <term.h>
+#include <termios.h>
+#include <time.h>
+#include <utime.h>
+#include <utmpx.h>
+#include <vis.h>
+#include <wchar.h>
+#include <wordexp.h>
+
+#define _KERNEL // to declare 'shminfo' structure
+#include <sys/shm.h>
+#undef _KERNEL
+
+#undef INLINE // to avoid clashes with sanitizers' definitions
+
+#undef IOC_DIRMASK
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_platform_limits_freebsd.h"
+
+namespace __sanitizer {
+void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
+ void *p = nullptr;
+ return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
+}
+
+unsigned struct_cap_rights_sz = sizeof(cap_rights_t);
+unsigned struct_utsname_sz = sizeof(struct utsname);
+unsigned struct_stat_sz = sizeof(struct stat);
+unsigned struct_rusage_sz = sizeof(struct rusage);
+unsigned struct_tm_sz = sizeof(struct tm);
+unsigned struct_passwd_sz = sizeof(struct passwd);
+unsigned struct_group_sz = sizeof(struct group);
+unsigned siginfo_t_sz = sizeof(siginfo_t);
+unsigned struct_sigaction_sz = sizeof(struct sigaction);
+unsigned struct_stack_t_sz = sizeof(stack_t);
+unsigned struct_itimerval_sz = sizeof(struct itimerval);
+unsigned pthread_t_sz = sizeof(pthread_t);
+unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+unsigned pid_t_sz = sizeof(pid_t);
+unsigned timeval_sz = sizeof(timeval);
+unsigned uid_t_sz = sizeof(uid_t);
+unsigned gid_t_sz = sizeof(gid_t);
+unsigned fpos_t_sz = sizeof(fpos_t);
+unsigned mbstate_t_sz = sizeof(mbstate_t);
+unsigned sigset_t_sz = sizeof(sigset_t);
+unsigned struct_timezone_sz = sizeof(struct timezone);
+unsigned struct_tms_sz = sizeof(struct tms);
+unsigned struct_sigevent_sz = sizeof(struct sigevent);
+unsigned struct_sched_param_sz = sizeof(struct sched_param);
+unsigned struct_statfs_sz = sizeof(struct statfs);
+unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned struct_rlimit_sz = sizeof(struct rlimit);
+unsigned struct_timespec_sz = sizeof(struct timespec);
+unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+unsigned struct_timeb_sz = sizeof(struct timeb);
+unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
+unsigned struct_statvfs_sz = sizeof(struct statvfs);
+unsigned struct_shminfo_sz = sizeof(struct shminfo);
+unsigned struct_shm_info_sz = sizeof(struct shm_info);
+unsigned struct_regmatch_sz = sizeof(regmatch_t);
+unsigned struct_regex_sz = sizeof(regex_t);
+unsigned struct_fstab_sz = sizeof(struct fstab);
+unsigned struct_FTS_sz = sizeof(FTS);
+unsigned struct_FTSENT_sz = sizeof(FTSENT);
+unsigned struct_StringList_sz = sizeof(StringList);
+
+const uptr sig_ign = (uptr)SIG_IGN;
+const uptr sig_dfl = (uptr)SIG_DFL;
+const uptr sig_err = (uptr)SIG_ERR;
+const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+int shmctl_ipc_stat = (int)IPC_STAT;
+int shmctl_ipc_info = (int)IPC_INFO;
+int shmctl_shm_info = (int)SHM_INFO;
+int shmctl_shm_stat = (int)SHM_STAT;
+unsigned struct_utmpx_sz = sizeof(struct utmpx);
+
+int map_fixed = MAP_FIXED;
+
+int af_inet = (int)AF_INET;
+int af_inet6 = (int)AF_INET6;
+
+uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+}
+
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+int glob_nomatch = GLOB_NOMATCH;
+int glob_altdirfunc = GLOB_ALTDIRFUNC;
+
+unsigned path_max = PATH_MAX;
+
+// ioctl arguments
+unsigned struct_ifreq_sz = sizeof(struct ifreq);
+unsigned struct_termios_sz = sizeof(struct termios);
+unsigned struct_winsize_sz = sizeof(struct winsize);
+#if SOUND_VERSION >= 0x040000
+unsigned struct_copr_buffer_sz = 0;
+unsigned struct_copr_debug_buf_sz = 0;
+unsigned struct_copr_msg_sz = 0;
+#else
+unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
+unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
+unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
+#endif
+unsigned struct_midi_info_sz = sizeof(struct midi_info);
+unsigned struct_mtget_sz = sizeof(struct mtget);
+unsigned struct_mtop_sz = sizeof(struct mtop);
+unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);
+unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);
+unsigned struct_synth_info_sz = sizeof(struct synth_info);
+unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
+unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+const unsigned long __sanitizer_bufsiz = BUFSIZ;
+
+const unsigned IOCTL_NOT_PRESENT = 0;
+
+unsigned IOCTL_FIOASYNC = FIOASYNC;
+unsigned IOCTL_FIOCLEX = FIOCLEX;
+unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+unsigned IOCTL_FIONBIO = FIONBIO;
+unsigned IOCTL_FIONCLEX = FIONCLEX;
+unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+unsigned IOCTL_TIOCCONS = TIOCCONS;
+unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+unsigned IOCTL_TIOCGETD = TIOCGETD;
+unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+unsigned IOCTL_TIOCMGET = TIOCMGET;
+unsigned IOCTL_TIOCMSET = TIOCMSET;
+unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+unsigned IOCTL_TIOCPKT = TIOCPKT;
+unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+unsigned IOCTL_TIOCSETD = TIOCSETD;
+unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+unsigned IOCTL_TIOCSTI = TIOCSTI;
+unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
+unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
+unsigned IOCTL_MTIOCGET = MTIOCGET;
+unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
+unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
+unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
+unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;
+unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;
+unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;
+unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;
+unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;
+unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;
+unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;
+unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;
+unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE;
+unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR;
+unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO;
+unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME;
+unsigned IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE;
+unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT;
+unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT;
+unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS;
+unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS;
+unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND;
+unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC;
+unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE;
+unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET;
+unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES;
+unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC;
+unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI;
+unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD;
+unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO;
+unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL;
+unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE;
+unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME;
+unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT;
+unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE;
+unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START;
+unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP;
+unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO;
+unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE;
+unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM;
+unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS;
+unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS;
+unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD;
+unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK;
+unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE;
+unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN;
+unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX;
+unsigned IOCTL_SOUND_MIXER_READ_LINE = SOUND_MIXER_READ_LINE;
+unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1;
+unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2;
+unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3;
+unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD;
+unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC;
+unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE;
+unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN;
+unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM;
+unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV;
+unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK;
+unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC;
+unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER;
+unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS;
+unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH;
+unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE;
+unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME;
+unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM;
+unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS;
+unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD;
+unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE;
+unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN;
+unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX;
+unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE;
+unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1;
+unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2;
+unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3;
+unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD;
+unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC;
+unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE;
+unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN;
+unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM;
+unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV;
+unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC;
+unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER;
+unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;
+unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;
+unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;
+unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
+unsigned IOCTL_VT_GETMODE = VT_GETMODE;
+unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
+unsigned IOCTL_VT_RELDISP = VT_RELDISP;
+unsigned IOCTL_VT_SETMODE = VT_SETMODE;
+unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
+unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
+unsigned IOCTL_KDDISABIO = KDDISABIO;
+unsigned IOCTL_KDENABIO = KDENABIO;
+unsigned IOCTL_KDGETLED = KDGETLED;
+unsigned IOCTL_KDGETMODE = KDGETMODE;
+unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+unsigned IOCTL_KDMKTONE = KDMKTONE;
+unsigned IOCTL_KDSETLED = KDSETLED;
+unsigned IOCTL_KDSETMODE = KDSETMODE;
+unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+unsigned IOCTL_KIOCSOUND = KIOCSOUND;
+unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
+unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+
+const int si_SEGV_MAPERR = SEGV_MAPERR;
+const int si_SEGV_ACCERR = SEGV_ACCERR;
+const int unvis_valid = UNVIS_VALID;
+const int unvis_validpush = UNVIS_VALIDPUSH;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+CHECK_TYPE_SIZE(ether_addr);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+#undef ifa_dstaddr
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#if HAVE_RPC_XDR_H
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+#endif
+
+CHECK_TYPE_SIZE(sem_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_cap_rights_t) >= sizeof(cap_rights_t));
+#endif // SANITIZER_FREEBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.h b/lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.h
new file mode 100644
index 0000000000..5e0ca9c7d7
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_freebsd.h
@@ -0,0 +1,656 @@
+//===-- sanitizer_platform_limits_freebsd.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific FreeBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_FREEBSD_H
+#define SANITIZER_PLATFORM_LIMITS_FREEBSD_H
+
+#if SANITIZER_FREEBSD
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+#include "sanitizer_platform_limits_posix.h"
+
+// Get sys/_types.h, because that tells us whether 64-bit inodes are
+// used in struct dirent below.
+#include <sys/_types.h>
+
+namespace __sanitizer {
+void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
+
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+#if defined(__powerpc64__)
+const unsigned struct___old_kernel_stat_sz = 0;
+#else
+const unsigned struct___old_kernel_stat_sz = 32;
+#endif
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned fpos_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_stack_t_sz;
+extern unsigned struct_sched_param_sz;
+extern unsigned struct_statfs64_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+extern unsigned ucontext_t_sz;
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+extern unsigned struct_timespec_sz;
+extern unsigned struct_regmatch_sz;
+extern unsigned struct_regex_sz;
+extern unsigned struct_FTS_sz;
+extern unsigned struct_FTSENT_sz;
+extern const int unvis_valid;
+extern const int unvis_validpush;
+
+struct __sanitizer_iocb {
+ u64 aio_data;
+ u32 aio_key_or_aio_reserved1; // Simply crazy.
+ u32 aio_reserved1_or_aio_key; // Luckily, we don't need these.
+ u16 aio_lio_opcode;
+ s16 aio_reqprio;
+ u32 aio_fildes;
+ u64 aio_buf;
+ u64 aio_nbytes;
+ s64 aio_offset;
+ u64 aio_reserved2;
+ u64 aio_reserved3;
+};
+
+struct __sanitizer_io_event {
+ u64 data;
+ u64 obj;
+ u64 res;
+ u64 res2;
+};
+
+const unsigned iocb_cmd_pread = 0;
+const unsigned iocb_cmd_pwrite = 1;
+const unsigned iocb_cmd_preadv = 7;
+const unsigned iocb_cmd_pwritev = 8;
+
+struct __sanitizer___sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ uptr *oldlenp;
+ void *newval;
+ uptr newlen;
+ unsigned long ___unused[4];
+};
+
+struct __sanitizer_ipc_perm {
+ unsigned int cuid;
+ unsigned int cgid;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned short mode;
+ unsigned short seq;
+ long key;
+};
+
+#if !defined(__i386__)
+typedef long long __sanitizer_time_t;
+#else
+typedef long __sanitizer_time_t;
+#endif
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ unsigned int shm_lpid;
+ unsigned int shm_cpid;
+ int shm_nattch;
+ __sanitizer_time_t shm_atime;
+ __sanitizer_time_t shm_dtime;
+ __sanitizer_time_t shm_ctime;
+};
+
+extern unsigned struct_msqid_ds_sz;
+extern unsigned struct_mq_attr_sz;
+extern unsigned struct_timeb_sz;
+extern unsigned struct_statvfs_sz;
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+#undef ifa_dstaddr
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+};
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+ __sanitizer_time_t pw_change;
+ char *pw_class;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+ __sanitizer_time_t pw_expire;
+ int pw_fields;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+typedef long __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+struct __sanitizer_dirent {
+#if defined(__INO64)
+ unsigned long long d_fileno;
+ unsigned long long d_off;
+#else
+ unsigned int d_fileno;
+#endif
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+
+// 'clock_t' is 32 bits wide on x64 FreeBSD
+typedef int __sanitizer_clock_t;
+typedef int __sanitizer_clockid_t;
+
+#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+ defined(__mips__)
+typedef unsigned __sanitizer___kernel_uid_t;
+typedef unsigned __sanitizer___kernel_gid_t;
+#else
+typedef unsigned short __sanitizer___kernel_uid_t;
+typedef unsigned short __sanitizer___kernel_gid_t;
+#endif
+typedef long long __sanitizer___kernel_off_t;
+
+#if defined(__powerpc__) || defined(__mips__)
+typedef unsigned int __sanitizer___kernel_old_uid_t;
+typedef unsigned int __sanitizer___kernel_old_gid_t;
+#else
+typedef unsigned short __sanitizer___kernel_old_uid_t;
+typedef unsigned short __sanitizer___kernel_old_gid_t;
+#endif
+
+typedef long long __sanitizer___kernel_loff_t;
+typedef struct {
+ unsigned long fds_bits[1024 / (8 * sizeof(long))];
+} __sanitizer___kernel_fd_set;
+
+// This thing depends on the platform. We are only interested in the upper
+// limit. Verified with a compiler assert in .cpp.
+union __sanitizer_pthread_attr_t {
+ char size[128];
+ void *align;
+};
+
+const unsigned old_sigset_t_sz = sizeof(unsigned long);
+
+struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+};
+
+typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+
+struct __sanitizer_siginfo {
+ // The size is determined by looking at sizeof of real siginfo_t on linux.
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
+ __sanitizer_siginfo *siginfo,
+ void *uctx);
+
+struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+};
+
+struct __sanitizer_sem_t {
+ u32 data[4];
+};
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned __sanitizer_nfds_t;
+
+struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char *, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void * /* struct stat* */);
+ int (*gl_stat)(const char *, void * /* struct stat* */);
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ char *we_strings;
+ uptr we_nbytes;
+};
+
+typedef void __sanitizer_FILE;
+
+extern unsigned struct_shminfo_sz;
+extern unsigned struct_shm_info_sz;
+extern int shmctl_ipc_stat;
+extern int shmctl_ipc_info;
+extern int shmctl_shm_info;
+extern int shmctl_shm_stat;
+
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
+#define IOC_SIZEBITS 13
+#define IOC_DIRBITS 3
+#define IOC_NONE 1U
+#define IOC_WRITE 4U
+#define IOC_READ 2U
+#else
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#endif
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#if defined(IOC_DIRMASK)
+#undef IOC_DIRMASK
+#endif
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+
+extern unsigned struct_ifreq_sz;
+extern unsigned struct_termios_sz;
+extern unsigned struct_winsize_sz;
+
+extern unsigned struct_copr_buffer_sz;
+extern unsigned struct_copr_debug_buf_sz;
+extern unsigned struct_copr_msg_sz;
+extern unsigned struct_midi_info_sz;
+extern unsigned struct_mtget_sz;
+extern unsigned struct_mtop_sz;
+extern unsigned struct_rtentry_sz;
+extern unsigned struct_sbi_instrument_sz;
+extern unsigned struct_seq_event_rec_sz;
+extern unsigned struct_synth_info_sz;
+extern unsigned struct_vt_mode_sz;
+
+extern const unsigned long __sanitizer_bufsiz;
+extern unsigned struct_audio_buf_info_sz;
+extern unsigned struct_ppp_stats_sz;
+extern unsigned struct_sioc_sg_req_sz;
+extern unsigned struct_sioc_vif_req_sz;
+
+// ioctl request identifiers
+
+// A special value to mark ioctls that are not present on the target platform,
+// when it can not be determined without including any system headers.
+extern const unsigned IOCTL_NOT_PRESENT;
+
+extern unsigned IOCTL_FIOASYNC;
+extern unsigned IOCTL_FIOCLEX;
+extern unsigned IOCTL_FIOGETOWN;
+extern unsigned IOCTL_FIONBIO;
+extern unsigned IOCTL_FIONCLEX;
+extern unsigned IOCTL_FIOSETOWN;
+extern unsigned IOCTL_SIOCADDMULTI;
+extern unsigned IOCTL_SIOCATMARK;
+extern unsigned IOCTL_SIOCDELMULTI;
+extern unsigned IOCTL_SIOCGIFADDR;
+extern unsigned IOCTL_SIOCGIFBRDADDR;
+extern unsigned IOCTL_SIOCGIFCONF;
+extern unsigned IOCTL_SIOCGIFDSTADDR;
+extern unsigned IOCTL_SIOCGIFFLAGS;
+extern unsigned IOCTL_SIOCGIFMETRIC;
+extern unsigned IOCTL_SIOCGIFMTU;
+extern unsigned IOCTL_SIOCGIFNETMASK;
+extern unsigned IOCTL_SIOCGPGRP;
+extern unsigned IOCTL_SIOCSIFADDR;
+extern unsigned IOCTL_SIOCSIFBRDADDR;
+extern unsigned IOCTL_SIOCSIFDSTADDR;
+extern unsigned IOCTL_SIOCSIFFLAGS;
+extern unsigned IOCTL_SIOCSIFMETRIC;
+extern unsigned IOCTL_SIOCSIFMTU;
+extern unsigned IOCTL_SIOCSIFNETMASK;
+extern unsigned IOCTL_SIOCSPGRP;
+extern unsigned IOCTL_TIOCCONS;
+extern unsigned IOCTL_TIOCEXCL;
+extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCGPGRP;
+extern unsigned IOCTL_TIOCGWINSZ;
+extern unsigned IOCTL_TIOCMBIC;
+extern unsigned IOCTL_TIOCMBIS;
+extern unsigned IOCTL_TIOCMGET;
+extern unsigned IOCTL_TIOCMSET;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCNXCL;
+extern unsigned IOCTL_TIOCOUTQ;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSCTTY;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCSPGRP;
+extern unsigned IOCTL_TIOCSTI;
+extern unsigned IOCTL_TIOCSWINSZ;
+extern unsigned IOCTL_SIOCGETSGCNT;
+extern unsigned IOCTL_SIOCGETVIFCNT;
+extern unsigned IOCTL_MTIOCGET;
+extern unsigned IOCTL_MTIOCTOP;
+extern unsigned IOCTL_SIOCADDRT;
+extern unsigned IOCTL_SIOCDELRT;
+extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
+extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
+extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
+extern unsigned IOCTL_SNDCTL_DSP_POST;
+extern unsigned IOCTL_SNDCTL_DSP_RESET;
+extern unsigned IOCTL_SNDCTL_DSP_SETFMT;
+extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;
+extern unsigned IOCTL_SNDCTL_DSP_SPEED;
+extern unsigned IOCTL_SNDCTL_DSP_STEREO;
+extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;
+extern unsigned IOCTL_SNDCTL_DSP_SYNC;
+extern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE;
+extern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR;
+extern unsigned IOCTL_SNDCTL_MIDI_INFO;
+extern unsigned IOCTL_SNDCTL_MIDI_PRETIME;
+extern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE;
+extern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT;
+extern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT;
+extern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS;
+extern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS;
+extern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND;
+extern unsigned IOCTL_SNDCTL_SEQ_PANIC;
+extern unsigned IOCTL_SNDCTL_SEQ_PERCMODE;
+extern unsigned IOCTL_SNDCTL_SEQ_RESET;
+extern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES;
+extern unsigned IOCTL_SNDCTL_SEQ_SYNC;
+extern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI;
+extern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD;
+extern unsigned IOCTL_SNDCTL_SYNTH_INFO;
+extern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL;
+extern unsigned IOCTL_SNDCTL_TMR_CONTINUE;
+extern unsigned IOCTL_SNDCTL_TMR_METRONOME;
+extern unsigned IOCTL_SNDCTL_TMR_SELECT;
+extern unsigned IOCTL_SNDCTL_TMR_SOURCE;
+extern unsigned IOCTL_SNDCTL_TMR_START;
+extern unsigned IOCTL_SNDCTL_TMR_STOP;
+extern unsigned IOCTL_SNDCTL_TMR_TEMPO;
+extern unsigned IOCTL_SNDCTL_TMR_TIMEBASE;
+extern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM;
+extern unsigned IOCTL_SOUND_MIXER_READ_BASS;
+extern unsigned IOCTL_SOUND_MIXER_READ_CAPS;
+extern unsigned IOCTL_SOUND_MIXER_READ_CD;
+extern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK;
+extern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE;
+extern unsigned IOCTL_SOUND_MIXER_READ_IGAIN;
+extern unsigned IOCTL_SOUND_MIXER_READ_IMIX;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE1;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE2;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE3;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE;
+extern unsigned IOCTL_SOUND_MIXER_READ_LOUD;
+extern unsigned IOCTL_SOUND_MIXER_READ_MIC;
+extern unsigned IOCTL_SOUND_MIXER_READ_MUTE;
+extern unsigned IOCTL_SOUND_MIXER_READ_OGAIN;
+extern unsigned IOCTL_SOUND_MIXER_READ_PCM;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECLEV;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECMASK;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECSRC;
+extern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER;
+extern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS;
+extern unsigned IOCTL_SOUND_MIXER_READ_SYNTH;
+extern unsigned IOCTL_SOUND_MIXER_READ_TREBLE;
+extern unsigned IOCTL_SOUND_MIXER_READ_VOLUME;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_BASS;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_CD;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_MIC;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_PCM;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME;
+extern unsigned IOCTL_SOUND_PCM_READ_BITS;
+extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_READ_FILTER;
+extern unsigned IOCTL_SOUND_PCM_READ_RATE;
+extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
+extern unsigned IOCTL_VT_ACTIVATE;
+extern unsigned IOCTL_VT_GETMODE;
+extern unsigned IOCTL_VT_OPENQRY;
+extern unsigned IOCTL_VT_RELDISP;
+extern unsigned IOCTL_VT_SETMODE;
+extern unsigned IOCTL_VT_WAITACTIVE;
+extern unsigned IOCTL_GIO_SCRNMAP;
+extern unsigned IOCTL_KDDISABIO;
+extern unsigned IOCTL_KDENABIO;
+extern unsigned IOCTL_KDGETLED;
+extern unsigned IOCTL_KDGETMODE;
+extern unsigned IOCTL_KDGKBMODE;
+extern unsigned IOCTL_KDGKBTYPE;
+extern unsigned IOCTL_KDMKTONE;
+extern unsigned IOCTL_KDSETLED;
+extern unsigned IOCTL_KDSETMODE;
+extern unsigned IOCTL_KDSKBMODE;
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+
+struct __sanitizer_cap_rights {
+ u64 cr_rights[2];
+};
+
+typedef struct __sanitizer_cap_rights __sanitizer_cap_rights_t;
+extern unsigned struct_cap_rights_sz;
+
+extern unsigned struct_fstab_sz;
+extern unsigned struct_StringList_sz;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and struct at the same time,
+// and thus requires explicit "struct" in sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#define SIGACTION_SYMNAME sigaction
+
+#endif
+
+#endif // SANITIZER_FREEBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_linux.cpp b/lib/tsan/sanitizer_common/sanitizer_platform_limits_linux.cpp
new file mode 100644
index 0000000000..c51327e126
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -0,0 +1,108 @@
+//===-- sanitizer_platform_limits_linux.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of linux kernel data structures.
+//===----------------------------------------------------------------------===//
+
+// This is a separate compilation unit for linux headers that conflict with
+// userspace headers.
+// Most "normal" includes go in sanitizer_platform_limits_posix.cpp
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
+// For offsetof -> __builtin_offsetof definition.
+#include <stddef.h>
+
+// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
+// are not defined anywhere in userspace headers. Fake them. This seems to work
+// fine with newer headers, too.
+#include <linux/posix_types.h>
+#if defined(__x86_64__) || defined(__mips__)
+#include <sys/stat.h>
+#else
+#define ino_t __kernel_ino_t
+#define mode_t __kernel_mode_t
+#define nlink_t __kernel_nlink_t
+#define uid_t __kernel_uid_t
+#define gid_t __kernel_gid_t
+#define off_t __kernel_off_t
+#define time_t __kernel_time_t
+// This header seems to contain the definitions of _kernel_ stat* structs.
+#include <asm/stat.h>
+#undef ino_t
+#undef mode_t
+#undef nlink_t
+#undef uid_t
+#undef gid_t
+#undef off_t
+#endif
+
+#include <linux/aio_abi.h>
+
+#if !SANITIZER_ANDROID
+#include <sys/statfs.h>
+#include <linux/perf_event.h>
+#endif
+
+using namespace __sanitizer;
+
+namespace __sanitizer {
+#if !SANITIZER_ANDROID
+ unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif
+} // namespace __sanitizer
+
+#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
+ && !defined(__mips__) && !defined(__s390__)\
+ && !defined(__sparc__) && !defined(__riscv)
+COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
+#endif
+
+COMPILER_CHECK(struct_kernel_stat_sz == sizeof(struct stat));
+
+#if defined(__i386__)
+COMPILER_CHECK(struct_kernel_stat64_sz == sizeof(struct stat64));
+#endif
+
+CHECK_TYPE_SIZE(io_event);
+CHECK_SIZE_AND_OFFSET(io_event, data);
+CHECK_SIZE_AND_OFFSET(io_event, obj);
+CHECK_SIZE_AND_OFFSET(io_event, res);
+CHECK_SIZE_AND_OFFSET(io_event, res2);
+
+#if !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(struct __sanitizer_perf_event_attr) <=
+ sizeof(struct perf_event_attr));
+CHECK_SIZE_AND_OFFSET(perf_event_attr, type);
+CHECK_SIZE_AND_OFFSET(perf_event_attr, size);
+#endif
+
+COMPILER_CHECK(iocb_cmd_pread == IOCB_CMD_PREAD);
+COMPILER_CHECK(iocb_cmd_pwrite == IOCB_CMD_PWRITE);
+#if !SANITIZER_ANDROID
+COMPILER_CHECK(iocb_cmd_preadv == IOCB_CMD_PREADV);
+COMPILER_CHECK(iocb_cmd_pwritev == IOCB_CMD_PWRITEV);
+#endif
+
+CHECK_TYPE_SIZE(iocb);
+CHECK_SIZE_AND_OFFSET(iocb, aio_data);
+// Skip aio_key, it's weird.
+CHECK_SIZE_AND_OFFSET(iocb, aio_lio_opcode);
+CHECK_SIZE_AND_OFFSET(iocb, aio_reqprio);
+CHECK_SIZE_AND_OFFSET(iocb, aio_fildes);
+CHECK_SIZE_AND_OFFSET(iocb, aio_buf);
+CHECK_SIZE_AND_OFFSET(iocb, aio_nbytes);
+CHECK_SIZE_AND_OFFSET(iocb, aio_offset);
+
+#endif // SANITIZER_LINUX
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
new file mode 100644
index 0000000000..25da334b63
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
@@ -0,0 +1,2586 @@
+//===-- sanitizer_platform_limits_netbsd.cpp ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific NetBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_NETBSD
+
+#define _KMEMUSER
+#define RAY_DO_SIGLEV
+#define __LEGACY_PT_LWPINFO
+
+// clang-format off
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/disk.h>
+#include <sys/disklabel.h>
+#include <sys/mount.h>
+#include <sys/agpio.h>
+#include <sys/ataio.h>
+#include <sys/audioio.h>
+#include <sys/cdbr.h>
+#include <sys/cdio.h>
+#include <sys/chio.h>
+#include <sys/clockctl.h>
+#include <sys/cpuio.h>
+#include <sys/dkio.h>
+#include <sys/drvctlio.h>
+#include <sys/dvdio.h>
+#include <sys/envsys.h>
+#include <sys/event.h>
+#include <sys/fdio.h>
+#include <sys/filio.h>
+#include <sys/gpio.h>
+#include <sys/ioctl.h>
+#include <sys/ioctl_compat.h>
+#include <sys/joystick.h>
+#include <sys/ksyms.h>
+#include <sys/lua.h>
+#include <sys/midiio.h>
+#include <sys/mtio.h>
+#include <sys/power.h>
+#include <sys/radioio.h>
+#include <sys/rndio.h>
+#include <sys/scanio.h>
+#include <sys/scsiio.h>
+#include <sys/sockio.h>
+#include <sys/timepps.h>
+#include <sys/ttycom.h>
+#include <sys/verified_exec.h>
+#include <sys/videoio.h>
+#include <sys/wdog.h>
+#include <sys/event.h>
+#include <sys/filio.h>
+#include <sys/ipc.h>
+#include <sys/ipmi.h>
+#include <sys/kcov.h>
+#include <sys/mman.h>
+#include <sys/module.h>
+#include <sys/mount.h>
+#include <sys/mqueue.h>
+#include <sys/msg.h>
+#include <sys/mtio.h>
+#include <sys/ptrace.h>
+
+// Compat for NetBSD < 9.99.30.
+#ifndef PT_LWPSTATUS
+#define PT_LWPSTATUS 24
+#endif
+#ifndef PT_LWPNEXT
+#define PT_LWPNEXT 25
+#endif
+
+#include <sys/resource.h>
+#include <sys/sem.h>
+#include <sys/sha1.h>
+#include <sys/sha2.h>
+#include <sys/shm.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/soundcard.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/times.h>
+#include <sys/timespec.h>
+#include <sys/timex.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+#include <sys/utsname.h>
+#include <altq/altq.h>
+#include <altq/altq_afmap.h>
+#include <altq/altq_blue.h>
+#include <altq/altq_cbq.h>
+#include <altq/altq_cdnr.h>
+#include <altq/altq_fifoq.h>
+#include <altq/altq_hfsc.h>
+#include <altq/altq_jobs.h>
+#include <altq/altq_priq.h>
+#include <altq/altq_red.h>
+#include <altq/altq_rio.h>
+#include <altq/altq_wfq.h>
+#include <arpa/inet.h>
+#include <crypto/cryptodev.h>
+#include <dev/apm/apmio.h>
+#include <dev/dm/netbsd-dm.h>
+#include <dev/dmover/dmover_io.h>
+#include <dev/dtv/dtvio_demux.h>
+#include <dev/dtv/dtvio_frontend.h>
+#if !__NetBSD_Prereq__(9, 99, 26)
+#include <dev/filemon/filemon.h>
+#else
+#define FILEMON_SET_FD _IOWR('S', 1, int)
+#define FILEMON_SET_PID _IOWR('S', 2, pid_t)
+#endif
+#include <dev/hdaudio/hdaudioio.h>
+#include <dev/hdmicec/hdmicecio.h>
+#include <dev/hpc/hpcfbio.h>
+#include <dev/i2o/iopio.h>
+#include <dev/ic/athioctl.h>
+#include <dev/ic/bt8xx.h>
+#include <dev/ic/icp_ioctl.h>
+#include <dev/ic/isp_ioctl.h>
+#include <dev/ic/mlxio.h>
+#include <dev/ic/qemufwcfgio.h>
+#include <dev/ic/nvmeio.h>
+#include <dev/ir/irdaio.h>
+#include <dev/isa/isvio.h>
+#include <dev/isa/wtreg.h>
+#include <dev/iscsi/iscsi_ioctl.h>
+#include <dev/ofw/openfirmio.h>
+#include <dev/pci/amrio.h>
+#include <dev/pci/mlyreg.h>
+#include <dev/pci/mlyio.h>
+#include <dev/pci/pciio.h>
+#include <dev/pci/tweio.h>
+#include <dev/pcmcia/if_cnwioctl.h>
+#include <net/bpf.h>
+#include <net/if_gre.h>
+#include <net/ppp_defs.h>
+#include <net/if_ppp.h>
+#include <net/if_pppoe.h>
+#include <net/if_sppp.h>
+#include <net/if_srt.h>
+#include <net/if_tap.h>
+#include <net/if_tun.h>
+#include <net/npf.h>
+#include <net/pfvar.h>
+#include <net/slip.h>
+#include <netbt/hci.h>
+#include <netinet/ip_compat.h>
+#if __has_include(<netinet/ip_fil.h>)
+#include <netinet/ip_fil.h>
+#include <netinet/ip_nat.h>
+#include <netinet/ip_proxy.h>
+#else
+/* Fallback for MKIPFILTER=no */
+
+typedef struct ap_control {
+ char apc_label[16];
+ char apc_config[16];
+ unsigned char apc_p;
+ unsigned long apc_cmd;
+ unsigned long apc_arg;
+ void *apc_data;
+ size_t apc_dsize;
+} ap_ctl_t;
+
+typedef struct ipftq {
+ ipfmutex_t ifq_lock;
+ unsigned int ifq_ttl;
+ void *ifq_head;
+ void **ifq_tail;
+ void *ifq_next;
+ void **ifq_pnext;
+ int ifq_ref;
+ unsigned int ifq_flags;
+} ipftq_t;
+
+typedef struct ipfobj {
+ uint32_t ipfo_rev;
+ uint32_t ipfo_size;
+ void *ipfo_ptr;
+ int ipfo_type;
+ int ipfo_offset;
+ int ipfo_retval;
+ unsigned char ipfo_xxxpad[28];
+} ipfobj_t;
+
+#define SIOCADNAT _IOW('r', 60, struct ipfobj)
+#define SIOCRMNAT _IOW('r', 61, struct ipfobj)
+#define SIOCGNATS _IOWR('r', 62, struct ipfobj)
+#define SIOCGNATL _IOWR('r', 63, struct ipfobj)
+#define SIOCPURGENAT _IOWR('r', 100, struct ipfobj)
+#endif
+#include <netinet6/in6_var.h>
+#include <netinet6/nd6.h>
+#if !__NetBSD_Prereq__(9, 99, 51)
+#include <netsmb/smb_dev.h>
+#else
+struct smbioc_flags {
+ int ioc_level;
+ int ioc_mask;
+ int ioc_flags;
+};
+struct smbioc_oshare {
+ int ioc_opt;
+ int ioc_stype;
+ char ioc_share[129];
+ char ioc_password[129];
+ uid_t ioc_owner;
+ gid_t ioc_group;
+ mode_t ioc_mode;
+ mode_t ioc_rights;
+};
+struct smbioc_ossn {
+ int ioc_opt;
+ uint32_t ioc_svlen;
+ struct sockaddr *ioc_server;
+ uint32_t ioc_lolen;
+ struct sockaddr *ioc_local;
+ char ioc_srvname[16];
+ int ioc_timeout;
+ int ioc_retrycount;
+ char ioc_localcs[16];
+ char ioc_servercs[16];
+ char ioc_user[129];
+ char ioc_workgroup[129];
+ char ioc_password[129];
+ uid_t ioc_owner;
+ gid_t ioc_group;
+ mode_t ioc_mode;
+ mode_t ioc_rights;
+};
+struct smbioc_lookup {
+ int ioc_level;
+ int ioc_flags;
+ struct smbioc_ossn ioc_ssn;
+ struct smbioc_oshare ioc_sh;
+};
+struct smbioc_rq {
+ u_char ioc_cmd;
+ u_char ioc_twc;
+ void *ioc_twords;
+ u_short ioc_tbc;
+ void *ioc_tbytes;
+ int ioc_rpbufsz;
+ char *ioc_rpbuf;
+ u_char ioc_rwc;
+ u_short ioc_rbc;
+};
+struct smbioc_rw {
+ u_int16_t ioc_fh;
+ char *ioc_base;
+ off_t ioc_offset;
+ int ioc_cnt;
+};
+#define SMBIOC_OPENSESSION _IOW('n', 100, struct smbioc_ossn)
+#define SMBIOC_OPENSHARE _IOW('n', 101, struct smbioc_oshare)
+#define SMBIOC_REQUEST _IOWR('n', 102, struct smbioc_rq)
+#define SMBIOC_T2RQ _IOWR('n', 103, struct smbioc_t2rq)
+#define SMBIOC_SETFLAGS _IOW('n', 104, struct smbioc_flags)
+#define SMBIOC_LOOKUP _IOW('n', 106, struct smbioc_lookup)
+#define SMBIOC_READ _IOWR('n', 107, struct smbioc_rw)
+#define SMBIOC_WRITE _IOWR('n', 108, struct smbioc_rw)
+#endif
+#include <dev/biovar.h>
+#include <dev/bluetooth/btdev.h>
+#include <dev/bluetooth/btsco.h>
+#include <dev/ccdvar.h>
+#include <dev/cgdvar.h>
+#include <dev/fssvar.h>
+#include <dev/kttcpio.h>
+#include <dev/lockstat.h>
+#include <dev/md.h>
+#include <net/if_ether.h>
+#include <dev/pcmcia/if_rayreg.h>
+#include <stdio.h>
+#include <dev/raidframe/raidframeio.h>
+#include <dev/sbus/mbppio.h>
+#include <dev/scsipi/ses.h>
+#include <dev/spi/spi_io.h>
+#include <dev/spkrio.h>
+#include <dev/sun/disklabel.h>
+#include <dev/sun/fbio.h>
+#include <dev/sun/kbio.h>
+#include <dev/sun/vuid_event.h>
+#include <dev/tc/sticio.h>
+#include <dev/usb/ukyopon.h>
+#if !__NetBSD_Prereq__(9, 99, 44)
+#include <dev/usb/urio.h>
+#else
+struct urio_command {
+ unsigned short length;
+ int request;
+ int requesttype;
+ int value;
+ int index;
+ void *buffer;
+ int timeout;
+};
+#define URIO_SEND_COMMAND _IOWR('U', 200, struct urio_command)
+#define URIO_RECV_COMMAND _IOWR('U', 201, struct urio_command)
+#endif
+#include <dev/usb/usb.h>
+#include <dev/usb/utoppy.h>
+#include <dev/vme/xio.h>
+#include <dev/vndvar.h>
+#include <dev/wscons/wsconsio.h>
+#include <dev/wscons/wsdisplay_usl_io.h>
+#include <fs/autofs/autofs_ioctl.h>
+#include <dirent.h>
+#include <dlfcn.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <link_elf.h>
+#include <net/if.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/ip_mroute.h>
+#include <netinet/sctp_uio.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stddef.h>
+#include <md2.h>
+#include <md4.h>
+#include <md5.h>
+#include <rmd160.h>
+#include <soundcard.h>
+#include <term.h>
+#include <termios.h>
+#include <time.h>
+#include <ttyent.h>
+#include <utime.h>
+#include <utmp.h>
+#include <utmpx.h>
+#include <vis.h>
+#include <wchar.h>
+#include <wordexp.h>
+#include <ttyent.h>
+#include <fts.h>
+#include <regex.h>
+#include <fstab.h>
+#include <stringlist.h>
+
+#if defined(__x86_64__)
+#include <nvmm.h>
+#endif
+// clang-format on
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_platform_limits_netbsd.h"
+
+namespace __sanitizer {
+void *__sanitizer_get_link_map_by_dlopen_handle(void* handle) {
+ void *p = nullptr;
+ return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
+}
+
+unsigned struct_utsname_sz = sizeof(struct utsname);
+unsigned struct_stat_sz = sizeof(struct stat);
+unsigned struct_rusage_sz = sizeof(struct rusage);
+unsigned struct_tm_sz = sizeof(struct tm);
+unsigned struct_passwd_sz = sizeof(struct passwd);
+unsigned struct_group_sz = sizeof(struct group);
+unsigned siginfo_t_sz = sizeof(siginfo_t);
+unsigned struct_sigaction_sz = sizeof(struct sigaction);
+unsigned struct_stack_t_sz = sizeof(stack_t);
+unsigned struct_itimerval_sz = sizeof(struct itimerval);
+unsigned pthread_t_sz = sizeof(pthread_t);
+unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+unsigned pid_t_sz = sizeof(pid_t);
+unsigned timeval_sz = sizeof(timeval);
+unsigned uid_t_sz = sizeof(uid_t);
+unsigned gid_t_sz = sizeof(gid_t);
+unsigned mbstate_t_sz = sizeof(mbstate_t);
+unsigned sigset_t_sz = sizeof(sigset_t);
+unsigned struct_timezone_sz = sizeof(struct timezone);
+unsigned struct_tms_sz = sizeof(struct tms);
+unsigned struct_sigevent_sz = sizeof(struct sigevent);
+unsigned struct_sched_param_sz = sizeof(struct sched_param);
+unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned struct_rlimit_sz = sizeof(struct rlimit);
+unsigned struct_timespec_sz = sizeof(struct timespec);
+unsigned struct_sembuf_sz = sizeof(struct sembuf);
+unsigned struct_kevent_sz = sizeof(struct kevent);
+unsigned struct_FTS_sz = sizeof(FTS);
+unsigned struct_FTSENT_sz = sizeof(FTSENT);
+unsigned struct_regex_sz = sizeof(regex_t);
+unsigned struct_regmatch_sz = sizeof(regmatch_t);
+unsigned struct_fstab_sz = sizeof(struct fstab);
+unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+unsigned struct_timex_sz = sizeof(struct timex);
+unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
+unsigned struct_statvfs_sz = sizeof(struct statvfs);
+unsigned struct_sigaltstack_sz = sizeof(stack_t);
+
+const uptr sig_ign = (uptr)SIG_IGN;
+const uptr sig_dfl = (uptr)SIG_DFL;
+const uptr sig_err = (uptr)SIG_ERR;
+const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+const unsigned long __sanitizer_bufsiz = BUFSIZ;
+
+int ptrace_pt_io = PT_IO;
+int ptrace_pt_lwpinfo = PT_LWPINFO;
+int ptrace_pt_set_event_mask = PT_SET_EVENT_MASK;
+int ptrace_pt_get_event_mask = PT_GET_EVENT_MASK;
+int ptrace_pt_get_process_state = PT_GET_PROCESS_STATE;
+int ptrace_pt_set_siginfo = PT_SET_SIGINFO;
+int ptrace_pt_get_siginfo = PT_GET_SIGINFO;
+int ptrace_pt_lwpstatus = PT_LWPSTATUS;
+int ptrace_pt_lwpnext = PT_LWPNEXT;
+int ptrace_piod_read_d = PIOD_READ_D;
+int ptrace_piod_write_d = PIOD_WRITE_D;
+int ptrace_piod_read_i = PIOD_READ_I;
+int ptrace_piod_write_i = PIOD_WRITE_I;
+int ptrace_piod_read_auxv = PIOD_READ_AUXV;
+
+#if defined(PT_SETREGS) && defined(PT_GETREGS)
+int ptrace_pt_setregs = PT_SETREGS;
+int ptrace_pt_getregs = PT_GETREGS;
+#else
+int ptrace_pt_setregs = -1;
+int ptrace_pt_getregs = -1;
+#endif
+
+#if defined(PT_SETFPREGS) && defined(PT_GETFPREGS)
+int ptrace_pt_setfpregs = PT_SETFPREGS;
+int ptrace_pt_getfpregs = PT_GETFPREGS;
+#else
+int ptrace_pt_setfpregs = -1;
+int ptrace_pt_getfpregs = -1;
+#endif
+
+#if defined(PT_SETDBREGS) && defined(PT_GETDBREGS)
+int ptrace_pt_setdbregs = PT_SETDBREGS;
+int ptrace_pt_getdbregs = PT_GETDBREGS;
+#else
+int ptrace_pt_setdbregs = -1;
+int ptrace_pt_getdbregs = -1;
+#endif
+
+unsigned struct_ptrace_ptrace_io_desc_struct_sz = sizeof(struct ptrace_io_desc);
+unsigned struct_ptrace_ptrace_lwpinfo_struct_sz = sizeof(struct ptrace_lwpinfo);
+unsigned struct_ptrace_ptrace_lwpstatus_struct_sz =
+ sizeof(struct __sanitizer_ptrace_lwpstatus);
+unsigned struct_ptrace_ptrace_event_struct_sz = sizeof(ptrace_event_t);
+unsigned struct_ptrace_ptrace_siginfo_struct_sz = sizeof(ptrace_siginfo_t);
+
+#if defined(PT_SETREGS)
+unsigned struct_ptrace_reg_struct_sz = sizeof(struct reg);
+#else
+unsigned struct_ptrace_reg_struct_sz = -1;
+#endif
+
+#if defined(PT_SETFPREGS)
+unsigned struct_ptrace_fpreg_struct_sz = sizeof(struct fpreg);
+#else
+unsigned struct_ptrace_fpreg_struct_sz = -1;
+#endif
+
+#if defined(PT_SETDBREGS)
+unsigned struct_ptrace_dbreg_struct_sz = sizeof(struct dbreg);
+#else
+unsigned struct_ptrace_dbreg_struct_sz = -1;
+#endif
+
+int shmctl_ipc_stat = (int)IPC_STAT;
+
+unsigned struct_utmp_sz = sizeof(struct utmp);
+unsigned struct_utmpx_sz = sizeof(struct utmpx);
+
+int map_fixed = MAP_FIXED;
+
+int af_inet = (int)AF_INET;
+int af_inet6 = (int)AF_INET6;
+
+uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+}
+
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+
+int glob_nomatch = GLOB_NOMATCH;
+int glob_altdirfunc = GLOB_ALTDIRFUNC;
+
+unsigned path_max = PATH_MAX;
+
+int struct_ttyent_sz = sizeof(struct ttyent);
+
+struct __sanitizer_nvlist_ref_t {
+ void *buf;
+ uptr len;
+ int flags;
+};
+
+typedef __sanitizer_nvlist_ref_t nvlist_ref_t;
+
+// ioctl arguments
+unsigned struct_altqreq_sz = sizeof(altqreq);
+unsigned struct_amr_user_ioctl_sz = sizeof(amr_user_ioctl);
+unsigned struct_ap_control_sz = sizeof(ap_control);
+unsigned struct_apm_ctl_sz = sizeof(apm_ctl);
+unsigned struct_apm_event_info_sz = sizeof(apm_event_info);
+unsigned struct_apm_power_info_sz = sizeof(apm_power_info);
+unsigned struct_atabusiodetach_args_sz = sizeof(atabusiodetach_args);
+unsigned struct_atabusioscan_args_sz = sizeof(atabusioscan_args);
+unsigned struct_ath_diag_sz = sizeof(ath_diag);
+unsigned struct_atm_flowmap_sz = sizeof(atm_flowmap);
+unsigned struct_audio_buf_info_sz = sizeof(audio_buf_info);
+unsigned struct_audio_device_sz = sizeof(audio_device);
+unsigned struct_audio_encoding_sz = sizeof(audio_encoding);
+unsigned struct_audio_info_sz = sizeof(audio_info);
+unsigned struct_audio_offset_sz = sizeof(audio_offset);
+unsigned struct_bio_locate_sz = sizeof(bio_locate);
+unsigned struct_bioc_alarm_sz = sizeof(bioc_alarm);
+unsigned struct_bioc_blink_sz = sizeof(bioc_blink);
+unsigned struct_bioc_disk_sz = sizeof(bioc_disk);
+unsigned struct_bioc_inq_sz = sizeof(bioc_inq);
+unsigned struct_bioc_setstate_sz = sizeof(bioc_setstate);
+unsigned struct_bioc_vol_sz = sizeof(bioc_vol);
+unsigned struct_bioc_volops_sz = sizeof(bioc_volops);
+unsigned struct_bktr_chnlset_sz = sizeof(bktr_chnlset);
+unsigned struct_bktr_remote_sz = sizeof(bktr_remote);
+unsigned struct_blue_conf_sz = sizeof(blue_conf);
+unsigned struct_blue_interface_sz = sizeof(blue_interface);
+unsigned struct_blue_stats_sz = sizeof(blue_stats);
+unsigned struct_bpf_dltlist_sz = sizeof(bpf_dltlist);
+unsigned struct_bpf_program_sz = sizeof(bpf_program);
+unsigned struct_bpf_stat_old_sz = sizeof(bpf_stat_old);
+unsigned struct_bpf_stat_sz = sizeof(bpf_stat);
+unsigned struct_bpf_version_sz = sizeof(bpf_version);
+unsigned struct_btreq_sz = sizeof(btreq);
+unsigned struct_btsco_info_sz = sizeof(btsco_info);
+unsigned struct_buffmem_desc_sz = sizeof(buffmem_desc);
+unsigned struct_cbq_add_class_sz = sizeof(cbq_add_class);
+unsigned struct_cbq_add_filter_sz = sizeof(cbq_add_filter);
+unsigned struct_cbq_delete_class_sz = sizeof(cbq_delete_class);
+unsigned struct_cbq_delete_filter_sz = sizeof(cbq_delete_filter);
+unsigned struct_cbq_getstats_sz = sizeof(cbq_getstats);
+unsigned struct_cbq_interface_sz = sizeof(cbq_interface);
+unsigned struct_cbq_modify_class_sz = sizeof(cbq_modify_class);
+unsigned struct_ccd_ioctl_sz = sizeof(ccd_ioctl);
+unsigned struct_cdnr_add_element_sz = sizeof(cdnr_add_element);
+unsigned struct_cdnr_add_filter_sz = sizeof(cdnr_add_filter);
+unsigned struct_cdnr_add_tbmeter_sz = sizeof(cdnr_add_tbmeter);
+unsigned struct_cdnr_add_trtcm_sz = sizeof(cdnr_add_trtcm);
+unsigned struct_cdnr_add_tswtcm_sz = sizeof(cdnr_add_tswtcm);
+unsigned struct_cdnr_delete_element_sz = sizeof(cdnr_delete_element);
+unsigned struct_cdnr_delete_filter_sz = sizeof(cdnr_delete_filter);
+unsigned struct_cdnr_get_stats_sz = sizeof(cdnr_get_stats);
+unsigned struct_cdnr_interface_sz = sizeof(cdnr_interface);
+unsigned struct_cdnr_modify_tbmeter_sz = sizeof(cdnr_modify_tbmeter);
+unsigned struct_cdnr_modify_trtcm_sz = sizeof(cdnr_modify_trtcm);
+unsigned struct_cdnr_modify_tswtcm_sz = sizeof(cdnr_modify_tswtcm);
+unsigned struct_cdnr_tbmeter_stats_sz = sizeof(cdnr_tbmeter_stats);
+unsigned struct_cdnr_tcm_stats_sz = sizeof(cdnr_tcm_stats);
+unsigned struct_cgd_ioctl_sz = sizeof(cgd_ioctl);
+unsigned struct_cgd_user_sz = sizeof(cgd_user);
+unsigned struct_changer_element_status_request_sz =
+ sizeof(changer_element_status_request);
+unsigned struct_changer_exchange_request_sz = sizeof(changer_exchange_request);
+unsigned struct_changer_move_request_sz = sizeof(changer_move_request);
+unsigned struct_changer_params_sz = sizeof(changer_params);
+unsigned struct_changer_position_request_sz = sizeof(changer_position_request);
+unsigned struct_changer_set_voltag_request_sz =
+ sizeof(changer_set_voltag_request);
+unsigned struct_clockctl_adjtime_sz = sizeof(clockctl_adjtime);
+unsigned struct_clockctl_clock_settime_sz = sizeof(clockctl_clock_settime);
+unsigned struct_clockctl_ntp_adjtime_sz = sizeof(clockctl_ntp_adjtime);
+unsigned struct_clockctl_settimeofday_sz = sizeof(clockctl_settimeofday);
+unsigned struct_cnwistats_sz = sizeof(cnwistats);
+unsigned struct_cnwitrail_sz = sizeof(cnwitrail);
+unsigned struct_cnwstatus_sz = sizeof(cnwstatus);
+unsigned struct_count_info_sz = sizeof(count_info);
+unsigned struct_cpu_ucode_sz = sizeof(cpu_ucode);
+unsigned struct_cpu_ucode_version_sz = sizeof(cpu_ucode_version);
+unsigned struct_crypt_kop_sz = sizeof(crypt_kop);
+unsigned struct_crypt_mkop_sz = sizeof(crypt_mkop);
+unsigned struct_crypt_mop_sz = sizeof(crypt_mop);
+unsigned struct_crypt_op_sz = sizeof(crypt_op);
+unsigned struct_crypt_result_sz = sizeof(crypt_result);
+unsigned struct_crypt_sfop_sz = sizeof(crypt_sfop);
+unsigned struct_crypt_sgop_sz = sizeof(crypt_sgop);
+unsigned struct_cryptret_sz = sizeof(cryptret);
+unsigned struct_devdetachargs_sz = sizeof(devdetachargs);
+unsigned struct_devlistargs_sz = sizeof(devlistargs);
+unsigned struct_devpmargs_sz = sizeof(devpmargs);
+unsigned struct_devrescanargs_sz = sizeof(devrescanargs);
+unsigned struct_disk_badsecinfo_sz = sizeof(disk_badsecinfo);
+unsigned struct_disk_strategy_sz = sizeof(disk_strategy);
+unsigned struct_disklabel_sz = sizeof(disklabel);
+unsigned struct_dkbad_sz = sizeof(dkbad);
+unsigned struct_dkwedge_info_sz = sizeof(dkwedge_info);
+unsigned struct_dkwedge_list_sz = sizeof(dkwedge_list);
+unsigned struct_dmio_setfunc_sz = sizeof(dmio_setfunc);
+unsigned struct_dmx_pes_filter_params_sz = sizeof(dmx_pes_filter_params);
+unsigned struct_dmx_sct_filter_params_sz = sizeof(dmx_sct_filter_params);
+unsigned struct_dmx_stc_sz = sizeof(dmx_stc);
+unsigned struct_dvb_diseqc_master_cmd_sz = sizeof(dvb_diseqc_master_cmd);
+unsigned struct_dvb_diseqc_slave_reply_sz = sizeof(dvb_diseqc_slave_reply);
+unsigned struct_dvb_frontend_event_sz = sizeof(dvb_frontend_event);
+unsigned struct_dvb_frontend_info_sz = sizeof(dvb_frontend_info);
+unsigned struct_dvb_frontend_parameters_sz = sizeof(dvb_frontend_parameters);
+unsigned struct_eccapreq_sz = sizeof(eccapreq);
+unsigned struct_fbcmap_sz = sizeof(fbcmap);
+unsigned struct_fbcurpos_sz = sizeof(fbcurpos);
+unsigned struct_fbcursor_sz = sizeof(fbcursor);
+unsigned struct_fbgattr_sz = sizeof(fbgattr);
+unsigned struct_fbsattr_sz = sizeof(fbsattr);
+unsigned struct_fbtype_sz = sizeof(fbtype);
+unsigned struct_fdformat_cmd_sz = sizeof(fdformat_cmd);
+unsigned struct_fdformat_parms_sz = sizeof(fdformat_parms);
+unsigned struct_fifoq_conf_sz = sizeof(fifoq_conf);
+unsigned struct_fifoq_getstats_sz = sizeof(fifoq_getstats);
+unsigned struct_fifoq_interface_sz = sizeof(fifoq_interface);
+unsigned struct_format_op_sz = sizeof(format_op);
+unsigned struct_fss_get_sz = sizeof(fss_get);
+unsigned struct_fss_set_sz = sizeof(fss_set);
+unsigned struct_gpio_attach_sz = sizeof(gpio_attach);
+unsigned struct_gpio_info_sz = sizeof(gpio_info);
+unsigned struct_gpio_req_sz = sizeof(gpio_req);
+unsigned struct_gpio_set_sz = sizeof(gpio_set);
+unsigned struct_hfsc_add_class_sz = sizeof(hfsc_add_class);
+unsigned struct_hfsc_add_filter_sz = sizeof(hfsc_add_filter);
+unsigned struct_hfsc_attach_sz = sizeof(hfsc_attach);
+unsigned struct_hfsc_class_stats_sz = sizeof(hfsc_class_stats);
+unsigned struct_hfsc_delete_class_sz = sizeof(hfsc_delete_class);
+unsigned struct_hfsc_delete_filter_sz = sizeof(hfsc_delete_filter);
+unsigned struct_hfsc_interface_sz = sizeof(hfsc_interface);
+unsigned struct_hfsc_modify_class_sz = sizeof(hfsc_modify_class);
+unsigned struct_hpcfb_dsp_op_sz = sizeof(hpcfb_dsp_op);
+unsigned struct_hpcfb_dspconf_sz = sizeof(hpcfb_dspconf);
+unsigned struct_hpcfb_fbconf_sz = sizeof(hpcfb_fbconf);
+unsigned struct_if_addrprefreq_sz = sizeof(if_addrprefreq);
+unsigned struct_if_clonereq_sz = sizeof(if_clonereq);
+unsigned struct_if_laddrreq_sz = sizeof(if_laddrreq);
+unsigned struct_ifaddr_sz = sizeof(ifaddr);
+unsigned struct_ifaliasreq_sz = sizeof(ifaliasreq);
+unsigned struct_ifcapreq_sz = sizeof(ifcapreq);
+unsigned struct_ifconf_sz = sizeof(ifconf);
+unsigned struct_ifdatareq_sz = sizeof(ifdatareq);
+unsigned struct_ifdrv_sz = sizeof(ifdrv);
+unsigned struct_ifmediareq_sz = sizeof(ifmediareq);
+unsigned struct_ifpppcstatsreq_sz = sizeof(ifpppcstatsreq);
+unsigned struct_ifpppstatsreq_sz = sizeof(ifpppstatsreq);
+unsigned struct_ifreq_sz = sizeof(ifreq);
+unsigned struct_in6_addrpolicy_sz = sizeof(in6_addrpolicy);
+unsigned struct_in6_ndireq_sz = sizeof(in6_ndireq);
+unsigned struct_ioc_load_unload_sz = sizeof(ioc_load_unload);
+unsigned struct_ioc_patch_sz = sizeof(ioc_patch);
+unsigned struct_ioc_play_blocks_sz = sizeof(ioc_play_blocks);
+unsigned struct_ioc_play_msf_sz = sizeof(ioc_play_msf);
+unsigned struct_ioc_play_track_sz = sizeof(ioc_play_track);
+unsigned struct_ioc_read_subchannel_sz = sizeof(ioc_read_subchannel);
+unsigned struct_ioc_read_toc_entry_sz = sizeof(ioc_read_toc_entry);
+unsigned struct_ioc_toc_header_sz = sizeof(ioc_toc_header);
+unsigned struct_ioc_vol_sz = sizeof(ioc_vol);
+unsigned struct_ioctl_pt_sz = sizeof(ioctl_pt);
+unsigned struct_ioppt_sz = sizeof(ioppt);
+unsigned struct_iovec_sz = sizeof(iovec);
+unsigned struct_ipfobj_sz = sizeof(ipfobj);
+unsigned struct_irda_params_sz = sizeof(irda_params);
+unsigned struct_isp_fc_device_sz = sizeof(isp_fc_device);
+unsigned struct_isp_fc_tsk_mgmt_sz = sizeof(isp_fc_tsk_mgmt);
+unsigned struct_isp_hba_device_sz = sizeof(isp_hba_device);
+unsigned struct_isv_cmd_sz = sizeof(isv_cmd);
+unsigned struct_jobs_add_class_sz = sizeof(jobs_add_class);
+unsigned struct_jobs_add_filter_sz = sizeof(jobs_add_filter);
+unsigned struct_jobs_attach_sz = sizeof(jobs_attach);
+unsigned struct_jobs_class_stats_sz = sizeof(jobs_class_stats);
+unsigned struct_jobs_delete_class_sz = sizeof(jobs_delete_class);
+unsigned struct_jobs_delete_filter_sz = sizeof(jobs_delete_filter);
+unsigned struct_jobs_interface_sz = sizeof(jobs_interface);
+unsigned struct_jobs_modify_class_sz = sizeof(jobs_modify_class);
+unsigned struct_kbentry_sz = sizeof(kbentry);
+unsigned struct_kfilter_mapping_sz = sizeof(kfilter_mapping);
+unsigned struct_kiockeymap_sz = sizeof(kiockeymap);
+unsigned struct_ksyms_gsymbol_sz = sizeof(ksyms_gsymbol);
+unsigned struct_ksyms_gvalue_sz = sizeof(ksyms_gvalue);
+unsigned struct_ksyms_ogsymbol_sz = sizeof(ksyms_ogsymbol);
+unsigned struct_kttcp_io_args_sz = sizeof(kttcp_io_args);
+unsigned struct_ltchars_sz = sizeof(ltchars);
+unsigned struct_lua_create_sz = sizeof(struct lua_create);
+unsigned struct_lua_info_sz = sizeof(struct lua_info);
+unsigned struct_lua_load_sz = sizeof(struct lua_load);
+unsigned struct_lua_require_sz = sizeof(lua_require);
+unsigned struct_mbpp_param_sz = sizeof(mbpp_param);
+unsigned struct_md_conf_sz = sizeof(md_conf);
+unsigned struct_meteor_capframe_sz = sizeof(meteor_capframe);
+unsigned struct_meteor_counts_sz = sizeof(meteor_counts);
+unsigned struct_meteor_geomet_sz = sizeof(meteor_geomet);
+unsigned struct_meteor_pixfmt_sz = sizeof(meteor_pixfmt);
+unsigned struct_meteor_video_sz = sizeof(meteor_video);
+unsigned struct_mlx_cinfo_sz = sizeof(mlx_cinfo);
+unsigned struct_mlx_pause_sz = sizeof(mlx_pause);
+unsigned struct_mlx_rebuild_request_sz = sizeof(mlx_rebuild_request);
+unsigned struct_mlx_rebuild_status_sz = sizeof(mlx_rebuild_status);
+unsigned struct_mlx_usercommand_sz = sizeof(mlx_usercommand);
+unsigned struct_mly_user_command_sz = sizeof(mly_user_command);
+unsigned struct_mly_user_health_sz = sizeof(mly_user_health);
+unsigned struct_mtget_sz = sizeof(mtget);
+unsigned struct_mtop_sz = sizeof(mtop);
+unsigned struct_npf_ioctl_table_sz = sizeof(npf_ioctl_table);
+unsigned struct_npioctl_sz = sizeof(npioctl);
+unsigned struct_nvme_pt_command_sz = sizeof(nvme_pt_command);
+unsigned struct_ochanger_element_status_request_sz =
+ sizeof(ochanger_element_status_request);
+unsigned struct_ofiocdesc_sz = sizeof(ofiocdesc);
+unsigned struct_okiockey_sz = sizeof(okiockey);
+unsigned struct_ortentry_sz = sizeof(ortentry);
+unsigned struct_oscsi_addr_sz = sizeof(oscsi_addr);
+unsigned struct_oss_audioinfo_sz = sizeof(oss_audioinfo);
+unsigned struct_oss_sysinfo_sz = sizeof(oss_sysinfo);
+unsigned struct_pciio_bdf_cfgreg_sz = sizeof(pciio_bdf_cfgreg);
+unsigned struct_pciio_businfo_sz = sizeof(pciio_businfo);
+unsigned struct_pciio_cfgreg_sz = sizeof(pciio_cfgreg);
+unsigned struct_pciio_drvname_sz = sizeof(pciio_drvname);
+unsigned struct_pciio_drvnameonbus_sz = sizeof(pciio_drvnameonbus);
+unsigned struct_pcvtid_sz = sizeof(pcvtid);
+unsigned struct_pf_osfp_ioctl_sz = sizeof(pf_osfp_ioctl);
+unsigned struct_pf_status_sz = sizeof(pf_status);
+unsigned struct_pfioc_altq_sz = sizeof(pfioc_altq);
+unsigned struct_pfioc_if_sz = sizeof(pfioc_if);
+unsigned struct_pfioc_iface_sz = sizeof(pfioc_iface);
+unsigned struct_pfioc_limit_sz = sizeof(pfioc_limit);
+unsigned struct_pfioc_natlook_sz = sizeof(pfioc_natlook);
+unsigned struct_pfioc_pooladdr_sz = sizeof(pfioc_pooladdr);
+unsigned struct_pfioc_qstats_sz = sizeof(pfioc_qstats);
+unsigned struct_pfioc_rule_sz = sizeof(pfioc_rule);
+unsigned struct_pfioc_ruleset_sz = sizeof(pfioc_ruleset);
+unsigned struct_pfioc_src_node_kill_sz = sizeof(pfioc_src_node_kill);
+unsigned struct_pfioc_src_nodes_sz = sizeof(pfioc_src_nodes);
+unsigned struct_pfioc_state_kill_sz = sizeof(pfioc_state_kill);
+unsigned struct_pfioc_state_sz = sizeof(pfioc_state);
+unsigned struct_pfioc_states_sz = sizeof(pfioc_states);
+unsigned struct_pfioc_table_sz = sizeof(pfioc_table);
+unsigned struct_pfioc_tm_sz = sizeof(pfioc_tm);
+unsigned struct_pfioc_trans_sz = sizeof(pfioc_trans);
+unsigned struct_plistref_sz = sizeof(plistref);
+unsigned struct_power_type_sz = sizeof(power_type);
+unsigned struct_ppp_idle_sz = sizeof(ppp_idle);
+unsigned struct_ppp_option_data_sz = sizeof(ppp_option_data);
+unsigned struct_ppp_rawin_sz = sizeof(ppp_rawin);
+unsigned struct_pppoeconnectionstate_sz = sizeof(pppoeconnectionstate);
+unsigned struct_pppoediscparms_sz = sizeof(pppoediscparms);
+unsigned struct_priq_add_class_sz = sizeof(priq_add_class);
+unsigned struct_priq_add_filter_sz = sizeof(priq_add_filter);
+unsigned struct_priq_class_stats_sz = sizeof(priq_class_stats);
+unsigned struct_priq_delete_class_sz = sizeof(priq_delete_class);
+unsigned struct_priq_delete_filter_sz = sizeof(priq_delete_filter);
+unsigned struct_priq_interface_sz = sizeof(priq_interface);
+unsigned struct_priq_modify_class_sz = sizeof(priq_modify_class);
+unsigned struct_ptmget_sz = sizeof(ptmget);
+unsigned struct_radio_info_sz = sizeof(radio_info);
+unsigned struct_red_conf_sz = sizeof(red_conf);
+unsigned struct_red_interface_sz = sizeof(red_interface);
+unsigned struct_red_stats_sz = sizeof(red_stats);
+unsigned struct_redparams_sz = sizeof(redparams);
+unsigned struct_rf_pmparams_sz = sizeof(rf_pmparams);
+unsigned struct_rf_pmstat_sz = sizeof(rf_pmstat);
+unsigned struct_rf_recon_req_sz = sizeof(rf_recon_req);
+unsigned struct_rio_conf_sz = sizeof(rio_conf);
+unsigned struct_rio_interface_sz = sizeof(rio_interface);
+unsigned struct_rio_stats_sz = sizeof(rio_stats);
+unsigned struct_scan_io_sz = sizeof(scan_io);
+unsigned struct_scbusaccel_args_sz = sizeof(scbusaccel_args);
+unsigned struct_scbusiodetach_args_sz = sizeof(scbusiodetach_args);
+unsigned struct_scbusioscan_args_sz = sizeof(scbusioscan_args);
+unsigned struct_scsi_addr_sz = sizeof(scsi_addr);
+unsigned struct_seq_event_rec_sz = sizeof(seq_event_rec);
+unsigned struct_session_op_sz = sizeof(session_op);
+unsigned struct_sgttyb_sz = sizeof(sgttyb);
+unsigned struct_sioc_sg_req_sz = sizeof(sioc_sg_req);
+unsigned struct_sioc_vif_req_sz = sizeof(sioc_vif_req);
+unsigned struct_smbioc_flags_sz = sizeof(smbioc_flags);
+unsigned struct_smbioc_lookup_sz = sizeof(smbioc_lookup);
+unsigned struct_smbioc_oshare_sz = sizeof(smbioc_oshare);
+unsigned struct_smbioc_ossn_sz = sizeof(smbioc_ossn);
+unsigned struct_smbioc_rq_sz = sizeof(smbioc_rq);
+unsigned struct_smbioc_rw_sz = sizeof(smbioc_rw);
+unsigned struct_spppauthcfg_sz = sizeof(spppauthcfg);
+unsigned struct_spppauthfailuresettings_sz = sizeof(spppauthfailuresettings);
+unsigned struct_spppauthfailurestats_sz = sizeof(spppauthfailurestats);
+unsigned struct_spppdnsaddrs_sz = sizeof(spppdnsaddrs);
+unsigned struct_spppdnssettings_sz = sizeof(spppdnssettings);
+unsigned struct_spppidletimeout_sz = sizeof(spppidletimeout);
+unsigned struct_spppkeepalivesettings_sz = sizeof(spppkeepalivesettings);
+unsigned struct_sppplcpcfg_sz = sizeof(sppplcpcfg);
+unsigned struct_spppstatus_sz = sizeof(spppstatus);
+unsigned struct_spppstatusncp_sz = sizeof(spppstatusncp);
+unsigned struct_srt_rt_sz = sizeof(srt_rt);
+unsigned struct_stic_xinfo_sz = sizeof(stic_xinfo);
+unsigned struct_sun_dkctlr_sz = sizeof(sun_dkctlr);
+unsigned struct_sun_dkgeom_sz = sizeof(sun_dkgeom);
+unsigned struct_sun_dkpart_sz = sizeof(sun_dkpart);
+unsigned struct_synth_info_sz = sizeof(synth_info);
+unsigned struct_tbrreq_sz = sizeof(tbrreq);
+unsigned struct_tchars_sz = sizeof(tchars);
+unsigned struct_termios_sz = sizeof(termios);
+unsigned struct_timeval_sz = sizeof(timeval);
+unsigned struct_twe_drivecommand_sz = sizeof(twe_drivecommand);
+unsigned struct_twe_paramcommand_sz = sizeof(twe_paramcommand);
+unsigned struct_twe_usercommand_sz = sizeof(twe_usercommand);
+unsigned struct_ukyopon_identify_sz = sizeof(ukyopon_identify);
+unsigned struct_urio_command_sz = sizeof(urio_command);
+unsigned struct_usb_alt_interface_sz = sizeof(usb_alt_interface);
+unsigned struct_usb_bulk_ra_wb_opt_sz = sizeof(usb_bulk_ra_wb_opt);
+unsigned struct_usb_config_desc_sz = sizeof(usb_config_desc);
+unsigned struct_usb_ctl_report_desc_sz = sizeof(usb_ctl_report_desc);
+unsigned struct_usb_ctl_report_sz = sizeof(usb_ctl_report);
+unsigned struct_usb_ctl_request_sz = sizeof(usb_ctl_request);
+#if defined(__x86_64__)
+unsigned struct_nvmm_ioc_capability_sz = sizeof(nvmm_ioc_capability);
+unsigned struct_nvmm_ioc_machine_create_sz = sizeof(nvmm_ioc_machine_create);
+unsigned struct_nvmm_ioc_machine_destroy_sz = sizeof(nvmm_ioc_machine_destroy);
+unsigned struct_nvmm_ioc_machine_configure_sz =
+ sizeof(nvmm_ioc_machine_configure);
+unsigned struct_nvmm_ioc_vcpu_create_sz = sizeof(nvmm_ioc_vcpu_create);
+unsigned struct_nvmm_ioc_vcpu_destroy_sz = sizeof(nvmm_ioc_vcpu_destroy);
+unsigned struct_nvmm_ioc_vcpu_configure_sz = sizeof(nvmm_ioc_vcpu_configure);
+unsigned struct_nvmm_ioc_vcpu_setstate_sz = sizeof(nvmm_ioc_vcpu_destroy);
+unsigned struct_nvmm_ioc_vcpu_getstate_sz = sizeof(nvmm_ioc_vcpu_getstate);
+unsigned struct_nvmm_ioc_vcpu_inject_sz = sizeof(nvmm_ioc_vcpu_inject);
+unsigned struct_nvmm_ioc_vcpu_run_sz = sizeof(nvmm_ioc_vcpu_run);
+unsigned struct_nvmm_ioc_gpa_map_sz = sizeof(nvmm_ioc_gpa_map);
+unsigned struct_nvmm_ioc_gpa_unmap_sz = sizeof(nvmm_ioc_gpa_unmap);
+unsigned struct_nvmm_ioc_hva_map_sz = sizeof(nvmm_ioc_hva_map);
+unsigned struct_nvmm_ioc_hva_unmap_sz = sizeof(nvmm_ioc_hva_unmap);
+unsigned struct_nvmm_ioc_ctl_sz = sizeof(nvmm_ioc_ctl);
+#endif
+unsigned struct_spi_ioctl_configure_sz = sizeof(spi_ioctl_configure);
+unsigned struct_spi_ioctl_transfer_sz = sizeof(spi_ioctl_transfer);
+unsigned struct_autofs_daemon_request_sz = sizeof(autofs_daemon_request);
+unsigned struct_autofs_daemon_done_sz = sizeof(autofs_daemon_done);
+unsigned struct_sctp_connectx_addrs_sz = sizeof(sctp_connectx_addrs);
+unsigned struct_usb_device_info_old_sz = sizeof(usb_device_info_old);
+unsigned struct_usb_device_info_sz = sizeof(usb_device_info);
+unsigned struct_usb_device_stats_sz = sizeof(usb_device_stats);
+unsigned struct_usb_endpoint_desc_sz = sizeof(usb_endpoint_desc);
+unsigned struct_usb_full_desc_sz = sizeof(usb_full_desc);
+unsigned struct_usb_interface_desc_sz = sizeof(usb_interface_desc);
+unsigned struct_usb_string_desc_sz = sizeof(usb_string_desc);
+unsigned struct_utoppy_readfile_sz = sizeof(utoppy_readfile);
+unsigned struct_utoppy_rename_sz = sizeof(utoppy_rename);
+unsigned struct_utoppy_stats_sz = sizeof(utoppy_stats);
+unsigned struct_utoppy_writefile_sz = sizeof(utoppy_writefile);
+unsigned struct_v4l2_audio_sz = sizeof(v4l2_audio);
+unsigned struct_v4l2_audioout_sz = sizeof(v4l2_audioout);
+unsigned struct_v4l2_buffer_sz = sizeof(v4l2_buffer);
+unsigned struct_v4l2_capability_sz = sizeof(v4l2_capability);
+unsigned struct_v4l2_control_sz = sizeof(v4l2_control);
+unsigned struct_v4l2_crop_sz = sizeof(v4l2_crop);
+unsigned struct_v4l2_cropcap_sz = sizeof(v4l2_cropcap);
+unsigned struct_v4l2_fmtdesc_sz = sizeof(v4l2_fmtdesc);
+unsigned struct_v4l2_format_sz = sizeof(v4l2_format);
+unsigned struct_v4l2_framebuffer_sz = sizeof(v4l2_framebuffer);
+unsigned struct_v4l2_frequency_sz = sizeof(v4l2_frequency);
+unsigned struct_v4l2_frmivalenum_sz = sizeof(v4l2_frmivalenum);
+unsigned struct_v4l2_frmsizeenum_sz = sizeof(v4l2_frmsizeenum);
+unsigned struct_v4l2_input_sz = sizeof(v4l2_input);
+unsigned struct_v4l2_jpegcompression_sz = sizeof(v4l2_jpegcompression);
+unsigned struct_v4l2_modulator_sz = sizeof(v4l2_modulator);
+unsigned struct_v4l2_output_sz = sizeof(v4l2_output);
+unsigned struct_v4l2_queryctrl_sz = sizeof(v4l2_queryctrl);
+unsigned struct_v4l2_querymenu_sz = sizeof(v4l2_querymenu);
+unsigned struct_v4l2_requestbuffers_sz = sizeof(v4l2_requestbuffers);
+unsigned struct_v4l2_standard_sz = sizeof(v4l2_standard);
+unsigned struct_v4l2_streamparm_sz = sizeof(v4l2_streamparm);
+unsigned struct_v4l2_tuner_sz = sizeof(v4l2_tuner);
+unsigned struct_vnd_ioctl_sz = sizeof(vnd_ioctl);
+unsigned struct_vnd_user_sz = sizeof(vnd_user);
+unsigned struct_vt_stat_sz = sizeof(vt_stat);
+unsigned struct_wdog_conf_sz = sizeof(wdog_conf);
+unsigned struct_wdog_mode_sz = sizeof(wdog_mode);
+unsigned struct_ipmi_recv_sz = sizeof(ipmi_recv);
+unsigned struct_ipmi_req_sz = sizeof(ipmi_req);
+unsigned struct_ipmi_cmdspec_sz = sizeof(ipmi_cmdspec);
+unsigned struct_wfq_conf_sz = sizeof(wfq_conf);
+unsigned struct_wfq_getqid_sz = sizeof(wfq_getqid);
+unsigned struct_wfq_getstats_sz = sizeof(wfq_getstats);
+unsigned struct_wfq_interface_sz = sizeof(wfq_interface);
+unsigned struct_wfq_setweight_sz = sizeof(wfq_setweight);
+unsigned struct_winsize_sz = sizeof(winsize);
+unsigned struct_wscons_event_sz = sizeof(wscons_event);
+unsigned struct_wsdisplay_addscreendata_sz = sizeof(wsdisplay_addscreendata);
+unsigned struct_wsdisplay_char_sz = sizeof(wsdisplay_char);
+unsigned struct_wsdisplay_cmap_sz = sizeof(wsdisplay_cmap);
+unsigned struct_wsdisplay_curpos_sz = sizeof(wsdisplay_curpos);
+unsigned struct_wsdisplay_cursor_sz = sizeof(wsdisplay_cursor);
+unsigned struct_wsdisplay_delscreendata_sz = sizeof(wsdisplay_delscreendata);
+unsigned struct_wsdisplay_fbinfo_sz = sizeof(wsdisplay_fbinfo);
+unsigned struct_wsdisplay_font_sz = sizeof(wsdisplay_font);
+unsigned struct_wsdisplay_kbddata_sz = sizeof(wsdisplay_kbddata);
+unsigned struct_wsdisplay_msgattrs_sz = sizeof(wsdisplay_msgattrs);
+unsigned struct_wsdisplay_param_sz = sizeof(wsdisplay_param);
+unsigned struct_wsdisplay_scroll_data_sz = sizeof(wsdisplay_scroll_data);
+unsigned struct_wsdisplay_usefontdata_sz = sizeof(wsdisplay_usefontdata);
+unsigned struct_wsdisplayio_blit_sz = sizeof(wsdisplayio_blit);
+unsigned struct_wsdisplayio_bus_id_sz = sizeof(wsdisplayio_bus_id);
+unsigned struct_wsdisplayio_edid_info_sz = sizeof(wsdisplayio_edid_info);
+unsigned struct_wsdisplayio_fbinfo_sz = sizeof(wsdisplayio_fbinfo);
+unsigned struct_wskbd_bell_data_sz = sizeof(wskbd_bell_data);
+unsigned struct_wskbd_keyrepeat_data_sz = sizeof(wskbd_keyrepeat_data);
+unsigned struct_wskbd_map_data_sz = sizeof(wskbd_map_data);
+unsigned struct_wskbd_scroll_data_sz = sizeof(wskbd_scroll_data);
+unsigned struct_wsmouse_calibcoords_sz = sizeof(wsmouse_calibcoords);
+unsigned struct_wsmouse_id_sz = sizeof(wsmouse_id);
+unsigned struct_wsmouse_repeat_sz = sizeof(wsmouse_repeat);
+unsigned struct_wsmux_device_list_sz = sizeof(wsmux_device_list);
+unsigned struct_wsmux_device_sz = sizeof(wsmux_device);
+unsigned struct_xd_iocmd_sz = sizeof(xd_iocmd);
+
+unsigned struct_scsireq_sz = sizeof(struct scsireq);
+unsigned struct_tone_sz = sizeof(tone_t);
+unsigned union_twe_statrequest_sz = sizeof(union twe_statrequest);
+unsigned struct_usb_device_descriptor_sz = sizeof(usb_device_descriptor_t);
+unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
+unsigned struct__old_mixer_info_sz = sizeof(struct _old_mixer_info);
+unsigned struct__agp_allocate_sz = sizeof(struct _agp_allocate);
+unsigned struct__agp_bind_sz = sizeof(struct _agp_bind);
+unsigned struct__agp_info_sz = sizeof(struct _agp_info);
+unsigned struct__agp_setup_sz = sizeof(struct _agp_setup);
+unsigned struct__agp_unbind_sz = sizeof(struct _agp_unbind);
+unsigned struct_atareq_sz = sizeof(struct atareq);
+unsigned struct_cpustate_sz = sizeof(struct cpustate);
+unsigned struct_dmx_caps_sz = sizeof(struct dmx_caps);
+unsigned enum_dmx_source_sz = sizeof(dmx_source_t);
+unsigned union_dvd_authinfo_sz = sizeof(dvd_authinfo);
+unsigned union_dvd_struct_sz = sizeof(dvd_struct);
+unsigned enum_v4l2_priority_sz = sizeof(enum v4l2_priority);
+unsigned struct_envsys_basic_info_sz = sizeof(struct envsys_basic_info);
+unsigned struct_envsys_tre_data_sz = sizeof(struct envsys_tre_data);
+unsigned enum_fe_sec_mini_cmd_sz = sizeof(enum fe_sec_mini_cmd);
+unsigned enum_fe_sec_tone_mode_sz = sizeof(enum fe_sec_tone_mode);
+unsigned enum_fe_sec_voltage_sz = sizeof(enum fe_sec_voltage);
+unsigned enum_fe_status_sz = sizeof(enum fe_status);
+unsigned struct_gdt_ctrt_sz = sizeof(struct gdt_ctrt);
+unsigned struct_gdt_event_sz = sizeof(struct gdt_event);
+unsigned struct_gdt_osv_sz = sizeof(struct gdt_osv);
+unsigned struct_gdt_rescan_sz = sizeof(struct gdt_rescan);
+unsigned struct_gdt_statist_sz = sizeof(struct gdt_statist);
+unsigned struct_gdt_ucmd_sz = sizeof(struct gdt_ucmd);
+unsigned struct_iscsi_conn_status_parameters_sz =
+ sizeof(iscsi_conn_status_parameters_t);
+unsigned struct_iscsi_get_version_parameters_sz =
+ sizeof(iscsi_get_version_parameters_t);
+unsigned struct_iscsi_iocommand_parameters_sz =
+ sizeof(iscsi_iocommand_parameters_t);
+unsigned struct_iscsi_login_parameters_sz = sizeof(iscsi_login_parameters_t);
+unsigned struct_iscsi_logout_parameters_sz = sizeof(iscsi_logout_parameters_t);
+unsigned struct_iscsi_register_event_parameters_sz =
+ sizeof(iscsi_register_event_parameters_t);
+unsigned struct_iscsi_remove_parameters_sz = sizeof(iscsi_remove_parameters_t);
+unsigned struct_iscsi_send_targets_parameters_sz =
+ sizeof(iscsi_send_targets_parameters_t);
+unsigned struct_iscsi_set_node_name_parameters_sz =
+ sizeof(iscsi_set_node_name_parameters_t);
+unsigned struct_iscsi_wait_event_parameters_sz =
+ sizeof(iscsi_wait_event_parameters_t);
+unsigned struct_isp_stats_sz = sizeof(isp_stats_t);
+unsigned struct_lsenable_sz = sizeof(struct lsenable);
+unsigned struct_lsdisable_sz = sizeof(struct lsdisable);
+unsigned struct_audio_format_query_sz = sizeof(audio_format_query);
+unsigned struct_mixer_ctrl_sz = sizeof(struct mixer_ctrl);
+unsigned struct_mixer_devinfo_sz = sizeof(struct mixer_devinfo);
+unsigned struct_mpu_command_rec_sz = sizeof(mpu_command_rec);
+unsigned struct_rndstat_sz = sizeof(rndstat_t);
+unsigned struct_rndstat_name_sz = sizeof(rndstat_name_t);
+unsigned struct_rndctl_sz = sizeof(rndctl_t);
+unsigned struct_rnddata_sz = sizeof(rnddata_t);
+unsigned struct_rndpoolstat_sz = sizeof(rndpoolstat_t);
+unsigned struct_rndstat_est_sz = sizeof(rndstat_est_t);
+unsigned struct_rndstat_est_name_sz = sizeof(rndstat_est_name_t);
+unsigned struct_pps_params_sz = sizeof(pps_params_t);
+unsigned struct_pps_info_sz = sizeof(pps_info_t);
+unsigned struct_mixer_info_sz = sizeof(struct mixer_info);
+unsigned struct_RF_SparetWait_sz = sizeof(RF_SparetWait_t);
+unsigned struct_RF_ComponentLabel_sz = sizeof(RF_ComponentLabel_t);
+unsigned struct_RF_SingleComponent_sz = sizeof(RF_SingleComponent_t);
+unsigned struct_RF_ProgressInfo_sz = sizeof(RF_ProgressInfo_t);
+unsigned struct_nvlist_ref_sz = sizeof(struct __sanitizer_nvlist_ref_t);
+unsigned struct_StringList_sz = sizeof(StringList);
+
+const unsigned IOCTL_NOT_PRESENT = 0;
+
+unsigned IOCTL_AFM_ADDFMAP = AFM_ADDFMAP;
+unsigned IOCTL_AFM_DELFMAP = AFM_DELFMAP;
+unsigned IOCTL_AFM_CLEANFMAP = AFM_CLEANFMAP;
+unsigned IOCTL_AFM_GETFMAP = AFM_GETFMAP;
+unsigned IOCTL_ALTQGTYPE = ALTQGTYPE;
+unsigned IOCTL_ALTQTBRSET = ALTQTBRSET;
+unsigned IOCTL_ALTQTBRGET = ALTQTBRGET;
+unsigned IOCTL_BLUE_IF_ATTACH = BLUE_IF_ATTACH;
+unsigned IOCTL_BLUE_IF_DETACH = BLUE_IF_DETACH;
+unsigned IOCTL_BLUE_ENABLE = BLUE_ENABLE;
+unsigned IOCTL_BLUE_DISABLE = BLUE_DISABLE;
+unsigned IOCTL_BLUE_CONFIG = BLUE_CONFIG;
+unsigned IOCTL_BLUE_GETSTATS = BLUE_GETSTATS;
+unsigned IOCTL_CBQ_IF_ATTACH = CBQ_IF_ATTACH;
+unsigned IOCTL_CBQ_IF_DETACH = CBQ_IF_DETACH;
+unsigned IOCTL_CBQ_ENABLE = CBQ_ENABLE;
+unsigned IOCTL_CBQ_DISABLE = CBQ_DISABLE;
+unsigned IOCTL_CBQ_CLEAR_HIERARCHY = CBQ_CLEAR_HIERARCHY;
+unsigned IOCTL_CBQ_ADD_CLASS = CBQ_ADD_CLASS;
+unsigned IOCTL_CBQ_DEL_CLASS = CBQ_DEL_CLASS;
+unsigned IOCTL_CBQ_MODIFY_CLASS = CBQ_MODIFY_CLASS;
+unsigned IOCTL_CBQ_ADD_FILTER = CBQ_ADD_FILTER;
+unsigned IOCTL_CBQ_DEL_FILTER = CBQ_DEL_FILTER;
+unsigned IOCTL_CBQ_GETSTATS = CBQ_GETSTATS;
+unsigned IOCTL_CDNR_IF_ATTACH = CDNR_IF_ATTACH;
+unsigned IOCTL_CDNR_IF_DETACH = CDNR_IF_DETACH;
+unsigned IOCTL_CDNR_ENABLE = CDNR_ENABLE;
+unsigned IOCTL_CDNR_DISABLE = CDNR_DISABLE;
+unsigned IOCTL_CDNR_ADD_FILTER = CDNR_ADD_FILTER;
+unsigned IOCTL_CDNR_DEL_FILTER = CDNR_DEL_FILTER;
+unsigned IOCTL_CDNR_GETSTATS = CDNR_GETSTATS;
+unsigned IOCTL_CDNR_ADD_ELEM = CDNR_ADD_ELEM;
+unsigned IOCTL_CDNR_DEL_ELEM = CDNR_DEL_ELEM;
+unsigned IOCTL_CDNR_ADD_TBM = CDNR_ADD_TBM;
+unsigned IOCTL_CDNR_MOD_TBM = CDNR_MOD_TBM;
+unsigned IOCTL_CDNR_TBM_STATS = CDNR_TBM_STATS;
+unsigned IOCTL_CDNR_ADD_TCM = CDNR_ADD_TCM;
+unsigned IOCTL_CDNR_MOD_TCM = CDNR_MOD_TCM;
+unsigned IOCTL_CDNR_TCM_STATS = CDNR_TCM_STATS;
+unsigned IOCTL_CDNR_ADD_TSW = CDNR_ADD_TSW;
+unsigned IOCTL_CDNR_MOD_TSW = CDNR_MOD_TSW;
+unsigned IOCTL_FIFOQ_IF_ATTACH = FIFOQ_IF_ATTACH;
+unsigned IOCTL_FIFOQ_IF_DETACH = FIFOQ_IF_DETACH;
+unsigned IOCTL_FIFOQ_ENABLE = FIFOQ_ENABLE;
+unsigned IOCTL_FIFOQ_DISABLE = FIFOQ_DISABLE;
+unsigned IOCTL_FIFOQ_CONFIG = FIFOQ_CONFIG;
+unsigned IOCTL_FIFOQ_GETSTATS = FIFOQ_GETSTATS;
+unsigned IOCTL_HFSC_IF_ATTACH = HFSC_IF_ATTACH;
+unsigned IOCTL_HFSC_IF_DETACH = HFSC_IF_DETACH;
+unsigned IOCTL_HFSC_ENABLE = HFSC_ENABLE;
+unsigned IOCTL_HFSC_DISABLE = HFSC_DISABLE;
+unsigned IOCTL_HFSC_CLEAR_HIERARCHY = HFSC_CLEAR_HIERARCHY;
+unsigned IOCTL_HFSC_ADD_CLASS = HFSC_ADD_CLASS;
+unsigned IOCTL_HFSC_DEL_CLASS = HFSC_DEL_CLASS;
+unsigned IOCTL_HFSC_MOD_CLASS = HFSC_MOD_CLASS;
+unsigned IOCTL_HFSC_ADD_FILTER = HFSC_ADD_FILTER;
+unsigned IOCTL_HFSC_DEL_FILTER = HFSC_DEL_FILTER;
+unsigned IOCTL_HFSC_GETSTATS = HFSC_GETSTATS;
+unsigned IOCTL_JOBS_IF_ATTACH = JOBS_IF_ATTACH;
+unsigned IOCTL_JOBS_IF_DETACH = JOBS_IF_DETACH;
+unsigned IOCTL_JOBS_ENABLE = JOBS_ENABLE;
+unsigned IOCTL_JOBS_DISABLE = JOBS_DISABLE;
+unsigned IOCTL_JOBS_CLEAR = JOBS_CLEAR;
+unsigned IOCTL_JOBS_ADD_CLASS = JOBS_ADD_CLASS;
+unsigned IOCTL_JOBS_DEL_CLASS = JOBS_DEL_CLASS;
+unsigned IOCTL_JOBS_MOD_CLASS = JOBS_MOD_CLASS;
+unsigned IOCTL_JOBS_ADD_FILTER = JOBS_ADD_FILTER;
+unsigned IOCTL_JOBS_DEL_FILTER = JOBS_DEL_FILTER;
+unsigned IOCTL_JOBS_GETSTATS = JOBS_GETSTATS;
+unsigned IOCTL_PRIQ_IF_ATTACH = PRIQ_IF_ATTACH;
+unsigned IOCTL_PRIQ_IF_DETACH = PRIQ_IF_DETACH;
+unsigned IOCTL_PRIQ_ENABLE = PRIQ_ENABLE;
+unsigned IOCTL_PRIQ_DISABLE = PRIQ_DISABLE;
+unsigned IOCTL_PRIQ_CLEAR = PRIQ_CLEAR;
+unsigned IOCTL_PRIQ_ADD_CLASS = PRIQ_ADD_CLASS;
+unsigned IOCTL_PRIQ_DEL_CLASS = PRIQ_DEL_CLASS;
+unsigned IOCTL_PRIQ_MOD_CLASS = PRIQ_MOD_CLASS;
+unsigned IOCTL_PRIQ_ADD_FILTER = PRIQ_ADD_FILTER;
+unsigned IOCTL_PRIQ_DEL_FILTER = PRIQ_DEL_FILTER;
+unsigned IOCTL_PRIQ_GETSTATS = PRIQ_GETSTATS;
+unsigned IOCTL_RED_IF_ATTACH = RED_IF_ATTACH;
+unsigned IOCTL_RED_IF_DETACH = RED_IF_DETACH;
+unsigned IOCTL_RED_ENABLE = RED_ENABLE;
+unsigned IOCTL_RED_DISABLE = RED_DISABLE;
+unsigned IOCTL_RED_CONFIG = RED_CONFIG;
+unsigned IOCTL_RED_GETSTATS = RED_GETSTATS;
+unsigned IOCTL_RED_SETDEFAULTS = RED_SETDEFAULTS;
+unsigned IOCTL_RIO_IF_ATTACH = RIO_IF_ATTACH;
+unsigned IOCTL_RIO_IF_DETACH = RIO_IF_DETACH;
+unsigned IOCTL_RIO_ENABLE = RIO_ENABLE;
+unsigned IOCTL_RIO_DISABLE = RIO_DISABLE;
+unsigned IOCTL_RIO_CONFIG = RIO_CONFIG;
+unsigned IOCTL_RIO_GETSTATS = RIO_GETSTATS;
+unsigned IOCTL_RIO_SETDEFAULTS = RIO_SETDEFAULTS;
+unsigned IOCTL_WFQ_IF_ATTACH = WFQ_IF_ATTACH;
+unsigned IOCTL_WFQ_IF_DETACH = WFQ_IF_DETACH;
+unsigned IOCTL_WFQ_ENABLE = WFQ_ENABLE;
+unsigned IOCTL_WFQ_DISABLE = WFQ_DISABLE;
+unsigned IOCTL_WFQ_CONFIG = WFQ_CONFIG;
+unsigned IOCTL_WFQ_GET_STATS = WFQ_GET_STATS;
+unsigned IOCTL_WFQ_GET_QID = WFQ_GET_QID;
+unsigned IOCTL_WFQ_SET_WEIGHT = WFQ_SET_WEIGHT;
+unsigned IOCTL_CRIOGET = CRIOGET;
+unsigned IOCTL_CIOCFSESSION = CIOCFSESSION;
+unsigned IOCTL_CIOCKEY = CIOCKEY;
+unsigned IOCTL_CIOCNFKEYM = CIOCNFKEYM;
+unsigned IOCTL_CIOCNFSESSION = CIOCNFSESSION;
+unsigned IOCTL_CIOCNCRYPTRETM = CIOCNCRYPTRETM;
+unsigned IOCTL_CIOCNCRYPTRET = CIOCNCRYPTRET;
+unsigned IOCTL_CIOCGSESSION = CIOCGSESSION;
+unsigned IOCTL_CIOCNGSESSION = CIOCNGSESSION;
+unsigned IOCTL_CIOCCRYPT = CIOCCRYPT;
+unsigned IOCTL_CIOCNCRYPTM = CIOCNCRYPTM;
+unsigned IOCTL_CIOCASYMFEAT = CIOCASYMFEAT;
+unsigned IOCTL_APM_IOC_REJECT = APM_IOC_REJECT;
+unsigned IOCTL_APM_IOC_STANDBY = APM_IOC_STANDBY;
+unsigned IOCTL_APM_IOC_SUSPEND = APM_IOC_SUSPEND;
+unsigned IOCTL_OAPM_IOC_GETPOWER = OAPM_IOC_GETPOWER;
+unsigned IOCTL_APM_IOC_GETPOWER = APM_IOC_GETPOWER;
+unsigned IOCTL_APM_IOC_NEXTEVENT = APM_IOC_NEXTEVENT;
+unsigned IOCTL_APM_IOC_DEV_CTL = APM_IOC_DEV_CTL;
+unsigned IOCTL_NETBSD_DM_IOCTL = NETBSD_DM_IOCTL;
+unsigned IOCTL_DMIO_SETFUNC = DMIO_SETFUNC;
+unsigned IOCTL_DMX_START = DMX_START;
+unsigned IOCTL_DMX_STOP = DMX_STOP;
+unsigned IOCTL_DMX_SET_FILTER = DMX_SET_FILTER;
+unsigned IOCTL_DMX_SET_PES_FILTER = DMX_SET_PES_FILTER;
+unsigned IOCTL_DMX_SET_BUFFER_SIZE = DMX_SET_BUFFER_SIZE;
+unsigned IOCTL_DMX_GET_STC = DMX_GET_STC;
+unsigned IOCTL_DMX_ADD_PID = DMX_ADD_PID;
+unsigned IOCTL_DMX_REMOVE_PID = DMX_REMOVE_PID;
+unsigned IOCTL_DMX_GET_CAPS = DMX_GET_CAPS;
+unsigned IOCTL_DMX_SET_SOURCE = DMX_SET_SOURCE;
+unsigned IOCTL_FE_READ_STATUS = FE_READ_STATUS;
+unsigned IOCTL_FE_READ_BER = FE_READ_BER;
+unsigned IOCTL_FE_READ_SNR = FE_READ_SNR;
+unsigned IOCTL_FE_READ_SIGNAL_STRENGTH = FE_READ_SIGNAL_STRENGTH;
+unsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS = FE_READ_UNCORRECTED_BLOCKS;
+unsigned IOCTL_FE_SET_FRONTEND = FE_SET_FRONTEND;
+unsigned IOCTL_FE_GET_FRONTEND = FE_GET_FRONTEND;
+unsigned IOCTL_FE_GET_EVENT = FE_GET_EVENT;
+unsigned IOCTL_FE_GET_INFO = FE_GET_INFO;
+unsigned IOCTL_FE_DISEQC_RESET_OVERLOAD = FE_DISEQC_RESET_OVERLOAD;
+unsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD = FE_DISEQC_SEND_MASTER_CMD;
+unsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY = FE_DISEQC_RECV_SLAVE_REPLY;
+unsigned IOCTL_FE_DISEQC_SEND_BURST = FE_DISEQC_SEND_BURST;
+unsigned IOCTL_FE_SET_TONE = FE_SET_TONE;
+unsigned IOCTL_FE_SET_VOLTAGE = FE_SET_VOLTAGE;
+unsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE = FE_ENABLE_HIGH_LNB_VOLTAGE;
+unsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE = FE_SET_FRONTEND_TUNE_MODE;
+unsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD = FE_DISHNETWORK_SEND_LEGACY_CMD;
+unsigned IOCTL_FILEMON_SET_FD = FILEMON_SET_FD;
+unsigned IOCTL_FILEMON_SET_PID = FILEMON_SET_PID;
+unsigned IOCTL_HDAUDIO_FGRP_INFO = HDAUDIO_FGRP_INFO;
+unsigned IOCTL_HDAUDIO_FGRP_GETCONFIG = HDAUDIO_FGRP_GETCONFIG;
+unsigned IOCTL_HDAUDIO_FGRP_SETCONFIG = HDAUDIO_FGRP_SETCONFIG;
+unsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO = HDAUDIO_FGRP_WIDGET_INFO;
+unsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO = HDAUDIO_FGRP_CODEC_INFO;
+unsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO = HDAUDIO_AFG_WIDGET_INFO;
+unsigned IOCTL_HDAUDIO_AFG_CODEC_INFO = HDAUDIO_AFG_CODEC_INFO;
+unsigned IOCTL_CEC_GET_PHYS_ADDR = CEC_GET_PHYS_ADDR;
+unsigned IOCTL_CEC_GET_LOG_ADDRS = CEC_GET_LOG_ADDRS;
+unsigned IOCTL_CEC_SET_LOG_ADDRS = CEC_SET_LOG_ADDRS;
+unsigned IOCTL_CEC_GET_VENDOR_ID = CEC_GET_VENDOR_ID;
+unsigned IOCTL_HPCFBIO_GCONF = HPCFBIO_GCONF;
+unsigned IOCTL_HPCFBIO_SCONF = HPCFBIO_SCONF;
+unsigned IOCTL_HPCFBIO_GDSPCONF = HPCFBIO_GDSPCONF;
+unsigned IOCTL_HPCFBIO_SDSPCONF = HPCFBIO_SDSPCONF;
+unsigned IOCTL_HPCFBIO_GOP = HPCFBIO_GOP;
+unsigned IOCTL_HPCFBIO_SOP = HPCFBIO_SOP;
+unsigned IOCTL_IOPIOCPT = IOPIOCPT;
+unsigned IOCTL_IOPIOCGLCT = IOPIOCGLCT;
+unsigned IOCTL_IOPIOCGSTATUS = IOPIOCGSTATUS;
+unsigned IOCTL_IOPIOCRECONFIG = IOPIOCRECONFIG;
+unsigned IOCTL_IOPIOCGTIDMAP = IOPIOCGTIDMAP;
+unsigned IOCTL_SIOCGATHSTATS = SIOCGATHSTATS;
+unsigned IOCTL_SIOCGATHDIAG = SIOCGATHDIAG;
+unsigned IOCTL_METEORCAPTUR = METEORCAPTUR;
+unsigned IOCTL_METEORCAPFRM = METEORCAPFRM;
+unsigned IOCTL_METEORSETGEO = METEORSETGEO;
+unsigned IOCTL_METEORGETGEO = METEORGETGEO;
+unsigned IOCTL_METEORSTATUS = METEORSTATUS;
+unsigned IOCTL_METEORSHUE = METEORSHUE;
+unsigned IOCTL_METEORGHUE = METEORGHUE;
+unsigned IOCTL_METEORSFMT = METEORSFMT;
+unsigned IOCTL_METEORGFMT = METEORGFMT;
+unsigned IOCTL_METEORSINPUT = METEORSINPUT;
+unsigned IOCTL_METEORGINPUT = METEORGINPUT;
+unsigned IOCTL_METEORSCHCV = METEORSCHCV;
+unsigned IOCTL_METEORGCHCV = METEORGCHCV;
+unsigned IOCTL_METEORSCOUNT = METEORSCOUNT;
+unsigned IOCTL_METEORGCOUNT = METEORGCOUNT;
+unsigned IOCTL_METEORSFPS = METEORSFPS;
+unsigned IOCTL_METEORGFPS = METEORGFPS;
+unsigned IOCTL_METEORSSIGNAL = METEORSSIGNAL;
+unsigned IOCTL_METEORGSIGNAL = METEORGSIGNAL;
+unsigned IOCTL_METEORSVIDEO = METEORSVIDEO;
+unsigned IOCTL_METEORGVIDEO = METEORGVIDEO;
+unsigned IOCTL_METEORSBRIG = METEORSBRIG;
+unsigned IOCTL_METEORGBRIG = METEORGBRIG;
+unsigned IOCTL_METEORSCSAT = METEORSCSAT;
+unsigned IOCTL_METEORGCSAT = METEORGCSAT;
+unsigned IOCTL_METEORSCONT = METEORSCONT;
+unsigned IOCTL_METEORGCONT = METEORGCONT;
+unsigned IOCTL_METEORSHWS = METEORSHWS;
+unsigned IOCTL_METEORGHWS = METEORGHWS;
+unsigned IOCTL_METEORSVWS = METEORSVWS;
+unsigned IOCTL_METEORGVWS = METEORGVWS;
+unsigned IOCTL_METEORSTS = METEORSTS;
+unsigned IOCTL_METEORGTS = METEORGTS;
+unsigned IOCTL_TVTUNER_SETCHNL = TVTUNER_SETCHNL;
+unsigned IOCTL_TVTUNER_GETCHNL = TVTUNER_GETCHNL;
+unsigned IOCTL_TVTUNER_SETTYPE = TVTUNER_SETTYPE;
+unsigned IOCTL_TVTUNER_GETTYPE = TVTUNER_GETTYPE;
+unsigned IOCTL_TVTUNER_GETSTATUS = TVTUNER_GETSTATUS;
+unsigned IOCTL_TVTUNER_SETFREQ = TVTUNER_SETFREQ;
+unsigned IOCTL_TVTUNER_GETFREQ = TVTUNER_GETFREQ;
+unsigned IOCTL_TVTUNER_SETAFC = TVTUNER_SETAFC;
+unsigned IOCTL_TVTUNER_GETAFC = TVTUNER_GETAFC;
+unsigned IOCTL_RADIO_SETMODE = RADIO_SETMODE;
+unsigned IOCTL_RADIO_GETMODE = RADIO_GETMODE;
+unsigned IOCTL_RADIO_SETFREQ = RADIO_SETFREQ;
+unsigned IOCTL_RADIO_GETFREQ = RADIO_GETFREQ;
+unsigned IOCTL_METEORSACTPIXFMT = METEORSACTPIXFMT;
+unsigned IOCTL_METEORGACTPIXFMT = METEORGACTPIXFMT;
+unsigned IOCTL_METEORGSUPPIXFMT = METEORGSUPPIXFMT;
+unsigned IOCTL_TVTUNER_GETCHNLSET = TVTUNER_GETCHNLSET;
+unsigned IOCTL_REMOTE_GETKEY = REMOTE_GETKEY;
+unsigned IOCTL_GDT_IOCTL_GENERAL = GDT_IOCTL_GENERAL;
+unsigned IOCTL_GDT_IOCTL_DRVERS = GDT_IOCTL_DRVERS;
+unsigned IOCTL_GDT_IOCTL_CTRTYPE = GDT_IOCTL_CTRTYPE;
+unsigned IOCTL_GDT_IOCTL_OSVERS = GDT_IOCTL_OSVERS;
+unsigned IOCTL_GDT_IOCTL_CTRCNT = GDT_IOCTL_CTRCNT;
+unsigned IOCTL_GDT_IOCTL_EVENT = GDT_IOCTL_EVENT;
+unsigned IOCTL_GDT_IOCTL_STATIST = GDT_IOCTL_STATIST;
+unsigned IOCTL_GDT_IOCTL_RESCAN = GDT_IOCTL_RESCAN;
+unsigned IOCTL_ISP_SDBLEV = ISP_SDBLEV;
+unsigned IOCTL_ISP_RESETHBA = ISP_RESETHBA;
+unsigned IOCTL_ISP_RESCAN = ISP_RESCAN;
+unsigned IOCTL_ISP_SETROLE = ISP_SETROLE;
+unsigned IOCTL_ISP_GETROLE = ISP_GETROLE;
+unsigned IOCTL_ISP_GET_STATS = ISP_GET_STATS;
+unsigned IOCTL_ISP_CLR_STATS = ISP_CLR_STATS;
+unsigned IOCTL_ISP_FC_LIP = ISP_FC_LIP;
+unsigned IOCTL_ISP_FC_GETDINFO = ISP_FC_GETDINFO;
+unsigned IOCTL_ISP_GET_FW_CRASH_DUMP = ISP_GET_FW_CRASH_DUMP;
+unsigned IOCTL_ISP_FORCE_CRASH_DUMP = ISP_FORCE_CRASH_DUMP;
+unsigned IOCTL_ISP_FC_GETHINFO = ISP_FC_GETHINFO;
+unsigned IOCTL_ISP_TSK_MGMT = ISP_TSK_MGMT;
+unsigned IOCTL_ISP_FC_GETDLIST = ISP_FC_GETDLIST;
+unsigned IOCTL_MLXD_STATUS = MLXD_STATUS;
+unsigned IOCTL_MLXD_CHECKASYNC = MLXD_CHECKASYNC;
+unsigned IOCTL_MLXD_DETACH = MLXD_DETACH;
+unsigned IOCTL_MLX_RESCAN_DRIVES = MLX_RESCAN_DRIVES;
+unsigned IOCTL_MLX_PAUSE_CHANNEL = MLX_PAUSE_CHANNEL;
+unsigned IOCTL_MLX_COMMAND = MLX_COMMAND;
+unsigned IOCTL_MLX_REBUILDASYNC = MLX_REBUILDASYNC;
+unsigned IOCTL_MLX_REBUILDSTAT = MLX_REBUILDSTAT;
+unsigned IOCTL_MLX_GET_SYSDRIVE = MLX_GET_SYSDRIVE;
+unsigned IOCTL_MLX_GET_CINFO = MLX_GET_CINFO;
+unsigned IOCTL_NVME_PASSTHROUGH_CMD = NVME_PASSTHROUGH_CMD;
+unsigned IOCTL_FWCFGIO_SET_INDEX = FWCFGIO_SET_INDEX;
+unsigned IOCTL_IRDA_RESET_PARAMS = IRDA_RESET_PARAMS;
+unsigned IOCTL_IRDA_SET_PARAMS = IRDA_SET_PARAMS;
+unsigned IOCTL_IRDA_GET_SPEEDMASK = IRDA_GET_SPEEDMASK;
+unsigned IOCTL_IRDA_GET_TURNAROUNDMASK = IRDA_GET_TURNAROUNDMASK;
+unsigned IOCTL_IRFRAMETTY_GET_DEVICE = IRFRAMETTY_GET_DEVICE;
+unsigned IOCTL_IRFRAMETTY_GET_DONGLE = IRFRAMETTY_GET_DONGLE;
+unsigned IOCTL_IRFRAMETTY_SET_DONGLE = IRFRAMETTY_SET_DONGLE;
+unsigned IOCTL_ISV_CMD = ISV_CMD;
+unsigned IOCTL_WTQICMD = WTQICMD;
+unsigned IOCTL_ISCSI_GET_VERSION = ISCSI_GET_VERSION;
+unsigned IOCTL_ISCSI_LOGIN = ISCSI_LOGIN;
+unsigned IOCTL_ISCSI_LOGOUT = ISCSI_LOGOUT;
+unsigned IOCTL_ISCSI_ADD_CONNECTION = ISCSI_ADD_CONNECTION;
+unsigned IOCTL_ISCSI_RESTORE_CONNECTION = ISCSI_RESTORE_CONNECTION;
+unsigned IOCTL_ISCSI_REMOVE_CONNECTION = ISCSI_REMOVE_CONNECTION;
+unsigned IOCTL_ISCSI_CONNECTION_STATUS = ISCSI_CONNECTION_STATUS;
+unsigned IOCTL_ISCSI_SEND_TARGETS = ISCSI_SEND_TARGETS;
+unsigned IOCTL_ISCSI_SET_NODE_NAME = ISCSI_SET_NODE_NAME;
+unsigned IOCTL_ISCSI_IO_COMMAND = ISCSI_IO_COMMAND;
+unsigned IOCTL_ISCSI_REGISTER_EVENT = ISCSI_REGISTER_EVENT;
+unsigned IOCTL_ISCSI_DEREGISTER_EVENT = ISCSI_DEREGISTER_EVENT;
+unsigned IOCTL_ISCSI_WAIT_EVENT = ISCSI_WAIT_EVENT;
+unsigned IOCTL_ISCSI_POLL_EVENT = ISCSI_POLL_EVENT;
+unsigned IOCTL_OFIOCGET = OFIOCGET;
+unsigned IOCTL_OFIOCSET = OFIOCSET;
+unsigned IOCTL_OFIOCNEXTPROP = OFIOCNEXTPROP;
+unsigned IOCTL_OFIOCGETOPTNODE = OFIOCGETOPTNODE;
+unsigned IOCTL_OFIOCGETNEXT = OFIOCGETNEXT;
+unsigned IOCTL_OFIOCGETCHILD = OFIOCGETCHILD;
+unsigned IOCTL_OFIOCFINDDEVICE = OFIOCFINDDEVICE;
+unsigned IOCTL_AMR_IO_VERSION = AMR_IO_VERSION;
+unsigned IOCTL_AMR_IO_COMMAND = AMR_IO_COMMAND;
+unsigned IOCTL_MLYIO_COMMAND = MLYIO_COMMAND;
+unsigned IOCTL_MLYIO_HEALTH = MLYIO_HEALTH;
+unsigned IOCTL_PCI_IOC_CFGREAD = PCI_IOC_CFGREAD;
+unsigned IOCTL_PCI_IOC_CFGWRITE = PCI_IOC_CFGWRITE;
+unsigned IOCTL_PCI_IOC_BDF_CFGREAD = PCI_IOC_BDF_CFGREAD;
+unsigned IOCTL_PCI_IOC_BDF_CFGWRITE = PCI_IOC_BDF_CFGWRITE;
+unsigned IOCTL_PCI_IOC_BUSINFO = PCI_IOC_BUSINFO;
+unsigned IOCTL_PCI_IOC_DRVNAME = PCI_IOC_DRVNAME;
+unsigned IOCTL_PCI_IOC_DRVNAMEONBUS = PCI_IOC_DRVNAMEONBUS;
+unsigned IOCTL_TWEIO_COMMAND = TWEIO_COMMAND;
+unsigned IOCTL_TWEIO_STATS = TWEIO_STATS;
+unsigned IOCTL_TWEIO_AEN_POLL = TWEIO_AEN_POLL;
+unsigned IOCTL_TWEIO_AEN_WAIT = TWEIO_AEN_WAIT;
+unsigned IOCTL_TWEIO_SET_PARAM = TWEIO_SET_PARAM;
+unsigned IOCTL_TWEIO_GET_PARAM = TWEIO_GET_PARAM;
+unsigned IOCTL_TWEIO_RESET = TWEIO_RESET;
+unsigned IOCTL_TWEIO_ADD_UNIT = TWEIO_ADD_UNIT;
+unsigned IOCTL_TWEIO_DEL_UNIT = TWEIO_DEL_UNIT;
+unsigned IOCTL_SIOCSCNWDOMAIN = SIOCSCNWDOMAIN;
+unsigned IOCTL_SIOCGCNWDOMAIN = SIOCGCNWDOMAIN;
+unsigned IOCTL_SIOCSCNWKEY = SIOCSCNWKEY;
+unsigned IOCTL_SIOCGCNWSTATUS = SIOCGCNWSTATUS;
+unsigned IOCTL_SIOCGCNWSTATS = SIOCGCNWSTATS;
+unsigned IOCTL_SIOCGCNWTRAIL = SIOCGCNWTRAIL;
+unsigned IOCTL_SIOCGRAYSIGLEV = SIOCGRAYSIGLEV;
+unsigned IOCTL_RAIDFRAME_SHUTDOWN = RAIDFRAME_SHUTDOWN;
+unsigned IOCTL_RAIDFRAME_TUR = RAIDFRAME_TUR;
+unsigned IOCTL_RAIDFRAME_FAIL_DISK = RAIDFRAME_FAIL_DISK;
+unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS = RAIDFRAME_CHECK_RECON_STATUS;
+unsigned IOCTL_RAIDFRAME_REWRITEPARITY = RAIDFRAME_REWRITEPARITY;
+unsigned IOCTL_RAIDFRAME_COPYBACK = RAIDFRAME_COPYBACK;
+unsigned IOCTL_RAIDFRAME_SPARET_WAIT = RAIDFRAME_SPARET_WAIT;
+unsigned IOCTL_RAIDFRAME_SEND_SPARET = RAIDFRAME_SEND_SPARET;
+unsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT = RAIDFRAME_ABORT_SPARET_WAIT;
+unsigned IOCTL_RAIDFRAME_START_ATRACE = RAIDFRAME_START_ATRACE;
+unsigned IOCTL_RAIDFRAME_STOP_ATRACE = RAIDFRAME_STOP_ATRACE;
+unsigned IOCTL_RAIDFRAME_GET_SIZE = RAIDFRAME_GET_SIZE;
+unsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS = RAIDFRAME_RESET_ACCTOTALS;
+unsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS = RAIDFRAME_KEEP_ACCTOTALS;
+unsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL = RAIDFRAME_GET_COMPONENT_LABEL;
+unsigned IOCTL_RAIDFRAME_SET_COMPONENT_LABEL = RAIDFRAME_SET_COMPONENT_LABEL;
+unsigned IOCTL_RAIDFRAME_INIT_LABELS = RAIDFRAME_INIT_LABELS;
+unsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE = RAIDFRAME_ADD_HOT_SPARE;
+unsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE = RAIDFRAME_REMOVE_HOT_SPARE;
+unsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE = RAIDFRAME_REBUILD_IN_PLACE;
+unsigned IOCTL_RAIDFRAME_CHECK_PARITY = RAIDFRAME_CHECK_PARITY;
+unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS =
+ RAIDFRAME_CHECK_PARITYREWRITE_STATUS;
+unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS =
+ RAIDFRAME_CHECK_COPYBACK_STATUS;
+unsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG = RAIDFRAME_SET_AUTOCONFIG;
+unsigned IOCTL_RAIDFRAME_SET_ROOT = RAIDFRAME_SET_ROOT;
+unsigned IOCTL_RAIDFRAME_DELETE_COMPONENT = RAIDFRAME_DELETE_COMPONENT;
+unsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE =
+ RAIDFRAME_INCORPORATE_HOT_SPARE;
+unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT =
+ RAIDFRAME_CHECK_RECON_STATUS_EXT;
+unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT =
+ RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT;
+unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT =
+ RAIDFRAME_CHECK_COPYBACK_STATUS_EXT;
+unsigned IOCTL_RAIDFRAME_CONFIGURE = RAIDFRAME_CONFIGURE;
+unsigned IOCTL_RAIDFRAME_GET_INFO = RAIDFRAME_GET_INFO;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS = RAIDFRAME_PARITYMAP_STATUS;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE =
+ RAIDFRAME_PARITYMAP_GET_DISABLE;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE =
+ RAIDFRAME_PARITYMAP_SET_DISABLE;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS = RAIDFRAME_PARITYMAP_SET_PARAMS;
+unsigned IOCTL_RAIDFRAME_SET_LAST_UNIT = RAIDFRAME_SET_LAST_UNIT;
+unsigned IOCTL_MBPPIOCSPARAM = MBPPIOCSPARAM;
+unsigned IOCTL_MBPPIOCGPARAM = MBPPIOCGPARAM;
+unsigned IOCTL_MBPPIOCGSTAT = MBPPIOCGSTAT;
+unsigned IOCTL_SESIOC_GETNOBJ = SESIOC_GETNOBJ;
+unsigned IOCTL_SESIOC_GETOBJMAP = SESIOC_GETOBJMAP;
+unsigned IOCTL_SESIOC_GETENCSTAT = SESIOC_GETENCSTAT;
+unsigned IOCTL_SESIOC_SETENCSTAT = SESIOC_SETENCSTAT;
+unsigned IOCTL_SESIOC_GETOBJSTAT = SESIOC_GETOBJSTAT;
+unsigned IOCTL_SESIOC_SETOBJSTAT = SESIOC_SETOBJSTAT;
+unsigned IOCTL_SESIOC_GETTEXT = SESIOC_GETTEXT;
+unsigned IOCTL_SESIOC_INIT = SESIOC_INIT;
+unsigned IOCTL_SUN_DKIOCGGEOM = SUN_DKIOCGGEOM;
+unsigned IOCTL_SUN_DKIOCINFO = SUN_DKIOCINFO;
+unsigned IOCTL_SUN_DKIOCGPART = SUN_DKIOCGPART;
+unsigned IOCTL_FBIOGTYPE = FBIOGTYPE;
+unsigned IOCTL_FBIOPUTCMAP = FBIOPUTCMAP;
+unsigned IOCTL_FBIOGETCMAP = FBIOGETCMAP;
+unsigned IOCTL_FBIOGATTR = FBIOGATTR;
+unsigned IOCTL_FBIOSVIDEO = FBIOSVIDEO;
+unsigned IOCTL_FBIOGVIDEO = FBIOGVIDEO;
+unsigned IOCTL_FBIOSCURSOR = FBIOSCURSOR;
+unsigned IOCTL_FBIOGCURSOR = FBIOGCURSOR;
+unsigned IOCTL_FBIOSCURPOS = FBIOSCURPOS;
+unsigned IOCTL_FBIOGCURPOS = FBIOGCURPOS;
+unsigned IOCTL_FBIOGCURMAX = FBIOGCURMAX;
+unsigned IOCTL_KIOCTRANS = KIOCTRANS;
+unsigned IOCTL_KIOCSETKEY = KIOCSETKEY;
+unsigned IOCTL_KIOCGETKEY = KIOCGETKEY;
+unsigned IOCTL_KIOCGTRANS = KIOCGTRANS;
+unsigned IOCTL_KIOCCMD = KIOCCMD;
+unsigned IOCTL_KIOCTYPE = KIOCTYPE;
+unsigned IOCTL_KIOCSDIRECT = KIOCSDIRECT;
+unsigned IOCTL_KIOCSKEY = KIOCSKEY;
+unsigned IOCTL_KIOCGKEY = KIOCGKEY;
+unsigned IOCTL_KIOCSLED = KIOCSLED;
+unsigned IOCTL_KIOCGLED = KIOCGLED;
+unsigned IOCTL_KIOCLAYOUT = KIOCLAYOUT;
+unsigned IOCTL_VUIDSFORMAT = VUIDSFORMAT;
+unsigned IOCTL_VUIDGFORMAT = VUIDGFORMAT;
+unsigned IOCTL_STICIO_GXINFO = STICIO_GXINFO;
+unsigned IOCTL_STICIO_RESET = STICIO_RESET;
+unsigned IOCTL_STICIO_STARTQ = STICIO_STARTQ;
+unsigned IOCTL_STICIO_STOPQ = STICIO_STOPQ;
+unsigned IOCTL_UKYOPON_IDENTIFY = UKYOPON_IDENTIFY;
+unsigned IOCTL_URIO_SEND_COMMAND = URIO_SEND_COMMAND;
+unsigned IOCTL_URIO_RECV_COMMAND = URIO_RECV_COMMAND;
+unsigned IOCTL_USB_REQUEST = USB_REQUEST;
+unsigned IOCTL_USB_SETDEBUG = USB_SETDEBUG;
+unsigned IOCTL_USB_DISCOVER = USB_DISCOVER;
+unsigned IOCTL_USB_DEVICEINFO = USB_DEVICEINFO;
+unsigned IOCTL_USB_DEVICEINFO_OLD = USB_DEVICEINFO_OLD;
+unsigned IOCTL_USB_DEVICESTATS = USB_DEVICESTATS;
+unsigned IOCTL_USB_GET_REPORT_DESC = USB_GET_REPORT_DESC;
+unsigned IOCTL_USB_SET_IMMED = USB_SET_IMMED;
+unsigned IOCTL_USB_GET_REPORT = USB_GET_REPORT;
+unsigned IOCTL_USB_SET_REPORT = USB_SET_REPORT;
+unsigned IOCTL_USB_GET_REPORT_ID = USB_GET_REPORT_ID;
+unsigned IOCTL_USB_GET_CONFIG = USB_GET_CONFIG;
+unsigned IOCTL_USB_SET_CONFIG = USB_SET_CONFIG;
+unsigned IOCTL_USB_GET_ALTINTERFACE = USB_GET_ALTINTERFACE;
+unsigned IOCTL_USB_SET_ALTINTERFACE = USB_SET_ALTINTERFACE;
+unsigned IOCTL_USB_GET_NO_ALT = USB_GET_NO_ALT;
+unsigned IOCTL_USB_GET_DEVICE_DESC = USB_GET_DEVICE_DESC;
+unsigned IOCTL_USB_GET_CONFIG_DESC = USB_GET_CONFIG_DESC;
+unsigned IOCTL_USB_GET_INTERFACE_DESC = USB_GET_INTERFACE_DESC;
+unsigned IOCTL_USB_GET_ENDPOINT_DESC = USB_GET_ENDPOINT_DESC;
+unsigned IOCTL_USB_GET_FULL_DESC = USB_GET_FULL_DESC;
+unsigned IOCTL_USB_GET_STRING_DESC = USB_GET_STRING_DESC;
+unsigned IOCTL_USB_DO_REQUEST = USB_DO_REQUEST;
+unsigned IOCTL_USB_GET_DEVICEINFO = USB_GET_DEVICEINFO;
+unsigned IOCTL_USB_GET_DEVICEINFO_OLD = USB_GET_DEVICEINFO_OLD;
+unsigned IOCTL_USB_SET_SHORT_XFER = USB_SET_SHORT_XFER;
+unsigned IOCTL_USB_SET_TIMEOUT = USB_SET_TIMEOUT;
+unsigned IOCTL_USB_SET_BULK_RA = USB_SET_BULK_RA;
+unsigned IOCTL_USB_SET_BULK_WB = USB_SET_BULK_WB;
+unsigned IOCTL_USB_SET_BULK_RA_OPT = USB_SET_BULK_RA_OPT;
+unsigned IOCTL_USB_SET_BULK_WB_OPT = USB_SET_BULK_WB_OPT;
+unsigned IOCTL_USB_GET_CM_OVER_DATA = USB_GET_CM_OVER_DATA;
+unsigned IOCTL_USB_SET_CM_OVER_DATA = USB_SET_CM_OVER_DATA;
+unsigned IOCTL_UTOPPYIOTURBO = UTOPPYIOTURBO;
+unsigned IOCTL_UTOPPYIOCANCEL = UTOPPYIOCANCEL;
+unsigned IOCTL_UTOPPYIOREBOOT = UTOPPYIOREBOOT;
+unsigned IOCTL_UTOPPYIOSTATS = UTOPPYIOSTATS;
+unsigned IOCTL_UTOPPYIORENAME = UTOPPYIORENAME;
+unsigned IOCTL_UTOPPYIOMKDIR = UTOPPYIOMKDIR;
+unsigned IOCTL_UTOPPYIODELETE = UTOPPYIODELETE;
+unsigned IOCTL_UTOPPYIOREADDIR = UTOPPYIOREADDIR;
+unsigned IOCTL_UTOPPYIOREADFILE = UTOPPYIOREADFILE;
+unsigned IOCTL_UTOPPYIOWRITEFILE = UTOPPYIOWRITEFILE;
+unsigned IOCTL_DIOSXDCMD = DIOSXDCMD;
+unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
+unsigned IOCTL_VT_SETMODE = VT_SETMODE;
+unsigned IOCTL_VT_GETMODE = VT_GETMODE;
+unsigned IOCTL_VT_RELDISP = VT_RELDISP;
+unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
+unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
+unsigned IOCTL_VT_GETACTIVE = VT_GETACTIVE;
+unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
+unsigned IOCTL_KDGETKBENT = KDGETKBENT;
+unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+unsigned IOCTL_KDMKTONE = KDMKTONE;
+unsigned IOCTL_KDSETMODE = KDSETMODE;
+unsigned IOCTL_KDENABIO = KDENABIO;
+unsigned IOCTL_KDDISABIO = KDDISABIO;
+unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+unsigned IOCTL_KDGETLED = KDGETLED;
+unsigned IOCTL_KDSETLED = KDSETLED;
+unsigned IOCTL_KDSETRAD = KDSETRAD;
+unsigned IOCTL_VGAPCVTID = VGAPCVTID;
+unsigned IOCTL_CONS_GETVERS = CONS_GETVERS;
+unsigned IOCTL_WSKBDIO_GTYPE = WSKBDIO_GTYPE;
+unsigned IOCTL_WSKBDIO_BELL = WSKBDIO_BELL;
+unsigned IOCTL_WSKBDIO_COMPLEXBELL = WSKBDIO_COMPLEXBELL;
+unsigned IOCTL_WSKBDIO_SETBELL = WSKBDIO_SETBELL;
+unsigned IOCTL_WSKBDIO_GETBELL = WSKBDIO_GETBELL;
+unsigned IOCTL_WSKBDIO_SETDEFAULTBELL = WSKBDIO_SETDEFAULTBELL;
+unsigned IOCTL_WSKBDIO_GETDEFAULTBELL = WSKBDIO_GETDEFAULTBELL;
+unsigned IOCTL_WSKBDIO_SETKEYREPEAT = WSKBDIO_SETKEYREPEAT;
+unsigned IOCTL_WSKBDIO_GETKEYREPEAT = WSKBDIO_GETKEYREPEAT;
+unsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT = WSKBDIO_SETDEFAULTKEYREPEAT;
+unsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT = WSKBDIO_GETDEFAULTKEYREPEAT;
+unsigned IOCTL_WSKBDIO_SETLEDS = WSKBDIO_SETLEDS;
+unsigned IOCTL_WSKBDIO_GETLEDS = WSKBDIO_GETLEDS;
+unsigned IOCTL_WSKBDIO_GETMAP = WSKBDIO_GETMAP;
+unsigned IOCTL_WSKBDIO_SETMAP = WSKBDIO_SETMAP;
+unsigned IOCTL_WSKBDIO_GETENCODING = WSKBDIO_GETENCODING;
+unsigned IOCTL_WSKBDIO_SETENCODING = WSKBDIO_SETENCODING;
+unsigned IOCTL_WSKBDIO_SETMODE = WSKBDIO_SETMODE;
+unsigned IOCTL_WSKBDIO_GETMODE = WSKBDIO_GETMODE;
+unsigned IOCTL_WSKBDIO_SETKEYCLICK = WSKBDIO_SETKEYCLICK;
+unsigned IOCTL_WSKBDIO_GETKEYCLICK = WSKBDIO_GETKEYCLICK;
+unsigned IOCTL_WSKBDIO_GETSCROLL = WSKBDIO_GETSCROLL;
+unsigned IOCTL_WSKBDIO_SETSCROLL = WSKBDIO_SETSCROLL;
+unsigned IOCTL_WSKBDIO_SETVERSION = WSKBDIO_SETVERSION;
+unsigned IOCTL_WSMOUSEIO_GTYPE = WSMOUSEIO_GTYPE;
+unsigned IOCTL_WSMOUSEIO_SRES = WSMOUSEIO_SRES;
+unsigned IOCTL_WSMOUSEIO_SSCALE = WSMOUSEIO_SSCALE;
+unsigned IOCTL_WSMOUSEIO_SRATE = WSMOUSEIO_SRATE;
+unsigned IOCTL_WSMOUSEIO_SCALIBCOORDS = WSMOUSEIO_SCALIBCOORDS;
+unsigned IOCTL_WSMOUSEIO_GCALIBCOORDS = WSMOUSEIO_GCALIBCOORDS;
+unsigned IOCTL_WSMOUSEIO_GETID = WSMOUSEIO_GETID;
+unsigned IOCTL_WSMOUSEIO_GETREPEAT = WSMOUSEIO_GETREPEAT;
+unsigned IOCTL_WSMOUSEIO_SETREPEAT = WSMOUSEIO_SETREPEAT;
+unsigned IOCTL_WSMOUSEIO_SETVERSION = WSMOUSEIO_SETVERSION;
+unsigned IOCTL_WSDISPLAYIO_GTYPE = WSDISPLAYIO_GTYPE;
+unsigned IOCTL_WSDISPLAYIO_GINFO = WSDISPLAYIO_GINFO;
+unsigned IOCTL_WSDISPLAYIO_GETCMAP = WSDISPLAYIO_GETCMAP;
+unsigned IOCTL_WSDISPLAYIO_PUTCMAP = WSDISPLAYIO_PUTCMAP;
+unsigned IOCTL_WSDISPLAYIO_GVIDEO = WSDISPLAYIO_GVIDEO;
+unsigned IOCTL_WSDISPLAYIO_SVIDEO = WSDISPLAYIO_SVIDEO;
+unsigned IOCTL_WSDISPLAYIO_GCURPOS = WSDISPLAYIO_GCURPOS;
+unsigned IOCTL_WSDISPLAYIO_SCURPOS = WSDISPLAYIO_SCURPOS;
+unsigned IOCTL_WSDISPLAYIO_GCURMAX = WSDISPLAYIO_GCURMAX;
+unsigned IOCTL_WSDISPLAYIO_GCURSOR = WSDISPLAYIO_GCURSOR;
+unsigned IOCTL_WSDISPLAYIO_SCURSOR = WSDISPLAYIO_SCURSOR;
+unsigned IOCTL_WSDISPLAYIO_GMODE = WSDISPLAYIO_GMODE;
+unsigned IOCTL_WSDISPLAYIO_SMODE = WSDISPLAYIO_SMODE;
+unsigned IOCTL_WSDISPLAYIO_LDFONT = WSDISPLAYIO_LDFONT;
+unsigned IOCTL_WSDISPLAYIO_ADDSCREEN = WSDISPLAYIO_ADDSCREEN;
+unsigned IOCTL_WSDISPLAYIO_DELSCREEN = WSDISPLAYIO_DELSCREEN;
+unsigned IOCTL_WSDISPLAYIO_SFONT = WSDISPLAYIO_SFONT;
+unsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD = _O_WSDISPLAYIO_SETKEYBOARD;
+unsigned IOCTL_WSDISPLAYIO_GETPARAM = WSDISPLAYIO_GETPARAM;
+unsigned IOCTL_WSDISPLAYIO_SETPARAM = WSDISPLAYIO_SETPARAM;
+unsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN = WSDISPLAYIO_GETACTIVESCREEN;
+unsigned IOCTL_WSDISPLAYIO_GETWSCHAR = WSDISPLAYIO_GETWSCHAR;
+unsigned IOCTL_WSDISPLAYIO_PUTWSCHAR = WSDISPLAYIO_PUTWSCHAR;
+unsigned IOCTL_WSDISPLAYIO_DGSCROLL = WSDISPLAYIO_DGSCROLL;
+unsigned IOCTL_WSDISPLAYIO_DSSCROLL = WSDISPLAYIO_DSSCROLL;
+unsigned IOCTL_WSDISPLAYIO_GMSGATTRS = WSDISPLAYIO_GMSGATTRS;
+unsigned IOCTL_WSDISPLAYIO_SMSGATTRS = WSDISPLAYIO_SMSGATTRS;
+unsigned IOCTL_WSDISPLAYIO_GBORDER = WSDISPLAYIO_GBORDER;
+unsigned IOCTL_WSDISPLAYIO_SBORDER = WSDISPLAYIO_SBORDER;
+unsigned IOCTL_WSDISPLAYIO_SSPLASH = WSDISPLAYIO_SSPLASH;
+unsigned IOCTL_WSDISPLAYIO_SPROGRESS = WSDISPLAYIO_SPROGRESS;
+unsigned IOCTL_WSDISPLAYIO_LINEBYTES = WSDISPLAYIO_LINEBYTES;
+unsigned IOCTL_WSDISPLAYIO_SETVERSION = WSDISPLAYIO_SETVERSION;
+unsigned IOCTL_WSMUXIO_ADD_DEVICE = WSMUXIO_ADD_DEVICE;
+unsigned IOCTL_WSMUXIO_REMOVE_DEVICE = WSMUXIO_REMOVE_DEVICE;
+unsigned IOCTL_WSMUXIO_LIST_DEVICES = WSMUXIO_LIST_DEVICES;
+unsigned IOCTL_WSMUXIO_INJECTEVENT = WSMUXIO_INJECTEVENT;
+unsigned IOCTL_WSDISPLAYIO_GET_BUSID = WSDISPLAYIO_GET_BUSID;
+unsigned IOCTL_WSDISPLAYIO_GET_EDID = WSDISPLAYIO_GET_EDID;
+unsigned IOCTL_WSDISPLAYIO_SET_POLLING = WSDISPLAYIO_SET_POLLING;
+unsigned IOCTL_WSDISPLAYIO_GET_FBINFO = WSDISPLAYIO_GET_FBINFO;
+unsigned IOCTL_WSDISPLAYIO_DOBLIT = WSDISPLAYIO_DOBLIT;
+unsigned IOCTL_WSDISPLAYIO_WAITBLIT = WSDISPLAYIO_WAITBLIT;
+unsigned IOCTL_BIOCLOCATE = BIOCLOCATE;
+unsigned IOCTL_BIOCINQ = BIOCINQ;
+unsigned IOCTL_BIOCDISK_NOVOL = BIOCDISK_NOVOL;
+unsigned IOCTL_BIOCDISK = BIOCDISK;
+unsigned IOCTL_BIOCVOL = BIOCVOL;
+unsigned IOCTL_BIOCALARM = BIOCALARM;
+unsigned IOCTL_BIOCBLINK = BIOCBLINK;
+unsigned IOCTL_BIOCSETSTATE = BIOCSETSTATE;
+unsigned IOCTL_BIOCVOLOPS = BIOCVOLOPS;
+unsigned IOCTL_MD_GETCONF = MD_GETCONF;
+unsigned IOCTL_MD_SETCONF = MD_SETCONF;
+unsigned IOCTL_CCDIOCSET = CCDIOCSET;
+unsigned IOCTL_CCDIOCCLR = CCDIOCCLR;
+unsigned IOCTL_CGDIOCSET = CGDIOCSET;
+unsigned IOCTL_CGDIOCCLR = CGDIOCCLR;
+unsigned IOCTL_CGDIOCGET = CGDIOCGET;
+unsigned IOCTL_FSSIOCSET = FSSIOCSET;
+unsigned IOCTL_FSSIOCGET = FSSIOCGET;
+unsigned IOCTL_FSSIOCCLR = FSSIOCCLR;
+unsigned IOCTL_FSSIOFSET = FSSIOFSET;
+unsigned IOCTL_FSSIOFGET = FSSIOFGET;
+unsigned IOCTL_BTDEV_ATTACH = BTDEV_ATTACH;
+unsigned IOCTL_BTDEV_DETACH = BTDEV_DETACH;
+unsigned IOCTL_BTSCO_GETINFO = BTSCO_GETINFO;
+unsigned IOCTL_KTTCP_IO_SEND = KTTCP_IO_SEND;
+unsigned IOCTL_KTTCP_IO_RECV = KTTCP_IO_RECV;
+unsigned IOCTL_IOC_LOCKSTAT_GVERSION = IOC_LOCKSTAT_GVERSION;
+unsigned IOCTL_IOC_LOCKSTAT_ENABLE = IOC_LOCKSTAT_ENABLE;
+unsigned IOCTL_IOC_LOCKSTAT_DISABLE = IOC_LOCKSTAT_DISABLE;
+unsigned IOCTL_VNDIOCSET = VNDIOCSET;
+unsigned IOCTL_VNDIOCCLR = VNDIOCCLR;
+unsigned IOCTL_VNDIOCGET = VNDIOCGET;
+unsigned IOCTL_SPKRTONE = SPKRTONE;
+unsigned IOCTL_SPKRTUNE = SPKRTUNE;
+unsigned IOCTL_SPKRGETVOL = SPKRGETVOL;
+unsigned IOCTL_SPKRSETVOL = SPKRSETVOL;
+#if defined(__x86_64__)
+unsigned IOCTL_NVMM_IOC_CAPABILITY = NVMM_IOC_CAPABILITY;
+unsigned IOCTL_NVMM_IOC_MACHINE_CREATE = NVMM_IOC_MACHINE_CREATE;
+unsigned IOCTL_NVMM_IOC_MACHINE_DESTROY = NVMM_IOC_MACHINE_DESTROY;
+unsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE = NVMM_IOC_MACHINE_CONFIGURE;
+unsigned IOCTL_NVMM_IOC_VCPU_CREATE = NVMM_IOC_VCPU_CREATE;
+unsigned IOCTL_NVMM_IOC_VCPU_DESTROY = NVMM_IOC_VCPU_DESTROY;
+unsigned IOCTL_NVMM_IOC_VCPU_CONFIGURE = NVMM_IOC_VCPU_CONFIGURE;
+unsigned IOCTL_NVMM_IOC_VCPU_SETSTATE = NVMM_IOC_VCPU_SETSTATE;
+unsigned IOCTL_NVMM_IOC_VCPU_GETSTATE = NVMM_IOC_VCPU_GETSTATE;
+unsigned IOCTL_NVMM_IOC_VCPU_INJECT = NVMM_IOC_VCPU_INJECT;
+unsigned IOCTL_NVMM_IOC_VCPU_RUN = NVMM_IOC_VCPU_RUN;
+unsigned IOCTL_NVMM_IOC_GPA_MAP = NVMM_IOC_GPA_MAP;
+unsigned IOCTL_NVMM_IOC_GPA_UNMAP = NVMM_IOC_GPA_UNMAP;
+unsigned IOCTL_NVMM_IOC_HVA_MAP = NVMM_IOC_HVA_MAP;
+unsigned IOCTL_NVMM_IOC_HVA_UNMAP = NVMM_IOC_HVA_UNMAP;
+unsigned IOCTL_NVMM_IOC_CTL = NVMM_IOC_CTL;
+#endif
+unsigned IOCTL_SPI_IOCTL_CONFIGURE = SPI_IOCTL_CONFIGURE;
+unsigned IOCTL_SPI_IOCTL_TRANSFER = SPI_IOCTL_TRANSFER;
+unsigned IOCTL_AUTOFSREQUEST = AUTOFSREQUEST;
+unsigned IOCTL_AUTOFSDONE = AUTOFSDONE;
+unsigned IOCTL_BIOCGBLEN = BIOCGBLEN;
+unsigned IOCTL_BIOCSBLEN = BIOCSBLEN;
+unsigned IOCTL_BIOCSETF = BIOCSETF;
+unsigned IOCTL_BIOCFLUSH = BIOCFLUSH;
+unsigned IOCTL_BIOCPROMISC = BIOCPROMISC;
+unsigned IOCTL_BIOCGDLT = BIOCGDLT;
+unsigned IOCTL_BIOCGETIF = BIOCGETIF;
+unsigned IOCTL_BIOCSETIF = BIOCSETIF;
+unsigned IOCTL_BIOCGSTATS = BIOCGSTATS;
+unsigned IOCTL_BIOCGSTATSOLD = BIOCGSTATSOLD;
+unsigned IOCTL_BIOCIMMEDIATE = BIOCIMMEDIATE;
+unsigned IOCTL_BIOCVERSION = BIOCVERSION;
+unsigned IOCTL_BIOCSTCPF = BIOCSTCPF;
+unsigned IOCTL_BIOCSUDPF = BIOCSUDPF;
+unsigned IOCTL_BIOCGHDRCMPLT = BIOCGHDRCMPLT;
+unsigned IOCTL_BIOCSHDRCMPLT = BIOCSHDRCMPLT;
+unsigned IOCTL_BIOCSDLT = BIOCSDLT;
+unsigned IOCTL_BIOCGDLTLIST = BIOCGDLTLIST;
+unsigned IOCTL_BIOCGDIRECTION = BIOCGDIRECTION;
+unsigned IOCTL_BIOCSDIRECTION = BIOCSDIRECTION;
+unsigned IOCTL_BIOCSRTIMEOUT = BIOCSRTIMEOUT;
+unsigned IOCTL_BIOCGRTIMEOUT = BIOCGRTIMEOUT;
+unsigned IOCTL_BIOCGFEEDBACK = BIOCGFEEDBACK;
+unsigned IOCTL_BIOCSFEEDBACK = BIOCSFEEDBACK;
+unsigned IOCTL_GRESADDRS = GRESADDRS;
+unsigned IOCTL_GRESADDRD = GRESADDRD;
+unsigned IOCTL_GREGADDRS = GREGADDRS;
+unsigned IOCTL_GREGADDRD = GREGADDRD;
+unsigned IOCTL_GRESPROTO = GRESPROTO;
+unsigned IOCTL_GREGPROTO = GREGPROTO;
+unsigned IOCTL_GRESSOCK = GRESSOCK;
+unsigned IOCTL_GREDSOCK = GREDSOCK;
+unsigned IOCTL_PPPIOCGRAWIN = PPPIOCGRAWIN;
+unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;
+unsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS;
+unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;
+unsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP;
+unsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT;
+unsigned IOCTL_PPPIOCGRASYNCMAP = PPPIOCGRASYNCMAP;
+unsigned IOCTL_PPPIOCSRASYNCMAP = PPPIOCSRASYNCMAP;
+unsigned IOCTL_PPPIOCGMRU = PPPIOCGMRU;
+unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;
+unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;
+unsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP;
+unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;
+unsigned IOCTL_PPPIOCXFERUNIT = PPPIOCXFERUNIT;
+unsigned IOCTL_PPPIOCSCOMPRESS = PPPIOCSCOMPRESS;
+unsigned IOCTL_PPPIOCGNPMODE = PPPIOCGNPMODE;
+unsigned IOCTL_PPPIOCSNPMODE = PPPIOCSNPMODE;
+unsigned IOCTL_PPPIOCGIDLE = PPPIOCGIDLE;
+unsigned IOCTL_PPPIOCGMTU = PPPIOCGMTU;
+unsigned IOCTL_PPPIOCSMTU = PPPIOCSMTU;
+unsigned IOCTL_SIOCGPPPSTATS = SIOCGPPPSTATS;
+unsigned IOCTL_SIOCGPPPCSTATS = SIOCGPPPCSTATS;
+unsigned IOCTL_IOC_NPF_VERSION = IOC_NPF_VERSION;
+unsigned IOCTL_IOC_NPF_SWITCH = IOC_NPF_SWITCH;
+unsigned IOCTL_IOC_NPF_LOAD = IOC_NPF_LOAD;
+unsigned IOCTL_IOC_NPF_TABLE = IOC_NPF_TABLE;
+unsigned IOCTL_IOC_NPF_STATS = IOC_NPF_STATS;
+unsigned IOCTL_IOC_NPF_SAVE = IOC_NPF_SAVE;
+unsigned IOCTL_IOC_NPF_RULE = IOC_NPF_RULE;
+unsigned IOCTL_IOC_NPF_CONN_LOOKUP = IOC_NPF_CONN_LOOKUP;
+unsigned IOCTL_IOC_NPF_TABLE_REPLACE = IOC_NPF_TABLE_REPLACE;
+unsigned IOCTL_PPPOESETPARMS = PPPOESETPARMS;
+unsigned IOCTL_PPPOEGETPARMS = PPPOEGETPARMS;
+unsigned IOCTL_PPPOEGETSESSION = PPPOEGETSESSION;
+unsigned IOCTL_SPPPGETAUTHCFG = SPPPGETAUTHCFG;
+unsigned IOCTL_SPPPSETAUTHCFG = SPPPSETAUTHCFG;
+unsigned IOCTL_SPPPGETLCPCFG = SPPPGETLCPCFG;
+unsigned IOCTL_SPPPSETLCPCFG = SPPPSETLCPCFG;
+unsigned IOCTL_SPPPGETSTATUS = SPPPGETSTATUS;
+unsigned IOCTL_SPPPGETSTATUSNCP = SPPPGETSTATUSNCP;
+unsigned IOCTL_SPPPGETIDLETO = SPPPGETIDLETO;
+unsigned IOCTL_SPPPSETIDLETO = SPPPSETIDLETO;
+unsigned IOCTL_SPPPGETAUTHFAILURES = SPPPGETAUTHFAILURES;
+unsigned IOCTL_SPPPSETAUTHFAILURE = SPPPSETAUTHFAILURE;
+unsigned IOCTL_SPPPSETDNSOPTS = SPPPSETDNSOPTS;
+unsigned IOCTL_SPPPGETDNSOPTS = SPPPGETDNSOPTS;
+unsigned IOCTL_SPPPGETDNSADDRS = SPPPGETDNSADDRS;
+unsigned IOCTL_SPPPSETKEEPALIVE = SPPPSETKEEPALIVE;
+unsigned IOCTL_SPPPGETKEEPALIVE = SPPPGETKEEPALIVE;
+unsigned IOCTL_SRT_GETNRT = SRT_GETNRT;
+unsigned IOCTL_SRT_GETRT = SRT_GETRT;
+unsigned IOCTL_SRT_SETRT = SRT_SETRT;
+unsigned IOCTL_SRT_DELRT = SRT_DELRT;
+unsigned IOCTL_SRT_SFLAGS = SRT_SFLAGS;
+unsigned IOCTL_SRT_GFLAGS = SRT_GFLAGS;
+unsigned IOCTL_SRT_SGFLAGS = SRT_SGFLAGS;
+unsigned IOCTL_SRT_DEBUG = SRT_DEBUG;
+unsigned IOCTL_TAPGIFNAME = TAPGIFNAME;
+unsigned IOCTL_TUNSDEBUG = TUNSDEBUG;
+unsigned IOCTL_TUNGDEBUG = TUNGDEBUG;
+unsigned IOCTL_TUNSIFMODE = TUNSIFMODE;
+unsigned IOCTL_TUNSLMODE = TUNSLMODE;
+unsigned IOCTL_TUNSIFHEAD = TUNSIFHEAD;
+unsigned IOCTL_TUNGIFHEAD = TUNGIFHEAD;
+unsigned IOCTL_DIOCSTART = DIOCSTART;
+unsigned IOCTL_DIOCSTOP = DIOCSTOP;
+unsigned IOCTL_DIOCADDRULE = DIOCADDRULE;
+unsigned IOCTL_DIOCGETRULES = DIOCGETRULES;
+unsigned IOCTL_DIOCGETRULE = DIOCGETRULE;
+unsigned IOCTL_DIOCSETLCK = DIOCSETLCK;
+unsigned IOCTL_DIOCCLRSTATES = DIOCCLRSTATES;
+unsigned IOCTL_DIOCGETSTATE = DIOCGETSTATE;
+unsigned IOCTL_DIOCSETSTATUSIF = DIOCSETSTATUSIF;
+unsigned IOCTL_DIOCGETSTATUS = DIOCGETSTATUS;
+unsigned IOCTL_DIOCCLRSTATUS = DIOCCLRSTATUS;
+unsigned IOCTL_DIOCNATLOOK = DIOCNATLOOK;
+unsigned IOCTL_DIOCSETDEBUG = DIOCSETDEBUG;
+unsigned IOCTL_DIOCGETSTATES = DIOCGETSTATES;
+unsigned IOCTL_DIOCCHANGERULE = DIOCCHANGERULE;
+unsigned IOCTL_DIOCSETTIMEOUT = DIOCSETTIMEOUT;
+unsigned IOCTL_DIOCGETTIMEOUT = DIOCGETTIMEOUT;
+unsigned IOCTL_DIOCADDSTATE = DIOCADDSTATE;
+unsigned IOCTL_DIOCCLRRULECTRS = DIOCCLRRULECTRS;
+unsigned IOCTL_DIOCGETLIMIT = DIOCGETLIMIT;
+unsigned IOCTL_DIOCSETLIMIT = DIOCSETLIMIT;
+unsigned IOCTL_DIOCKILLSTATES = DIOCKILLSTATES;
+unsigned IOCTL_DIOCSTARTALTQ = DIOCSTARTALTQ;
+unsigned IOCTL_DIOCSTOPALTQ = DIOCSTOPALTQ;
+unsigned IOCTL_DIOCADDALTQ = DIOCADDALTQ;
+unsigned IOCTL_DIOCGETALTQS = DIOCGETALTQS;
+unsigned IOCTL_DIOCGETALTQ = DIOCGETALTQ;
+unsigned IOCTL_DIOCCHANGEALTQ = DIOCCHANGEALTQ;
+unsigned IOCTL_DIOCGETQSTATS = DIOCGETQSTATS;
+unsigned IOCTL_DIOCBEGINADDRS = DIOCBEGINADDRS;
+unsigned IOCTL_DIOCADDADDR = DIOCADDADDR;
+unsigned IOCTL_DIOCGETADDRS = DIOCGETADDRS;
+unsigned IOCTL_DIOCGETADDR = DIOCGETADDR;
+unsigned IOCTL_DIOCCHANGEADDR = DIOCCHANGEADDR;
+unsigned IOCTL_DIOCADDSTATES = DIOCADDSTATES;
+unsigned IOCTL_DIOCGETRULESETS = DIOCGETRULESETS;
+unsigned IOCTL_DIOCGETRULESET = DIOCGETRULESET;
+unsigned IOCTL_DIOCRCLRTABLES = DIOCRCLRTABLES;
+unsigned IOCTL_DIOCRADDTABLES = DIOCRADDTABLES;
+unsigned IOCTL_DIOCRDELTABLES = DIOCRDELTABLES;
+unsigned IOCTL_DIOCRGETTABLES = DIOCRGETTABLES;
+unsigned IOCTL_DIOCRGETTSTATS = DIOCRGETTSTATS;
+unsigned IOCTL_DIOCRCLRTSTATS = DIOCRCLRTSTATS;
+unsigned IOCTL_DIOCRCLRADDRS = DIOCRCLRADDRS;
+unsigned IOCTL_DIOCRADDADDRS = DIOCRADDADDRS;
+unsigned IOCTL_DIOCRDELADDRS = DIOCRDELADDRS;
+unsigned IOCTL_DIOCRSETADDRS = DIOCRSETADDRS;
+unsigned IOCTL_DIOCRGETADDRS = DIOCRGETADDRS;
+unsigned IOCTL_DIOCRGETASTATS = DIOCRGETASTATS;
+unsigned IOCTL_DIOCRCLRASTATS = DIOCRCLRASTATS;
+unsigned IOCTL_DIOCRTSTADDRS = DIOCRTSTADDRS;
+unsigned IOCTL_DIOCRSETTFLAGS = DIOCRSETTFLAGS;
+unsigned IOCTL_DIOCRINADEFINE = DIOCRINADEFINE;
+unsigned IOCTL_DIOCOSFPFLUSH = DIOCOSFPFLUSH;
+unsigned IOCTL_DIOCOSFPADD = DIOCOSFPADD;
+unsigned IOCTL_DIOCOSFPGET = DIOCOSFPGET;
+unsigned IOCTL_DIOCXBEGIN = DIOCXBEGIN;
+unsigned IOCTL_DIOCXCOMMIT = DIOCXCOMMIT;
+unsigned IOCTL_DIOCXROLLBACK = DIOCXROLLBACK;
+unsigned IOCTL_DIOCGETSRCNODES = DIOCGETSRCNODES;
+unsigned IOCTL_DIOCCLRSRCNODES = DIOCCLRSRCNODES;
+unsigned IOCTL_DIOCSETHOSTID = DIOCSETHOSTID;
+unsigned IOCTL_DIOCIGETIFACES = DIOCIGETIFACES;
+unsigned IOCTL_DIOCSETIFFLAG = DIOCSETIFFLAG;
+unsigned IOCTL_DIOCCLRIFFLAG = DIOCCLRIFFLAG;
+unsigned IOCTL_DIOCKILLSRCNODES = DIOCKILLSRCNODES;
+unsigned IOCTL_SLIOCGUNIT = SLIOCGUNIT;
+unsigned IOCTL_SIOCGBTINFO = SIOCGBTINFO;
+unsigned IOCTL_SIOCGBTINFOA = SIOCGBTINFOA;
+unsigned IOCTL_SIOCNBTINFO = SIOCNBTINFO;
+unsigned IOCTL_SIOCSBTFLAGS = SIOCSBTFLAGS;
+unsigned IOCTL_SIOCSBTPOLICY = SIOCSBTPOLICY;
+unsigned IOCTL_SIOCSBTPTYPE = SIOCSBTPTYPE;
+unsigned IOCTL_SIOCGBTSTATS = SIOCGBTSTATS;
+unsigned IOCTL_SIOCZBTSTATS = SIOCZBTSTATS;
+unsigned IOCTL_SIOCBTDUMP = SIOCBTDUMP;
+unsigned IOCTL_SIOCSBTSCOMTU = SIOCSBTSCOMTU;
+unsigned IOCTL_SIOCGBTFEAT = SIOCGBTFEAT;
+unsigned IOCTL_SIOCADNAT = SIOCADNAT;
+unsigned IOCTL_SIOCRMNAT = SIOCRMNAT;
+unsigned IOCTL_SIOCGNATS = SIOCGNATS;
+unsigned IOCTL_SIOCGNATL = SIOCGNATL;
+unsigned IOCTL_SIOCPURGENAT = SIOCPURGENAT;
+unsigned IOCTL_SIOCCONNECTX = SIOCCONNECTX;
+unsigned IOCTL_SIOCCONNECTXDEL = SIOCCONNECTXDEL;
+unsigned IOCTL_SIOCSIFINFO_FLAGS = SIOCSIFINFO_FLAGS;
+unsigned IOCTL_SIOCAADDRCTL_POLICY = SIOCAADDRCTL_POLICY;
+unsigned IOCTL_SIOCDADDRCTL_POLICY = SIOCDADDRCTL_POLICY;
+unsigned IOCTL_SMBIOC_OPENSESSION = SMBIOC_OPENSESSION;
+unsigned IOCTL_SMBIOC_OPENSHARE = SMBIOC_OPENSHARE;
+unsigned IOCTL_SMBIOC_REQUEST = SMBIOC_REQUEST;
+unsigned IOCTL_SMBIOC_SETFLAGS = SMBIOC_SETFLAGS;
+unsigned IOCTL_SMBIOC_LOOKUP = SMBIOC_LOOKUP;
+unsigned IOCTL_SMBIOC_READ = SMBIOC_READ;
+unsigned IOCTL_SMBIOC_WRITE = SMBIOC_WRITE;
+unsigned IOCTL_AGPIOC_INFO = AGPIOC_INFO;
+unsigned IOCTL_AGPIOC_ACQUIRE = AGPIOC_ACQUIRE;
+unsigned IOCTL_AGPIOC_RELEASE = AGPIOC_RELEASE;
+unsigned IOCTL_AGPIOC_SETUP = AGPIOC_SETUP;
+unsigned IOCTL_AGPIOC_ALLOCATE = AGPIOC_ALLOCATE;
+unsigned IOCTL_AGPIOC_DEALLOCATE = AGPIOC_DEALLOCATE;
+unsigned IOCTL_AGPIOC_BIND = AGPIOC_BIND;
+unsigned IOCTL_AGPIOC_UNBIND = AGPIOC_UNBIND;
+unsigned IOCTL_AUDIO_GETINFO = AUDIO_GETINFO;
+unsigned IOCTL_AUDIO_SETINFO = AUDIO_SETINFO;
+unsigned IOCTL_AUDIO_DRAIN = AUDIO_DRAIN;
+unsigned IOCTL_AUDIO_FLUSH = AUDIO_FLUSH;
+unsigned IOCTL_AUDIO_WSEEK = AUDIO_WSEEK;
+unsigned IOCTL_AUDIO_RERROR = AUDIO_RERROR;
+unsigned IOCTL_AUDIO_GETDEV = AUDIO_GETDEV;
+unsigned IOCTL_AUDIO_GETENC = AUDIO_GETENC;
+unsigned IOCTL_AUDIO_GETFD = AUDIO_GETFD;
+unsigned IOCTL_AUDIO_SETFD = AUDIO_SETFD;
+unsigned IOCTL_AUDIO_PERROR = AUDIO_PERROR;
+unsigned IOCTL_AUDIO_GETIOFFS = AUDIO_GETIOFFS;
+unsigned IOCTL_AUDIO_GETOOFFS = AUDIO_GETOOFFS;
+unsigned IOCTL_AUDIO_GETPROPS = AUDIO_GETPROPS;
+unsigned IOCTL_AUDIO_GETBUFINFO = AUDIO_GETBUFINFO;
+unsigned IOCTL_AUDIO_SETCHAN = AUDIO_SETCHAN;
+unsigned IOCTL_AUDIO_GETCHAN = AUDIO_GETCHAN;
+unsigned IOCTL_AUDIO_QUERYFORMAT = AUDIO_QUERYFORMAT;
+unsigned IOCTL_AUDIO_GETFORMAT = AUDIO_GETFORMAT;
+unsigned IOCTL_AUDIO_SETFORMAT = AUDIO_SETFORMAT;
+unsigned IOCTL_AUDIO_MIXER_READ = AUDIO_MIXER_READ;
+unsigned IOCTL_AUDIO_MIXER_WRITE = AUDIO_MIXER_WRITE;
+unsigned IOCTL_AUDIO_MIXER_DEVINFO = AUDIO_MIXER_DEVINFO;
+unsigned IOCTL_ATAIOCCOMMAND = ATAIOCCOMMAND;
+unsigned IOCTL_ATABUSIOSCAN = ATABUSIOSCAN;
+unsigned IOCTL_ATABUSIORESET = ATABUSIORESET;
+unsigned IOCTL_ATABUSIODETACH = ATABUSIODETACH;
+unsigned IOCTL_CDIOCPLAYTRACKS = CDIOCPLAYTRACKS;
+unsigned IOCTL_CDIOCPLAYBLOCKS = CDIOCPLAYBLOCKS;
+unsigned IOCTL_CDIOCREADSUBCHANNEL = CDIOCREADSUBCHANNEL;
+unsigned IOCTL_CDIOREADTOCHEADER = CDIOREADTOCHEADER;
+unsigned IOCTL_CDIOREADTOCENTRIES = CDIOREADTOCENTRIES;
+unsigned IOCTL_CDIOREADMSADDR = CDIOREADMSADDR;
+unsigned IOCTL_CDIOCSETPATCH = CDIOCSETPATCH;
+unsigned IOCTL_CDIOCGETVOL = CDIOCGETVOL;
+unsigned IOCTL_CDIOCSETVOL = CDIOCSETVOL;
+unsigned IOCTL_CDIOCSETMONO = CDIOCSETMONO;
+unsigned IOCTL_CDIOCSETSTEREO = CDIOCSETSTEREO;
+unsigned IOCTL_CDIOCSETMUTE = CDIOCSETMUTE;
+unsigned IOCTL_CDIOCSETLEFT = CDIOCSETLEFT;
+unsigned IOCTL_CDIOCSETRIGHT = CDIOCSETRIGHT;
+unsigned IOCTL_CDIOCSETDEBUG = CDIOCSETDEBUG;
+unsigned IOCTL_CDIOCCLRDEBUG = CDIOCCLRDEBUG;
+unsigned IOCTL_CDIOCPAUSE = CDIOCPAUSE;
+unsigned IOCTL_CDIOCRESUME = CDIOCRESUME;
+unsigned IOCTL_CDIOCRESET = CDIOCRESET;
+unsigned IOCTL_CDIOCSTART = CDIOCSTART;
+unsigned IOCTL_CDIOCSTOP = CDIOCSTOP;
+unsigned IOCTL_CDIOCEJECT = CDIOCEJECT;
+unsigned IOCTL_CDIOCALLOW = CDIOCALLOW;
+unsigned IOCTL_CDIOCPREVENT = CDIOCPREVENT;
+unsigned IOCTL_CDIOCCLOSE = CDIOCCLOSE;
+unsigned IOCTL_CDIOCPLAYMSF = CDIOCPLAYMSF;
+unsigned IOCTL_CDIOCLOADUNLOAD = CDIOCLOADUNLOAD;
+unsigned IOCTL_CHIOMOVE = CHIOMOVE;
+unsigned IOCTL_CHIOEXCHANGE = CHIOEXCHANGE;
+unsigned IOCTL_CHIOPOSITION = CHIOPOSITION;
+unsigned IOCTL_CHIOGPICKER = CHIOGPICKER;
+unsigned IOCTL_CHIOSPICKER = CHIOSPICKER;
+unsigned IOCTL_CHIOGPARAMS = CHIOGPARAMS;
+unsigned IOCTL_CHIOIELEM = CHIOIELEM;
+unsigned IOCTL_OCHIOGSTATUS = OCHIOGSTATUS;
+unsigned IOCTL_CHIOGSTATUS = CHIOGSTATUS;
+unsigned IOCTL_CHIOSVOLTAG = CHIOSVOLTAG;
+unsigned IOCTL_CLOCKCTL_SETTIMEOFDAY = CLOCKCTL_SETTIMEOFDAY;
+unsigned IOCTL_CLOCKCTL_ADJTIME = CLOCKCTL_ADJTIME;
+unsigned IOCTL_CLOCKCTL_CLOCK_SETTIME = CLOCKCTL_CLOCK_SETTIME;
+unsigned IOCTL_CLOCKCTL_NTP_ADJTIME = CLOCKCTL_NTP_ADJTIME;
+unsigned IOCTL_IOC_CPU_SETSTATE = IOC_CPU_SETSTATE;
+unsigned IOCTL_IOC_CPU_GETSTATE = IOC_CPU_GETSTATE;
+unsigned IOCTL_IOC_CPU_GETCOUNT = IOC_CPU_GETCOUNT;
+unsigned IOCTL_IOC_CPU_MAPID = IOC_CPU_MAPID;
+unsigned IOCTL_IOC_CPU_UCODE_GET_VERSION = IOC_CPU_UCODE_GET_VERSION;
+unsigned IOCTL_IOC_CPU_UCODE_APPLY = IOC_CPU_UCODE_APPLY;
+unsigned IOCTL_DIOCGDINFO = DIOCGDINFO;
+unsigned IOCTL_DIOCSDINFO = DIOCSDINFO;
+unsigned IOCTL_DIOCWDINFO = DIOCWDINFO;
+unsigned IOCTL_DIOCRFORMAT = DIOCRFORMAT;
+unsigned IOCTL_DIOCWFORMAT = DIOCWFORMAT;
+unsigned IOCTL_DIOCSSTEP = DIOCSSTEP;
+unsigned IOCTL_DIOCSRETRIES = DIOCSRETRIES;
+unsigned IOCTL_DIOCKLABEL = DIOCKLABEL;
+unsigned IOCTL_DIOCWLABEL = DIOCWLABEL;
+unsigned IOCTL_DIOCSBAD = DIOCSBAD;
+unsigned IOCTL_DIOCEJECT = DIOCEJECT;
+unsigned IOCTL_ODIOCEJECT = ODIOCEJECT;
+unsigned IOCTL_DIOCLOCK = DIOCLOCK;
+unsigned IOCTL_DIOCGDEFLABEL = DIOCGDEFLABEL;
+unsigned IOCTL_DIOCCLRLABEL = DIOCCLRLABEL;
+unsigned IOCTL_DIOCGCACHE = DIOCGCACHE;
+unsigned IOCTL_DIOCSCACHE = DIOCSCACHE;
+unsigned IOCTL_DIOCCACHESYNC = DIOCCACHESYNC;
+unsigned IOCTL_DIOCBSLIST = DIOCBSLIST;
+unsigned IOCTL_DIOCBSFLUSH = DIOCBSFLUSH;
+unsigned IOCTL_DIOCAWEDGE = DIOCAWEDGE;
+unsigned IOCTL_DIOCGWEDGEINFO = DIOCGWEDGEINFO;
+unsigned IOCTL_DIOCDWEDGE = DIOCDWEDGE;
+unsigned IOCTL_DIOCLWEDGES = DIOCLWEDGES;
+unsigned IOCTL_DIOCGSTRATEGY = DIOCGSTRATEGY;
+unsigned IOCTL_DIOCSSTRATEGY = DIOCSSTRATEGY;
+unsigned IOCTL_DIOCGDISKINFO = DIOCGDISKINFO;
+unsigned IOCTL_DIOCTUR = DIOCTUR;
+unsigned IOCTL_DIOCMWEDGES = DIOCMWEDGES;
+unsigned IOCTL_DIOCGSECTORSIZE = DIOCGSECTORSIZE;
+unsigned IOCTL_DIOCGMEDIASIZE = DIOCGMEDIASIZE;
+unsigned IOCTL_DIOCRMWEDGES = DIOCRMWEDGES;
+unsigned IOCTL_DRVDETACHDEV = DRVDETACHDEV;
+unsigned IOCTL_DRVRESCANBUS = DRVRESCANBUS;
+unsigned IOCTL_DRVCTLCOMMAND = DRVCTLCOMMAND;
+unsigned IOCTL_DRVRESUMEDEV = DRVRESUMEDEV;
+unsigned IOCTL_DRVLISTDEV = DRVLISTDEV;
+unsigned IOCTL_DRVGETEVENT = DRVGETEVENT;
+unsigned IOCTL_DRVSUSPENDDEV = DRVSUSPENDDEV;
+unsigned IOCTL_DVD_READ_STRUCT = DVD_READ_STRUCT;
+unsigned IOCTL_DVD_WRITE_STRUCT = DVD_WRITE_STRUCT;
+unsigned IOCTL_DVD_AUTH = DVD_AUTH;
+unsigned IOCTL_ENVSYS_GETDICTIONARY = ENVSYS_GETDICTIONARY;
+unsigned IOCTL_ENVSYS_SETDICTIONARY = ENVSYS_SETDICTIONARY;
+unsigned IOCTL_ENVSYS_REMOVEPROPS = ENVSYS_REMOVEPROPS;
+unsigned IOCTL_ENVSYS_GTREDATA = ENVSYS_GTREDATA;
+unsigned IOCTL_ENVSYS_GTREINFO = ENVSYS_GTREINFO;
+unsigned IOCTL_KFILTER_BYFILTER = KFILTER_BYFILTER;
+unsigned IOCTL_KFILTER_BYNAME = KFILTER_BYNAME;
+unsigned IOCTL_FDIOCGETOPTS = FDIOCGETOPTS;
+unsigned IOCTL_FDIOCSETOPTS = FDIOCSETOPTS;
+unsigned IOCTL_FDIOCSETFORMAT = FDIOCSETFORMAT;
+unsigned IOCTL_FDIOCGETFORMAT = FDIOCGETFORMAT;
+unsigned IOCTL_FDIOCFORMAT_TRACK = FDIOCFORMAT_TRACK;
+unsigned IOCTL_FIOCLEX = FIOCLEX;
+unsigned IOCTL_FIONCLEX = FIONCLEX;
+unsigned IOCTL_FIOSEEKDATA = FIOSEEKDATA;
+unsigned IOCTL_FIOSEEKHOLE = FIOSEEKHOLE;
+unsigned IOCTL_FIONREAD = FIONREAD;
+unsigned IOCTL_FIONBIO = FIONBIO;
+unsigned IOCTL_FIOASYNC = FIOASYNC;
+unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+unsigned IOCTL_OFIOGETBMAP = OFIOGETBMAP;
+unsigned IOCTL_FIOGETBMAP = FIOGETBMAP;
+unsigned IOCTL_FIONWRITE = FIONWRITE;
+unsigned IOCTL_FIONSPACE = FIONSPACE;
+unsigned IOCTL_GPIOINFO = GPIOINFO;
+unsigned IOCTL_GPIOSET = GPIOSET;
+unsigned IOCTL_GPIOUNSET = GPIOUNSET;
+unsigned IOCTL_GPIOREAD = GPIOREAD;
+unsigned IOCTL_GPIOWRITE = GPIOWRITE;
+unsigned IOCTL_GPIOTOGGLE = GPIOTOGGLE;
+unsigned IOCTL_GPIOATTACH = GPIOATTACH;
+unsigned IOCTL_PTIOCNETBSD = PTIOCNETBSD;
+unsigned IOCTL_PTIOCSUNOS = PTIOCSUNOS;
+unsigned IOCTL_PTIOCLINUX = PTIOCLINUX;
+unsigned IOCTL_PTIOCFREEBSD = PTIOCFREEBSD;
+unsigned IOCTL_PTIOCULTRIX = PTIOCULTRIX;
+unsigned IOCTL_TIOCHPCL = TIOCHPCL;
+unsigned IOCTL_TIOCGETP = TIOCGETP;
+unsigned IOCTL_TIOCSETP = TIOCSETP;
+unsigned IOCTL_TIOCSETN = TIOCSETN;
+unsigned IOCTL_TIOCSETC = TIOCSETC;
+unsigned IOCTL_TIOCGETC = TIOCGETC;
+unsigned IOCTL_TIOCLBIS = TIOCLBIS;
+unsigned IOCTL_TIOCLBIC = TIOCLBIC;
+unsigned IOCTL_TIOCLSET = TIOCLSET;
+unsigned IOCTL_TIOCLGET = TIOCLGET;
+unsigned IOCTL_TIOCSLTC = TIOCSLTC;
+unsigned IOCTL_TIOCGLTC = TIOCGLTC;
+unsigned IOCTL_OTIOCCONS = OTIOCCONS;
+unsigned IOCTL_JOY_SETTIMEOUT = JOY_SETTIMEOUT;
+unsigned IOCTL_JOY_GETTIMEOUT = JOY_GETTIMEOUT;
+unsigned IOCTL_JOY_SET_X_OFFSET = JOY_SET_X_OFFSET;
+unsigned IOCTL_JOY_SET_Y_OFFSET = JOY_SET_Y_OFFSET;
+unsigned IOCTL_JOY_GET_X_OFFSET = JOY_GET_X_OFFSET;
+unsigned IOCTL_JOY_GET_Y_OFFSET = JOY_GET_Y_OFFSET;
+unsigned IOCTL_OKIOCGSYMBOL = OKIOCGSYMBOL;
+unsigned IOCTL_OKIOCGVALUE = OKIOCGVALUE;
+unsigned IOCTL_KIOCGSIZE = KIOCGSIZE;
+unsigned IOCTL_KIOCGVALUE = KIOCGVALUE;
+unsigned IOCTL_KIOCGSYMBOL = KIOCGSYMBOL;
+unsigned IOCTL_LUAINFO = LUAINFO;
+unsigned IOCTL_LUACREATE = LUACREATE;
+unsigned IOCTL_LUADESTROY = LUADESTROY;
+unsigned IOCTL_LUAREQUIRE = LUAREQUIRE;
+unsigned IOCTL_LUALOAD = LUALOAD;
+unsigned IOCTL_MIDI_PRETIME = MIDI_PRETIME;
+unsigned IOCTL_MIDI_MPUMODE = MIDI_MPUMODE;
+unsigned IOCTL_MIDI_MPUCMD = MIDI_MPUCMD;
+unsigned IOCTL_SEQUENCER_RESET = SEQUENCER_RESET;
+unsigned IOCTL_SEQUENCER_SYNC = SEQUENCER_SYNC;
+unsigned IOCTL_SEQUENCER_INFO = SEQUENCER_INFO;
+unsigned IOCTL_SEQUENCER_CTRLRATE = SEQUENCER_CTRLRATE;
+unsigned IOCTL_SEQUENCER_GETOUTCOUNT = SEQUENCER_GETOUTCOUNT;
+unsigned IOCTL_SEQUENCER_GETINCOUNT = SEQUENCER_GETINCOUNT;
+unsigned IOCTL_SEQUENCER_RESETSAMPLES = SEQUENCER_RESETSAMPLES;
+unsigned IOCTL_SEQUENCER_NRSYNTHS = SEQUENCER_NRSYNTHS;
+unsigned IOCTL_SEQUENCER_NRMIDIS = SEQUENCER_NRMIDIS;
+unsigned IOCTL_SEQUENCER_THRESHOLD = SEQUENCER_THRESHOLD;
+unsigned IOCTL_SEQUENCER_MEMAVL = SEQUENCER_MEMAVL;
+unsigned IOCTL_SEQUENCER_PANIC = SEQUENCER_PANIC;
+unsigned IOCTL_SEQUENCER_OUTOFBAND = SEQUENCER_OUTOFBAND;
+unsigned IOCTL_SEQUENCER_GETTIME = SEQUENCER_GETTIME;
+unsigned IOCTL_SEQUENCER_TMR_TIMEBASE = SEQUENCER_TMR_TIMEBASE;
+unsigned IOCTL_SEQUENCER_TMR_START = SEQUENCER_TMR_START;
+unsigned IOCTL_SEQUENCER_TMR_STOP = SEQUENCER_TMR_STOP;
+unsigned IOCTL_SEQUENCER_TMR_CONTINUE = SEQUENCER_TMR_CONTINUE;
+unsigned IOCTL_SEQUENCER_TMR_TEMPO = SEQUENCER_TMR_TEMPO;
+unsigned IOCTL_SEQUENCER_TMR_SOURCE = SEQUENCER_TMR_SOURCE;
+unsigned IOCTL_SEQUENCER_TMR_METRONOME = SEQUENCER_TMR_METRONOME;
+unsigned IOCTL_SEQUENCER_TMR_SELECT = SEQUENCER_TMR_SELECT;
+unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+unsigned IOCTL_MTIOCGET = MTIOCGET;
+unsigned IOCTL_MTIOCIEOT = MTIOCIEOT;
+unsigned IOCTL_MTIOCEEOT = MTIOCEEOT;
+unsigned IOCTL_MTIOCRDSPOS = MTIOCRDSPOS;
+unsigned IOCTL_MTIOCRDHPOS = MTIOCRDHPOS;
+unsigned IOCTL_MTIOCSLOCATE = MTIOCSLOCATE;
+unsigned IOCTL_MTIOCHLOCATE = MTIOCHLOCATE;
+unsigned IOCTL_POWER_EVENT_RECVDICT = POWER_EVENT_RECVDICT;
+unsigned IOCTL_POWER_IOC_GET_TYPE = POWER_IOC_GET_TYPE;
+unsigned IOCTL_RIOCGINFO = RIOCGINFO;
+unsigned IOCTL_RIOCSINFO = RIOCSINFO;
+unsigned IOCTL_RIOCSSRCH = RIOCSSRCH;
+unsigned IOCTL_RNDGETENTCNT = RNDGETENTCNT;
+unsigned IOCTL_RNDGETSRCNUM = RNDGETSRCNUM;
+unsigned IOCTL_RNDGETSRCNAME = RNDGETSRCNAME;
+unsigned IOCTL_RNDCTL = RNDCTL;
+unsigned IOCTL_RNDADDDATA = RNDADDDATA;
+unsigned IOCTL_RNDGETPOOLSTAT = RNDGETPOOLSTAT;
+unsigned IOCTL_RNDGETESTNUM = RNDGETESTNUM;
+unsigned IOCTL_RNDGETESTNAME = RNDGETESTNAME;
+unsigned IOCTL_SCIOCGET = SCIOCGET;
+unsigned IOCTL_SCIOCSET = SCIOCSET;
+unsigned IOCTL_SCIOCRESTART = SCIOCRESTART;
+unsigned IOCTL_SCIOC_USE_ADF = SCIOC_USE_ADF;
+unsigned IOCTL_SCIOCCOMMAND = SCIOCCOMMAND;
+unsigned IOCTL_SCIOCDEBUG = SCIOCDEBUG;
+unsigned IOCTL_SCIOCIDENTIFY = SCIOCIDENTIFY;
+unsigned IOCTL_OSCIOCIDENTIFY = OSCIOCIDENTIFY;
+unsigned IOCTL_SCIOCDECONFIG = SCIOCDECONFIG;
+unsigned IOCTL_SCIOCRECONFIG = SCIOCRECONFIG;
+unsigned IOCTL_SCIOCRESET = SCIOCRESET;
+unsigned IOCTL_SCBUSIOSCAN = SCBUSIOSCAN;
+unsigned IOCTL_SCBUSIORESET = SCBUSIORESET;
+unsigned IOCTL_SCBUSIODETACH = SCBUSIODETACH;
+unsigned IOCTL_SCBUSACCEL = SCBUSACCEL;
+unsigned IOCTL_SCBUSIOLLSCAN = SCBUSIOLLSCAN;
+unsigned IOCTL_SIOCSHIWAT = SIOCSHIWAT;
+unsigned IOCTL_SIOCGHIWAT = SIOCGHIWAT;
+unsigned IOCTL_SIOCSLOWAT = SIOCSLOWAT;
+unsigned IOCTL_SIOCGLOWAT = SIOCGLOWAT;
+unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+unsigned IOCTL_SIOCPEELOFF = SIOCPEELOFF;
+unsigned IOCTL_SIOCADDRT = SIOCADDRT;
+unsigned IOCTL_SIOCDELRT = SIOCDELRT;
+unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+unsigned IOCTL_SIOCDIFADDR = SIOCDIFADDR;
+unsigned IOCTL_SIOCAIFADDR = SIOCAIFADDR;
+unsigned IOCTL_SIOCGIFALIAS = SIOCGIFALIAS;
+unsigned IOCTL_SIOCGIFAFLAG_IN = SIOCGIFAFLAG_IN;
+unsigned IOCTL_SIOCALIFADDR = SIOCALIFADDR;
+unsigned IOCTL_SIOCGLIFADDR = SIOCGLIFADDR;
+unsigned IOCTL_SIOCDLIFADDR = SIOCDLIFADDR;
+unsigned IOCTL_SIOCSIFADDRPREF = SIOCSIFADDRPREF;
+unsigned IOCTL_SIOCGIFADDRPREF = SIOCGIFADDRPREF;
+unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
+unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
+unsigned IOCTL_SIOCSIFMEDIA = SIOCSIFMEDIA;
+unsigned IOCTL_SIOCGIFMEDIA = SIOCGIFMEDIA;
+unsigned IOCTL_SIOCSIFGENERIC = SIOCSIFGENERIC;
+unsigned IOCTL_SIOCGIFGENERIC = SIOCGIFGENERIC;
+unsigned IOCTL_SIOCSIFPHYADDR = SIOCSIFPHYADDR;
+unsigned IOCTL_SIOCGIFPSRCADDR = SIOCGIFPSRCADDR;
+unsigned IOCTL_SIOCGIFPDSTADDR = SIOCGIFPDSTADDR;
+unsigned IOCTL_SIOCDIFPHYADDR = SIOCDIFPHYADDR;
+unsigned IOCTL_SIOCSLIFPHYADDR = SIOCSLIFPHYADDR;
+unsigned IOCTL_SIOCGLIFPHYADDR = SIOCGLIFPHYADDR;
+unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+unsigned IOCTL_SIOCSDRVSPEC = SIOCSDRVSPEC;
+unsigned IOCTL_SIOCGDRVSPEC = SIOCGDRVSPEC;
+unsigned IOCTL_SIOCIFCREATE = SIOCIFCREATE;
+unsigned IOCTL_SIOCIFDESTROY = SIOCIFDESTROY;
+unsigned IOCTL_SIOCIFGCLONERS = SIOCIFGCLONERS;
+unsigned IOCTL_SIOCGIFDLT = SIOCGIFDLT;
+unsigned IOCTL_SIOCGIFCAP = SIOCGIFCAP;
+unsigned IOCTL_SIOCSIFCAP = SIOCSIFCAP;
+unsigned IOCTL_SIOCSVH = SIOCSVH;
+unsigned IOCTL_SIOCGVH = SIOCGVH;
+unsigned IOCTL_SIOCINITIFADDR = SIOCINITIFADDR;
+unsigned IOCTL_SIOCGIFDATA = SIOCGIFDATA;
+unsigned IOCTL_SIOCZIFDATA = SIOCZIFDATA;
+unsigned IOCTL_SIOCGLINKSTR = SIOCGLINKSTR;
+unsigned IOCTL_SIOCSLINKSTR = SIOCSLINKSTR;
+unsigned IOCTL_SIOCGETHERCAP = SIOCGETHERCAP;
+unsigned IOCTL_SIOCGIFINDEX = SIOCGIFINDEX;
+unsigned IOCTL_SIOCSETHERCAP = SIOCSETHERCAP;
+unsigned IOCTL_SIOCSIFDESCR = SIOCSIFDESCR;
+unsigned IOCTL_SIOCGIFDESCR = SIOCGIFDESCR;
+unsigned IOCTL_SIOCGUMBINFO = SIOCGUMBINFO;
+unsigned IOCTL_SIOCSUMBPARAM = SIOCSUMBPARAM;
+unsigned IOCTL_SIOCGUMBPARAM = SIOCGUMBPARAM;
+unsigned IOCTL_SIOCSETPFSYNC = SIOCSETPFSYNC;
+unsigned IOCTL_SIOCGETPFSYNC = SIOCGETPFSYNC;
+unsigned IOCTL_PPS_IOC_CREATE = PPS_IOC_CREATE;
+unsigned IOCTL_PPS_IOC_DESTROY = PPS_IOC_DESTROY;
+unsigned IOCTL_PPS_IOC_SETPARAMS = PPS_IOC_SETPARAMS;
+unsigned IOCTL_PPS_IOC_GETPARAMS = PPS_IOC_GETPARAMS;
+unsigned IOCTL_PPS_IOC_GETCAP = PPS_IOC_GETCAP;
+unsigned IOCTL_PPS_IOC_FETCH = PPS_IOC_FETCH;
+unsigned IOCTL_PPS_IOC_KCBIND = PPS_IOC_KCBIND;
+unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+unsigned IOCTL_TIOCFLUSH = TIOCFLUSH;
+unsigned IOCTL_TIOCGETA = TIOCGETA;
+unsigned IOCTL_TIOCSETA = TIOCSETA;
+unsigned IOCTL_TIOCSETAW = TIOCSETAW;
+unsigned IOCTL_TIOCSETAF = TIOCSETAF;
+unsigned IOCTL_TIOCGETD = TIOCGETD;
+unsigned IOCTL_TIOCSETD = TIOCSETD;
+unsigned IOCTL_TIOCGLINED = TIOCGLINED;
+unsigned IOCTL_TIOCSLINED = TIOCSLINED;
+unsigned IOCTL_TIOCSBRK = TIOCSBRK;
+unsigned IOCTL_TIOCCBRK = TIOCCBRK;
+unsigned IOCTL_TIOCSDTR = TIOCSDTR;
+unsigned IOCTL_TIOCCDTR = TIOCCDTR;
+unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+unsigned IOCTL_TIOCSTI = TIOCSTI;
+unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+unsigned IOCTL_TIOCPKT = TIOCPKT;
+unsigned IOCTL_TIOCSTOP = TIOCSTOP;
+unsigned IOCTL_TIOCSTART = TIOCSTART;
+unsigned IOCTL_TIOCMSET = TIOCMSET;
+unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+unsigned IOCTL_TIOCMGET = TIOCMGET;
+unsigned IOCTL_TIOCREMOTE = TIOCREMOTE;
+unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+unsigned IOCTL_TIOCUCNTL = TIOCUCNTL;
+unsigned IOCTL_TIOCSTAT = TIOCSTAT;
+unsigned IOCTL_TIOCGSID = TIOCGSID;
+unsigned IOCTL_TIOCCONS = TIOCCONS;
+unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+unsigned IOCTL_TIOCEXT = TIOCEXT;
+unsigned IOCTL_TIOCSIG = TIOCSIG;
+unsigned IOCTL_TIOCDRAIN = TIOCDRAIN;
+unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;
+unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;
+unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;
+unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;
+unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;
+unsigned IOCTL_TIOCPTMGET = TIOCPTMGET;
+unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;
+unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;
+unsigned IOCTL_TIOCSQSIZE = TIOCSQSIZE;
+unsigned IOCTL_TIOCGQSIZE = TIOCGQSIZE;
+unsigned IOCTL_VERIEXEC_LOAD = VERIEXEC_LOAD;
+unsigned IOCTL_VERIEXEC_TABLESIZE = VERIEXEC_TABLESIZE;
+unsigned IOCTL_VERIEXEC_DELETE = VERIEXEC_DELETE;
+unsigned IOCTL_VERIEXEC_QUERY = VERIEXEC_QUERY;
+unsigned IOCTL_VERIEXEC_DUMP = VERIEXEC_DUMP;
+unsigned IOCTL_VERIEXEC_FLUSH = VERIEXEC_FLUSH;
+unsigned IOCTL_VIDIOC_QUERYCAP = VIDIOC_QUERYCAP;
+unsigned IOCTL_VIDIOC_RESERVED = VIDIOC_RESERVED;
+unsigned IOCTL_VIDIOC_ENUM_FMT = VIDIOC_ENUM_FMT;
+unsigned IOCTL_VIDIOC_G_FMT = VIDIOC_G_FMT;
+unsigned IOCTL_VIDIOC_S_FMT = VIDIOC_S_FMT;
+unsigned IOCTL_VIDIOC_REQBUFS = VIDIOC_REQBUFS;
+unsigned IOCTL_VIDIOC_QUERYBUF = VIDIOC_QUERYBUF;
+unsigned IOCTL_VIDIOC_G_FBUF = VIDIOC_G_FBUF;
+unsigned IOCTL_VIDIOC_S_FBUF = VIDIOC_S_FBUF;
+unsigned IOCTL_VIDIOC_OVERLAY = VIDIOC_OVERLAY;
+unsigned IOCTL_VIDIOC_QBUF = VIDIOC_QBUF;
+unsigned IOCTL_VIDIOC_DQBUF = VIDIOC_DQBUF;
+unsigned IOCTL_VIDIOC_STREAMON = VIDIOC_STREAMON;
+unsigned IOCTL_VIDIOC_STREAMOFF = VIDIOC_STREAMOFF;
+unsigned IOCTL_VIDIOC_G_PARM = VIDIOC_G_PARM;
+unsigned IOCTL_VIDIOC_S_PARM = VIDIOC_S_PARM;
+unsigned IOCTL_VIDIOC_G_STD = VIDIOC_G_STD;
+unsigned IOCTL_VIDIOC_S_STD = VIDIOC_S_STD;
+unsigned IOCTL_VIDIOC_ENUMSTD = VIDIOC_ENUMSTD;
+unsigned IOCTL_VIDIOC_ENUMINPUT = VIDIOC_ENUMINPUT;
+unsigned IOCTL_VIDIOC_G_CTRL = VIDIOC_G_CTRL;
+unsigned IOCTL_VIDIOC_S_CTRL = VIDIOC_S_CTRL;
+unsigned IOCTL_VIDIOC_G_TUNER = VIDIOC_G_TUNER;
+unsigned IOCTL_VIDIOC_S_TUNER = VIDIOC_S_TUNER;
+unsigned IOCTL_VIDIOC_G_AUDIO = VIDIOC_G_AUDIO;
+unsigned IOCTL_VIDIOC_S_AUDIO = VIDIOC_S_AUDIO;
+unsigned IOCTL_VIDIOC_QUERYCTRL = VIDIOC_QUERYCTRL;
+unsigned IOCTL_VIDIOC_QUERYMENU = VIDIOC_QUERYMENU;
+unsigned IOCTL_VIDIOC_G_INPUT = VIDIOC_G_INPUT;
+unsigned IOCTL_VIDIOC_S_INPUT = VIDIOC_S_INPUT;
+unsigned IOCTL_VIDIOC_G_OUTPUT = VIDIOC_G_OUTPUT;
+unsigned IOCTL_VIDIOC_S_OUTPUT = VIDIOC_S_OUTPUT;
+unsigned IOCTL_VIDIOC_ENUMOUTPUT = VIDIOC_ENUMOUTPUT;
+unsigned IOCTL_VIDIOC_G_AUDOUT = VIDIOC_G_AUDOUT;
+unsigned IOCTL_VIDIOC_S_AUDOUT = VIDIOC_S_AUDOUT;
+unsigned IOCTL_VIDIOC_G_MODULATOR = VIDIOC_G_MODULATOR;
+unsigned IOCTL_VIDIOC_S_MODULATOR = VIDIOC_S_MODULATOR;
+unsigned IOCTL_VIDIOC_G_FREQUENCY = VIDIOC_G_FREQUENCY;
+unsigned IOCTL_VIDIOC_S_FREQUENCY = VIDIOC_S_FREQUENCY;
+unsigned IOCTL_VIDIOC_CROPCAP = VIDIOC_CROPCAP;
+unsigned IOCTL_VIDIOC_G_CROP = VIDIOC_G_CROP;
+unsigned IOCTL_VIDIOC_S_CROP = VIDIOC_S_CROP;
+unsigned IOCTL_VIDIOC_G_JPEGCOMP = VIDIOC_G_JPEGCOMP;
+unsigned IOCTL_VIDIOC_S_JPEGCOMP = VIDIOC_S_JPEGCOMP;
+unsigned IOCTL_VIDIOC_QUERYSTD = VIDIOC_QUERYSTD;
+unsigned IOCTL_VIDIOC_TRY_FMT = VIDIOC_TRY_FMT;
+unsigned IOCTL_VIDIOC_ENUMAUDIO = VIDIOC_ENUMAUDIO;
+unsigned IOCTL_VIDIOC_ENUMAUDOUT = VIDIOC_ENUMAUDOUT;
+unsigned IOCTL_VIDIOC_G_PRIORITY = VIDIOC_G_PRIORITY;
+unsigned IOCTL_VIDIOC_S_PRIORITY = VIDIOC_S_PRIORITY;
+unsigned IOCTL_VIDIOC_ENUM_FRAMESIZES = VIDIOC_ENUM_FRAMESIZES;
+unsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS = VIDIOC_ENUM_FRAMEINTERVALS;
+unsigned IOCTL_WDOGIOC_GMODE = WDOGIOC_GMODE;
+unsigned IOCTL_WDOGIOC_SMODE = WDOGIOC_SMODE;
+unsigned IOCTL_WDOGIOC_WHICH = WDOGIOC_WHICH;
+unsigned IOCTL_WDOGIOC_TICKLE = WDOGIOC_TICKLE;
+unsigned IOCTL_WDOGIOC_GTICKLER = WDOGIOC_GTICKLER;
+unsigned IOCTL_WDOGIOC_GWDOGS = WDOGIOC_GWDOGS;
+unsigned IOCTL_KCOV_IOC_SETBUFSIZE = KCOV_IOC_SETBUFSIZE;
+unsigned IOCTL_KCOV_IOC_ENABLE = KCOV_IOC_ENABLE;
+unsigned IOCTL_KCOV_IOC_DISABLE = KCOV_IOC_DISABLE;
+unsigned IOCTL_IPMICTL_RECEIVE_MSG_TRUNC = IPMICTL_RECEIVE_MSG_TRUNC;
+unsigned IOCTL_IPMICTL_RECEIVE_MSG = IPMICTL_RECEIVE_MSG;
+unsigned IOCTL_IPMICTL_SEND_COMMAND = IPMICTL_SEND_COMMAND;
+unsigned IOCTL_IPMICTL_REGISTER_FOR_CMD = IPMICTL_REGISTER_FOR_CMD;
+unsigned IOCTL_IPMICTL_UNREGISTER_FOR_CMD = IPMICTL_UNREGISTER_FOR_CMD;
+unsigned IOCTL_IPMICTL_SET_GETS_EVENTS_CMD = IPMICTL_SET_GETS_EVENTS_CMD;
+unsigned IOCTL_IPMICTL_SET_MY_ADDRESS_CMD = IPMICTL_SET_MY_ADDRESS_CMD;
+unsigned IOCTL_IPMICTL_GET_MY_ADDRESS_CMD = IPMICTL_GET_MY_ADDRESS_CMD;
+unsigned IOCTL_IPMICTL_SET_MY_LUN_CMD = IPMICTL_SET_MY_LUN_CMD;
+unsigned IOCTL_IPMICTL_GET_MY_LUN_CMD = IPMICTL_GET_MY_LUN_CMD;
+unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;
+unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;
+unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;
+unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;
+unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;
+unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
+unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;
+unsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS;
+unsigned IOCTL_SNDCTL_DSP_CHANNELS = SNDCTL_DSP_CHANNELS;
+unsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS;
+unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
+unsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER;
+unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;
+unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;
+unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;
+unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
+unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
+unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
+unsigned IOCTL_SNDCTL_DSP_GETCAPS = SNDCTL_DSP_GETCAPS;
+unsigned IOCTL_SNDCTL_DSP_GETTRIGGER = SNDCTL_DSP_GETTRIGGER;
+unsigned IOCTL_SNDCTL_DSP_SETTRIGGER = SNDCTL_DSP_SETTRIGGER;
+unsigned IOCTL_SNDCTL_DSP_GETIPTR = SNDCTL_DSP_GETIPTR;
+unsigned IOCTL_SNDCTL_DSP_GETOPTR = SNDCTL_DSP_GETOPTR;
+unsigned IOCTL_SNDCTL_DSP_MAPINBUF = SNDCTL_DSP_MAPINBUF;
+unsigned IOCTL_SNDCTL_DSP_MAPOUTBUF = SNDCTL_DSP_MAPOUTBUF;
+unsigned IOCTL_SNDCTL_DSP_SETSYNCRO = SNDCTL_DSP_SETSYNCRO;
+unsigned IOCTL_SNDCTL_DSP_SETDUPLEX = SNDCTL_DSP_SETDUPLEX;
+unsigned IOCTL_SNDCTL_DSP_PROFILE = SNDCTL_DSP_PROFILE;
+unsigned IOCTL_SNDCTL_DSP_GETODELAY = SNDCTL_DSP_GETODELAY;
+unsigned IOCTL_SOUND_MIXER_INFO = SOUND_MIXER_INFO;
+unsigned IOCTL_SOUND_OLD_MIXER_INFO = SOUND_OLD_MIXER_INFO;
+unsigned IOCTL_OSS_GETVERSION = OSS_GETVERSION;
+unsigned IOCTL_SNDCTL_SYSINFO = SNDCTL_SYSINFO;
+unsigned IOCTL_SNDCTL_AUDIOINFO = SNDCTL_AUDIOINFO;
+unsigned IOCTL_SNDCTL_ENGINEINFO = SNDCTL_ENGINEINFO;
+unsigned IOCTL_SNDCTL_DSP_GETPLAYVOL = SNDCTL_DSP_GETPLAYVOL;
+unsigned IOCTL_SNDCTL_DSP_SETPLAYVOL = SNDCTL_DSP_SETPLAYVOL;
+unsigned IOCTL_SNDCTL_DSP_GETRECVOL = SNDCTL_DSP_GETRECVOL;
+unsigned IOCTL_SNDCTL_DSP_SETRECVOL = SNDCTL_DSP_SETRECVOL;
+unsigned IOCTL_SNDCTL_DSP_SKIP = SNDCTL_DSP_SKIP;
+unsigned IOCTL_SNDCTL_DSP_SILENCE = SNDCTL_DSP_SILENCE;
+
+const int si_SEGV_MAPERR = SEGV_MAPERR;
+const int si_SEGV_ACCERR = SEGV_ACCERR;
+
+const int modctl_load = MODCTL_LOAD;
+const int modctl_unload = MODCTL_UNLOAD;
+const int modctl_stat = MODCTL_STAT;
+const int modctl_exists = MODCTL_EXISTS;
+
+const unsigned SHA1_CTX_sz = sizeof(SHA1_CTX);
+const unsigned SHA1_return_length = SHA1_DIGEST_STRING_LENGTH;
+
+const unsigned MD4_CTX_sz = sizeof(MD4_CTX);
+const unsigned MD4_return_length = MD4_DIGEST_STRING_LENGTH;
+
+const unsigned RMD160_CTX_sz = sizeof(RMD160_CTX);
+const unsigned RMD160_return_length = RMD160_DIGEST_STRING_LENGTH;
+
+const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
+const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
+
+const unsigned fpos_t_sz = sizeof(fpos_t);
+
+const unsigned MD2_CTX_sz = sizeof(MD2_CTX);
+const unsigned MD2_return_length = MD2_DIGEST_STRING_LENGTH;
+
+#define SHA2_CONST(LEN) \
+ const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \
+ const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
+ const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \
+ const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
+
+SHA2_CONST(224);
+SHA2_CONST(256);
+SHA2_CONST(384);
+SHA2_CONST(512);
+
+#undef SHA2_CONST
+
+const int unvis_valid = UNVIS_VALID;
+const int unvis_validpush = UNVIS_VALIDPUSH;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_fileno);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+
+COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
+CHECK_SIZE_AND_OFFSET(FILE, _p);
+CHECK_SIZE_AND_OFFSET(FILE, _r);
+CHECK_SIZE_AND_OFFSET(FILE, _w);
+CHECK_SIZE_AND_OFFSET(FILE, _flags);
+CHECK_SIZE_AND_OFFSET(FILE, _file);
+CHECK_SIZE_AND_OFFSET(FILE, _bf);
+CHECK_SIZE_AND_OFFSET(FILE, _lbfsize);
+CHECK_SIZE_AND_OFFSET(FILE, _cookie);
+CHECK_SIZE_AND_OFFSET(FILE, _close);
+CHECK_SIZE_AND_OFFSET(FILE, _read);
+CHECK_SIZE_AND_OFFSET(FILE, _seek);
+CHECK_SIZE_AND_OFFSET(FILE, _write);
+CHECK_SIZE_AND_OFFSET(FILE, _ext);
+CHECK_SIZE_AND_OFFSET(FILE, _up);
+CHECK_SIZE_AND_OFFSET(FILE, _ur);
+CHECK_SIZE_AND_OFFSET(FILE, _ubuf);
+CHECK_SIZE_AND_OFFSET(FILE, _nbuf);
+CHECK_SIZE_AND_OFFSET(FILE, _flush);
+CHECK_SIZE_AND_OFFSET(FILE, _lb_unused);
+CHECK_SIZE_AND_OFFSET(FILE, _blksize);
+CHECK_SIZE_AND_OFFSET(FILE, _offset);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+CHECK_TYPE_SIZE(ether_addr);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, _key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, _seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+CHECK_TYPE_SIZE(modctl_load_t);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_filename);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_flags);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_props);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_propslen);
+
+// Compat with 9.0
+struct statvfs90 {
+ unsigned long f_flag;
+ unsigned long f_bsize;
+ unsigned long f_frsize;
+ unsigned long f_iosize;
+
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_bresvd;
+
+ u64 f_files;
+ u64 f_ffree;
+ u64 f_favail;
+ u64 f_fresvd;
+
+ u64 f_syncreads;
+ u64 f_syncwrites;
+
+ u64 f_asyncreads;
+ u64 f_asyncwrites;
+
+ struct {
+ s32 __fsid_val[2];
+ } f_fsidx;
+ unsigned long f_fsid;
+ unsigned long f_namemax;
+ u32 f_owner;
+
+ u32 f_spare[4];
+
+ char f_fstypename[32];
+ char f_mntonname[32];
+ char f_mntfromname[32];
+};
+unsigned struct_statvfs90_sz = sizeof(struct statvfs90);
+
+#endif // SANITIZER_NETBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.h b/lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.h
new file mode 100644
index 0000000000..d80280d9bf
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_netbsd.h
@@ -0,0 +1,2422 @@
+//===-- sanitizer_platform_limits_netbsd.h --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific NetBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_NETBSD_H
+#define SANITIZER_PLATFORM_LIMITS_NETBSD_H
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
+
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_stack_t_sz;
+extern unsigned struct_sched_param_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+extern unsigned ucontext_t_sz;
+
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+extern unsigned struct_timespec_sz;
+extern unsigned struct_sembuf_sz;
+
+extern unsigned struct_kevent_sz;
+extern unsigned struct_FTS_sz;
+extern unsigned struct_FTSENT_sz;
+
+extern unsigned struct_regex_sz;
+extern unsigned struct_regmatch_sz;
+
+extern unsigned struct_fstab_sz;
+
+struct __sanitizer_regmatch {
+ OFF_T rm_so;
+ OFF_T rm_eo;
+};
+
+typedef struct __sanitizer_modctl_load {
+ const char *ml_filename;
+ int ml_flags;
+ const char *ml_props;
+ uptr ml_propslen;
+} __sanitizer_modctl_load_t;
+extern const int modctl_load;
+extern const int modctl_unload;
+extern const int modctl_stat;
+extern const int modctl_exists;
+
+union __sanitizer_sigval {
+ int sival_int;
+ uptr sival_ptr;
+};
+
+struct __sanitizer_sigevent {
+ int sigev_notify;
+ int sigev_signo;
+ union __sanitizer_sigval sigev_value;
+ uptr sigev_notify_function;
+ uptr sigev_notify_attributes;
+};
+
+struct __sanitizer_aiocb {
+ u64 aio_offset;
+ uptr aio_buf;
+ uptr aio_nbytes;
+ int aio_fildes;
+ int aio_lio_opcode;
+ int aio_reqprio;
+ struct __sanitizer_sigevent aio_sigevent;
+ int _state;
+ int _errno;
+ long _retval;
+};
+
+struct __sanitizer_sem_t {
+ uptr data[5];
+};
+
+struct __sanitizer_ipc_perm {
+ u32 uid;
+ u32 gid;
+ u32 cuid;
+ u32 cgid;
+ u32 mode;
+ unsigned short _seq;
+ long _key;
+};
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ u32 shm_lpid;
+ u32 shm_cpid;
+ unsigned int shm_nattch;
+ u64 shm_atime;
+ u64 shm_dtime;
+ u64 shm_ctime;
+ void *_shm_internal;
+};
+
+struct __sanitizer_protoent {
+ char *p_name;
+ char **p_aliases;
+ int p_proto;
+};
+
+struct __sanitizer_netent {
+ char *n_name;
+ char **n_aliases;
+ int n_addrtype;
+ u32 n_net;
+};
+
+extern unsigned struct_msqid_ds_sz;
+extern unsigned struct_mq_attr_sz;
+extern unsigned struct_timex_sz;
+extern unsigned struct_statvfs_sz;
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ unsigned int ifa_addrflags;
+};
+
+typedef unsigned int __sanitizer_socklen_t;
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+typedef long long __sanitizer_time_t;
+typedef int __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_timespec {
+ __sanitizer_time_t tv_sec;
+ long tv_nsec;
+};
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+ __sanitizer_time_t pw_change;
+ char *pw_class;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+ __sanitizer_time_t pw_expire;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+
+struct __sanitizer_mmsghdr {
+ struct __sanitizer_msghdr msg_hdr;
+ unsigned int msg_len;
+};
+
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+struct __sanitizer_dirent {
+ u64 d_fileno;
+ u16 d_reclen;
+ // more fields that we don't care about
+};
+
+typedef int __sanitizer_clock_t;
+typedef int __sanitizer_clockid_t;
+
+typedef u32 __sanitizer___kernel_uid_t;
+typedef u32 __sanitizer___kernel_gid_t;
+typedef u64 __sanitizer___kernel_off_t;
+typedef struct {
+ u32 fds_bits[8];
+} __sanitizer___kernel_fd_set;
+
+typedef struct {
+ unsigned int pta_magic;
+ int pta_flags;
+ void *pta_private;
+} __sanitizer_pthread_attr_t;
+
+struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+};
+
+struct __sanitizer_siginfo {
+ // The size is determined by looking at sizeof of real siginfo_t on linux.
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
+ __sanitizer_siginfo *siginfo,
+ void *uctx);
+
+struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sighandler_ptr handler;
+ __sanitizer_sigactionhandler_ptr sigaction;
+ };
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
+};
+
+extern unsigned struct_sigaltstack_sz;
+
+typedef unsigned int __sanitizer_sigset13_t;
+
+struct __sanitizer_sigaction13 {
+ __sanitizer_sighandler_ptr osa_handler;
+ __sanitizer_sigset13_t osa_mask;
+ int osa_flags;
+};
+
+struct __sanitizer_sigaltstack {
+ void *ss_sp;
+ uptr ss_size;
+ int ss_flags;
+};
+
+typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+#if defined(__sparc__) && defined(_LP64)
+ int __ai_pad0;
+#endif
+ unsigned ai_addrlen;
+#if defined(__alpha__) || (defined(__i386__) && defined(_LP64))
+ int __ai_pad0;
+#endif
+ char *ai_canonname;
+ void *ai_addr;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned __sanitizer_nfds_t;
+
+typedef int __sanitizer_lwpid_t;
+
+struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char *, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void * /* struct stat* */);
+ int (*gl_stat)(const char *, void * /* struct stat* */);
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+extern int struct_ttyent_sz;
+
+extern int ptrace_pt_io;
+extern int ptrace_pt_lwpinfo;
+extern int ptrace_pt_set_event_mask;
+extern int ptrace_pt_get_event_mask;
+extern int ptrace_pt_get_process_state;
+extern int ptrace_pt_set_siginfo;
+extern int ptrace_pt_get_siginfo;
+extern int ptrace_pt_lwpstatus;
+extern int ptrace_pt_lwpnext;
+extern int ptrace_piod_read_d;
+extern int ptrace_piod_write_d;
+extern int ptrace_piod_read_i;
+extern int ptrace_piod_write_i;
+extern int ptrace_piod_read_auxv;
+extern int ptrace_pt_setregs;
+extern int ptrace_pt_getregs;
+extern int ptrace_pt_setfpregs;
+extern int ptrace_pt_getfpregs;
+extern int ptrace_pt_setdbregs;
+extern int ptrace_pt_getdbregs;
+
+struct __sanitizer_ptrace_io_desc {
+ int piod_op;
+ void *piod_offs;
+ void *piod_addr;
+ uptr piod_len;
+};
+
+struct __sanitizer_ptrace_lwpinfo {
+ __sanitizer_lwpid_t pl_lwpid;
+ int pl_event;
+};
+
+struct __sanitizer_ptrace_lwpstatus {
+ __sanitizer_lwpid_t pl_lwpid;
+ __sanitizer_sigset_t pl_sigpend;
+ __sanitizer_sigset_t pl_sigmask;
+ char pl_name[20];
+ void *pl_private;
+};
+
+extern unsigned struct_ptrace_ptrace_io_desc_struct_sz;
+extern unsigned struct_ptrace_ptrace_lwpinfo_struct_sz;
+extern unsigned struct_ptrace_ptrace_lwpstatus_struct_sz;
+extern unsigned struct_ptrace_ptrace_event_struct_sz;
+extern unsigned struct_ptrace_ptrace_siginfo_struct_sz;
+
+extern unsigned struct_ptrace_reg_struct_sz;
+extern unsigned struct_ptrace_fpreg_struct_sz;
+extern unsigned struct_ptrace_dbreg_struct_sz;
+
+struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ char *we_strings;
+ uptr we_nbytes;
+};
+
+struct __sanitizer_FILE {
+ unsigned char *_p;
+ int _r;
+ int _w;
+ unsigned short _flags;
+ short _file;
+ struct {
+ unsigned char *_base;
+ int _size;
+ } _bf;
+ int _lbfsize;
+ void *_cookie;
+ int (*_close)(void *ptr);
+ u64 (*_read)(void *, void *, uptr);
+ u64 (*_seek)(void *, u64, int);
+ uptr (*_write)(void *, const void *, uptr);
+ struct {
+ unsigned char *_base;
+ int _size;
+ } _ext;
+ unsigned char *_up;
+ int _ur;
+ unsigned char _ubuf[3];
+ unsigned char _nbuf[1];
+ int (*_flush)(void *ptr);
+ char _lb_unused[sizeof(uptr)];
+ int _blksize;
+ u64 _offset;
+};
+#define SANITIZER_HAS_STRUCT_FILE 1
+
+extern int shmctl_ipc_stat;
+
+// This simplifies generic code
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
+
+extern unsigned struct_utmp_sz;
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+struct __sanitizer_ttyent {
+ char *ty_name;
+ char *ty_getty;
+ char *ty_type;
+ int ty_status;
+ char *ty_window;
+ char *ty_comment;
+ char *ty_class;
+};
+
+extern const unsigned long __sanitizer_bufsiz;
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#undef IOC_DIRMASK
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+
+// ioctl request identifiers
+
+extern unsigned struct_altqreq_sz;
+extern unsigned struct_amr_user_ioctl_sz;
+extern unsigned struct_ap_control_sz;
+extern unsigned struct_apm_ctl_sz;
+extern unsigned struct_apm_event_info_sz;
+extern unsigned struct_apm_power_info_sz;
+extern unsigned struct_atabusiodetach_args_sz;
+extern unsigned struct_atabusioscan_args_sz;
+extern unsigned struct_ath_diag_sz;
+extern unsigned struct_atm_flowmap_sz;
+extern unsigned struct_audio_buf_info_sz;
+extern unsigned struct_audio_device_sz;
+extern unsigned struct_audio_encoding_sz;
+extern unsigned struct_audio_info_sz;
+extern unsigned struct_audio_offset_sz;
+extern unsigned struct_bio_locate_sz;
+extern unsigned struct_bioc_alarm_sz;
+extern unsigned struct_bioc_blink_sz;
+extern unsigned struct_bioc_disk_sz;
+extern unsigned struct_bioc_inq_sz;
+extern unsigned struct_bioc_setstate_sz;
+extern unsigned struct_bioc_vol_sz;
+extern unsigned struct_bioc_volops_sz;
+extern unsigned struct_bktr_chnlset_sz;
+extern unsigned struct_bktr_remote_sz;
+extern unsigned struct_blue_conf_sz;
+extern unsigned struct_blue_interface_sz;
+extern unsigned struct_blue_stats_sz;
+extern unsigned struct_bpf_dltlist_sz;
+extern unsigned struct_bpf_program_sz;
+extern unsigned struct_bpf_stat_old_sz;
+extern unsigned struct_bpf_stat_sz;
+extern unsigned struct_bpf_version_sz;
+extern unsigned struct_btreq_sz;
+extern unsigned struct_btsco_info_sz;
+extern unsigned struct_buffmem_desc_sz;
+extern unsigned struct_cbq_add_class_sz;
+extern unsigned struct_cbq_add_filter_sz;
+extern unsigned struct_cbq_delete_class_sz;
+extern unsigned struct_cbq_delete_filter_sz;
+extern unsigned struct_cbq_getstats_sz;
+extern unsigned struct_cbq_interface_sz;
+extern unsigned struct_cbq_modify_class_sz;
+extern unsigned struct_ccd_ioctl_sz;
+extern unsigned struct_cdnr_add_element_sz;
+extern unsigned struct_cdnr_add_filter_sz;
+extern unsigned struct_cdnr_add_tbmeter_sz;
+extern unsigned struct_cdnr_add_trtcm_sz;
+extern unsigned struct_cdnr_add_tswtcm_sz;
+extern unsigned struct_cdnr_delete_element_sz;
+extern unsigned struct_cdnr_delete_filter_sz;
+extern unsigned struct_cdnr_get_stats_sz;
+extern unsigned struct_cdnr_interface_sz;
+extern unsigned struct_cdnr_modify_tbmeter_sz;
+extern unsigned struct_cdnr_modify_trtcm_sz;
+extern unsigned struct_cdnr_modify_tswtcm_sz;
+extern unsigned struct_cdnr_tbmeter_stats_sz;
+extern unsigned struct_cdnr_tcm_stats_sz;
+extern unsigned struct_cgd_ioctl_sz;
+extern unsigned struct_cgd_user_sz;
+extern unsigned struct_changer_element_status_request_sz;
+extern unsigned struct_changer_exchange_request_sz;
+extern unsigned struct_changer_move_request_sz;
+extern unsigned struct_changer_params_sz;
+extern unsigned struct_changer_position_request_sz;
+extern unsigned struct_changer_set_voltag_request_sz;
+extern unsigned struct_clockctl_adjtime_sz;
+extern unsigned struct_clockctl_clock_settime_sz;
+extern unsigned struct_clockctl_ntp_adjtime_sz;
+extern unsigned struct_clockctl_settimeofday_sz;
+extern unsigned struct_cnwistats_sz;
+extern unsigned struct_cnwitrail_sz;
+extern unsigned struct_cnwstatus_sz;
+extern unsigned struct_count_info_sz;
+extern unsigned struct_cpu_ucode_sz;
+extern unsigned struct_cpu_ucode_version_sz;
+extern unsigned struct_crypt_kop_sz;
+extern unsigned struct_crypt_mkop_sz;
+extern unsigned struct_crypt_mop_sz;
+extern unsigned struct_crypt_op_sz;
+extern unsigned struct_crypt_result_sz;
+extern unsigned struct_crypt_sfop_sz;
+extern unsigned struct_crypt_sgop_sz;
+extern unsigned struct_cryptret_sz;
+extern unsigned struct_devdetachargs_sz;
+extern unsigned struct_devlistargs_sz;
+extern unsigned struct_devpmargs_sz;
+extern unsigned struct_devrescanargs_sz;
+extern unsigned struct_disk_badsecinfo_sz;
+extern unsigned struct_disk_strategy_sz;
+extern unsigned struct_disklabel_sz;
+extern unsigned struct_dkbad_sz;
+extern unsigned struct_dkwedge_info_sz;
+extern unsigned struct_dkwedge_list_sz;
+extern unsigned struct_dmio_setfunc_sz;
+extern unsigned struct_dmx_pes_filter_params_sz;
+extern unsigned struct_dmx_sct_filter_params_sz;
+extern unsigned struct_dmx_stc_sz;
+extern unsigned struct_dvb_diseqc_master_cmd_sz;
+extern unsigned struct_dvb_diseqc_slave_reply_sz;
+extern unsigned struct_dvb_frontend_event_sz;
+extern unsigned struct_dvb_frontend_info_sz;
+extern unsigned struct_dvb_frontend_parameters_sz;
+extern unsigned struct_eccapreq_sz;
+extern unsigned struct_fbcmap_sz;
+extern unsigned struct_fbcurpos_sz;
+extern unsigned struct_fbcursor_sz;
+extern unsigned struct_fbgattr_sz;
+extern unsigned struct_fbsattr_sz;
+extern unsigned struct_fbtype_sz;
+extern unsigned struct_fdformat_cmd_sz;
+extern unsigned struct_fdformat_parms_sz;
+extern unsigned struct_fifoq_conf_sz;
+extern unsigned struct_fifoq_getstats_sz;
+extern unsigned struct_fifoq_interface_sz;
+extern unsigned struct_format_op_sz;
+extern unsigned struct_fss_get_sz;
+extern unsigned struct_fss_set_sz;
+extern unsigned struct_gpio_attach_sz;
+extern unsigned struct_gpio_info_sz;
+extern unsigned struct_gpio_req_sz;
+extern unsigned struct_gpio_set_sz;
+extern unsigned struct_hfsc_add_class_sz;
+extern unsigned struct_hfsc_add_filter_sz;
+extern unsigned struct_hfsc_attach_sz;
+extern unsigned struct_hfsc_class_stats_sz;
+extern unsigned struct_hfsc_delete_class_sz;
+extern unsigned struct_hfsc_delete_filter_sz;
+extern unsigned struct_hfsc_interface_sz;
+extern unsigned struct_hfsc_modify_class_sz;
+extern unsigned struct_hpcfb_dsp_op_sz;
+extern unsigned struct_hpcfb_dspconf_sz;
+extern unsigned struct_hpcfb_fbconf_sz;
+extern unsigned struct_if_addrprefreq_sz;
+extern unsigned struct_if_clonereq_sz;
+extern unsigned struct_if_laddrreq_sz;
+extern unsigned struct_ifaddr_sz;
+extern unsigned struct_ifaliasreq_sz;
+extern unsigned struct_ifcapreq_sz;
+extern unsigned struct_ifconf_sz;
+extern unsigned struct_ifdatareq_sz;
+extern unsigned struct_ifdrv_sz;
+extern unsigned struct_ifmediareq_sz;
+extern unsigned struct_ifpppcstatsreq_sz;
+extern unsigned struct_ifpppstatsreq_sz;
+extern unsigned struct_ifreq_sz;
+extern unsigned struct_in6_addrpolicy_sz;
+extern unsigned struct_in6_ndireq_sz;
+extern unsigned struct_ioc_load_unload_sz;
+extern unsigned struct_ioc_patch_sz;
+extern unsigned struct_ioc_play_blocks_sz;
+extern unsigned struct_ioc_play_msf_sz;
+extern unsigned struct_ioc_play_track_sz;
+extern unsigned struct_ioc_read_subchannel_sz;
+extern unsigned struct_ioc_read_toc_entry_sz;
+extern unsigned struct_ioc_toc_header_sz;
+extern unsigned struct_ioc_vol_sz;
+extern unsigned struct_ioctl_pt_sz;
+extern unsigned struct_ioppt_sz;
+extern unsigned struct_iovec_sz;
+extern unsigned struct_ipfobj_sz;
+extern unsigned struct_irda_params_sz;
+extern unsigned struct_isp_fc_device_sz;
+extern unsigned struct_isp_fc_tsk_mgmt_sz;
+extern unsigned struct_isp_hba_device_sz;
+extern unsigned struct_isv_cmd_sz;
+extern unsigned struct_jobs_add_class_sz;
+extern unsigned struct_jobs_add_filter_sz;
+extern unsigned struct_jobs_attach_sz;
+extern unsigned struct_jobs_class_stats_sz;
+extern unsigned struct_jobs_delete_class_sz;
+extern unsigned struct_jobs_delete_filter_sz;
+extern unsigned struct_jobs_interface_sz;
+extern unsigned struct_jobs_modify_class_sz;
+extern unsigned struct_kbentry_sz;
+extern unsigned struct_kfilter_mapping_sz;
+extern unsigned struct_kiockeymap_sz;
+extern unsigned struct_ksyms_gsymbol_sz;
+extern unsigned struct_ksyms_gvalue_sz;
+extern unsigned struct_ksyms_ogsymbol_sz;
+extern unsigned struct_kttcp_io_args_sz;
+extern unsigned struct_ltchars_sz;
+extern unsigned struct_lua_create_sz;
+extern unsigned struct_lua_info_sz;
+extern unsigned struct_lua_load_sz;
+extern unsigned struct_lua_require_sz;
+extern unsigned struct_mbpp_param_sz;
+extern unsigned struct_md_conf_sz;
+extern unsigned struct_meteor_capframe_sz;
+extern unsigned struct_meteor_counts_sz;
+extern unsigned struct_meteor_geomet_sz;
+extern unsigned struct_meteor_pixfmt_sz;
+extern unsigned struct_meteor_video_sz;
+extern unsigned struct_mlx_cinfo_sz;
+extern unsigned struct_mlx_pause_sz;
+extern unsigned struct_mlx_rebuild_request_sz;
+extern unsigned struct_mlx_rebuild_status_sz;
+extern unsigned struct_mlx_usercommand_sz;
+extern unsigned struct_mly_user_command_sz;
+extern unsigned struct_mly_user_health_sz;
+extern unsigned struct_mtget_sz;
+extern unsigned struct_mtop_sz;
+extern unsigned struct_npf_ioctl_table_sz;
+extern unsigned struct_npioctl_sz;
+extern unsigned struct_nvme_pt_command_sz;
+extern unsigned struct_ochanger_element_status_request_sz;
+extern unsigned struct_ofiocdesc_sz;
+extern unsigned struct_okiockey_sz;
+extern unsigned struct_ortentry_sz;
+extern unsigned struct_oscsi_addr_sz;
+extern unsigned struct_oss_audioinfo_sz;
+extern unsigned struct_oss_sysinfo_sz;
+extern unsigned struct_pciio_bdf_cfgreg_sz;
+extern unsigned struct_pciio_businfo_sz;
+extern unsigned struct_pciio_cfgreg_sz;
+extern unsigned struct_pciio_drvname_sz;
+extern unsigned struct_pciio_drvnameonbus_sz;
+extern unsigned struct_pcvtid_sz;
+extern unsigned struct_pf_osfp_ioctl_sz;
+extern unsigned struct_pf_status_sz;
+extern unsigned struct_pfioc_altq_sz;
+extern unsigned struct_pfioc_if_sz;
+extern unsigned struct_pfioc_iface_sz;
+extern unsigned struct_pfioc_limit_sz;
+extern unsigned struct_pfioc_natlook_sz;
+extern unsigned struct_pfioc_pooladdr_sz;
+extern unsigned struct_pfioc_qstats_sz;
+extern unsigned struct_pfioc_rule_sz;
+extern unsigned struct_pfioc_ruleset_sz;
+extern unsigned struct_pfioc_src_node_kill_sz;
+extern unsigned struct_pfioc_src_nodes_sz;
+extern unsigned struct_pfioc_state_kill_sz;
+extern unsigned struct_pfioc_state_sz;
+extern unsigned struct_pfioc_states_sz;
+extern unsigned struct_pfioc_table_sz;
+extern unsigned struct_pfioc_tm_sz;
+extern unsigned struct_pfioc_trans_sz;
+extern unsigned struct_plistref_sz;
+extern unsigned struct_power_type_sz;
+extern unsigned struct_ppp_idle_sz;
+extern unsigned struct_ppp_option_data_sz;
+extern unsigned struct_ppp_rawin_sz;
+extern unsigned struct_pppoeconnectionstate_sz;
+extern unsigned struct_pppoediscparms_sz;
+extern unsigned struct_priq_add_class_sz;
+extern unsigned struct_priq_add_filter_sz;
+extern unsigned struct_priq_class_stats_sz;
+extern unsigned struct_priq_delete_class_sz;
+extern unsigned struct_priq_delete_filter_sz;
+extern unsigned struct_priq_interface_sz;
+extern unsigned struct_priq_modify_class_sz;
+extern unsigned struct_ptmget_sz;
+extern unsigned struct_pvctxreq_sz;
+extern unsigned struct_radio_info_sz;
+extern unsigned struct_red_conf_sz;
+extern unsigned struct_red_interface_sz;
+extern unsigned struct_red_stats_sz;
+extern unsigned struct_redparams_sz;
+extern unsigned struct_rf_pmparams_sz;
+extern unsigned struct_rf_pmstat_sz;
+extern unsigned struct_rf_recon_req_sz;
+extern unsigned struct_rio_conf_sz;
+extern unsigned struct_rio_interface_sz;
+extern unsigned struct_rio_stats_sz;
+extern unsigned struct_scan_io_sz;
+extern unsigned struct_scbusaccel_args_sz;
+extern unsigned struct_scbusiodetach_args_sz;
+extern unsigned struct_scbusioscan_args_sz;
+extern unsigned struct_scsi_addr_sz;
+extern unsigned struct_seq_event_rec_sz;
+extern unsigned struct_session_op_sz;
+extern unsigned struct_sgttyb_sz;
+extern unsigned struct_sioc_sg_req_sz;
+extern unsigned struct_sioc_vif_req_sz;
+extern unsigned struct_smbioc_flags_sz;
+extern unsigned struct_smbioc_lookup_sz;
+extern unsigned struct_smbioc_oshare_sz;
+extern unsigned struct_smbioc_ossn_sz;
+extern unsigned struct_smbioc_rq_sz;
+extern unsigned struct_smbioc_rw_sz;
+extern unsigned struct_spppauthcfg_sz;
+extern unsigned struct_spppauthfailuresettings_sz;
+extern unsigned struct_spppauthfailurestats_sz;
+extern unsigned struct_spppdnsaddrs_sz;
+extern unsigned struct_spppdnssettings_sz;
+extern unsigned struct_spppidletimeout_sz;
+extern unsigned struct_spppkeepalivesettings_sz;
+extern unsigned struct_sppplcpcfg_sz;
+extern unsigned struct_spppstatus_sz;
+extern unsigned struct_spppstatusncp_sz;
+extern unsigned struct_srt_rt_sz;
+extern unsigned struct_stic_xinfo_sz;
+extern unsigned struct_sun_dkctlr_sz;
+extern unsigned struct_sun_dkgeom_sz;
+extern unsigned struct_sun_dkpart_sz;
+extern unsigned struct_synth_info_sz;
+extern unsigned struct_tbrreq_sz;
+extern unsigned struct_tchars_sz;
+extern unsigned struct_termios_sz;
+extern unsigned struct_timeval_sz;
+extern unsigned struct_twe_drivecommand_sz;
+extern unsigned struct_twe_paramcommand_sz;
+extern unsigned struct_twe_usercommand_sz;
+extern unsigned struct_ukyopon_identify_sz;
+extern unsigned struct_urio_command_sz;
+extern unsigned struct_usb_alt_interface_sz;
+extern unsigned struct_usb_bulk_ra_wb_opt_sz;
+extern unsigned struct_usb_config_desc_sz;
+extern unsigned struct_usb_ctl_report_desc_sz;
+extern unsigned struct_usb_ctl_report_sz;
+extern unsigned struct_usb_ctl_request_sz;
+#if defined(__x86_64__)
+extern unsigned struct_nvmm_ioc_capability_sz;
+extern unsigned struct_nvmm_ioc_machine_create_sz;
+extern unsigned struct_nvmm_ioc_machine_destroy_sz;
+extern unsigned struct_nvmm_ioc_machine_configure_sz;
+extern unsigned struct_nvmm_ioc_vcpu_create_sz;
+extern unsigned struct_nvmm_ioc_vcpu_destroy_sz;
+extern unsigned struct_nvmm_ioc_vcpu_configure_sz;
+extern unsigned struct_nvmm_ioc_vcpu_setstate_sz;
+extern unsigned struct_nvmm_ioc_vcpu_getstate_sz;
+extern unsigned struct_nvmm_ioc_vcpu_inject_sz;
+extern unsigned struct_nvmm_ioc_vcpu_run_sz;
+extern unsigned struct_nvmm_ioc_gpa_map_sz;
+extern unsigned struct_nvmm_ioc_gpa_unmap_sz;
+extern unsigned struct_nvmm_ioc_hva_map_sz;
+extern unsigned struct_nvmm_ioc_hva_unmap_sz;
+extern unsigned struct_nvmm_ioc_ctl_sz;
+#endif
+extern unsigned struct_spi_ioctl_configure_sz;
+extern unsigned struct_spi_ioctl_transfer_sz;
+extern unsigned struct_autofs_daemon_request_sz;
+extern unsigned struct_autofs_daemon_done_sz;
+extern unsigned struct_sctp_connectx_addrs_sz;
+extern unsigned struct_usb_device_info_old_sz;
+extern unsigned struct_usb_device_info_sz;
+extern unsigned struct_usb_device_stats_sz;
+extern unsigned struct_usb_endpoint_desc_sz;
+extern unsigned struct_usb_full_desc_sz;
+extern unsigned struct_usb_interface_desc_sz;
+extern unsigned struct_usb_string_desc_sz;
+extern unsigned struct_utoppy_readfile_sz;
+extern unsigned struct_utoppy_rename_sz;
+extern unsigned struct_utoppy_stats_sz;
+extern unsigned struct_utoppy_writefile_sz;
+extern unsigned struct_v4l2_audio_sz;
+extern unsigned struct_v4l2_audioout_sz;
+extern unsigned struct_v4l2_buffer_sz;
+extern unsigned struct_v4l2_capability_sz;
+extern unsigned struct_v4l2_control_sz;
+extern unsigned struct_v4l2_crop_sz;
+extern unsigned struct_v4l2_cropcap_sz;
+extern unsigned struct_v4l2_fmtdesc_sz;
+extern unsigned struct_v4l2_format_sz;
+extern unsigned struct_v4l2_framebuffer_sz;
+extern unsigned struct_v4l2_frequency_sz;
+extern unsigned struct_v4l2_frmivalenum_sz;
+extern unsigned struct_v4l2_frmsizeenum_sz;
+extern unsigned struct_v4l2_input_sz;
+extern unsigned struct_v4l2_jpegcompression_sz;
+extern unsigned struct_v4l2_modulator_sz;
+extern unsigned struct_v4l2_output_sz;
+extern unsigned struct_v4l2_queryctrl_sz;
+extern unsigned struct_v4l2_querymenu_sz;
+extern unsigned struct_v4l2_requestbuffers_sz;
+extern unsigned struct_v4l2_standard_sz;
+extern unsigned struct_v4l2_streamparm_sz;
+extern unsigned struct_v4l2_tuner_sz;
+extern unsigned struct_vnd_ioctl_sz;
+extern unsigned struct_vnd_user_sz;
+extern unsigned struct_vt_stat_sz;
+extern unsigned struct_wdog_conf_sz;
+extern unsigned struct_wdog_mode_sz;
+extern unsigned struct_ipmi_recv_sz;
+extern unsigned struct_ipmi_req_sz;
+extern unsigned struct_ipmi_cmdspec_sz;
+extern unsigned struct_wfq_conf_sz;
+extern unsigned struct_wfq_getqid_sz;
+extern unsigned struct_wfq_getstats_sz;
+extern unsigned struct_wfq_interface_sz;
+extern unsigned struct_wfq_setweight_sz;
+extern unsigned struct_winsize_sz;
+extern unsigned struct_wscons_event_sz;
+extern unsigned struct_wsdisplay_addscreendata_sz;
+extern unsigned struct_wsdisplay_char_sz;
+extern unsigned struct_wsdisplay_cmap_sz;
+extern unsigned struct_wsdisplay_curpos_sz;
+extern unsigned struct_wsdisplay_cursor_sz;
+extern unsigned struct_wsdisplay_delscreendata_sz;
+extern unsigned struct_wsdisplay_fbinfo_sz;
+extern unsigned struct_wsdisplay_font_sz;
+extern unsigned struct_wsdisplay_kbddata_sz;
+extern unsigned struct_wsdisplay_msgattrs_sz;
+extern unsigned struct_wsdisplay_param_sz;
+extern unsigned struct_wsdisplay_scroll_data_sz;
+extern unsigned struct_wsdisplay_usefontdata_sz;
+extern unsigned struct_wsdisplayio_blit_sz;
+extern unsigned struct_wsdisplayio_bus_id_sz;
+extern unsigned struct_wsdisplayio_edid_info_sz;
+extern unsigned struct_wsdisplayio_fbinfo_sz;
+extern unsigned struct_wskbd_bell_data_sz;
+extern unsigned struct_wskbd_keyrepeat_data_sz;
+extern unsigned struct_wskbd_map_data_sz;
+extern unsigned struct_wskbd_scroll_data_sz;
+extern unsigned struct_wsmouse_calibcoords_sz;
+extern unsigned struct_wsmouse_id_sz;
+extern unsigned struct_wsmouse_repeat_sz;
+extern unsigned struct_wsmux_device_list_sz;
+extern unsigned struct_wsmux_device_sz;
+extern unsigned struct_xd_iocmd_sz;
+
+extern unsigned struct_scsireq_sz;
+extern unsigned struct_tone_sz;
+extern unsigned union_twe_statrequest_sz;
+extern unsigned struct_usb_device_descriptor_sz;
+extern unsigned struct_vt_mode_sz;
+extern unsigned struct__old_mixer_info_sz;
+extern unsigned struct__agp_allocate_sz;
+extern unsigned struct__agp_bind_sz;
+extern unsigned struct__agp_info_sz;
+extern unsigned struct__agp_setup_sz;
+extern unsigned struct__agp_unbind_sz;
+extern unsigned struct_atareq_sz;
+extern unsigned struct_cpustate_sz;
+extern unsigned struct_dmx_caps_sz;
+extern unsigned enum_dmx_source_sz;
+extern unsigned union_dvd_authinfo_sz;
+extern unsigned union_dvd_struct_sz;
+extern unsigned enum_v4l2_priority_sz;
+extern unsigned struct_envsys_basic_info_sz;
+extern unsigned struct_envsys_tre_data_sz;
+extern unsigned enum_fe_sec_mini_cmd_sz;
+extern unsigned enum_fe_sec_tone_mode_sz;
+extern unsigned enum_fe_sec_voltage_sz;
+extern unsigned enum_fe_status_sz;
+extern unsigned struct_gdt_ctrt_sz;
+extern unsigned struct_gdt_event_sz;
+extern unsigned struct_gdt_osv_sz;
+extern unsigned struct_gdt_rescan_sz;
+extern unsigned struct_gdt_statist_sz;
+extern unsigned struct_gdt_ucmd_sz;
+extern unsigned struct_iscsi_conn_status_parameters_sz;
+extern unsigned struct_iscsi_get_version_parameters_sz;
+extern unsigned struct_iscsi_iocommand_parameters_sz;
+extern unsigned struct_iscsi_login_parameters_sz;
+extern unsigned struct_iscsi_logout_parameters_sz;
+extern unsigned struct_iscsi_register_event_parameters_sz;
+extern unsigned struct_iscsi_remove_parameters_sz;
+extern unsigned struct_iscsi_send_targets_parameters_sz;
+extern unsigned struct_iscsi_set_node_name_parameters_sz;
+extern unsigned struct_iscsi_wait_event_parameters_sz;
+extern unsigned struct_isp_stats_sz;
+extern unsigned struct_lsenable_sz;
+extern unsigned struct_lsdisable_sz;
+extern unsigned struct_audio_format_query_sz;
+extern unsigned struct_mixer_ctrl_sz;
+extern unsigned struct_mixer_devinfo_sz;
+extern unsigned struct_mpu_command_rec_sz;
+extern unsigned struct_rndstat_sz;
+extern unsigned struct_rndstat_name_sz;
+extern unsigned struct_rndctl_sz;
+extern unsigned struct_rnddata_sz;
+extern unsigned struct_rndpoolstat_sz;
+extern unsigned struct_rndstat_est_sz;
+extern unsigned struct_rndstat_est_name_sz;
+extern unsigned struct_pps_params_sz;
+extern unsigned struct_pps_info_sz;
+extern unsigned struct_mixer_info_sz;
+extern unsigned struct_RF_SparetWait_sz;
+extern unsigned struct_RF_ComponentLabel_sz;
+extern unsigned struct_RF_SingleComponent_sz;
+extern unsigned struct_RF_ProgressInfo_sz;
+extern unsigned struct_nvlist_ref_sz;
+extern unsigned struct_StringList_sz;
+
+
+// A special value to mark ioctls that are not present on the target platform,
+// when it can not be determined without including any system headers.
+extern const unsigned IOCTL_NOT_PRESENT;
+
+
+extern unsigned IOCTL_AFM_ADDFMAP;
+extern unsigned IOCTL_AFM_DELFMAP;
+extern unsigned IOCTL_AFM_CLEANFMAP;
+extern unsigned IOCTL_AFM_GETFMAP;
+extern unsigned IOCTL_ALTQGTYPE;
+extern unsigned IOCTL_ALTQTBRSET;
+extern unsigned IOCTL_ALTQTBRGET;
+extern unsigned IOCTL_BLUE_IF_ATTACH;
+extern unsigned IOCTL_BLUE_IF_DETACH;
+extern unsigned IOCTL_BLUE_ENABLE;
+extern unsigned IOCTL_BLUE_DISABLE;
+extern unsigned IOCTL_BLUE_CONFIG;
+extern unsigned IOCTL_BLUE_GETSTATS;
+extern unsigned IOCTL_CBQ_IF_ATTACH;
+extern unsigned IOCTL_CBQ_IF_DETACH;
+extern unsigned IOCTL_CBQ_ENABLE;
+extern unsigned IOCTL_CBQ_DISABLE;
+extern unsigned IOCTL_CBQ_CLEAR_HIERARCHY;
+extern unsigned IOCTL_CBQ_ADD_CLASS;
+extern unsigned IOCTL_CBQ_DEL_CLASS;
+extern unsigned IOCTL_CBQ_MODIFY_CLASS;
+extern unsigned IOCTL_CBQ_ADD_FILTER;
+extern unsigned IOCTL_CBQ_DEL_FILTER;
+extern unsigned IOCTL_CBQ_GETSTATS;
+extern unsigned IOCTL_CDNR_IF_ATTACH;
+extern unsigned IOCTL_CDNR_IF_DETACH;
+extern unsigned IOCTL_CDNR_ENABLE;
+extern unsigned IOCTL_CDNR_DISABLE;
+extern unsigned IOCTL_CDNR_ADD_FILTER;
+extern unsigned IOCTL_CDNR_DEL_FILTER;
+extern unsigned IOCTL_CDNR_GETSTATS;
+extern unsigned IOCTL_CDNR_ADD_ELEM;
+extern unsigned IOCTL_CDNR_DEL_ELEM;
+extern unsigned IOCTL_CDNR_ADD_TBM;
+extern unsigned IOCTL_CDNR_MOD_TBM;
+extern unsigned IOCTL_CDNR_TBM_STATS;
+extern unsigned IOCTL_CDNR_ADD_TCM;
+extern unsigned IOCTL_CDNR_MOD_TCM;
+extern unsigned IOCTL_CDNR_TCM_STATS;
+extern unsigned IOCTL_CDNR_ADD_TSW;
+extern unsigned IOCTL_CDNR_MOD_TSW;
+extern unsigned IOCTL_FIFOQ_IF_ATTACH;
+extern unsigned IOCTL_FIFOQ_IF_DETACH;
+extern unsigned IOCTL_FIFOQ_ENABLE;
+extern unsigned IOCTL_FIFOQ_DISABLE;
+extern unsigned IOCTL_FIFOQ_CONFIG;
+extern unsigned IOCTL_FIFOQ_GETSTATS;
+extern unsigned IOCTL_HFSC_IF_ATTACH;
+extern unsigned IOCTL_HFSC_IF_DETACH;
+extern unsigned IOCTL_HFSC_ENABLE;
+extern unsigned IOCTL_HFSC_DISABLE;
+extern unsigned IOCTL_HFSC_CLEAR_HIERARCHY;
+extern unsigned IOCTL_HFSC_ADD_CLASS;
+extern unsigned IOCTL_HFSC_DEL_CLASS;
+extern unsigned IOCTL_HFSC_MOD_CLASS;
+extern unsigned IOCTL_HFSC_ADD_FILTER;
+extern unsigned IOCTL_HFSC_DEL_FILTER;
+extern unsigned IOCTL_HFSC_GETSTATS;
+extern unsigned IOCTL_JOBS_IF_ATTACH;
+extern unsigned IOCTL_JOBS_IF_DETACH;
+extern unsigned IOCTL_JOBS_ENABLE;
+extern unsigned IOCTL_JOBS_DISABLE;
+extern unsigned IOCTL_JOBS_CLEAR;
+extern unsigned IOCTL_JOBS_ADD_CLASS;
+extern unsigned IOCTL_JOBS_DEL_CLASS;
+extern unsigned IOCTL_JOBS_MOD_CLASS;
+extern unsigned IOCTL_JOBS_ADD_FILTER;
+extern unsigned IOCTL_JOBS_DEL_FILTER;
+extern unsigned IOCTL_JOBS_GETSTATS;
+extern unsigned IOCTL_PRIQ_IF_ATTACH;
+extern unsigned IOCTL_PRIQ_IF_DETACH;
+extern unsigned IOCTL_PRIQ_ENABLE;
+extern unsigned IOCTL_PRIQ_DISABLE;
+extern unsigned IOCTL_PRIQ_CLEAR;
+extern unsigned IOCTL_PRIQ_ADD_CLASS;
+extern unsigned IOCTL_PRIQ_DEL_CLASS;
+extern unsigned IOCTL_PRIQ_MOD_CLASS;
+extern unsigned IOCTL_PRIQ_ADD_FILTER;
+extern unsigned IOCTL_PRIQ_DEL_FILTER;
+extern unsigned IOCTL_PRIQ_GETSTATS;
+extern unsigned IOCTL_RED_IF_ATTACH;
+extern unsigned IOCTL_RED_IF_DETACH;
+extern unsigned IOCTL_RED_ENABLE;
+extern unsigned IOCTL_RED_DISABLE;
+extern unsigned IOCTL_RED_CONFIG;
+extern unsigned IOCTL_RED_GETSTATS;
+extern unsigned IOCTL_RED_SETDEFAULTS;
+extern unsigned IOCTL_RIO_IF_ATTACH;
+extern unsigned IOCTL_RIO_IF_DETACH;
+extern unsigned IOCTL_RIO_ENABLE;
+extern unsigned IOCTL_RIO_DISABLE;
+extern unsigned IOCTL_RIO_CONFIG;
+extern unsigned IOCTL_RIO_GETSTATS;
+extern unsigned IOCTL_RIO_SETDEFAULTS;
+extern unsigned IOCTL_WFQ_IF_ATTACH;
+extern unsigned IOCTL_WFQ_IF_DETACH;
+extern unsigned IOCTL_WFQ_ENABLE;
+extern unsigned IOCTL_WFQ_DISABLE;
+extern unsigned IOCTL_WFQ_CONFIG;
+extern unsigned IOCTL_WFQ_GET_STATS;
+extern unsigned IOCTL_WFQ_GET_QID;
+extern unsigned IOCTL_WFQ_SET_WEIGHT;
+extern unsigned IOCTL_CRIOGET;
+extern unsigned IOCTL_CIOCFSESSION;
+extern unsigned IOCTL_CIOCKEY;
+extern unsigned IOCTL_CIOCNFKEYM;
+extern unsigned IOCTL_CIOCNFSESSION;
+extern unsigned IOCTL_CIOCNCRYPTRETM;
+extern unsigned IOCTL_CIOCNCRYPTRET;
+extern unsigned IOCTL_CIOCGSESSION;
+extern unsigned IOCTL_CIOCNGSESSION;
+extern unsigned IOCTL_CIOCCRYPT;
+extern unsigned IOCTL_CIOCNCRYPTM;
+extern unsigned IOCTL_CIOCASYMFEAT;
+extern unsigned IOCTL_APM_IOC_REJECT;
+extern unsigned IOCTL_APM_IOC_STANDBY;
+extern unsigned IOCTL_APM_IOC_SUSPEND;
+extern unsigned IOCTL_OAPM_IOC_GETPOWER;
+extern unsigned IOCTL_APM_IOC_GETPOWER;
+extern unsigned IOCTL_APM_IOC_NEXTEVENT;
+extern unsigned IOCTL_APM_IOC_DEV_CTL;
+extern unsigned IOCTL_NETBSD_DM_IOCTL;
+extern unsigned IOCTL_DMIO_SETFUNC;
+extern unsigned IOCTL_DMX_START;
+extern unsigned IOCTL_DMX_STOP;
+extern unsigned IOCTL_DMX_SET_FILTER;
+extern unsigned IOCTL_DMX_SET_PES_FILTER;
+extern unsigned IOCTL_DMX_SET_BUFFER_SIZE;
+extern unsigned IOCTL_DMX_GET_STC;
+extern unsigned IOCTL_DMX_ADD_PID;
+extern unsigned IOCTL_DMX_REMOVE_PID;
+extern unsigned IOCTL_DMX_GET_CAPS;
+extern unsigned IOCTL_DMX_SET_SOURCE;
+extern unsigned IOCTL_FE_READ_STATUS;
+extern unsigned IOCTL_FE_READ_BER;
+extern unsigned IOCTL_FE_READ_SNR;
+extern unsigned IOCTL_FE_READ_SIGNAL_STRENGTH;
+extern unsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS;
+extern unsigned IOCTL_FE_SET_FRONTEND;
+extern unsigned IOCTL_FE_GET_FRONTEND;
+extern unsigned IOCTL_FE_GET_EVENT;
+extern unsigned IOCTL_FE_GET_INFO;
+extern unsigned IOCTL_FE_DISEQC_RESET_OVERLOAD;
+extern unsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD;
+extern unsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY;
+extern unsigned IOCTL_FE_DISEQC_SEND_BURST;
+extern unsigned IOCTL_FE_SET_TONE;
+extern unsigned IOCTL_FE_SET_VOLTAGE;
+extern unsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE;
+extern unsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE;
+extern unsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD;
+extern unsigned IOCTL_FILEMON_SET_FD;
+extern unsigned IOCTL_FILEMON_SET_PID;
+extern unsigned IOCTL_HDAUDIO_FGRP_INFO;
+extern unsigned IOCTL_HDAUDIO_FGRP_GETCONFIG;
+extern unsigned IOCTL_HDAUDIO_FGRP_SETCONFIG;
+extern unsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO;
+extern unsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO;
+extern unsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO;
+extern unsigned IOCTL_HDAUDIO_AFG_CODEC_INFO;
+extern unsigned IOCTL_CEC_GET_PHYS_ADDR;
+extern unsigned IOCTL_CEC_GET_LOG_ADDRS;
+extern unsigned IOCTL_CEC_SET_LOG_ADDRS;
+extern unsigned IOCTL_CEC_GET_VENDOR_ID;
+extern unsigned IOCTL_HPCFBIO_GCONF;
+extern unsigned IOCTL_HPCFBIO_SCONF;
+extern unsigned IOCTL_HPCFBIO_GDSPCONF;
+extern unsigned IOCTL_HPCFBIO_SDSPCONF;
+extern unsigned IOCTL_HPCFBIO_GOP;
+extern unsigned IOCTL_HPCFBIO_SOP;
+extern unsigned IOCTL_IOPIOCPT;
+extern unsigned IOCTL_IOPIOCGLCT;
+extern unsigned IOCTL_IOPIOCGSTATUS;
+extern unsigned IOCTL_IOPIOCRECONFIG;
+extern unsigned IOCTL_IOPIOCGTIDMAP;
+extern unsigned IOCTL_SIOCGATHSTATS;
+extern unsigned IOCTL_SIOCGATHDIAG;
+extern unsigned IOCTL_METEORCAPTUR;
+extern unsigned IOCTL_METEORCAPFRM;
+extern unsigned IOCTL_METEORSETGEO;
+extern unsigned IOCTL_METEORGETGEO;
+extern unsigned IOCTL_METEORSTATUS;
+extern unsigned IOCTL_METEORSHUE;
+extern unsigned IOCTL_METEORGHUE;
+extern unsigned IOCTL_METEORSFMT;
+extern unsigned IOCTL_METEORGFMT;
+extern unsigned IOCTL_METEORSINPUT;
+extern unsigned IOCTL_METEORGINPUT;
+extern unsigned IOCTL_METEORSCHCV;
+extern unsigned IOCTL_METEORGCHCV;
+extern unsigned IOCTL_METEORSCOUNT;
+extern unsigned IOCTL_METEORGCOUNT;
+extern unsigned IOCTL_METEORSFPS;
+extern unsigned IOCTL_METEORGFPS;
+extern unsigned IOCTL_METEORSSIGNAL;
+extern unsigned IOCTL_METEORGSIGNAL;
+extern unsigned IOCTL_METEORSVIDEO;
+extern unsigned IOCTL_METEORGVIDEO;
+extern unsigned IOCTL_METEORSBRIG;
+extern unsigned IOCTL_METEORGBRIG;
+extern unsigned IOCTL_METEORSCSAT;
+extern unsigned IOCTL_METEORGCSAT;
+extern unsigned IOCTL_METEORSCONT;
+extern unsigned IOCTL_METEORGCONT;
+extern unsigned IOCTL_METEORSHWS;
+extern unsigned IOCTL_METEORGHWS;
+extern unsigned IOCTL_METEORSVWS;
+extern unsigned IOCTL_METEORGVWS;
+extern unsigned IOCTL_METEORSTS;
+extern unsigned IOCTL_METEORGTS;
+extern unsigned IOCTL_TVTUNER_SETCHNL;
+extern unsigned IOCTL_TVTUNER_GETCHNL;
+extern unsigned IOCTL_TVTUNER_SETTYPE;
+extern unsigned IOCTL_TVTUNER_GETTYPE;
+extern unsigned IOCTL_TVTUNER_GETSTATUS;
+extern unsigned IOCTL_TVTUNER_SETFREQ;
+extern unsigned IOCTL_TVTUNER_GETFREQ;
+extern unsigned IOCTL_TVTUNER_SETAFC;
+extern unsigned IOCTL_TVTUNER_GETAFC;
+extern unsigned IOCTL_RADIO_SETMODE;
+extern unsigned IOCTL_RADIO_GETMODE;
+extern unsigned IOCTL_RADIO_SETFREQ;
+extern unsigned IOCTL_RADIO_GETFREQ;
+extern unsigned IOCTL_METEORSACTPIXFMT;
+extern unsigned IOCTL_METEORGACTPIXFMT;
+extern unsigned IOCTL_METEORGSUPPIXFMT;
+extern unsigned IOCTL_TVTUNER_GETCHNLSET;
+extern unsigned IOCTL_REMOTE_GETKEY;
+extern unsigned IOCTL_GDT_IOCTL_GENERAL;
+extern unsigned IOCTL_GDT_IOCTL_DRVERS;
+extern unsigned IOCTL_GDT_IOCTL_CTRTYPE;
+extern unsigned IOCTL_GDT_IOCTL_OSVERS;
+extern unsigned IOCTL_GDT_IOCTL_CTRCNT;
+extern unsigned IOCTL_GDT_IOCTL_EVENT;
+extern unsigned IOCTL_GDT_IOCTL_STATIST;
+extern unsigned IOCTL_GDT_IOCTL_RESCAN;
+extern unsigned IOCTL_ISP_SDBLEV;
+extern unsigned IOCTL_ISP_RESETHBA;
+extern unsigned IOCTL_ISP_RESCAN;
+extern unsigned IOCTL_ISP_SETROLE;
+extern unsigned IOCTL_ISP_GETROLE;
+extern unsigned IOCTL_ISP_GET_STATS;
+extern unsigned IOCTL_ISP_CLR_STATS;
+extern unsigned IOCTL_ISP_FC_LIP;
+extern unsigned IOCTL_ISP_FC_GETDINFO;
+extern unsigned IOCTL_ISP_GET_FW_CRASH_DUMP;
+extern unsigned IOCTL_ISP_FORCE_CRASH_DUMP;
+extern unsigned IOCTL_ISP_FC_GETHINFO;
+extern unsigned IOCTL_ISP_TSK_MGMT;
+extern unsigned IOCTL_ISP_FC_GETDLIST;
+extern unsigned IOCTL_MLXD_STATUS;
+extern unsigned IOCTL_MLXD_CHECKASYNC;
+extern unsigned IOCTL_MLXD_DETACH;
+extern unsigned IOCTL_MLX_RESCAN_DRIVES;
+extern unsigned IOCTL_MLX_PAUSE_CHANNEL;
+extern unsigned IOCTL_MLX_COMMAND;
+extern unsigned IOCTL_MLX_REBUILDASYNC;
+extern unsigned IOCTL_MLX_REBUILDSTAT;
+extern unsigned IOCTL_MLX_GET_SYSDRIVE;
+extern unsigned IOCTL_MLX_GET_CINFO;
+extern unsigned IOCTL_NVME_PASSTHROUGH_CMD;
+extern unsigned IOCTL_FWCFGIO_SET_INDEX;
+extern unsigned IOCTL_IRDA_RESET_PARAMS;
+extern unsigned IOCTL_IRDA_SET_PARAMS;
+extern unsigned IOCTL_IRDA_GET_SPEEDMASK;
+extern unsigned IOCTL_IRDA_GET_TURNAROUNDMASK;
+extern unsigned IOCTL_IRFRAMETTY_GET_DEVICE;
+extern unsigned IOCTL_IRFRAMETTY_GET_DONGLE;
+extern unsigned IOCTL_IRFRAMETTY_SET_DONGLE;
+extern unsigned IOCTL_ISV_CMD;
+extern unsigned IOCTL_WTQICMD;
+extern unsigned IOCTL_ISCSI_GET_VERSION;
+extern unsigned IOCTL_ISCSI_LOGIN;
+extern unsigned IOCTL_ISCSI_LOGOUT;
+extern unsigned IOCTL_ISCSI_ADD_CONNECTION;
+extern unsigned IOCTL_ISCSI_RESTORE_CONNECTION;
+extern unsigned IOCTL_ISCSI_REMOVE_CONNECTION;
+extern unsigned IOCTL_ISCSI_CONNECTION_STATUS;
+extern unsigned IOCTL_ISCSI_SEND_TARGETS;
+extern unsigned IOCTL_ISCSI_SET_NODE_NAME;
+extern unsigned IOCTL_ISCSI_IO_COMMAND;
+extern unsigned IOCTL_ISCSI_REGISTER_EVENT;
+extern unsigned IOCTL_ISCSI_DEREGISTER_EVENT;
+extern unsigned IOCTL_ISCSI_WAIT_EVENT;
+extern unsigned IOCTL_ISCSI_POLL_EVENT;
+extern unsigned IOCTL_OFIOCGET;
+extern unsigned IOCTL_OFIOCSET;
+extern unsigned IOCTL_OFIOCNEXTPROP;
+extern unsigned IOCTL_OFIOCGETOPTNODE;
+extern unsigned IOCTL_OFIOCGETNEXT;
+extern unsigned IOCTL_OFIOCGETCHILD;
+extern unsigned IOCTL_OFIOCFINDDEVICE;
+extern unsigned IOCTL_AMR_IO_VERSION;
+extern unsigned IOCTL_AMR_IO_COMMAND;
+extern unsigned IOCTL_MLYIO_COMMAND;
+extern unsigned IOCTL_MLYIO_HEALTH;
+extern unsigned IOCTL_PCI_IOC_CFGREAD;
+extern unsigned IOCTL_PCI_IOC_CFGWRITE;
+extern unsigned IOCTL_PCI_IOC_BDF_CFGREAD;
+extern unsigned IOCTL_PCI_IOC_BDF_CFGWRITE;
+extern unsigned IOCTL_PCI_IOC_BUSINFO;
+extern unsigned IOCTL_PCI_IOC_DRVNAME;
+extern unsigned IOCTL_PCI_IOC_DRVNAMEONBUS;
+extern unsigned IOCTL_TWEIO_COMMAND;
+extern unsigned IOCTL_TWEIO_STATS;
+extern unsigned IOCTL_TWEIO_AEN_POLL;
+extern unsigned IOCTL_TWEIO_AEN_WAIT;
+extern unsigned IOCTL_TWEIO_SET_PARAM;
+extern unsigned IOCTL_TWEIO_GET_PARAM;
+extern unsigned IOCTL_TWEIO_RESET;
+extern unsigned IOCTL_TWEIO_ADD_UNIT;
+extern unsigned IOCTL_TWEIO_DEL_UNIT;
+extern unsigned IOCTL_SIOCSCNWDOMAIN;
+extern unsigned IOCTL_SIOCGCNWDOMAIN;
+extern unsigned IOCTL_SIOCSCNWKEY;
+extern unsigned IOCTL_SIOCGCNWSTATUS;
+extern unsigned IOCTL_SIOCGCNWSTATS;
+extern unsigned IOCTL_SIOCGCNWTRAIL;
+extern unsigned IOCTL_SIOCGRAYSIGLEV;
+extern unsigned IOCTL_RAIDFRAME_SHUTDOWN;
+extern unsigned IOCTL_RAIDFRAME_TUR;
+extern unsigned IOCTL_RAIDFRAME_FAIL_DISK;
+extern unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS;
+extern unsigned IOCTL_RAIDFRAME_REWRITEPARITY;
+extern unsigned IOCTL_RAIDFRAME_COPYBACK;
+extern unsigned IOCTL_RAIDFRAME_SPARET_WAIT;
+extern unsigned IOCTL_RAIDFRAME_SEND_SPARET;
+extern unsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT;
+extern unsigned IOCTL_RAIDFRAME_START_ATRACE;
+extern unsigned IOCTL_RAIDFRAME_STOP_ATRACE;
+extern unsigned IOCTL_RAIDFRAME_GET_SIZE;
+extern unsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS;
+extern unsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS;
+extern unsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL;
+extern unsigned IOCTL_RAIDFRAME_SET_COMPONENT_LABEL;
+extern unsigned IOCTL_RAIDFRAME_INIT_LABELS;
+extern unsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE;
+extern unsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE;
+extern unsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE;
+extern unsigned IOCTL_RAIDFRAME_CHECK_PARITY;
+extern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS;
+extern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS;
+extern unsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG;
+extern unsigned IOCTL_RAIDFRAME_SET_ROOT;
+extern unsigned IOCTL_RAIDFRAME_DELETE_COMPONENT;
+extern unsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE;
+extern unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT;
+extern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT;
+extern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT;
+extern unsigned IOCTL_RAIDFRAME_CONFIGURE;
+extern unsigned IOCTL_RAIDFRAME_GET_INFO;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS;
+extern unsigned IOCTL_RAIDFRAME_SET_LAST_UNIT;
+extern unsigned IOCTL_MBPPIOCSPARAM;
+extern unsigned IOCTL_MBPPIOCGPARAM;
+extern unsigned IOCTL_MBPPIOCGSTAT;
+extern unsigned IOCTL_SESIOC_GETNOBJ;
+extern unsigned IOCTL_SESIOC_GETOBJMAP;
+extern unsigned IOCTL_SESIOC_GETENCSTAT;
+extern unsigned IOCTL_SESIOC_SETENCSTAT;
+extern unsigned IOCTL_SESIOC_GETOBJSTAT;
+extern unsigned IOCTL_SESIOC_SETOBJSTAT;
+extern unsigned IOCTL_SESIOC_GETTEXT;
+extern unsigned IOCTL_SESIOC_INIT;
+extern unsigned IOCTL_SUN_DKIOCGGEOM;
+extern unsigned IOCTL_SUN_DKIOCINFO;
+extern unsigned IOCTL_SUN_DKIOCGPART;
+extern unsigned IOCTL_FBIOGTYPE;
+extern unsigned IOCTL_FBIOPUTCMAP;
+extern unsigned IOCTL_FBIOGETCMAP;
+extern unsigned IOCTL_FBIOGATTR;
+extern unsigned IOCTL_FBIOSVIDEO;
+extern unsigned IOCTL_FBIOGVIDEO;
+extern unsigned IOCTL_FBIOSCURSOR;
+extern unsigned IOCTL_FBIOGCURSOR;
+extern unsigned IOCTL_FBIOSCURPOS;
+extern unsigned IOCTL_FBIOGCURPOS;
+extern unsigned IOCTL_FBIOGCURMAX;
+extern unsigned IOCTL_KIOCTRANS;
+extern unsigned IOCTL_KIOCSETKEY;
+extern unsigned IOCTL_KIOCGETKEY;
+extern unsigned IOCTL_KIOCGTRANS;
+extern unsigned IOCTL_KIOCCMD;
+extern unsigned IOCTL_KIOCTYPE;
+extern unsigned IOCTL_KIOCSDIRECT;
+extern unsigned IOCTL_KIOCSKEY;
+extern unsigned IOCTL_KIOCGKEY;
+extern unsigned IOCTL_KIOCSLED;
+extern unsigned IOCTL_KIOCGLED;
+extern unsigned IOCTL_KIOCLAYOUT;
+extern unsigned IOCTL_VUIDSFORMAT;
+extern unsigned IOCTL_VUIDGFORMAT;
+extern unsigned IOCTL_STICIO_GXINFO;
+extern unsigned IOCTL_STICIO_RESET;
+extern unsigned IOCTL_STICIO_STARTQ;
+extern unsigned IOCTL_STICIO_STOPQ;
+extern unsigned IOCTL_UKYOPON_IDENTIFY;
+extern unsigned IOCTL_URIO_SEND_COMMAND;
+extern unsigned IOCTL_URIO_RECV_COMMAND;
+extern unsigned IOCTL_USB_REQUEST;
+extern unsigned IOCTL_USB_SETDEBUG;
+extern unsigned IOCTL_USB_DISCOVER;
+extern unsigned IOCTL_USB_DEVICEINFO;
+extern unsigned IOCTL_USB_DEVICEINFO_OLD;
+extern unsigned IOCTL_USB_DEVICESTATS;
+extern unsigned IOCTL_USB_GET_REPORT_DESC;
+extern unsigned IOCTL_USB_SET_IMMED;
+extern unsigned IOCTL_USB_GET_REPORT;
+extern unsigned IOCTL_USB_SET_REPORT;
+extern unsigned IOCTL_USB_GET_REPORT_ID;
+extern unsigned IOCTL_USB_GET_CONFIG;
+extern unsigned IOCTL_USB_SET_CONFIG;
+extern unsigned IOCTL_USB_GET_ALTINTERFACE;
+extern unsigned IOCTL_USB_SET_ALTINTERFACE;
+extern unsigned IOCTL_USB_GET_NO_ALT;
+extern unsigned IOCTL_USB_GET_DEVICE_DESC;
+extern unsigned IOCTL_USB_GET_CONFIG_DESC;
+extern unsigned IOCTL_USB_GET_INTERFACE_DESC;
+extern unsigned IOCTL_USB_GET_ENDPOINT_DESC;
+extern unsigned IOCTL_USB_GET_FULL_DESC;
+extern unsigned IOCTL_USB_GET_STRING_DESC;
+extern unsigned IOCTL_USB_DO_REQUEST;
+extern unsigned IOCTL_USB_GET_DEVICEINFO;
+extern unsigned IOCTL_USB_GET_DEVICEINFO_OLD;
+extern unsigned IOCTL_USB_SET_SHORT_XFER;
+extern unsigned IOCTL_USB_SET_TIMEOUT;
+extern unsigned IOCTL_USB_SET_BULK_RA;
+extern unsigned IOCTL_USB_SET_BULK_WB;
+extern unsigned IOCTL_USB_SET_BULK_RA_OPT;
+extern unsigned IOCTL_USB_SET_BULK_WB_OPT;
+extern unsigned IOCTL_USB_GET_CM_OVER_DATA;
+extern unsigned IOCTL_USB_SET_CM_OVER_DATA;
+extern unsigned IOCTL_UTOPPYIOTURBO;
+extern unsigned IOCTL_UTOPPYIOCANCEL;
+extern unsigned IOCTL_UTOPPYIOREBOOT;
+extern unsigned IOCTL_UTOPPYIOSTATS;
+extern unsigned IOCTL_UTOPPYIORENAME;
+extern unsigned IOCTL_UTOPPYIOMKDIR;
+extern unsigned IOCTL_UTOPPYIODELETE;
+extern unsigned IOCTL_UTOPPYIOREADDIR;
+extern unsigned IOCTL_UTOPPYIOREADFILE;
+extern unsigned IOCTL_UTOPPYIOWRITEFILE;
+extern unsigned IOCTL_DIOSXDCMD;
+extern unsigned IOCTL_VT_OPENQRY;
+extern unsigned IOCTL_VT_SETMODE;
+extern unsigned IOCTL_VT_GETMODE;
+extern unsigned IOCTL_VT_RELDISP;
+extern unsigned IOCTL_VT_ACTIVATE;
+extern unsigned IOCTL_VT_WAITACTIVE;
+extern unsigned IOCTL_VT_GETACTIVE;
+extern unsigned IOCTL_VT_GETSTATE;
+extern unsigned IOCTL_KDGETKBENT;
+extern unsigned IOCTL_KDGKBMODE;
+extern unsigned IOCTL_KDSKBMODE;
+extern unsigned IOCTL_KDMKTONE;
+extern unsigned IOCTL_KDSETMODE;
+extern unsigned IOCTL_KDENABIO;
+extern unsigned IOCTL_KDDISABIO;
+extern unsigned IOCTL_KDGKBTYPE;
+extern unsigned IOCTL_KDGETLED;
+extern unsigned IOCTL_KDSETLED;
+extern unsigned IOCTL_KDSETRAD;
+extern unsigned IOCTL_VGAPCVTID;
+extern unsigned IOCTL_CONS_GETVERS;
+extern unsigned IOCTL_WSKBDIO_GTYPE;
+extern unsigned IOCTL_WSKBDIO_BELL;
+extern unsigned IOCTL_WSKBDIO_COMPLEXBELL;
+extern unsigned IOCTL_WSKBDIO_SETBELL;
+extern unsigned IOCTL_WSKBDIO_GETBELL;
+extern unsigned IOCTL_WSKBDIO_SETDEFAULTBELL;
+extern unsigned IOCTL_WSKBDIO_GETDEFAULTBELL;
+extern unsigned IOCTL_WSKBDIO_SETKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_GETKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_SETLEDS;
+extern unsigned IOCTL_WSKBDIO_GETLEDS;
+extern unsigned IOCTL_WSKBDIO_GETMAP;
+extern unsigned IOCTL_WSKBDIO_SETMAP;
+extern unsigned IOCTL_WSKBDIO_GETENCODING;
+extern unsigned IOCTL_WSKBDIO_SETENCODING;
+extern unsigned IOCTL_WSKBDIO_SETMODE;
+extern unsigned IOCTL_WSKBDIO_GETMODE;
+extern unsigned IOCTL_WSKBDIO_SETKEYCLICK;
+extern unsigned IOCTL_WSKBDIO_GETKEYCLICK;
+extern unsigned IOCTL_WSKBDIO_GETSCROLL;
+extern unsigned IOCTL_WSKBDIO_SETSCROLL;
+extern unsigned IOCTL_WSKBDIO_SETVERSION;
+extern unsigned IOCTL_WSMOUSEIO_GTYPE;
+extern unsigned IOCTL_WSMOUSEIO_SRES;
+extern unsigned IOCTL_WSMOUSEIO_SSCALE;
+extern unsigned IOCTL_WSMOUSEIO_SRATE;
+extern unsigned IOCTL_WSMOUSEIO_SCALIBCOORDS;
+extern unsigned IOCTL_WSMOUSEIO_GCALIBCOORDS;
+extern unsigned IOCTL_WSMOUSEIO_GETID;
+extern unsigned IOCTL_WSMOUSEIO_GETREPEAT;
+extern unsigned IOCTL_WSMOUSEIO_SETREPEAT;
+extern unsigned IOCTL_WSMOUSEIO_SETVERSION;
+extern unsigned IOCTL_WSDISPLAYIO_GTYPE;
+extern unsigned IOCTL_WSDISPLAYIO_GINFO;
+extern unsigned IOCTL_WSDISPLAYIO_GETCMAP;
+extern unsigned IOCTL_WSDISPLAYIO_PUTCMAP;
+extern unsigned IOCTL_WSDISPLAYIO_GVIDEO;
+extern unsigned IOCTL_WSDISPLAYIO_SVIDEO;
+extern unsigned IOCTL_WSDISPLAYIO_GCURPOS;
+extern unsigned IOCTL_WSDISPLAYIO_SCURPOS;
+extern unsigned IOCTL_WSDISPLAYIO_GCURMAX;
+extern unsigned IOCTL_WSDISPLAYIO_GCURSOR;
+extern unsigned IOCTL_WSDISPLAYIO_SCURSOR;
+extern unsigned IOCTL_WSDISPLAYIO_GMODE;
+extern unsigned IOCTL_WSDISPLAYIO_SMODE;
+extern unsigned IOCTL_WSDISPLAYIO_LDFONT;
+extern unsigned IOCTL_WSDISPLAYIO_ADDSCREEN;
+extern unsigned IOCTL_WSDISPLAYIO_DELSCREEN;
+extern unsigned IOCTL_WSDISPLAYIO_SFONT;
+extern unsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD;
+extern unsigned IOCTL_WSDISPLAYIO_GETPARAM;
+extern unsigned IOCTL_WSDISPLAYIO_SETPARAM;
+extern unsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN;
+extern unsigned IOCTL_WSDISPLAYIO_GETWSCHAR;
+extern unsigned IOCTL_WSDISPLAYIO_PUTWSCHAR;
+extern unsigned IOCTL_WSDISPLAYIO_DGSCROLL;
+extern unsigned IOCTL_WSDISPLAYIO_DSSCROLL;
+extern unsigned IOCTL_WSDISPLAYIO_GMSGATTRS;
+extern unsigned IOCTL_WSDISPLAYIO_SMSGATTRS;
+extern unsigned IOCTL_WSDISPLAYIO_GBORDER;
+extern unsigned IOCTL_WSDISPLAYIO_SBORDER;
+extern unsigned IOCTL_WSDISPLAYIO_SSPLASH;
+extern unsigned IOCTL_WSDISPLAYIO_SPROGRESS;
+extern unsigned IOCTL_WSDISPLAYIO_LINEBYTES;
+extern unsigned IOCTL_WSDISPLAYIO_SETVERSION;
+extern unsigned IOCTL_WSMUXIO_ADD_DEVICE;
+extern unsigned IOCTL_WSMUXIO_REMOVE_DEVICE;
+extern unsigned IOCTL_WSMUXIO_LIST_DEVICES;
+extern unsigned IOCTL_WSMUXIO_INJECTEVENT;
+extern unsigned IOCTL_WSDISPLAYIO_GET_BUSID;
+extern unsigned IOCTL_WSDISPLAYIO_GET_EDID;
+extern unsigned IOCTL_WSDISPLAYIO_SET_POLLING;
+extern unsigned IOCTL_WSDISPLAYIO_GET_FBINFO;
+extern unsigned IOCTL_WSDISPLAYIO_DOBLIT;
+extern unsigned IOCTL_WSDISPLAYIO_WAITBLIT;
+extern unsigned IOCTL_BIOCLOCATE;
+extern unsigned IOCTL_BIOCINQ;
+extern unsigned IOCTL_BIOCDISK_NOVOL;
+extern unsigned IOCTL_BIOCDISK;
+extern unsigned IOCTL_BIOCVOL;
+extern unsigned IOCTL_BIOCALARM;
+extern unsigned IOCTL_BIOCBLINK;
+extern unsigned IOCTL_BIOCSETSTATE;
+extern unsigned IOCTL_BIOCVOLOPS;
+extern unsigned IOCTL_MD_GETCONF;
+extern unsigned IOCTL_MD_SETCONF;
+extern unsigned IOCTL_CCDIOCSET;
+extern unsigned IOCTL_CCDIOCCLR;
+extern unsigned IOCTL_CGDIOCSET;
+extern unsigned IOCTL_CGDIOCCLR;
+extern unsigned IOCTL_CGDIOCGET;
+extern unsigned IOCTL_FSSIOCSET;
+extern unsigned IOCTL_FSSIOCGET;
+extern unsigned IOCTL_FSSIOCCLR;
+extern unsigned IOCTL_FSSIOFSET;
+extern unsigned IOCTL_FSSIOFGET;
+extern unsigned IOCTL_BTDEV_ATTACH;
+extern unsigned IOCTL_BTDEV_DETACH;
+extern unsigned IOCTL_BTSCO_GETINFO;
+extern unsigned IOCTL_KTTCP_IO_SEND;
+extern unsigned IOCTL_KTTCP_IO_RECV;
+extern unsigned IOCTL_IOC_LOCKSTAT_GVERSION;
+extern unsigned IOCTL_IOC_LOCKSTAT_ENABLE;
+extern unsigned IOCTL_IOC_LOCKSTAT_DISABLE;
+extern unsigned IOCTL_VNDIOCSET;
+extern unsigned IOCTL_VNDIOCCLR;
+extern unsigned IOCTL_VNDIOCGET;
+extern unsigned IOCTL_SPKRTONE;
+extern unsigned IOCTL_SPKRTUNE;
+extern unsigned IOCTL_SPKRGETVOL;
+extern unsigned IOCTL_SPKRSETVOL;
+#if defined(__x86_64__)
+extern unsigned IOCTL_NVMM_IOC_CAPABILITY;
+extern unsigned IOCTL_NVMM_IOC_MACHINE_CREATE;
+extern unsigned IOCTL_NVMM_IOC_MACHINE_DESTROY;
+extern unsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_CREATE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_DESTROY;
+extern unsigned IOCTL_NVMM_IOC_VCPU_CONFIGURE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_SETSTATE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_GETSTATE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_INJECT;
+extern unsigned IOCTL_NVMM_IOC_VCPU_RUN;
+extern unsigned IOCTL_NVMM_IOC_GPA_MAP;
+extern unsigned IOCTL_NVMM_IOC_GPA_UNMAP;
+extern unsigned IOCTL_NVMM_IOC_HVA_MAP;
+extern unsigned IOCTL_NVMM_IOC_HVA_UNMAP;
+extern unsigned IOCTL_NVMM_IOC_CTL;
+#endif
+extern unsigned IOCTL_AUTOFSREQUEST;
+extern unsigned IOCTL_AUTOFSDONE;
+extern unsigned IOCTL_BIOCGBLEN;
+extern unsigned IOCTL_BIOCSBLEN;
+extern unsigned IOCTL_BIOCSETF;
+extern unsigned IOCTL_BIOCFLUSH;
+extern unsigned IOCTL_BIOCPROMISC;
+extern unsigned IOCTL_BIOCGDLT;
+extern unsigned IOCTL_BIOCGETIF;
+extern unsigned IOCTL_BIOCSETIF;
+extern unsigned IOCTL_BIOCGSTATS;
+extern unsigned IOCTL_BIOCGSTATSOLD;
+extern unsigned IOCTL_BIOCIMMEDIATE;
+extern unsigned IOCTL_BIOCVERSION;
+extern unsigned IOCTL_BIOCSTCPF;
+extern unsigned IOCTL_BIOCSUDPF;
+extern unsigned IOCTL_BIOCGHDRCMPLT;
+extern unsigned IOCTL_BIOCSHDRCMPLT;
+extern unsigned IOCTL_BIOCSDLT;
+extern unsigned IOCTL_BIOCGDLTLIST;
+extern unsigned IOCTL_BIOCGDIRECTION;
+extern unsigned IOCTL_BIOCSDIRECTION;
+extern unsigned IOCTL_BIOCSRTIMEOUT;
+extern unsigned IOCTL_BIOCGRTIMEOUT;
+extern unsigned IOCTL_BIOCGFEEDBACK;
+extern unsigned IOCTL_BIOCSFEEDBACK;
+extern unsigned IOCTL_GRESADDRS;
+extern unsigned IOCTL_GRESADDRD;
+extern unsigned IOCTL_GREGADDRS;
+extern unsigned IOCTL_GREGADDRD;
+extern unsigned IOCTL_GRESPROTO;
+extern unsigned IOCTL_GREGPROTO;
+extern unsigned IOCTL_GRESSOCK;
+extern unsigned IOCTL_GREDSOCK;
+extern unsigned IOCTL_PPPIOCGRAWIN;
+extern unsigned IOCTL_PPPIOCGFLAGS;
+extern unsigned IOCTL_PPPIOCSFLAGS;
+extern unsigned IOCTL_PPPIOCGASYNCMAP;
+extern unsigned IOCTL_PPPIOCSASYNCMAP;
+extern unsigned IOCTL_PPPIOCGUNIT;
+extern unsigned IOCTL_PPPIOCGRASYNCMAP;
+extern unsigned IOCTL_PPPIOCSRASYNCMAP;
+extern unsigned IOCTL_PPPIOCGMRU;
+extern unsigned IOCTL_PPPIOCSMRU;
+extern unsigned IOCTL_PPPIOCSMAXCID;
+extern unsigned IOCTL_PPPIOCGXASYNCMAP;
+extern unsigned IOCTL_PPPIOCSXASYNCMAP;
+extern unsigned IOCTL_PPPIOCXFERUNIT;
+extern unsigned IOCTL_PPPIOCSCOMPRESS;
+extern unsigned IOCTL_PPPIOCGNPMODE;
+extern unsigned IOCTL_PPPIOCSNPMODE;
+extern unsigned IOCTL_PPPIOCGIDLE;
+extern unsigned IOCTL_PPPIOCGMTU;
+extern unsigned IOCTL_PPPIOCSMTU;
+extern unsigned IOCTL_SIOCGPPPSTATS;
+extern unsigned IOCTL_SIOCGPPPCSTATS;
+extern unsigned IOCTL_IOC_NPF_VERSION;
+extern unsigned IOCTL_IOC_NPF_SWITCH;
+extern unsigned IOCTL_IOC_NPF_LOAD;
+extern unsigned IOCTL_IOC_NPF_TABLE;
+extern unsigned IOCTL_IOC_NPF_STATS;
+extern unsigned IOCTL_IOC_NPF_SAVE;
+extern unsigned IOCTL_IOC_NPF_RULE;
+extern unsigned IOCTL_IOC_NPF_CONN_LOOKUP;
+extern unsigned IOCTL_IOC_NPF_TABLE_REPLACE;
+extern unsigned IOCTL_PPPOESETPARMS;
+extern unsigned IOCTL_PPPOEGETPARMS;
+extern unsigned IOCTL_PPPOEGETSESSION;
+extern unsigned IOCTL_SPPPGETAUTHCFG;
+extern unsigned IOCTL_SPPPSETAUTHCFG;
+extern unsigned IOCTL_SPPPGETLCPCFG;
+extern unsigned IOCTL_SPPPSETLCPCFG;
+extern unsigned IOCTL_SPPPGETSTATUS;
+extern unsigned IOCTL_SPPPGETSTATUSNCP;
+extern unsigned IOCTL_SPPPGETIDLETO;
+extern unsigned IOCTL_SPPPSETIDLETO;
+extern unsigned IOCTL_SPPPGETAUTHFAILURES;
+extern unsigned IOCTL_SPPPSETAUTHFAILURE;
+extern unsigned IOCTL_SPPPSETDNSOPTS;
+extern unsigned IOCTL_SPPPGETDNSOPTS;
+extern unsigned IOCTL_SPPPGETDNSADDRS;
+extern unsigned IOCTL_SPPPSETKEEPALIVE;
+extern unsigned IOCTL_SPPPGETKEEPALIVE;
+extern unsigned IOCTL_SRT_GETNRT;
+extern unsigned IOCTL_SRT_GETRT;
+extern unsigned IOCTL_SRT_SETRT;
+extern unsigned IOCTL_SRT_DELRT;
+extern unsigned IOCTL_SRT_SFLAGS;
+extern unsigned IOCTL_SRT_GFLAGS;
+extern unsigned IOCTL_SRT_SGFLAGS;
+extern unsigned IOCTL_SRT_DEBUG;
+extern unsigned IOCTL_TAPGIFNAME;
+extern unsigned IOCTL_TUNSDEBUG;
+extern unsigned IOCTL_TUNGDEBUG;
+extern unsigned IOCTL_TUNSIFMODE;
+extern unsigned IOCTL_TUNSLMODE;
+extern unsigned IOCTL_TUNSIFHEAD;
+extern unsigned IOCTL_TUNGIFHEAD;
+extern unsigned IOCTL_DIOCSTART;
+extern unsigned IOCTL_DIOCSTOP;
+extern unsigned IOCTL_DIOCADDRULE;
+extern unsigned IOCTL_DIOCGETRULES;
+extern unsigned IOCTL_DIOCGETRULE;
+extern unsigned IOCTL_DIOCSETLCK;
+extern unsigned IOCTL_DIOCCLRSTATES;
+extern unsigned IOCTL_DIOCGETSTATE;
+extern unsigned IOCTL_DIOCSETSTATUSIF;
+extern unsigned IOCTL_DIOCGETSTATUS;
+extern unsigned IOCTL_DIOCCLRSTATUS;
+extern unsigned IOCTL_DIOCNATLOOK;
+extern unsigned IOCTL_DIOCSETDEBUG;
+extern unsigned IOCTL_DIOCGETSTATES;
+extern unsigned IOCTL_DIOCCHANGERULE;
+extern unsigned IOCTL_DIOCSETTIMEOUT;
+extern unsigned IOCTL_DIOCGETTIMEOUT;
+extern unsigned IOCTL_DIOCADDSTATE;
+extern unsigned IOCTL_DIOCCLRRULECTRS;
+extern unsigned IOCTL_DIOCGETLIMIT;
+extern unsigned IOCTL_DIOCSETLIMIT;
+extern unsigned IOCTL_DIOCKILLSTATES;
+extern unsigned IOCTL_DIOCSTARTALTQ;
+extern unsigned IOCTL_DIOCSTOPALTQ;
+extern unsigned IOCTL_DIOCADDALTQ;
+extern unsigned IOCTL_DIOCGETALTQS;
+extern unsigned IOCTL_DIOCGETALTQ;
+extern unsigned IOCTL_DIOCCHANGEALTQ;
+extern unsigned IOCTL_DIOCGETQSTATS;
+extern unsigned IOCTL_DIOCBEGINADDRS;
+extern unsigned IOCTL_DIOCADDADDR;
+extern unsigned IOCTL_DIOCGETADDRS;
+extern unsigned IOCTL_DIOCGETADDR;
+extern unsigned IOCTL_DIOCCHANGEADDR;
+extern unsigned IOCTL_DIOCADDSTATES;
+extern unsigned IOCTL_DIOCGETRULESETS;
+extern unsigned IOCTL_DIOCGETRULESET;
+extern unsigned IOCTL_DIOCRCLRTABLES;
+extern unsigned IOCTL_DIOCRADDTABLES;
+extern unsigned IOCTL_DIOCRDELTABLES;
+extern unsigned IOCTL_DIOCRGETTABLES;
+extern unsigned IOCTL_DIOCRGETTSTATS;
+extern unsigned IOCTL_DIOCRCLRTSTATS;
+extern unsigned IOCTL_DIOCRCLRADDRS;
+extern unsigned IOCTL_DIOCRADDADDRS;
+extern unsigned IOCTL_DIOCRDELADDRS;
+extern unsigned IOCTL_DIOCRSETADDRS;
+extern unsigned IOCTL_DIOCRGETADDRS;
+extern unsigned IOCTL_DIOCRGETASTATS;
+extern unsigned IOCTL_DIOCRCLRASTATS;
+extern unsigned IOCTL_DIOCRTSTADDRS;
+extern unsigned IOCTL_DIOCRSETTFLAGS;
+extern unsigned IOCTL_DIOCRINADEFINE;
+extern unsigned IOCTL_DIOCOSFPFLUSH;
+extern unsigned IOCTL_DIOCOSFPADD;
+extern unsigned IOCTL_DIOCOSFPGET;
+extern unsigned IOCTL_DIOCXBEGIN;
+extern unsigned IOCTL_DIOCXCOMMIT;
+extern unsigned IOCTL_DIOCXROLLBACK;
+extern unsigned IOCTL_DIOCGETSRCNODES;
+extern unsigned IOCTL_DIOCCLRSRCNODES;
+extern unsigned IOCTL_DIOCSETHOSTID;
+extern unsigned IOCTL_DIOCIGETIFACES;
+extern unsigned IOCTL_DIOCSETIFFLAG;
+extern unsigned IOCTL_DIOCCLRIFFLAG;
+extern unsigned IOCTL_DIOCKILLSRCNODES;
+extern unsigned IOCTL_SLIOCGUNIT;
+extern unsigned IOCTL_SIOCGBTINFO;
+extern unsigned IOCTL_SIOCGBTINFOA;
+extern unsigned IOCTL_SIOCNBTINFO;
+extern unsigned IOCTL_SIOCSBTFLAGS;
+extern unsigned IOCTL_SIOCSBTPOLICY;
+extern unsigned IOCTL_SIOCSBTPTYPE;
+extern unsigned IOCTL_SIOCGBTSTATS;
+extern unsigned IOCTL_SIOCZBTSTATS;
+extern unsigned IOCTL_SIOCBTDUMP;
+extern unsigned IOCTL_SIOCSBTSCOMTU;
+extern unsigned IOCTL_SIOCGBTFEAT;
+extern unsigned IOCTL_SIOCADNAT;
+extern unsigned IOCTL_SIOCRMNAT;
+extern unsigned IOCTL_SIOCGNATS;
+extern unsigned IOCTL_SIOCGNATL;
+extern unsigned IOCTL_SIOCPURGENAT;
+extern unsigned IOCTL_SIOCCONNECTX;
+extern unsigned IOCTL_SIOCCONNECTXDEL;
+extern unsigned IOCTL_SIOCSIFINFO_FLAGS;
+extern unsigned IOCTL_SIOCAADDRCTL_POLICY;
+extern unsigned IOCTL_SIOCDADDRCTL_POLICY;
+extern unsigned IOCTL_SMBIOC_OPENSESSION;
+extern unsigned IOCTL_SMBIOC_OPENSHARE;
+extern unsigned IOCTL_SMBIOC_REQUEST;
+extern unsigned IOCTL_SMBIOC_SETFLAGS;
+extern unsigned IOCTL_SMBIOC_LOOKUP;
+extern unsigned IOCTL_SMBIOC_READ;
+extern unsigned IOCTL_SMBIOC_WRITE;
+extern unsigned IOCTL_AGPIOC_INFO;
+extern unsigned IOCTL_AGPIOC_ACQUIRE;
+extern unsigned IOCTL_AGPIOC_RELEASE;
+extern unsigned IOCTL_AGPIOC_SETUP;
+extern unsigned IOCTL_AGPIOC_ALLOCATE;
+extern unsigned IOCTL_AGPIOC_DEALLOCATE;
+extern unsigned IOCTL_AGPIOC_BIND;
+extern unsigned IOCTL_AGPIOC_UNBIND;
+extern unsigned IOCTL_AUDIO_GETINFO;
+extern unsigned IOCTL_AUDIO_SETINFO;
+extern unsigned IOCTL_AUDIO_DRAIN;
+extern unsigned IOCTL_AUDIO_FLUSH;
+extern unsigned IOCTL_AUDIO_WSEEK;
+extern unsigned IOCTL_AUDIO_RERROR;
+extern unsigned IOCTL_AUDIO_GETDEV;
+extern unsigned IOCTL_AUDIO_GETENC;
+extern unsigned IOCTL_AUDIO_GETFD;
+extern unsigned IOCTL_AUDIO_SETFD;
+extern unsigned IOCTL_AUDIO_PERROR;
+extern unsigned IOCTL_AUDIO_GETIOFFS;
+extern unsigned IOCTL_AUDIO_GETOOFFS;
+extern unsigned IOCTL_AUDIO_GETPROPS;
+extern unsigned IOCTL_AUDIO_GETBUFINFO;
+extern unsigned IOCTL_AUDIO_SETCHAN;
+extern unsigned IOCTL_AUDIO_GETCHAN;
+extern unsigned IOCTL_AUDIO_QUERYFORMAT;
+extern unsigned IOCTL_AUDIO_GETFORMAT;
+extern unsigned IOCTL_AUDIO_SETFORMAT;
+extern unsigned IOCTL_AUDIO_MIXER_READ;
+extern unsigned IOCTL_AUDIO_MIXER_WRITE;
+extern unsigned IOCTL_AUDIO_MIXER_DEVINFO;
+extern unsigned IOCTL_ATAIOCCOMMAND;
+extern unsigned IOCTL_ATABUSIOSCAN;
+extern unsigned IOCTL_ATABUSIORESET;
+extern unsigned IOCTL_ATABUSIODETACH;
+extern unsigned IOCTL_CDIOCPLAYTRACKS;
+extern unsigned IOCTL_CDIOCPLAYBLOCKS;
+extern unsigned IOCTL_CDIOCREADSUBCHANNEL;
+extern unsigned IOCTL_CDIOREADTOCHEADER;
+extern unsigned IOCTL_CDIOREADTOCENTRIES;
+extern unsigned IOCTL_CDIOREADMSADDR;
+extern unsigned IOCTL_CDIOCSETPATCH;
+extern unsigned IOCTL_CDIOCGETVOL;
+extern unsigned IOCTL_CDIOCSETVOL;
+extern unsigned IOCTL_CDIOCSETMONO;
+extern unsigned IOCTL_CDIOCSETSTEREO;
+extern unsigned IOCTL_CDIOCSETMUTE;
+extern unsigned IOCTL_CDIOCSETLEFT;
+extern unsigned IOCTL_CDIOCSETRIGHT;
+extern unsigned IOCTL_CDIOCSETDEBUG;
+extern unsigned IOCTL_CDIOCCLRDEBUG;
+extern unsigned IOCTL_CDIOCPAUSE;
+extern unsigned IOCTL_CDIOCRESUME;
+extern unsigned IOCTL_CDIOCRESET;
+extern unsigned IOCTL_CDIOCSTART;
+extern unsigned IOCTL_CDIOCSTOP;
+extern unsigned IOCTL_CDIOCEJECT;
+extern unsigned IOCTL_CDIOCALLOW;
+extern unsigned IOCTL_CDIOCPREVENT;
+extern unsigned IOCTL_CDIOCCLOSE;
+extern unsigned IOCTL_CDIOCPLAYMSF;
+extern unsigned IOCTL_CDIOCLOADUNLOAD;
+extern unsigned IOCTL_CHIOMOVE;
+extern unsigned IOCTL_CHIOEXCHANGE;
+extern unsigned IOCTL_CHIOPOSITION;
+extern unsigned IOCTL_CHIOGPICKER;
+extern unsigned IOCTL_CHIOSPICKER;
+extern unsigned IOCTL_CHIOGPARAMS;
+extern unsigned IOCTL_CHIOIELEM;
+extern unsigned IOCTL_OCHIOGSTATUS;
+extern unsigned IOCTL_CHIOGSTATUS;
+extern unsigned IOCTL_CHIOSVOLTAG;
+extern unsigned IOCTL_CLOCKCTL_SETTIMEOFDAY;
+extern unsigned IOCTL_CLOCKCTL_ADJTIME;
+extern unsigned IOCTL_CLOCKCTL_CLOCK_SETTIME;
+extern unsigned IOCTL_CLOCKCTL_NTP_ADJTIME;
+extern unsigned IOCTL_IOC_CPU_SETSTATE;
+extern unsigned IOCTL_IOC_CPU_GETSTATE;
+extern unsigned IOCTL_IOC_CPU_GETCOUNT;
+extern unsigned IOCTL_IOC_CPU_MAPID;
+extern unsigned IOCTL_IOC_CPU_UCODE_GET_VERSION;
+extern unsigned IOCTL_IOC_CPU_UCODE_APPLY;
+extern unsigned IOCTL_DIOCGDINFO;
+extern unsigned IOCTL_DIOCSDINFO;
+extern unsigned IOCTL_DIOCWDINFO;
+extern unsigned IOCTL_DIOCRFORMAT;
+extern unsigned IOCTL_DIOCWFORMAT;
+extern unsigned IOCTL_DIOCSSTEP;
+extern unsigned IOCTL_DIOCSRETRIES;
+extern unsigned IOCTL_DIOCKLABEL;
+extern unsigned IOCTL_DIOCWLABEL;
+extern unsigned IOCTL_DIOCSBAD;
+extern unsigned IOCTL_DIOCEJECT;
+extern unsigned IOCTL_ODIOCEJECT;
+extern unsigned IOCTL_DIOCLOCK;
+extern unsigned IOCTL_DIOCGDEFLABEL;
+extern unsigned IOCTL_DIOCCLRLABEL;
+extern unsigned IOCTL_DIOCGCACHE;
+extern unsigned IOCTL_DIOCSCACHE;
+extern unsigned IOCTL_DIOCCACHESYNC;
+extern unsigned IOCTL_DIOCBSLIST;
+extern unsigned IOCTL_DIOCBSFLUSH;
+extern unsigned IOCTL_DIOCAWEDGE;
+extern unsigned IOCTL_DIOCGWEDGEINFO;
+extern unsigned IOCTL_DIOCDWEDGE;
+extern unsigned IOCTL_DIOCLWEDGES;
+extern unsigned IOCTL_DIOCGSTRATEGY;
+extern unsigned IOCTL_DIOCSSTRATEGY;
+extern unsigned IOCTL_DIOCGDISKINFO;
+extern unsigned IOCTL_DIOCTUR;
+extern unsigned IOCTL_DIOCMWEDGES;
+extern unsigned IOCTL_DIOCGSECTORSIZE;
+extern unsigned IOCTL_DIOCGMEDIASIZE;
+extern unsigned IOCTL_DIOCRMWEDGES;
+extern unsigned IOCTL_DRVDETACHDEV;
+extern unsigned IOCTL_DRVRESCANBUS;
+extern unsigned IOCTL_DRVCTLCOMMAND;
+extern unsigned IOCTL_DRVRESUMEDEV;
+extern unsigned IOCTL_DRVLISTDEV;
+extern unsigned IOCTL_DRVGETEVENT;
+extern unsigned IOCTL_DRVSUSPENDDEV;
+extern unsigned IOCTL_DVD_READ_STRUCT;
+extern unsigned IOCTL_DVD_WRITE_STRUCT;
+extern unsigned IOCTL_DVD_AUTH;
+extern unsigned IOCTL_ENVSYS_GETDICTIONARY;
+extern unsigned IOCTL_ENVSYS_SETDICTIONARY;
+extern unsigned IOCTL_ENVSYS_REMOVEPROPS;
+extern unsigned IOCTL_ENVSYS_GTREDATA;
+extern unsigned IOCTL_ENVSYS_GTREINFO;
+extern unsigned IOCTL_KFILTER_BYFILTER;
+extern unsigned IOCTL_KFILTER_BYNAME;
+extern unsigned IOCTL_FDIOCGETOPTS;
+extern unsigned IOCTL_FDIOCSETOPTS;
+extern unsigned IOCTL_FDIOCSETFORMAT;
+extern unsigned IOCTL_FDIOCGETFORMAT;
+extern unsigned IOCTL_FDIOCFORMAT_TRACK;
+extern unsigned IOCTL_FIOCLEX;
+extern unsigned IOCTL_FIONCLEX;
+extern unsigned IOCTL_FIOSEEKDATA;
+extern unsigned IOCTL_FIOSEEKHOLE;
+extern unsigned IOCTL_FIONREAD;
+extern unsigned IOCTL_FIONBIO;
+extern unsigned IOCTL_FIOASYNC;
+extern unsigned IOCTL_FIOSETOWN;
+extern unsigned IOCTL_FIOGETOWN;
+extern unsigned IOCTL_OFIOGETBMAP;
+extern unsigned IOCTL_FIOGETBMAP;
+extern unsigned IOCTL_FIONWRITE;
+extern unsigned IOCTL_FIONSPACE;
+extern unsigned IOCTL_GPIOINFO;
+extern unsigned IOCTL_GPIOSET;
+extern unsigned IOCTL_GPIOUNSET;
+extern unsigned IOCTL_GPIOREAD;
+extern unsigned IOCTL_GPIOWRITE;
+extern unsigned IOCTL_GPIOTOGGLE;
+extern unsigned IOCTL_GPIOATTACH;
+extern unsigned IOCTL_PTIOCNETBSD;
+extern unsigned IOCTL_PTIOCSUNOS;
+extern unsigned IOCTL_PTIOCLINUX;
+extern unsigned IOCTL_PTIOCFREEBSD;
+extern unsigned IOCTL_PTIOCULTRIX;
+extern unsigned IOCTL_TIOCHPCL;
+extern unsigned IOCTL_TIOCGETP;
+extern unsigned IOCTL_TIOCSETP;
+extern unsigned IOCTL_TIOCSETN;
+extern unsigned IOCTL_TIOCSETC;
+extern unsigned IOCTL_TIOCGETC;
+extern unsigned IOCTL_TIOCLBIS;
+extern unsigned IOCTL_TIOCLBIC;
+extern unsigned IOCTL_TIOCLSET;
+extern unsigned IOCTL_TIOCLGET;
+extern unsigned IOCTL_TIOCSLTC;
+extern unsigned IOCTL_TIOCGLTC;
+extern unsigned IOCTL_OTIOCCONS;
+extern unsigned IOCTL_JOY_SETTIMEOUT;
+extern unsigned IOCTL_JOY_GETTIMEOUT;
+extern unsigned IOCTL_JOY_SET_X_OFFSET;
+extern unsigned IOCTL_JOY_SET_Y_OFFSET;
+extern unsigned IOCTL_JOY_GET_X_OFFSET;
+extern unsigned IOCTL_JOY_GET_Y_OFFSET;
+extern unsigned IOCTL_OKIOCGSYMBOL;
+extern unsigned IOCTL_OKIOCGVALUE;
+extern unsigned IOCTL_KIOCGSIZE;
+extern unsigned IOCTL_KIOCGVALUE;
+extern unsigned IOCTL_KIOCGSYMBOL;
+extern unsigned IOCTL_LUAINFO;
+extern unsigned IOCTL_LUACREATE;
+extern unsigned IOCTL_LUADESTROY;
+extern unsigned IOCTL_LUAREQUIRE;
+extern unsigned IOCTL_LUALOAD;
+extern unsigned IOCTL_MIDI_PRETIME;
+extern unsigned IOCTL_MIDI_MPUMODE;
+extern unsigned IOCTL_MIDI_MPUCMD;
+extern unsigned IOCTL_SEQUENCER_RESET;
+extern unsigned IOCTL_SEQUENCER_SYNC;
+extern unsigned IOCTL_SEQUENCER_INFO;
+extern unsigned IOCTL_SEQUENCER_CTRLRATE;
+extern unsigned IOCTL_SEQUENCER_GETOUTCOUNT;
+extern unsigned IOCTL_SEQUENCER_GETINCOUNT;
+extern unsigned IOCTL_SEQUENCER_RESETSAMPLES;
+extern unsigned IOCTL_SEQUENCER_NRSYNTHS;
+extern unsigned IOCTL_SEQUENCER_NRMIDIS;
+extern unsigned IOCTL_SEQUENCER_THRESHOLD;
+extern unsigned IOCTL_SEQUENCER_MEMAVL;
+extern unsigned IOCTL_SEQUENCER_PANIC;
+extern unsigned IOCTL_SEQUENCER_OUTOFBAND;
+extern unsigned IOCTL_SEQUENCER_GETTIME;
+extern unsigned IOCTL_SEQUENCER_TMR_TIMEBASE;
+extern unsigned IOCTL_SEQUENCER_TMR_START;
+extern unsigned IOCTL_SEQUENCER_TMR_STOP;
+extern unsigned IOCTL_SEQUENCER_TMR_CONTINUE;
+extern unsigned IOCTL_SEQUENCER_TMR_TEMPO;
+extern unsigned IOCTL_SEQUENCER_TMR_SOURCE;
+extern unsigned IOCTL_SEQUENCER_TMR_METRONOME;
+extern unsigned IOCTL_SEQUENCER_TMR_SELECT;
+extern unsigned IOCTL_SPI_IOCTL_CONFIGURE;
+extern unsigned IOCTL_SPI_IOCTL_TRANSFER;
+extern unsigned IOCTL_MTIOCTOP;
+extern unsigned IOCTL_MTIOCGET;
+extern unsigned IOCTL_MTIOCIEOT;
+extern unsigned IOCTL_MTIOCEEOT;
+extern unsigned IOCTL_MTIOCRDSPOS;
+extern unsigned IOCTL_MTIOCRDHPOS;
+extern unsigned IOCTL_MTIOCSLOCATE;
+extern unsigned IOCTL_MTIOCHLOCATE;
+extern unsigned IOCTL_POWER_EVENT_RECVDICT;
+extern unsigned IOCTL_POWER_IOC_GET_TYPE;
+extern unsigned IOCTL_RIOCGINFO;
+extern unsigned IOCTL_RIOCSINFO;
+extern unsigned IOCTL_RIOCSSRCH;
+extern unsigned IOCTL_RNDGETENTCNT;
+extern unsigned IOCTL_RNDGETSRCNUM;
+extern unsigned IOCTL_RNDGETSRCNAME;
+extern unsigned IOCTL_RNDCTL;
+extern unsigned IOCTL_RNDADDDATA;
+extern unsigned IOCTL_RNDGETPOOLSTAT;
+extern unsigned IOCTL_RNDGETESTNUM;
+extern unsigned IOCTL_RNDGETESTNAME;
+extern unsigned IOCTL_SCIOCGET;
+extern unsigned IOCTL_SCIOCSET;
+extern unsigned IOCTL_SCIOCRESTART;
+extern unsigned IOCTL_SCIOC_USE_ADF;
+extern unsigned IOCTL_SCIOCCOMMAND;
+extern unsigned IOCTL_SCIOCDEBUG;
+extern unsigned IOCTL_SCIOCIDENTIFY;
+extern unsigned IOCTL_OSCIOCIDENTIFY;
+extern unsigned IOCTL_SCIOCDECONFIG;
+extern unsigned IOCTL_SCIOCRECONFIG;
+extern unsigned IOCTL_SCIOCRESET;
+extern unsigned IOCTL_SCBUSIOSCAN;
+extern unsigned IOCTL_SCBUSIORESET;
+extern unsigned IOCTL_SCBUSIODETACH;
+extern unsigned IOCTL_SCBUSACCEL;
+extern unsigned IOCTL_SCBUSIOLLSCAN;
+extern unsigned IOCTL_SIOCSHIWAT;
+extern unsigned IOCTL_SIOCGHIWAT;
+extern unsigned IOCTL_SIOCSLOWAT;
+extern unsigned IOCTL_SIOCGLOWAT;
+extern unsigned IOCTL_SIOCATMARK;
+extern unsigned IOCTL_SIOCSPGRP;
+extern unsigned IOCTL_SIOCGPGRP;
+extern unsigned IOCTL_SIOCPEELOFF;
+extern unsigned IOCTL_SIOCADDRT;
+extern unsigned IOCTL_SIOCDELRT;
+extern unsigned IOCTL_SIOCSIFADDR;
+extern unsigned IOCTL_SIOCGIFADDR;
+extern unsigned IOCTL_SIOCSIFDSTADDR;
+extern unsigned IOCTL_SIOCGIFDSTADDR;
+extern unsigned IOCTL_SIOCSIFFLAGS;
+extern unsigned IOCTL_SIOCGIFFLAGS;
+extern unsigned IOCTL_SIOCGIFBRDADDR;
+extern unsigned IOCTL_SIOCSIFBRDADDR;
+extern unsigned IOCTL_SIOCGIFCONF;
+extern unsigned IOCTL_SIOCGIFNETMASK;
+extern unsigned IOCTL_SIOCSIFNETMASK;
+extern unsigned IOCTL_SIOCGIFMETRIC;
+extern unsigned IOCTL_SIOCSIFMETRIC;
+extern unsigned IOCTL_SIOCDIFADDR;
+extern unsigned IOCTL_SIOCAIFADDR;
+extern unsigned IOCTL_SIOCGIFALIAS;
+extern unsigned IOCTL_SIOCGIFAFLAG_IN;
+extern unsigned IOCTL_SIOCALIFADDR;
+extern unsigned IOCTL_SIOCGLIFADDR;
+extern unsigned IOCTL_SIOCDLIFADDR;
+extern unsigned IOCTL_SIOCSIFADDRPREF;
+extern unsigned IOCTL_SIOCGIFADDRPREF;
+extern unsigned IOCTL_SIOCADDMULTI;
+extern unsigned IOCTL_SIOCDELMULTI;
+extern unsigned IOCTL_SIOCGETVIFCNT;
+extern unsigned IOCTL_SIOCGETSGCNT;
+extern unsigned IOCTL_SIOCSIFMEDIA;
+extern unsigned IOCTL_SIOCGIFMEDIA;
+extern unsigned IOCTL_SIOCSIFGENERIC;
+extern unsigned IOCTL_SIOCGIFGENERIC;
+extern unsigned IOCTL_SIOCSIFPHYADDR;
+extern unsigned IOCTL_SIOCGIFPSRCADDR;
+extern unsigned IOCTL_SIOCGIFPDSTADDR;
+extern unsigned IOCTL_SIOCDIFPHYADDR;
+extern unsigned IOCTL_SIOCSLIFPHYADDR;
+extern unsigned IOCTL_SIOCGLIFPHYADDR;
+extern unsigned IOCTL_SIOCSIFMTU;
+extern unsigned IOCTL_SIOCGIFMTU;
+extern unsigned IOCTL_SIOCSDRVSPEC;
+extern unsigned IOCTL_SIOCGDRVSPEC;
+extern unsigned IOCTL_SIOCIFCREATE;
+extern unsigned IOCTL_SIOCIFDESTROY;
+extern unsigned IOCTL_SIOCIFGCLONERS;
+extern unsigned IOCTL_SIOCGIFDLT;
+extern unsigned IOCTL_SIOCGIFCAP;
+extern unsigned IOCTL_SIOCSIFCAP;
+extern unsigned IOCTL_SIOCSVH;
+extern unsigned IOCTL_SIOCGVH;
+extern unsigned IOCTL_SIOCINITIFADDR;
+extern unsigned IOCTL_SIOCGIFDATA;
+extern unsigned IOCTL_SIOCZIFDATA;
+extern unsigned IOCTL_SIOCGLINKSTR;
+extern unsigned IOCTL_SIOCSLINKSTR;
+extern unsigned IOCTL_SIOCGETHERCAP;
+extern unsigned IOCTL_SIOCGIFINDEX;
+extern unsigned IOCTL_SIOCSETHERCAP;
+extern unsigned IOCTL_SIOCSIFDESCR;
+extern unsigned IOCTL_SIOCGIFDESCR;
+extern unsigned IOCTL_SIOCGUMBINFO;
+extern unsigned IOCTL_SIOCSUMBPARAM;
+extern unsigned IOCTL_SIOCGUMBPARAM;
+extern unsigned IOCTL_SIOCSETPFSYNC;
+extern unsigned IOCTL_SIOCGETPFSYNC;
+extern unsigned IOCTL_PPS_IOC_CREATE;
+extern unsigned IOCTL_PPS_IOC_DESTROY;
+extern unsigned IOCTL_PPS_IOC_SETPARAMS;
+extern unsigned IOCTL_PPS_IOC_GETPARAMS;
+extern unsigned IOCTL_PPS_IOC_GETCAP;
+extern unsigned IOCTL_PPS_IOC_FETCH;
+extern unsigned IOCTL_PPS_IOC_KCBIND;
+extern unsigned IOCTL_TIOCEXCL;
+extern unsigned IOCTL_TIOCNXCL;
+extern unsigned IOCTL_TIOCFLUSH;
+extern unsigned IOCTL_TIOCGETA;
+extern unsigned IOCTL_TIOCSETA;
+extern unsigned IOCTL_TIOCSETAW;
+extern unsigned IOCTL_TIOCSETAF;
+extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCGLINED;
+extern unsigned IOCTL_TIOCSLINED;
+extern unsigned IOCTL_TIOCSBRK;
+extern unsigned IOCTL_TIOCCBRK;
+extern unsigned IOCTL_TIOCSDTR;
+extern unsigned IOCTL_TIOCCDTR;
+extern unsigned IOCTL_TIOCGPGRP;
+extern unsigned IOCTL_TIOCSPGRP;
+extern unsigned IOCTL_TIOCOUTQ;
+extern unsigned IOCTL_TIOCSTI;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSTOP;
+extern unsigned IOCTL_TIOCSTART;
+extern unsigned IOCTL_TIOCMSET;
+extern unsigned IOCTL_TIOCMBIS;
+extern unsigned IOCTL_TIOCMBIC;
+extern unsigned IOCTL_TIOCMGET;
+extern unsigned IOCTL_TIOCREMOTE;
+extern unsigned IOCTL_TIOCGWINSZ;
+extern unsigned IOCTL_TIOCSWINSZ;
+extern unsigned IOCTL_TIOCUCNTL;
+extern unsigned IOCTL_TIOCSTAT;
+extern unsigned IOCTL_TIOCGSID;
+extern unsigned IOCTL_TIOCCONS;
+extern unsigned IOCTL_TIOCSCTTY;
+extern unsigned IOCTL_TIOCEXT;
+extern unsigned IOCTL_TIOCSIG;
+extern unsigned IOCTL_TIOCDRAIN;
+extern unsigned IOCTL_TIOCGFLAGS;
+extern unsigned IOCTL_TIOCSFLAGS;
+extern unsigned IOCTL_TIOCDCDTIMESTAMP;
+extern unsigned IOCTL_TIOCRCVFRAME;
+extern unsigned IOCTL_TIOCXMTFRAME;
+extern unsigned IOCTL_TIOCPTMGET;
+extern unsigned IOCTL_TIOCGRANTPT;
+extern unsigned IOCTL_TIOCPTSNAME;
+extern unsigned IOCTL_TIOCSQSIZE;
+extern unsigned IOCTL_TIOCGQSIZE;
+extern unsigned IOCTL_VERIEXEC_LOAD;
+extern unsigned IOCTL_VERIEXEC_TABLESIZE;
+extern unsigned IOCTL_VERIEXEC_DELETE;
+extern unsigned IOCTL_VERIEXEC_QUERY;
+extern unsigned IOCTL_VERIEXEC_DUMP;
+extern unsigned IOCTL_VERIEXEC_FLUSH;
+extern unsigned IOCTL_VIDIOC_QUERYCAP;
+extern unsigned IOCTL_VIDIOC_RESERVED;
+extern unsigned IOCTL_VIDIOC_ENUM_FMT;
+extern unsigned IOCTL_VIDIOC_G_FMT;
+extern unsigned IOCTL_VIDIOC_S_FMT;
+extern unsigned IOCTL_VIDIOC_REQBUFS;
+extern unsigned IOCTL_VIDIOC_QUERYBUF;
+extern unsigned IOCTL_VIDIOC_G_FBUF;
+extern unsigned IOCTL_VIDIOC_S_FBUF;
+extern unsigned IOCTL_VIDIOC_OVERLAY;
+extern unsigned IOCTL_VIDIOC_QBUF;
+extern unsigned IOCTL_VIDIOC_DQBUF;
+extern unsigned IOCTL_VIDIOC_STREAMON;
+extern unsigned IOCTL_VIDIOC_STREAMOFF;
+extern unsigned IOCTL_VIDIOC_G_PARM;
+extern unsigned IOCTL_VIDIOC_S_PARM;
+extern unsigned IOCTL_VIDIOC_G_STD;
+extern unsigned IOCTL_VIDIOC_S_STD;
+extern unsigned IOCTL_VIDIOC_ENUMSTD;
+extern unsigned IOCTL_VIDIOC_ENUMINPUT;
+extern unsigned IOCTL_VIDIOC_G_CTRL;
+extern unsigned IOCTL_VIDIOC_S_CTRL;
+extern unsigned IOCTL_VIDIOC_G_TUNER;
+extern unsigned IOCTL_VIDIOC_S_TUNER;
+extern unsigned IOCTL_VIDIOC_G_AUDIO;
+extern unsigned IOCTL_VIDIOC_S_AUDIO;
+extern unsigned IOCTL_VIDIOC_QUERYCTRL;
+extern unsigned IOCTL_VIDIOC_QUERYMENU;
+extern unsigned IOCTL_VIDIOC_G_INPUT;
+extern unsigned IOCTL_VIDIOC_S_INPUT;
+extern unsigned IOCTL_VIDIOC_G_OUTPUT;
+extern unsigned IOCTL_VIDIOC_S_OUTPUT;
+extern unsigned IOCTL_VIDIOC_ENUMOUTPUT;
+extern unsigned IOCTL_VIDIOC_G_AUDOUT;
+extern unsigned IOCTL_VIDIOC_S_AUDOUT;
+extern unsigned IOCTL_VIDIOC_G_MODULATOR;
+extern unsigned IOCTL_VIDIOC_S_MODULATOR;
+extern unsigned IOCTL_VIDIOC_G_FREQUENCY;
+extern unsigned IOCTL_VIDIOC_S_FREQUENCY;
+extern unsigned IOCTL_VIDIOC_CROPCAP;
+extern unsigned IOCTL_VIDIOC_G_CROP;
+extern unsigned IOCTL_VIDIOC_S_CROP;
+extern unsigned IOCTL_VIDIOC_G_JPEGCOMP;
+extern unsigned IOCTL_VIDIOC_S_JPEGCOMP;
+extern unsigned IOCTL_VIDIOC_QUERYSTD;
+extern unsigned IOCTL_VIDIOC_TRY_FMT;
+extern unsigned IOCTL_VIDIOC_ENUMAUDIO;
+extern unsigned IOCTL_VIDIOC_ENUMAUDOUT;
+extern unsigned IOCTL_VIDIOC_G_PRIORITY;
+extern unsigned IOCTL_VIDIOC_S_PRIORITY;
+extern unsigned IOCTL_VIDIOC_ENUM_FRAMESIZES;
+extern unsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS;
+extern unsigned IOCTL_WDOGIOC_GMODE;
+extern unsigned IOCTL_WDOGIOC_SMODE;
+extern unsigned IOCTL_WDOGIOC_WHICH;
+extern unsigned IOCTL_WDOGIOC_TICKLE;
+extern unsigned IOCTL_WDOGIOC_GTICKLER;
+extern unsigned IOCTL_WDOGIOC_GWDOGS;
+extern unsigned IOCTL_KCOV_IOC_SETBUFSIZE;
+extern unsigned IOCTL_KCOV_IOC_ENABLE;
+extern unsigned IOCTL_KCOV_IOC_DISABLE;
+extern unsigned IOCTL_IPMICTL_RECEIVE_MSG_TRUNC;
+extern unsigned IOCTL_IPMICTL_RECEIVE_MSG;
+extern unsigned IOCTL_IPMICTL_SEND_COMMAND;
+extern unsigned IOCTL_IPMICTL_REGISTER_FOR_CMD;
+extern unsigned IOCTL_IPMICTL_UNREGISTER_FOR_CMD;
+extern unsigned IOCTL_IPMICTL_SET_GETS_EVENTS_CMD;
+extern unsigned IOCTL_IPMICTL_SET_MY_ADDRESS_CMD;
+extern unsigned IOCTL_IPMICTL_GET_MY_ADDRESS_CMD;
+extern unsigned IOCTL_IPMICTL_SET_MY_LUN_CMD;
+extern unsigned IOCTL_IPMICTL_GET_MY_LUN_CMD;
+extern unsigned IOCTL_SNDCTL_DSP_RESET;
+extern unsigned IOCTL_SNDCTL_DSP_SYNC;
+extern unsigned IOCTL_SNDCTL_DSP_SPEED;
+extern unsigned IOCTL_SOUND_PCM_READ_RATE;
+extern unsigned IOCTL_SNDCTL_DSP_STEREO;
+extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
+extern unsigned IOCTL_SNDCTL_DSP_SETFMT;
+extern unsigned IOCTL_SOUND_PCM_READ_BITS;
+extern unsigned IOCTL_SNDCTL_DSP_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
+extern unsigned IOCTL_SOUND_PCM_READ_FILTER;
+extern unsigned IOCTL_SNDCTL_DSP_POST;
+extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;
+extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;
+extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
+extern unsigned IOCTL_SNDCTL_DSP_GETOSPACE;
+extern unsigned IOCTL_SNDCTL_DSP_GETISPACE;
+extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
+extern unsigned IOCTL_SNDCTL_DSP_GETCAPS;
+extern unsigned IOCTL_SNDCTL_DSP_GETTRIGGER;
+extern unsigned IOCTL_SNDCTL_DSP_SETTRIGGER;
+extern unsigned IOCTL_SNDCTL_DSP_GETIPTR;
+extern unsigned IOCTL_SNDCTL_DSP_GETOPTR;
+extern unsigned IOCTL_SNDCTL_DSP_MAPINBUF;
+extern unsigned IOCTL_SNDCTL_DSP_MAPOUTBUF;
+extern unsigned IOCTL_SNDCTL_DSP_SETSYNCRO;
+extern unsigned IOCTL_SNDCTL_DSP_SETDUPLEX;
+extern unsigned IOCTL_SNDCTL_DSP_PROFILE;
+extern unsigned IOCTL_SNDCTL_DSP_GETODELAY;
+extern unsigned IOCTL_SOUND_MIXER_INFO;
+extern unsigned IOCTL_SOUND_OLD_MIXER_INFO;
+extern unsigned IOCTL_OSS_GETVERSION;
+extern unsigned IOCTL_SNDCTL_SYSINFO;
+extern unsigned IOCTL_SNDCTL_AUDIOINFO;
+extern unsigned IOCTL_SNDCTL_ENGINEINFO;
+extern unsigned IOCTL_SNDCTL_DSP_GETPLAYVOL;
+extern unsigned IOCTL_SNDCTL_DSP_SETPLAYVOL;
+extern unsigned IOCTL_SNDCTL_DSP_GETRECVOL;
+extern unsigned IOCTL_SNDCTL_DSP_SETRECVOL;
+extern unsigned IOCTL_SNDCTL_DSP_SKIP;
+extern unsigned IOCTL_SNDCTL_DSP_SILENCE;
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+
+extern const unsigned SHA1_CTX_sz;
+extern const unsigned SHA1_return_length;
+
+extern const unsigned MD4_CTX_sz;
+extern const unsigned MD4_return_length;
+
+extern const unsigned RMD160_CTX_sz;
+extern const unsigned RMD160_return_length;
+
+extern const unsigned MD5_CTX_sz;
+extern const unsigned MD5_return_length;
+
+extern const unsigned fpos_t_sz;
+
+extern const unsigned MD2_CTX_sz;
+extern const unsigned MD2_return_length;
+
+#define SHA2_EXTERN(LEN) \
+ extern const unsigned SHA##LEN##_CTX_sz; \
+ extern const unsigned SHA##LEN##_return_length; \
+ extern const unsigned SHA##LEN##_block_length; \
+ extern const unsigned SHA##LEN##_digest_length
+
+SHA2_EXTERN(224);
+SHA2_EXTERN(256);
+SHA2_EXTERN(384);
+SHA2_EXTERN(512);
+
+#undef SHA2_EXTERN
+
+extern const int unvis_valid;
+extern const int unvis_validpush;
+
+struct __sanitizer_cdbr {
+ void (*unmap)(void *, void *, uptr);
+ void *cookie;
+ u8 *mmap_base;
+ uptr mmap_size;
+
+ u8 *hash_base;
+ u8 *offset_base;
+ u8 *data_base;
+
+ u32 data_size;
+ u32 entries;
+ u32 entries_index;
+ u32 seed;
+
+ u8 offset_size;
+ u8 index_size;
+
+ u32 entries_m;
+ u32 entries_index_m;
+ u8 entries_s1, entries_s2;
+ u8 entries_index_s1, entries_index_s2;
+};
+
+struct __sanitizer_cdbw {
+ uptr data_counter;
+ uptr data_allocated;
+ uptr data_size;
+ uptr *data_len;
+ void **data_ptr;
+ uptr hash_size;
+ void *hash;
+ uptr key_counter;
+};
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and struct at the same time,
+// and thus requires explicit "struct" in sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#define SIGACTION_SYMNAME __sigaction14
+
+// Compat with 9.0
+extern unsigned struct_statvfs90_sz;
+
+#endif // SANITIZER_NETBSD
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.cpp b/lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
new file mode 100644
index 0000000000..1420ecbfa5
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
@@ -0,0 +1,279 @@
+//===-- sanitizer_platform_limits_openbsd.cpp -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific NetBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_OPENBSD
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <link_elf.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/ppp_defs.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/ip_mroute.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <soundcard.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/filio.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/msg.h>
+#include <sys/mtio.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/shm.h>
+#include <sys/signal.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <term.h>
+#include <time.h>
+#include <utime.h>
+#include <utmp.h>
+#include <wchar.h>
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_openbsd.h"
+
+namespace __sanitizer {
+unsigned struct_utsname_sz = sizeof(struct utsname);
+unsigned struct_stat_sz = sizeof(struct stat);
+unsigned struct_rusage_sz = sizeof(struct rusage);
+unsigned struct_tm_sz = sizeof(struct tm);
+unsigned struct_passwd_sz = sizeof(struct passwd);
+unsigned struct_group_sz = sizeof(struct group);
+unsigned siginfo_t_sz = sizeof(siginfo_t);
+unsigned struct_sigaction_sz = sizeof(struct sigaction);
+unsigned struct_stack_t_sz = sizeof(stack_t);
+unsigned struct_itimerval_sz = sizeof(struct itimerval);
+unsigned pthread_t_sz = sizeof(pthread_t);
+unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+unsigned pid_t_sz = sizeof(pid_t);
+unsigned timeval_sz = sizeof(timeval);
+unsigned uid_t_sz = sizeof(uid_t);
+unsigned gid_t_sz = sizeof(gid_t);
+unsigned mbstate_t_sz = sizeof(mbstate_t);
+unsigned sigset_t_sz = sizeof(sigset_t);
+unsigned struct_timezone_sz = sizeof(struct timezone);
+unsigned struct_tms_sz = sizeof(struct tms);
+unsigned struct_sched_param_sz = sizeof(struct sched_param);
+unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+unsigned struct_rlimit_sz = sizeof(struct rlimit);
+unsigned struct_timespec_sz = sizeof(struct timespec);
+unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+unsigned struct_statvfs_sz = sizeof(struct statvfs);
+
+const uptr sig_ign = (uptr)SIG_IGN;
+const uptr sig_dfl = (uptr)SIG_DFL;
+const uptr sig_err = (uptr)SIG_ERR;
+const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+int shmctl_ipc_stat = (int)IPC_STAT;
+
+unsigned struct_utmp_sz = sizeof(struct utmp);
+
+int map_fixed = MAP_FIXED;
+
+int af_inet = (int)AF_INET;
+int af_inet6 = (int)AF_INET6;
+
+uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+}
+
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+
+int glob_nomatch = GLOB_NOMATCH;
+int glob_altdirfunc = GLOB_ALTDIRFUNC;
+
+unsigned path_max = PATH_MAX;
+
+const int si_SEGV_MAPERR = SEGV_MAPERR;
+const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+CHECK_TYPE_SIZE(dl_phdr_info);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_next);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_fileno);
+CHECK_SIZE_AND_OFFSET(dirent, d_off);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_atimensec);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_dtimensec);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_ctimensec);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#endif // SANITIZER_OPENBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.h b/lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.h
new file mode 100644
index 0000000000..8a19487236
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_openbsd.h
@@ -0,0 +1,382 @@
+//===-- sanitizer_platform_limits_openbsd.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific OpenBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_OPENBSD_H
+#define SANITIZER_PLATFORM_LIMITS_OPENBSD_H
+
+#if SANITIZER_OPENBSD
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+#define _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, shift) \
+ ((link_map *)((handle) == nullptr ? nullptr : ((char *)(handle) + (shift))))
+
+#if defined(__x86_64__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 312)
+#elif defined(__i386__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 164)
+#endif
+
+#define RLIMIT_AS RLIMIT_DATA
+
+namespace __sanitizer {
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_stack_t_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+extern unsigned struct_timespec_sz;
+
+struct __sanitizer_iocb {
+ u64 aio_offset;
+ uptr aio_buf;
+ long aio_nbytes;
+ u32 aio_fildes;
+ u32 aio_lio_opcode;
+ long aio_reqprio;
+#if SANITIZER_WORDSIZE == 64
+ u8 aio_sigevent[32];
+#else
+ u8 aio_sigevent[20];
+#endif
+ u32 _state;
+ u32 _errno;
+ long _retval;
+};
+
+struct __sanitizer___sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ uptr *oldlenp;
+ void *newval;
+ uptr newlen;
+};
+
+struct __sanitizer_sem_t {
+ uptr data[5];
+};
+
+struct __sanitizer_ipc_perm {
+ u32 cuid;
+ u32 cgid;
+ u32 uid;
+ u32 gid;
+ u32 mode;
+ unsigned short seq;
+ long key;
+};
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ int shm_segsz;
+ u32 shm_lpid;
+ u32 shm_cpid;
+ short shm_nattch;
+ u64 shm_atime;
+ long __shm_atimensec;
+ u64 shm_dtime;
+ long __shm_dtimensec;
+ u64 shm_ctime;
+ long __shm_ctimensec;
+ void *_shm_internal;
+};
+
+extern unsigned struct_msqid_ds_sz;
+extern unsigned struct_mq_attr_sz;
+extern unsigned struct_timex_sz;
+extern unsigned struct_statvfs_sz;
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ struct __sanitizer_sockaddr *ifa_addr; // (struct sockaddr *)
+ struct __sanitizer_sockaddr *ifa_netmask; // (struct sockaddr *)
+ struct __sanitizer_sockaddr *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+};
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+typedef long long __sanitizer_time_t;
+typedef int __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+ __sanitizer_time_t pw_change;
+ char *pw_class;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+ __sanitizer_time_t pw_expire;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+struct __sanitizer_dirent {
+ u64 d_fileno;
+ u64 d_off;
+ u16 d_reclen;
+};
+
+typedef u64 __sanitizer_clock_t;
+typedef u32 __sanitizer_clockid_t;
+
+typedef u32 __sanitizer___kernel_uid_t;
+typedef u32 __sanitizer___kernel_gid_t;
+typedef u64 __sanitizer___kernel_off_t;
+typedef struct {
+ u32 fds_bits[8];
+} __sanitizer___kernel_fd_set;
+
+typedef struct {
+ unsigned int pta_magic;
+ int pta_flags;
+ void *pta_private;
+} __sanitizer_pthread_attr_t;
+
+typedef unsigned int __sanitizer_sigset_t;
+
+struct __sanitizer_siginfo {
+ // The size is determined by looking at sizeof of real siginfo_t on linux.
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
+ __sanitizer_siginfo *siginfo,
+ void *uctx);
+
+struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sighandler_ptr handler;
+ __sanitizer_sigactionhandler_ptr sigaction;
+ };
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
+};
+
+typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+#if SANITIZER_WORDSIZE == 64
+ u64 dlpi_addr;
+#else
+ u32 dlpi_addr;
+#endif
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+#if SANITIZER_WORDSIZE == 64
+ u32 dlpi_phnum;
+#else
+ u16 dlpi_phnum;
+#endif
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ unsigned ai_addrlen;
+ struct __sanitizer_sockaddr *ai_addr;
+ char *ai_canonname;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned __sanitizer_nfds_t;
+
+struct __sanitizer_glob_t {
+ int gl_pathc;
+ int gl_matchc;
+ int gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ void **gl_statv;
+ int (*gl_errfunc)(const char *, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void * /* struct stat* */);
+ int (*gl_stat)(const char *, void * /* struct stat* */);
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+typedef char __sanitizer_FILE;
+#define SANITIZER_HAS_STRUCT_FILE 0
+
+extern int shmctl_ipc_stat;
+
+// This simplifies generic code
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
+
+extern unsigned struct_utmp_sz;
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and struct at the same time,
+// and thus requires explicit "struct" in sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#define SIGACTION_SYMNAME __sigaction14
+
+#endif // SANITIZER_OPENBSD
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.cpp b/lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.cpp
new file mode 100644
index 0000000000..c052aa2bc9
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -0,0 +1,1275 @@
+//===-- sanitizer_platform_limits_posix.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+// Tests in this file assume that off_t-dependent data structures match the
+// libc ABI. For example, struct dirent here is what readdir() function (as
+// exported from libc) returns, and not the user-facing "dirent", which
+// depends on _FILE_OFFSET_BITS setting.
+// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
+#ifdef _FILE_OFFSET_BITS
+#undef _FILE_OFFSET_BITS
+#endif
+
+// Must go after undef _FILE_OFFSET_BITS.
+#include "sanitizer_glibc_version.h"
+
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <grp.h>
+#include <limits.h>
+#include <net/if.h>
+#include <netdb.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <termios.h>
+#include <time.h>
+#include <wchar.h>
+#include <regex.h>
+#if !SANITIZER_MAC
+#include <utmp.h>
+#endif
+
+#if !SANITIZER_IOS
+#include <net/route.h>
+#endif
+
+#if !SANITIZER_ANDROID
+#include <fstab.h>
+#include <sys/mount.h>
+#include <sys/timeb.h>
+#include <utmpx.h>
+#endif
+
+#if SANITIZER_LINUX
+#include <malloc.h>
+#include <mntent.h>
+#include <netinet/ether.h>
+#include <sys/sysinfo.h>
+#include <sys/vt.h>
+#include <linux/cdrom.h>
+#include <linux/fd.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/input.h>
+#include <linux/ioctl.h>
+#include <linux/soundcard.h>
+#include <linux/sysctl.h>
+#include <linux/utsname.h>
+#include <linux/posix_types.h>
+#include <net/if_arp.h>
+#endif
+
+#if SANITIZER_IOS
+#undef IOC_DIRMASK
+#endif
+
+#if SANITIZER_LINUX
+# include <utime.h>
+# include <sys/ptrace.h>
+# if defined(__mips64) || defined(__aarch64__) || defined(__arm__)
+# include <asm/ptrace.h>
+# ifdef __arm__
+typedef struct user_fpregs elf_fpregset_t;
+# define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)
+# if !defined(ARM_VFPREGS_SIZE)
+# define ARM_VFPREGS_SIZE ARM_VFPREGS_SIZE_ASAN
+# endif
+# endif
+# endif
+# include <semaphore.h>
+#endif
+
+#if !SANITIZER_ANDROID
+#include <ifaddrs.h>
+#include <sys/ucontext.h>
+#include <wordexp.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <glob.h>
+#include <obstack.h>
+#include <mqueue.h>
+#include <net/if_ppp.h>
+#include <netax25/ax25.h>
+#include <netipx/ipx.h>
+#include <netrom/netrom.h>
+#if HAVE_RPC_XDR_H
+# include <rpc/xdr.h>
+#endif
+#include <scsi/scsi.h>
+#include <sys/mtio.h>
+#include <sys/kd.h>
+#include <sys/shm.h>
+#include <sys/statvfs.h>
+#include <sys/timex.h>
+#if defined(__mips64)
+# include <sys/procfs.h>
+#endif
+#include <sys/user.h>
+#include <linux/cyclades.h>
+#include <linux/if_eql.h>
+#include <linux/if_plip.h>
+#include <linux/lp.h>
+#include <linux/mroute.h>
+#include <linux/mroute6.h>
+#include <linux/scc.h>
+#include <linux/serial.h>
+#include <sys/msg.h>
+#include <sys/ipc.h>
+#include <crypt.h>
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_ANDROID
+#include <linux/kd.h>
+#include <linux/mtio.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#endif
+
+#if SANITIZER_LINUX
+#include <link.h>
+#include <sys/vfs.h>
+#include <sys/epoll.h>
+#include <linux/capability.h>
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_MAC
+#include <net/ethernet.h>
+#include <sys/filio.h>
+#include <sys/sockio.h>
+#endif
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
+namespace __sanitizer {
+ unsigned struct_utsname_sz = sizeof(struct utsname);
+ unsigned struct_stat_sz = sizeof(struct stat);
+#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
+ unsigned struct_stat64_sz = sizeof(struct stat64);
+#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
+ unsigned struct_rusage_sz = sizeof(struct rusage);
+ unsigned struct_tm_sz = sizeof(struct tm);
+ unsigned struct_passwd_sz = sizeof(struct passwd);
+ unsigned struct_group_sz = sizeof(struct group);
+ unsigned siginfo_t_sz = sizeof(siginfo_t);
+ unsigned struct_sigaction_sz = sizeof(struct sigaction);
+ unsigned struct_stack_t_sz = sizeof(stack_t);
+ unsigned struct_itimerval_sz = sizeof(struct itimerval);
+ unsigned pthread_t_sz = sizeof(pthread_t);
+ unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+ unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+ unsigned pid_t_sz = sizeof(pid_t);
+ unsigned timeval_sz = sizeof(timeval);
+ unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
+ unsigned mbstate_t_sz = sizeof(mbstate_t);
+ unsigned sigset_t_sz = sizeof(sigset_t);
+ unsigned struct_timezone_sz = sizeof(struct timezone);
+ unsigned struct_tms_sz = sizeof(struct tms);
+ unsigned struct_sigevent_sz = sizeof(struct sigevent);
+ unsigned struct_sched_param_sz = sizeof(struct sched_param);
+ unsigned struct_regex_sz = sizeof(regex_t);
+ unsigned struct_regmatch_sz = sizeof(regmatch_t);
+
+#if (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
+ unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
+
+#if !SANITIZER_ANDROID
+ unsigned struct_fstab_sz = sizeof(struct fstab);
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+ unsigned ucontext_t_sz = sizeof(ucontext_t);
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX
+ unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
+ unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
+ unsigned __user_cap_header_struct_sz =
+ sizeof(struct __user_cap_header_struct);
+ unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
+ unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
+ unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
+ unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+ unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+ unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ // Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
+ // has been removed from glibc 2.28.
+#if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \
+ || defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \
+ || defined(__x86_64__) || (defined(__riscv) && __riscv_xlen == 64)
+#define SIZEOF_STRUCT_USTAT 32
+#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
+ || defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
+#define SIZEOF_STRUCT_USTAT 20
+#else
+#error Unknown size of struct ustat
+#endif
+ unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;
+ unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
+ unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
+ unsigned struct_crypt_data_sz = sizeof(struct crypt_data);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_timex_sz = sizeof(struct timex);
+ unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+ unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
+ unsigned struct_statvfs_sz = sizeof(struct statvfs);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ const uptr sig_ign = (uptr)SIG_IGN;
+ const uptr sig_dfl = (uptr)SIG_DFL;
+ const uptr sig_err = (uptr)SIG_ERR;
+ const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+#if SANITIZER_LINUX
+ int e_tabsz = (int)E_TABSZ;
+#endif
+
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_shminfo_sz = sizeof(struct shminfo);
+ unsigned struct_shm_info_sz = sizeof(struct shm_info);
+ int shmctl_ipc_stat = (int)IPC_STAT;
+ int shmctl_ipc_info = (int)IPC_INFO;
+ int shmctl_shm_info = (int)SHM_INFO;
+ int shmctl_shm_stat = (int)SHM_STAT;
+#endif
+
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ unsigned struct_utmp_sz = sizeof(struct utmp);
+#endif
+#if !SANITIZER_ANDROID
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+#endif
+
+ int map_fixed = MAP_FIXED;
+
+ int af_inet = (int)AF_INET;
+ int af_inet6 = (int)AF_INET6;
+
+ uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+ }
+
+#if SANITIZER_LINUX
+unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
+#elif SANITIZER_FREEBSD
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ int glob_nomatch = GLOB_NOMATCH;
+ int glob_altdirfunc = GLOB_ALTDIRFUNC;
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
+#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
+#elif defined(__aarch64__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
+#elif defined(__s390__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);
+#else
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
+#endif // __mips64 || __powerpc64__ || __aarch64__
+#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
+ defined(__aarch64__) || defined(__arm__) || defined(__s390__)
+ unsigned struct_user_fpxregs_struct_sz = 0;
+#else
+ unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
+#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
+// || __s390__
+#ifdef __arm__
+ unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
+#else
+ unsigned struct_user_vfpregs_struct_sz = 0;
+#endif
+
+ int ptrace_peektext = PTRACE_PEEKTEXT;
+ int ptrace_peekdata = PTRACE_PEEKDATA;
+ int ptrace_peekuser = PTRACE_PEEKUSER;
+#if (defined(PTRACE_GETREGS) && defined(PTRACE_SETREGS)) || \
+ (defined(PT_GETREGS) && defined(PT_SETREGS))
+ int ptrace_getregs = PTRACE_GETREGS;
+ int ptrace_setregs = PTRACE_SETREGS;
+#else
+ int ptrace_getregs = -1;
+ int ptrace_setregs = -1;
+#endif
+#if (defined(PTRACE_GETFPREGS) && defined(PTRACE_SETFPREGS)) || \
+ (defined(PT_GETFPREGS) && defined(PT_SETFPREGS))
+ int ptrace_getfpregs = PTRACE_GETFPREGS;
+ int ptrace_setfpregs = PTRACE_SETFPREGS;
+#else
+ int ptrace_getfpregs = -1;
+ int ptrace_setfpregs = -1;
+#endif
+#if (defined(PTRACE_GETFPXREGS) && defined(PTRACE_SETFPXREGS)) || \
+ (defined(PT_GETFPXREGS) && defined(PT_SETFPXREGS))
+ int ptrace_getfpxregs = PTRACE_GETFPXREGS;
+ int ptrace_setfpxregs = PTRACE_SETFPXREGS;
+#else
+ int ptrace_getfpxregs = -1;
+ int ptrace_setfpxregs = -1;
+#endif // PTRACE_GETFPXREGS/PTRACE_SETFPXREGS
+#if defined(PTRACE_GETVFPREGS) && defined(PTRACE_SETVFPREGS)
+ int ptrace_getvfpregs = PTRACE_GETVFPREGS;
+ int ptrace_setvfpregs = PTRACE_SETVFPREGS;
+#else
+ int ptrace_getvfpregs = -1;
+ int ptrace_setvfpregs = -1;
+#endif
+ int ptrace_geteventmsg = PTRACE_GETEVENTMSG;
+#if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \
+ (defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO))
+ int ptrace_getsiginfo = PTRACE_GETSIGINFO;
+ int ptrace_setsiginfo = PTRACE_SETSIGINFO;
+#else
+ int ptrace_getsiginfo = -1;
+ int ptrace_setsiginfo = -1;
+#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO
+#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
+ int ptrace_getregset = PTRACE_GETREGSET;
+ int ptrace_setregset = PTRACE_SETREGSET;
+#else
+ int ptrace_getregset = -1;
+ int ptrace_setregset = -1;
+#endif // PTRACE_GETREGSET/PTRACE_SETREGSET
+#endif
+
+ unsigned path_max = PATH_MAX;
+
+ // ioctl arguments
+ unsigned struct_ifreq_sz = sizeof(struct ifreq);
+ unsigned struct_termios_sz = sizeof(struct termios);
+ unsigned struct_winsize_sz = sizeof(struct winsize);
+
+#if SANITIZER_LINUX
+ unsigned struct_arpreq_sz = sizeof(struct arpreq);
+ unsigned struct_cdrom_msf_sz = sizeof(struct cdrom_msf);
+ unsigned struct_cdrom_multisession_sz = sizeof(struct cdrom_multisession);
+ unsigned struct_cdrom_read_audio_sz = sizeof(struct cdrom_read_audio);
+ unsigned struct_cdrom_subchnl_sz = sizeof(struct cdrom_subchnl);
+ unsigned struct_cdrom_ti_sz = sizeof(struct cdrom_ti);
+ unsigned struct_cdrom_tocentry_sz = sizeof(struct cdrom_tocentry);
+ unsigned struct_cdrom_tochdr_sz = sizeof(struct cdrom_tochdr);
+ unsigned struct_cdrom_volctrl_sz = sizeof(struct cdrom_volctrl);
+ unsigned struct_ff_effect_sz = sizeof(struct ff_effect);
+ unsigned struct_floppy_drive_params_sz = sizeof(struct floppy_drive_params);
+ unsigned struct_floppy_drive_struct_sz = sizeof(struct floppy_drive_struct);
+ unsigned struct_floppy_fdc_state_sz = sizeof(struct floppy_fdc_state);
+ unsigned struct_floppy_max_errors_sz = sizeof(struct floppy_max_errors);
+ unsigned struct_floppy_raw_cmd_sz = sizeof(struct floppy_raw_cmd);
+ unsigned struct_floppy_struct_sz = sizeof(struct floppy_struct);
+ unsigned struct_floppy_write_errors_sz = sizeof(struct floppy_write_errors);
+ unsigned struct_format_descr_sz = sizeof(struct format_descr);
+ unsigned struct_hd_driveid_sz = sizeof(struct hd_driveid);
+ unsigned struct_hd_geometry_sz = sizeof(struct hd_geometry);
+ unsigned struct_input_absinfo_sz = sizeof(struct input_absinfo);
+ unsigned struct_input_id_sz = sizeof(struct input_id);
+ unsigned struct_mtpos_sz = sizeof(struct mtpos);
+ unsigned struct_rtentry_sz = sizeof(struct rtentry);
+ unsigned struct_termio_sz = sizeof(struct termio);
+ unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
+ unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
+ unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+#if SOUND_VERSION >= 0x040000
+ unsigned struct_copr_buffer_sz = 0;
+ unsigned struct_copr_debug_buf_sz = 0;
+ unsigned struct_copr_msg_sz = 0;
+#else
+ unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
+ unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
+ unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
+#endif
+ unsigned struct_midi_info_sz = sizeof(struct midi_info);
+ unsigned struct_mtget_sz = sizeof(struct mtget);
+ unsigned struct_mtop_sz = sizeof(struct mtop);
+ unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);
+ unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);
+ unsigned struct_synth_info_sz = sizeof(struct synth_info);
+ unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
+ unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
+#if EV_VERSION > (0x010000)
+ unsigned struct_input_keymap_entry_sz = sizeof(struct input_keymap_entry);
+#else
+ unsigned struct_input_keymap_entry_sz = 0;
+#endif
+ unsigned struct_ipx_config_data_sz = sizeof(struct ipx_config_data);
+ unsigned struct_kbdiacrs_sz = sizeof(struct kbdiacrs);
+ unsigned struct_kbentry_sz = sizeof(struct kbentry);
+ unsigned struct_kbkeycode_sz = sizeof(struct kbkeycode);
+ unsigned struct_kbsentry_sz = sizeof(struct kbsentry);
+ unsigned struct_mtconfiginfo_sz = sizeof(struct mtconfiginfo);
+ unsigned struct_nr_parms_struct_sz = sizeof(struct nr_parms_struct);
+ unsigned struct_scc_modem_sz = sizeof(struct scc_modem);
+ unsigned struct_scc_stat_sz = sizeof(struct scc_stat);
+ unsigned struct_serial_multiport_struct_sz
+ = sizeof(struct serial_multiport_struct);
+ unsigned struct_serial_struct_sz = sizeof(struct serial_struct);
+ unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
+ unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
+ unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+ unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+
+#if !SANITIZER_ANDROID && !SANITIZER_MAC
+ unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
+ unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+#endif
+
+ const unsigned long __sanitizer_bufsiz = BUFSIZ;
+
+ const unsigned IOCTL_NOT_PRESENT = 0;
+
+ unsigned IOCTL_FIOASYNC = FIOASYNC;
+ unsigned IOCTL_FIOCLEX = FIOCLEX;
+ unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+ unsigned IOCTL_FIONBIO = FIONBIO;
+ unsigned IOCTL_FIONCLEX = FIONCLEX;
+ unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+ unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+ unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+ unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+ unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+ unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+ unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+ unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+ unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+ unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+ unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+ unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+ unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+ unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+ unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+ unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+ unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+ unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+ unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+ unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+ unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+ unsigned IOCTL_TIOCCONS = TIOCCONS;
+ unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+ unsigned IOCTL_TIOCGETD = TIOCGETD;
+ unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+ unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+ unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+ unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+ unsigned IOCTL_TIOCMGET = TIOCMGET;
+ unsigned IOCTL_TIOCMSET = TIOCMSET;
+ unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+ unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+ unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+ unsigned IOCTL_TIOCPKT = TIOCPKT;
+ unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+ unsigned IOCTL_TIOCSETD = TIOCSETD;
+ unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+ unsigned IOCTL_TIOCSTI = TIOCSTI;
+ unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
+ unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
+#endif
+
+#if SANITIZER_LINUX
+ unsigned IOCTL_EVIOCGABS = EVIOCGABS(0);
+ unsigned IOCTL_EVIOCGBIT = EVIOCGBIT(0, 0);
+ unsigned IOCTL_EVIOCGEFFECTS = EVIOCGEFFECTS;
+ unsigned IOCTL_EVIOCGID = EVIOCGID;
+ unsigned IOCTL_EVIOCGKEY = EVIOCGKEY(0);
+ unsigned IOCTL_EVIOCGKEYCODE = EVIOCGKEYCODE;
+ unsigned IOCTL_EVIOCGLED = EVIOCGLED(0);
+ unsigned IOCTL_EVIOCGNAME = EVIOCGNAME(0);
+ unsigned IOCTL_EVIOCGPHYS = EVIOCGPHYS(0);
+ unsigned IOCTL_EVIOCGRAB = EVIOCGRAB;
+ unsigned IOCTL_EVIOCGREP = EVIOCGREP;
+ unsigned IOCTL_EVIOCGSND = EVIOCGSND(0);
+ unsigned IOCTL_EVIOCGSW = EVIOCGSW(0);
+ unsigned IOCTL_EVIOCGUNIQ = EVIOCGUNIQ(0);
+ unsigned IOCTL_EVIOCGVERSION = EVIOCGVERSION;
+ unsigned IOCTL_EVIOCRMFF = EVIOCRMFF;
+ unsigned IOCTL_EVIOCSABS = EVIOCSABS(0);
+ unsigned IOCTL_EVIOCSFF = EVIOCSFF;
+ unsigned IOCTL_EVIOCSKEYCODE = EVIOCSKEYCODE;
+ unsigned IOCTL_EVIOCSREP = EVIOCSREP;
+ unsigned IOCTL_BLKFLSBUF = BLKFLSBUF;
+ unsigned IOCTL_BLKGETSIZE = BLKGETSIZE;
+ unsigned IOCTL_BLKRAGET = BLKRAGET;
+ unsigned IOCTL_BLKRASET = BLKRASET;
+ unsigned IOCTL_BLKROGET = BLKROGET;
+ unsigned IOCTL_BLKROSET = BLKROSET;
+ unsigned IOCTL_BLKRRPART = BLKRRPART;
+ unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ;
+ unsigned IOCTL_CDROMEJECT = CDROMEJECT;
+ unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW;
+ unsigned IOCTL_CDROMMULTISESSION = CDROMMULTISESSION;
+ unsigned IOCTL_CDROMPAUSE = CDROMPAUSE;
+ unsigned IOCTL_CDROMPLAYMSF = CDROMPLAYMSF;
+ unsigned IOCTL_CDROMPLAYTRKIND = CDROMPLAYTRKIND;
+ unsigned IOCTL_CDROMREADAUDIO = CDROMREADAUDIO;
+ unsigned IOCTL_CDROMREADCOOKED = CDROMREADCOOKED;
+ unsigned IOCTL_CDROMREADMODE1 = CDROMREADMODE1;
+ unsigned IOCTL_CDROMREADMODE2 = CDROMREADMODE2;
+ unsigned IOCTL_CDROMREADRAW = CDROMREADRAW;
+ unsigned IOCTL_CDROMREADTOCENTRY = CDROMREADTOCENTRY;
+ unsigned IOCTL_CDROMREADTOCHDR = CDROMREADTOCHDR;
+ unsigned IOCTL_CDROMRESET = CDROMRESET;
+ unsigned IOCTL_CDROMRESUME = CDROMRESUME;
+ unsigned IOCTL_CDROMSEEK = CDROMSEEK;
+ unsigned IOCTL_CDROMSTART = CDROMSTART;
+ unsigned IOCTL_CDROMSTOP = CDROMSTOP;
+ unsigned IOCTL_CDROMSUBCHNL = CDROMSUBCHNL;
+ unsigned IOCTL_CDROMVOLCTRL = CDROMVOLCTRL;
+ unsigned IOCTL_CDROMVOLREAD = CDROMVOLREAD;
+ unsigned IOCTL_CDROM_GET_UPC = CDROM_GET_UPC;
+ unsigned IOCTL_FDCLRPRM = FDCLRPRM;
+ unsigned IOCTL_FDDEFPRM = FDDEFPRM;
+ unsigned IOCTL_FDFLUSH = FDFLUSH;
+ unsigned IOCTL_FDFMTBEG = FDFMTBEG;
+ unsigned IOCTL_FDFMTEND = FDFMTEND;
+ unsigned IOCTL_FDFMTTRK = FDFMTTRK;
+ unsigned IOCTL_FDGETDRVPRM = FDGETDRVPRM;
+ unsigned IOCTL_FDGETDRVSTAT = FDGETDRVSTAT;
+ unsigned IOCTL_FDGETDRVTYP = FDGETDRVTYP;
+ unsigned IOCTL_FDGETFDCSTAT = FDGETFDCSTAT;
+ unsigned IOCTL_FDGETMAXERRS = FDGETMAXERRS;
+ unsigned IOCTL_FDGETPRM = FDGETPRM;
+ unsigned IOCTL_FDMSGOFF = FDMSGOFF;
+ unsigned IOCTL_FDMSGON = FDMSGON;
+ unsigned IOCTL_FDPOLLDRVSTAT = FDPOLLDRVSTAT;
+ unsigned IOCTL_FDRAWCMD = FDRAWCMD;
+ unsigned IOCTL_FDRESET = FDRESET;
+ unsigned IOCTL_FDSETDRVPRM = FDSETDRVPRM;
+ unsigned IOCTL_FDSETEMSGTRESH = FDSETEMSGTRESH;
+ unsigned IOCTL_FDSETMAXERRS = FDSETMAXERRS;
+ unsigned IOCTL_FDSETPRM = FDSETPRM;
+ unsigned IOCTL_FDTWADDLE = FDTWADDLE;
+ unsigned IOCTL_FDWERRORCLR = FDWERRORCLR;
+ unsigned IOCTL_FDWERRORGET = FDWERRORGET;
+ unsigned IOCTL_HDIO_DRIVE_CMD = HDIO_DRIVE_CMD;
+ unsigned IOCTL_HDIO_GETGEO = HDIO_GETGEO;
+ unsigned IOCTL_HDIO_GET_32BIT = HDIO_GET_32BIT;
+ unsigned IOCTL_HDIO_GET_DMA = HDIO_GET_DMA;
+ unsigned IOCTL_HDIO_GET_IDENTITY = HDIO_GET_IDENTITY;
+ unsigned IOCTL_HDIO_GET_KEEPSETTINGS = HDIO_GET_KEEPSETTINGS;
+ unsigned IOCTL_HDIO_GET_MULTCOUNT = HDIO_GET_MULTCOUNT;
+ unsigned IOCTL_HDIO_GET_NOWERR = HDIO_GET_NOWERR;
+ unsigned IOCTL_HDIO_GET_UNMASKINTR = HDIO_GET_UNMASKINTR;
+ unsigned IOCTL_HDIO_SET_32BIT = HDIO_SET_32BIT;
+ unsigned IOCTL_HDIO_SET_DMA = HDIO_SET_DMA;
+ unsigned IOCTL_HDIO_SET_KEEPSETTINGS = HDIO_SET_KEEPSETTINGS;
+ unsigned IOCTL_HDIO_SET_MULTCOUNT = HDIO_SET_MULTCOUNT;
+ unsigned IOCTL_HDIO_SET_NOWERR = HDIO_SET_NOWERR;
+ unsigned IOCTL_HDIO_SET_UNMASKINTR = HDIO_SET_UNMASKINTR;
+ unsigned IOCTL_MTIOCPOS = MTIOCPOS;
+ unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;
+ unsigned IOCTL_PPPIOCGDEBUG = PPPIOCGDEBUG;
+ unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;
+ unsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT;
+ unsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP;
+ unsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP;
+ unsigned IOCTL_PPPIOCSDEBUG = PPPIOCSDEBUG;
+ unsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS;
+ unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;
+ unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;
+ unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;
+ unsigned IOCTL_SIOCADDRT = SIOCADDRT;
+ unsigned IOCTL_SIOCDARP = SIOCDARP;
+ unsigned IOCTL_SIOCDELRT = SIOCDELRT;
+ unsigned IOCTL_SIOCDRARP = SIOCDRARP;
+ unsigned IOCTL_SIOCGARP = SIOCGARP;
+ unsigned IOCTL_SIOCGIFENCAP = SIOCGIFENCAP;
+ unsigned IOCTL_SIOCGIFHWADDR = SIOCGIFHWADDR;
+ unsigned IOCTL_SIOCGIFMAP = SIOCGIFMAP;
+ unsigned IOCTL_SIOCGIFMEM = SIOCGIFMEM;
+ unsigned IOCTL_SIOCGIFNAME = SIOCGIFNAME;
+ unsigned IOCTL_SIOCGIFSLAVE = SIOCGIFSLAVE;
+ unsigned IOCTL_SIOCGRARP = SIOCGRARP;
+ unsigned IOCTL_SIOCGSTAMP = SIOCGSTAMP;
+ unsigned IOCTL_SIOCSARP = SIOCSARP;
+ unsigned IOCTL_SIOCSIFENCAP = SIOCSIFENCAP;
+ unsigned IOCTL_SIOCSIFHWADDR = SIOCSIFHWADDR;
+ unsigned IOCTL_SIOCSIFLINK = SIOCSIFLINK;
+ unsigned IOCTL_SIOCSIFMAP = SIOCSIFMAP;
+ unsigned IOCTL_SIOCSIFMEM = SIOCSIFMEM;
+ unsigned IOCTL_SIOCSIFSLAVE = SIOCSIFSLAVE;
+ unsigned IOCTL_SIOCSRARP = SIOCSRARP;
+# if SOUND_VERSION >= 0x040000
+ unsigned IOCTL_SNDCTL_COPR_HALT = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_LOAD = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RCODE = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RCVMSG = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RDATA = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RESET = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RUN = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_SENDMSG = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_WCODE = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_WDATA = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_BITS = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_CHANNELS = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_FILTER = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_RATE = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_WRITE_FILTER = IOCTL_NOT_PRESENT;
+# else // SOUND_VERSION
+ unsigned IOCTL_SNDCTL_COPR_HALT = SNDCTL_COPR_HALT;
+ unsigned IOCTL_SNDCTL_COPR_LOAD = SNDCTL_COPR_LOAD;
+ unsigned IOCTL_SNDCTL_COPR_RCODE = SNDCTL_COPR_RCODE;
+ unsigned IOCTL_SNDCTL_COPR_RCVMSG = SNDCTL_COPR_RCVMSG;
+ unsigned IOCTL_SNDCTL_COPR_RDATA = SNDCTL_COPR_RDATA;
+ unsigned IOCTL_SNDCTL_COPR_RESET = SNDCTL_COPR_RESET;
+ unsigned IOCTL_SNDCTL_COPR_RUN = SNDCTL_COPR_RUN;
+ unsigned IOCTL_SNDCTL_COPR_SENDMSG = SNDCTL_COPR_SENDMSG;
+ unsigned IOCTL_SNDCTL_COPR_WCODE = SNDCTL_COPR_WCODE;
+ unsigned IOCTL_SNDCTL_COPR_WDATA = SNDCTL_COPR_WDATA;
+ unsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS;
+ unsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS;
+ unsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER;
+ unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;
+ unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS;
+ unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
+#endif // SOUND_VERSION
+ unsigned IOCTL_TCFLSH = TCFLSH;
+ unsigned IOCTL_TCGETA = TCGETA;
+ unsigned IOCTL_TCGETS = TCGETS;
+ unsigned IOCTL_TCSBRK = TCSBRK;
+ unsigned IOCTL_TCSBRKP = TCSBRKP;
+ unsigned IOCTL_TCSETA = TCSETA;
+ unsigned IOCTL_TCSETAF = TCSETAF;
+ unsigned IOCTL_TCSETAW = TCSETAW;
+ unsigned IOCTL_TCSETS = TCSETS;
+ unsigned IOCTL_TCSETSF = TCSETSF;
+ unsigned IOCTL_TCSETSW = TCSETSW;
+ unsigned IOCTL_TCXONC = TCXONC;
+ unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
+ unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
+ unsigned IOCTL_TIOCINQ = TIOCINQ;
+ unsigned IOCTL_TIOCLINUX = TIOCLINUX;
+ unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
+ unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
+ unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
+ unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
+ unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
+ unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
+ unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
+ unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
+ unsigned IOCTL_VT_RESIZE = VT_RESIZE;
+ unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
+ unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+ unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
+ unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
+ unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
+ unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;
+ unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;
+ unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;
+ unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;
+ unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;
+ unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;
+ unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;
+ unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;
+ unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE;
+ unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR;
+ unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO;
+ unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME;
+ unsigned IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE;
+ unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT;
+ unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT;
+ unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS;
+ unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS;
+ unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND;
+ unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC;
+ unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE;
+ unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET;
+ unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES;
+ unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC;
+ unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI;
+ unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD;
+ unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO;
+ unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL;
+ unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE;
+ unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME;
+ unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT;
+ unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE;
+ unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START;
+ unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP;
+ unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO;
+ unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE;
+ unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM;
+ unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS;
+ unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS;
+ unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD;
+ unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK;
+ unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE;
+ unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN;
+ unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE = SOUND_MIXER_READ_LINE;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3;
+ unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD;
+ unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC;
+ unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE;
+ unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN;
+ unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM;
+ unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV;
+ unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK;
+ unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC;
+ unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER;
+ unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS;
+ unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH;
+ unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE;
+ unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME;
+ unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM;
+ unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS;
+ unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD;
+ unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN;
+ unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD;
+ unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC;
+ unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN;
+ unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM;
+ unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV;
+ unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC;
+ unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER;
+ unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;
+ unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;
+ unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
+ unsigned IOCTL_VT_GETMODE = VT_GETMODE;
+ unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
+ unsigned IOCTL_VT_RELDISP = VT_RELDISP;
+ unsigned IOCTL_VT_SETMODE = VT_SETMODE;
+ unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
+ unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
+ unsigned IOCTL_CYGETMON = CYGETMON;
+ unsigned IOCTL_CYGETTHRESH = CYGETTHRESH;
+ unsigned IOCTL_CYGETTIMEOUT = CYGETTIMEOUT;
+ unsigned IOCTL_CYSETDEFTHRESH = CYSETDEFTHRESH;
+ unsigned IOCTL_CYSETDEFTIMEOUT = CYSETDEFTIMEOUT;
+ unsigned IOCTL_CYSETTHRESH = CYSETTHRESH;
+ unsigned IOCTL_CYSETTIMEOUT = CYSETTIMEOUT;
+ unsigned IOCTL_EQL_EMANCIPATE = EQL_EMANCIPATE;
+ unsigned IOCTL_EQL_ENSLAVE = EQL_ENSLAVE;
+ unsigned IOCTL_EQL_GETMASTRCFG = EQL_GETMASTRCFG;
+ unsigned IOCTL_EQL_GETSLAVECFG = EQL_GETSLAVECFG;
+ unsigned IOCTL_EQL_SETMASTRCFG = EQL_SETMASTRCFG;
+ unsigned IOCTL_EQL_SETSLAVECFG = EQL_SETSLAVECFG;
+#if EV_VERSION > (0x010000)
+ unsigned IOCTL_EVIOCGKEYCODE_V2 = EVIOCGKEYCODE_V2;
+ unsigned IOCTL_EVIOCGPROP = EVIOCGPROP(0);
+ unsigned IOCTL_EVIOCSKEYCODE_V2 = EVIOCSKEYCODE_V2;
+#else
+ unsigned IOCTL_EVIOCGKEYCODE_V2 = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_EVIOCGPROP = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_EVIOCSKEYCODE_V2 = IOCTL_NOT_PRESENT;
+#endif
+ unsigned IOCTL_FS_IOC_GETFLAGS = FS_IOC_GETFLAGS;
+ unsigned IOCTL_FS_IOC_GETVERSION = FS_IOC_GETVERSION;
+ unsigned IOCTL_FS_IOC_SETFLAGS = FS_IOC_SETFLAGS;
+ unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
+ unsigned IOCTL_GIO_CMAP = GIO_CMAP;
+ unsigned IOCTL_GIO_FONT = GIO_FONT;
+ unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
+ unsigned IOCTL_GIO_UNISCRNMAP = GIO_UNISCRNMAP;
+ unsigned IOCTL_KDADDIO = KDADDIO;
+ unsigned IOCTL_KDDELIO = KDDELIO;
+ unsigned IOCTL_KDGETKEYCODE = KDGETKEYCODE;
+ unsigned IOCTL_KDGKBDIACR = KDGKBDIACR;
+ unsigned IOCTL_KDGKBENT = KDGKBENT;
+ unsigned IOCTL_KDGKBLED = KDGKBLED;
+ unsigned IOCTL_KDGKBMETA = KDGKBMETA;
+ unsigned IOCTL_KDGKBSENT = KDGKBSENT;
+ unsigned IOCTL_KDMAPDISP = KDMAPDISP;
+ unsigned IOCTL_KDSETKEYCODE = KDSETKEYCODE;
+ unsigned IOCTL_KDSIGACCEPT = KDSIGACCEPT;
+ unsigned IOCTL_KDSKBDIACR = KDSKBDIACR;
+ unsigned IOCTL_KDSKBENT = KDSKBENT;
+ unsigned IOCTL_KDSKBLED = KDSKBLED;
+ unsigned IOCTL_KDSKBMETA = KDSKBMETA;
+ unsigned IOCTL_KDSKBSENT = KDSKBSENT;
+ unsigned IOCTL_KDUNMAPDISP = KDUNMAPDISP;
+ unsigned IOCTL_LPABORT = LPABORT;
+ unsigned IOCTL_LPABORTOPEN = LPABORTOPEN;
+ unsigned IOCTL_LPCAREFUL = LPCAREFUL;
+ unsigned IOCTL_LPCHAR = LPCHAR;
+ unsigned IOCTL_LPGETIRQ = LPGETIRQ;
+ unsigned IOCTL_LPGETSTATUS = LPGETSTATUS;
+ unsigned IOCTL_LPRESET = LPRESET;
+ unsigned IOCTL_LPSETIRQ = LPSETIRQ;
+ unsigned IOCTL_LPTIME = LPTIME;
+ unsigned IOCTL_LPWAIT = LPWAIT;
+ unsigned IOCTL_MTIOCGETCONFIG = MTIOCGETCONFIG;
+ unsigned IOCTL_MTIOCSETCONFIG = MTIOCSETCONFIG;
+ unsigned IOCTL_PIO_CMAP = PIO_CMAP;
+ unsigned IOCTL_PIO_FONT = PIO_FONT;
+ unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
+ unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
+ unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
+ unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN;
+ unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST;
+ unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE;
+ unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE = SCSI_IOCTL_TAGGED_ENABLE;
+ unsigned IOCTL_SIOCAIPXITFCRT = SIOCAIPXITFCRT;
+ unsigned IOCTL_SIOCAIPXPRISLT = SIOCAIPXPRISLT;
+ unsigned IOCTL_SIOCAX25ADDUID = SIOCAX25ADDUID;
+ unsigned IOCTL_SIOCAX25DELUID = SIOCAX25DELUID;
+ unsigned IOCTL_SIOCAX25GETPARMS = SIOCAX25GETPARMS;
+ unsigned IOCTL_SIOCAX25GETUID = SIOCAX25GETUID;
+ unsigned IOCTL_SIOCAX25NOUID = SIOCAX25NOUID;
+ unsigned IOCTL_SIOCAX25SETPARMS = SIOCAX25SETPARMS;
+ unsigned IOCTL_SIOCDEVPLIP = SIOCDEVPLIP;
+ unsigned IOCTL_SIOCIPXCFGDATA = SIOCIPXCFGDATA;
+ unsigned IOCTL_SIOCNRDECOBS = SIOCNRDECOBS;
+ unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
+ unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
+ unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
+ unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
+ unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
+ unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
+ unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
+ unsigned IOCTL_KDDISABIO = KDDISABIO;
+ unsigned IOCTL_KDENABIO = KDENABIO;
+ unsigned IOCTL_KDGETLED = KDGETLED;
+ unsigned IOCTL_KDGETMODE = KDGETMODE;
+ unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+ unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+ unsigned IOCTL_KDMKTONE = KDMKTONE;
+ unsigned IOCTL_KDSETLED = KDSETLED;
+ unsigned IOCTL_KDSETMODE = KDSETMODE;
+ unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+ unsigned IOCTL_KIOCSOUND = KIOCSOUND;
+ unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
+ unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+ unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+#if SANITIZER_LINUX
+// FIXME: We define those on Linux and Mac, but only check on Linux.
+COMPILER_CHECK(IOC_NRBITS == _IOC_NRBITS);
+COMPILER_CHECK(IOC_TYPEBITS == _IOC_TYPEBITS);
+COMPILER_CHECK(IOC_SIZEBITS == _IOC_SIZEBITS);
+COMPILER_CHECK(IOC_DIRBITS == _IOC_DIRBITS);
+COMPILER_CHECK(IOC_NRMASK == _IOC_NRMASK);
+COMPILER_CHECK(IOC_TYPEMASK == _IOC_TYPEMASK);
+COMPILER_CHECK(IOC_SIZEMASK == _IOC_SIZEMASK);
+COMPILER_CHECK(IOC_DIRMASK == _IOC_DIRMASK);
+COMPILER_CHECK(IOC_NRSHIFT == _IOC_NRSHIFT);
+COMPILER_CHECK(IOC_TYPESHIFT == _IOC_TYPESHIFT);
+COMPILER_CHECK(IOC_SIZESHIFT == _IOC_SIZESHIFT);
+COMPILER_CHECK(IOC_DIRSHIFT == _IOC_DIRSHIFT);
+COMPILER_CHECK(IOC_NONE == _IOC_NONE);
+COMPILER_CHECK(IOC_WRITE == _IOC_WRITE);
+COMPILER_CHECK(IOC_READ == _IOC_READ);
+COMPILER_CHECK(EVIOC_ABS_MAX == ABS_MAX);
+COMPILER_CHECK(EVIOC_EV_MAX == EV_MAX);
+COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
+COMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678));
+COMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678));
+COMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678));
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+#endif
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+#if SANITIZER_LINUX && (__ANDROID_API__ >= 21 || __GLIBC_PREREQ (2, 14))
+CHECK_TYPE_SIZE(mmsghdr);
+CHECK_SIZE_AND_OFFSET(mmsghdr, msg_hdr);
+CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len);
+#endif
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_ino);
+#if SANITIZER_MAC
+CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
+#elif SANITIZER_FREEBSD
+// There is no 'd_off' field on FreeBSD.
+#else
+CHECK_SIZE_AND_OFFSET(dirent, d_off);
+#endif
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
+CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent64, d_off);
+CHECK_SIZE_AND_OFFSET(dirent64, d_reclen);
+#endif
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+#if !defined(__s390x__) || __GLIBC_PREREQ (2, 20)
+// On s390x glibc 2.19 and earlier sa_flags was unsigned long, and sa_resv
+// didn't exist.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
+#endif
+#if SANITIZER_LINUX && (!SANITIZER_ANDROID || !SANITIZER_MIPS32)
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
+#endif
+
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(__sysctl_args);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, name);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, nlen);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, oldval);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, oldlenp);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, newval);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, newlen);
+
+CHECK_TYPE_SIZE(__kernel_uid_t);
+CHECK_TYPE_SIZE(__kernel_gid_t);
+
+#if SANITIZER_USES_UID16_SYSCALLS
+CHECK_TYPE_SIZE(__kernel_old_uid_t);
+CHECK_TYPE_SIZE(__kernel_old_gid_t);
+#endif
+
+CHECK_TYPE_SIZE(__kernel_off_t);
+CHECK_TYPE_SIZE(__kernel_loff_t);
+CHECK_TYPE_SIZE(__kernel_fd_set);
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+#endif
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(mntent);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_fsname);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_dir);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_type);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_opts);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_freq);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
+#endif
+
+CHECK_TYPE_SIZE(ether_addr);
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(ipc_perm);
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+# else
+CHECK_SIZE_AND_OFFSET(ipc_perm, __key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
+# endif
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+#if !SANITIZER_LINUX || __GLIBC_PREREQ (2, 31)
+/* glibc 2.30 and earlier provided 16-bit mode field instead of 32-bit
+ on many architectures. */
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+#endif
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+#endif
+
+CHECK_TYPE_SIZE(clock_t);
+
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(clockid_t);
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+# else
+COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) ==
+ sizeof(((ifaddrs *)nullptr)->ifa_ifu));
+COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==
+ offsetof(ifaddrs, ifa_ifu));
+# endif // SANITIZER_FREEBSD
+#else
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+#endif // SANITIZER_LINUX
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+#endif
+
+#if SANITIZER_LINUX
+COMPILER_CHECK(sizeof(__sanitizer_struct_mallinfo) == sizeof(struct mallinfo));
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+#endif
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+#if !SANITIZER_ANDROID
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+#endif
+
+#if SANITIZER_MAC
+CHECK_SIZE_AND_OFFSET(passwd, pw_change);
+CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
+CHECK_SIZE_AND_OFFSET(passwd, pw_class);
+#endif
+
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#if HAVE_RPC_XDR_H
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
+CHECK_SIZE_AND_OFFSET(FILE, _flags);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_backup_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);
+CHECK_SIZE_AND_OFFSET(FILE, _markers);
+CHECK_SIZE_AND_OFFSET(FILE, _chain);
+CHECK_SIZE_AND_OFFSET(FILE, _fileno);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
+CHECK_TYPE_SIZE(obstack);
+CHECK_SIZE_AND_OFFSET(obstack, chunk_size);
+CHECK_SIZE_AND_OFFSET(obstack, chunk);
+CHECK_SIZE_AND_OFFSET(obstack, object_base);
+CHECK_SIZE_AND_OFFSET(obstack, next_free);
+
+CHECK_TYPE_SIZE(cookie_io_functions_t);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+CHECK_TYPE_SIZE(sem_t);
+#endif
+
+#if SANITIZER_LINUX && defined(__arm__)
+COMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN);
+#endif
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.h b/lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.h
new file mode 100644
index 0000000000..658b0abaec
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -0,0 +1,1453 @@
+//===-- sanitizer_platform_limits_posix.h ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
+#define SANITIZER_PLATFORM_LIMITS_POSIX_H
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+#if defined(__sparc__)
+// FIXME: This can't be included from tsan which does not support sparc yet.
+#include "sanitizer_glibc_version.h"
+#endif
+
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
+
+namespace __sanitizer {
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+#if !SANITIZER_IOS
+extern unsigned struct_stat64_sz;
+#endif
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_stack_t_sz;
+extern unsigned struct_sched_param_sz;
+extern unsigned struct_statfs64_sz;
+extern unsigned struct_regex_sz;
+extern unsigned struct_regmatch_sz;
+
+#if !SANITIZER_ANDROID
+extern unsigned struct_fstab_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+extern unsigned ucontext_t_sz;
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX
+
+#if defined(__x86_64__)
+const unsigned struct_kernel_stat_sz = 144;
+const unsigned struct_kernel_stat64_sz = 0;
+#elif defined(__i386__)
+const unsigned struct_kernel_stat_sz = 64;
+const unsigned struct_kernel_stat64_sz = 96;
+#elif defined(__arm__)
+const unsigned struct_kernel_stat_sz = 64;
+const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__aarch64__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__powerpc__) && !defined(__powerpc64__)
+const unsigned struct_kernel_stat_sz = 72;
+const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__powerpc64__)
+const unsigned struct_kernel_stat_sz = 144;
+const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__mips__)
+const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
+ ? FIRST_32_SECOND_64(104, 128)
+ : FIRST_32_SECOND_64(160, 216);
+const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__s390__) && !defined(__s390x__)
+const unsigned struct_kernel_stat_sz = 64;
+const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__s390x__)
+const unsigned struct_kernel_stat_sz = 144;
+const unsigned struct_kernel_stat64_sz = 0;
+#elif defined(__sparc__) && defined(__arch64__)
+const unsigned struct___old_kernel_stat_sz = 0;
+const unsigned struct_kernel_stat_sz = 104;
+const unsigned struct_kernel_stat64_sz = 144;
+#elif defined(__sparc__) && !defined(__arch64__)
+const unsigned struct___old_kernel_stat_sz = 0;
+const unsigned struct_kernel_stat_sz = 64;
+const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__riscv) && __riscv_xlen == 64
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 104;
+#endif
+struct __sanitizer_perf_event_attr {
+ unsigned type;
+ unsigned size;
+ // More fields that vary with the kernel version.
+};
+
+extern unsigned struct_epoll_event_sz;
+extern unsigned struct_sysinfo_sz;
+extern unsigned __user_cap_header_struct_sz;
+extern unsigned __user_cap_data_struct_sz;
+extern unsigned struct_new_utsname_sz;
+extern unsigned struct_old_utsname_sz;
+extern unsigned struct_oldold_utsname_sz;
+
+const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+
+#if defined(__powerpc64__) || defined(__s390__)
+const unsigned struct___old_kernel_stat_sz = 0;
+#elif !defined(__sparc__)
+const unsigned struct___old_kernel_stat_sz = 32;
+#endif
+
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+extern unsigned struct_timespec_sz;
+
+struct __sanitizer_iocb {
+ u64 aio_data;
+ u32 aio_key_or_aio_reserved1; // Simply crazy.
+ u32 aio_reserved1_or_aio_key; // Luckily, we don't need these.
+ u16 aio_lio_opcode;
+ s16 aio_reqprio;
+ u32 aio_fildes;
+ u64 aio_buf;
+ u64 aio_nbytes;
+ s64 aio_offset;
+ u64 aio_reserved2;
+ u64 aio_reserved3;
+};
+
+struct __sanitizer_io_event {
+ u64 data;
+ u64 obj;
+ u64 res;
+ u64 res2;
+};
+
+const unsigned iocb_cmd_pread = 0;
+const unsigned iocb_cmd_pwrite = 1;
+const unsigned iocb_cmd_preadv = 7;
+const unsigned iocb_cmd_pwritev = 8;
+
+struct __sanitizer___sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ uptr *oldlenp;
+ void *newval;
+ uptr newlen;
+ unsigned long ___unused[4];
+};
+
+const unsigned old_sigset_t_sz = sizeof(unsigned long);
+
+struct __sanitizer_sem_t {
+#if SANITIZER_ANDROID && defined(_LP64)
+ int data[4];
+#elif SANITIZER_ANDROID && !defined(_LP64)
+ int data;
+#elif SANITIZER_LINUX
+ uptr data[4];
+#endif
+};
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_ANDROID
+struct __sanitizer_struct_mallinfo {
+ uptr v[10];
+};
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer_struct_mallinfo {
+ int v[10];
+};
+
+extern unsigned struct_ustat_sz;
+extern unsigned struct_rlimit64_sz;
+extern unsigned struct_statvfs64_sz;
+
+struct __sanitizer_ipc_perm {
+ int __key;
+ int uid;
+ int gid;
+ int cuid;
+ int cgid;
+#ifdef __powerpc__
+ unsigned mode;
+ unsigned __seq;
+ u64 __unused1;
+ u64 __unused2;
+#elif defined(__sparc__)
+ unsigned mode;
+ unsigned short __pad2;
+ unsigned short __seq;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
+#else
+ unsigned int mode;
+ unsigned short __seq;
+ unsigned short __pad2;
+#if defined(__x86_64__) && !defined(_LP64)
+ u64 __unused1;
+ u64 __unused2;
+#else
+ unsigned long __unused1;
+ unsigned long __unused2;
+#endif
+#endif
+};
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+#if defined(__sparc__)
+#if !defined(__arch64__)
+ u32 __pad1;
+#endif
+ long shm_atime;
+#if !defined(__arch64__)
+ u32 __pad2;
+#endif
+ long shm_dtime;
+#if !defined(__arch64__)
+ u32 __pad3;
+#endif
+ long shm_ctime;
+ uptr shm_segsz;
+ int shm_cpid;
+ int shm_lpid;
+ unsigned long shm_nattch;
+ unsigned long __glibc_reserved1;
+ unsigned long __glibc_reserved2;
+#else
+#ifndef __powerpc__
+ uptr shm_segsz;
+#elif !defined(__powerpc64__)
+ uptr __unused0;
+#endif
+#if defined(__x86_64__) && !defined(_LP64)
+ u64 shm_atime;
+ u64 shm_dtime;
+ u64 shm_ctime;
+#else
+ uptr shm_atime;
+#if !defined(_LP64) && !defined(__mips__)
+ uptr __unused1;
+#endif
+ uptr shm_dtime;
+#if !defined(_LP64) && !defined(__mips__)
+ uptr __unused2;
+#endif
+ uptr shm_ctime;
+#if !defined(_LP64) && !defined(__mips__)
+ uptr __unused3;
+#endif
+#endif
+#ifdef __powerpc__
+ uptr shm_segsz;
+#endif
+ int shm_cpid;
+ int shm_lpid;
+#if defined(__x86_64__) && !defined(_LP64)
+ u64 shm_nattch;
+ u64 __unused4;
+ u64 __unused5;
+#else
+ uptr shm_nattch;
+ uptr __unused4;
+ uptr __unused5;
+#endif
+#endif
+};
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern unsigned struct_msqid_ds_sz;
+extern unsigned struct_mq_attr_sz;
+extern unsigned struct_timex_sz;
+extern unsigned struct_statvfs_sz;
+extern unsigned struct_crypt_data_sz;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+#if !SANITIZER_ANDROID
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ // This is a union on Linux.
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+};
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_MAC
+typedef unsigned long __sanitizer_pthread_key_t;
+#else
+typedef unsigned __sanitizer_pthread_key_t;
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+
+struct __sanitizer_XDR {
+ int x_op;
+ void *x_ops;
+ uptr x_public;
+ uptr x_private;
+ uptr x_base;
+ unsigned x_handy;
+};
+
+const int __sanitizer_XDR_ENCODE = 0;
+const int __sanitizer_XDR_DECODE = 1;
+const int __sanitizer_XDR_FREE = 2;
+#endif
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+#if SANITIZER_MAC
+ long pw_change;
+ char *pw_class;
+#endif
+#if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32))
+ char *pw_gecos;
+#endif
+ char *pw_dir;
+ char *pw_shell;
+#if SANITIZER_MAC
+ long pw_expire;
+#endif
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+#if defined(__x86_64__) && !defined(_LP64)
+typedef long long __sanitizer_time_t;
+#else
+typedef long __sanitizer_time_t;
+#endif
+
+typedef long __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+};
+
+#if SANITIZER_LINUX
+struct __sanitizer_mntent {
+ char *mnt_fsname;
+ char *mnt_dir;
+ char *mnt_type;
+ char *mnt_opts;
+ int mnt_freq;
+ int mnt_passno;
+};
+
+struct __sanitizer_file_handle {
+ unsigned int handle_bytes;
+ int handle_type;
+ unsigned char f_handle[1]; // variable sized
+};
+#endif
+
+#if SANITIZER_MAC
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+#else
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ uptr msg_iovlen;
+ void *msg_control;
+ uptr msg_controllen;
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ uptr cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+#endif
+
+#if SANITIZER_LINUX
+struct __sanitizer_mmsghdr {
+ __sanitizer_msghdr msg_hdr;
+ unsigned int msg_len;
+};
+#endif
+
+#if SANITIZER_MAC
+struct __sanitizer_dirent {
+ unsigned long long d_ino;
+ unsigned long long d_seekoff;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#elif SANITIZER_ANDROID || defined(__x86_64__)
+struct __sanitizer_dirent {
+ unsigned long long d_ino;
+ unsigned long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#else
+struct __sanitizer_dirent {
+ uptr d_ino;
+ uptr d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer_dirent64 {
+ unsigned long long d_ino;
+ unsigned long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#endif
+
+#if defined(__x86_64__) && !defined(_LP64)
+typedef long long __sanitizer_clock_t;
+#else
+typedef long __sanitizer_clock_t;
+#endif
+
+#if SANITIZER_LINUX
+typedef int __sanitizer_clockid_t;
+#endif
+
+#if SANITIZER_LINUX
+#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+ defined(__mips__)
+typedef unsigned __sanitizer___kernel_uid_t;
+typedef unsigned __sanitizer___kernel_gid_t;
+#else
+typedef unsigned short __sanitizer___kernel_uid_t;
+typedef unsigned short __sanitizer___kernel_gid_t;
+#endif
+#if defined(__x86_64__) && !defined(_LP64)
+typedef long long __sanitizer___kernel_off_t;
+#else
+typedef long __sanitizer___kernel_off_t;
+#endif
+
+#if defined(__powerpc__) || defined(__mips__)
+typedef unsigned int __sanitizer___kernel_old_uid_t;
+typedef unsigned int __sanitizer___kernel_old_gid_t;
+#else
+typedef unsigned short __sanitizer___kernel_old_uid_t;
+typedef unsigned short __sanitizer___kernel_old_gid_t;
+#endif
+
+typedef long long __sanitizer___kernel_loff_t;
+typedef struct {
+ unsigned long fds_bits[1024 / (8 * sizeof(long))];
+} __sanitizer___kernel_fd_set;
+#endif
+
+// This thing depends on the platform. We are only interested in the upper
+// limit. Verified with a compiler assert in .cpp.
+union __sanitizer_pthread_attr_t {
+ char size[128];
+ void *align;
+};
+
+#if SANITIZER_ANDROID
+# if SANITIZER_MIPS
+typedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)];
+# else
+typedef unsigned long __sanitizer_sigset_t;
+# endif
+#elif SANITIZER_MAC
+typedef unsigned __sanitizer_sigset_t;
+#elif SANITIZER_LINUX
+struct __sanitizer_sigset_t {
+ // The size is determined by looking at sizeof of real sigset_t on linux.
+ uptr val[128 / sizeof(uptr)];
+};
+#endif
+
+struct __sanitizer_siginfo {
+ // The size is determined by looking at sizeof of real siginfo_t on linux.
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
+ __sanitizer_siginfo *siginfo,
+ void *uctx);
+
+// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64)
+struct __sanitizer_sigaction {
+ unsigned sa_flags;
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+ void (*sa_restorer)();
+};
+#elif SANITIZER_ANDROID && SANITIZER_MIPS32 // check this before WORDSIZE == 32
+struct __sanitizer_sigaction {
+ unsigned sa_flags;
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+};
+#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)
+struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+ uptr sa_flags;
+ void (*sa_restorer)();
+};
+#else // !SANITIZER_ANDROID
+struct __sanitizer_sigaction {
+#if defined(__mips__) && !SANITIZER_FREEBSD
+ unsigned int sa_flags;
+#endif
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
+#if defined(__s390x__)
+ int sa_resv;
+#else
+ __sanitizer_sigset_t sa_mask;
+#endif
+#ifndef __mips__
+#if defined(__sparc__)
+#if __GLIBC_PREREQ (2, 20)
+ // On sparc glibc 2.19 and earlier sa_flags was unsigned long.
+#if defined(__arch64__)
+ // To maintain ABI compatibility on sparc64 when switching to an int,
+ // __glibc_reserved0 was added.
+ int __glibc_reserved0;
+#endif
+ int sa_flags;
+#else
+ unsigned long sa_flags;
+#endif
+#else
+ int sa_flags;
+#endif
+#endif
+#endif
+#if SANITIZER_LINUX
+ void (*sa_restorer)();
+#endif
+#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)
+ int sa_resv[1];
+#endif
+#if defined(__s390x__)
+ __sanitizer_sigset_t sa_mask;
+#endif
+};
+#endif // !SANITIZER_ANDROID
+
+#if defined(__mips__)
+struct __sanitizer_kernel_sigset_t {
+ uptr sig[2];
+};
+#else
+struct __sanitizer_kernel_sigset_t {
+ u8 sig[8];
+};
+#endif
+
+// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_MIPS
+struct __sanitizer_kernel_sigaction_t {
+ unsigned int sa_flags;
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);
+ };
+ __sanitizer_kernel_sigset_t sa_mask;
+ void (*sa_restorer)(void);
+};
+#else
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+#endif
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+#if SANITIZER_LINUX
+extern int e_tabsz;
+#endif
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+#if SANITIZER_LINUX
+struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+#endif
+
+struct __sanitizer_protoent {
+ char *p_name;
+ char **p_aliases;
+ int p_proto;
+};
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+#if SANITIZER_ANDROID || SANITIZER_MAC
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+#else // LINUX
+ unsigned ai_addrlen;
+ void *ai_addr;
+ char *ai_canonname;
+#endif
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#if SANITIZER_ANDROID || SANITIZER_MAC
+typedef unsigned __sanitizer_nfds_t;
+#else
+typedef unsigned long __sanitizer_nfds_t;
+#endif
+
+#if !SANITIZER_ANDROID
+# if SANITIZER_LINUX
+struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ char **gl_pathv;
+ uptr gl_offs;
+ int gl_flags;
+
+ void (*gl_closedir)(void *dirp);
+ void *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void *);
+ int (*gl_stat)(const char *, void *);
+};
+# endif // SANITIZER_LINUX
+
+# if SANITIZER_LINUX
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+# endif
+#endif // !SANITIZER_ANDROID
+
+extern unsigned path_max;
+
+struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+};
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer_FILE {
+ int _flags;
+ char *_IO_read_ptr;
+ char *_IO_read_end;
+ char *_IO_read_base;
+ char *_IO_write_base;
+ char *_IO_write_ptr;
+ char *_IO_write_end;
+ char *_IO_buf_base;
+ char *_IO_buf_end;
+ char *_IO_save_base;
+ char *_IO_backup_base;
+ char *_IO_save_end;
+ void *_markers;
+ __sanitizer_FILE *_chain;
+ int _fileno;
+};
+# define SANITIZER_HAS_STRUCT_FILE 1
+#else
+typedef void __sanitizer_FILE;
+# define SANITIZER_HAS_STRUCT_FILE 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
+extern unsigned struct_user_regs_struct_sz;
+extern unsigned struct_user_fpregs_struct_sz;
+extern unsigned struct_user_fpxregs_struct_sz;
+extern unsigned struct_user_vfpregs_struct_sz;
+
+extern int ptrace_peektext;
+extern int ptrace_peekdata;
+extern int ptrace_peekuser;
+extern int ptrace_getregs;
+extern int ptrace_setregs;
+extern int ptrace_getfpregs;
+extern int ptrace_setfpregs;
+extern int ptrace_getfpxregs;
+extern int ptrace_setfpxregs;
+extern int ptrace_getvfpregs;
+extern int ptrace_setvfpregs;
+extern int ptrace_getsiginfo;
+extern int ptrace_setsiginfo;
+extern int ptrace_getregset;
+extern int ptrace_setregset;
+extern int ptrace_geteventmsg;
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern unsigned struct_shminfo_sz;
+extern unsigned struct_shm_info_sz;
+extern int shmctl_ipc_stat;
+extern int shmctl_ipc_info;
+extern int shmctl_shm_info;
+extern int shmctl_shm_stat;
+#endif
+
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+extern unsigned struct_utmp_sz;
+#endif
+#if !SANITIZER_ANDROID
+extern unsigned struct_utmpx_sz;
+#endif
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+#if SANITIZER_MAC
+} __attribute__((packed));
+#else
+};
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer__obstack_chunk {
+ char *limit;
+ struct __sanitizer__obstack_chunk *prev;
+};
+
+struct __sanitizer_obstack {
+ long chunk_size;
+ struct __sanitizer__obstack_chunk *chunk;
+ char *object_base;
+ char *next_free;
+ uptr more_fields[7];
+};
+
+typedef uptr (*__sanitizer_cookie_io_read)(void *cookie, char *buf, uptr size);
+typedef uptr (*__sanitizer_cookie_io_write)(void *cookie, const char *buf,
+ uptr size);
+typedef int (*__sanitizer_cookie_io_seek)(void *cookie, u64 *offset,
+ int whence);
+typedef int (*__sanitizer_cookie_io_close)(void *cookie);
+
+struct __sanitizer_cookie_io_functions_t {
+ __sanitizer_cookie_io_read read;
+ __sanitizer_cookie_io_write write;
+ __sanitizer_cookie_io_seek seek;
+ __sanitizer_cookie_io_close close;
+};
+#endif
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \
+ defined(__sparc__)
+#define IOC_SIZEBITS 13
+#define IOC_DIRBITS 3
+#define IOC_NONE 1U
+#define IOC_WRITE 4U
+#define IOC_READ 2U
+#else
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#endif
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#if defined(IOC_DIRMASK)
+#undef IOC_DIRMASK
+#endif
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
+#if defined(__sparc__)
+// In sparc the 14 bits SIZE field overlaps with the
+// least significant bit of DIR, so either IOC_READ or
+// IOC_WRITE shall be 1 in order to get a non-zero SIZE.
+#define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
+#else
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+#endif
+
+extern unsigned struct_ifreq_sz;
+extern unsigned struct_termios_sz;
+extern unsigned struct_winsize_sz;
+
+#if SANITIZER_LINUX
+extern unsigned struct_arpreq_sz;
+extern unsigned struct_cdrom_msf_sz;
+extern unsigned struct_cdrom_multisession_sz;
+extern unsigned struct_cdrom_read_audio_sz;
+extern unsigned struct_cdrom_subchnl_sz;
+extern unsigned struct_cdrom_ti_sz;
+extern unsigned struct_cdrom_tocentry_sz;
+extern unsigned struct_cdrom_tochdr_sz;
+extern unsigned struct_cdrom_volctrl_sz;
+extern unsigned struct_ff_effect_sz;
+extern unsigned struct_floppy_drive_params_sz;
+extern unsigned struct_floppy_drive_struct_sz;
+extern unsigned struct_floppy_fdc_state_sz;
+extern unsigned struct_floppy_max_errors_sz;
+extern unsigned struct_floppy_raw_cmd_sz;
+extern unsigned struct_floppy_struct_sz;
+extern unsigned struct_floppy_write_errors_sz;
+extern unsigned struct_format_descr_sz;
+extern unsigned struct_hd_driveid_sz;
+extern unsigned struct_hd_geometry_sz;
+extern unsigned struct_input_absinfo_sz;
+extern unsigned struct_input_id_sz;
+extern unsigned struct_mtpos_sz;
+extern unsigned struct_termio_sz;
+extern unsigned struct_vt_consize_sz;
+extern unsigned struct_vt_sizes_sz;
+extern unsigned struct_vt_stat_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+extern unsigned struct_copr_buffer_sz;
+extern unsigned struct_copr_debug_buf_sz;
+extern unsigned struct_copr_msg_sz;
+extern unsigned struct_midi_info_sz;
+extern unsigned struct_mtget_sz;
+extern unsigned struct_mtop_sz;
+extern unsigned struct_rtentry_sz;
+extern unsigned struct_sbi_instrument_sz;
+extern unsigned struct_seq_event_rec_sz;
+extern unsigned struct_synth_info_sz;
+extern unsigned struct_vt_mode_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern unsigned struct_ax25_parms_struct_sz;
+extern unsigned struct_cyclades_monitor_sz;
+extern unsigned struct_input_keymap_entry_sz;
+extern unsigned struct_ipx_config_data_sz;
+extern unsigned struct_kbdiacrs_sz;
+extern unsigned struct_kbentry_sz;
+extern unsigned struct_kbkeycode_sz;
+extern unsigned struct_kbsentry_sz;
+extern unsigned struct_mtconfiginfo_sz;
+extern unsigned struct_nr_parms_struct_sz;
+extern unsigned struct_scc_modem_sz;
+extern unsigned struct_scc_stat_sz;
+extern unsigned struct_serial_multiport_struct_sz;
+extern unsigned struct_serial_struct_sz;
+extern unsigned struct_sockaddr_ax25_sz;
+extern unsigned struct_unimapdesc_sz;
+extern unsigned struct_unimapinit_sz;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+extern const unsigned long __sanitizer_bufsiz;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern unsigned struct_audio_buf_info_sz;
+extern unsigned struct_ppp_stats_sz;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+
+#if !SANITIZER_ANDROID && !SANITIZER_MAC
+extern unsigned struct_sioc_sg_req_sz;
+extern unsigned struct_sioc_vif_req_sz;
+#endif
+
+// ioctl request identifiers
+
+// A special value to mark ioctls that are not present on the target platform,
+// when it can not be determined without including any system headers.
+extern const unsigned IOCTL_NOT_PRESENT;
+
+extern unsigned IOCTL_FIOASYNC;
+extern unsigned IOCTL_FIOCLEX;
+extern unsigned IOCTL_FIOGETOWN;
+extern unsigned IOCTL_FIONBIO;
+extern unsigned IOCTL_FIONCLEX;
+extern unsigned IOCTL_FIOSETOWN;
+extern unsigned IOCTL_SIOCADDMULTI;
+extern unsigned IOCTL_SIOCATMARK;
+extern unsigned IOCTL_SIOCDELMULTI;
+extern unsigned IOCTL_SIOCGIFADDR;
+extern unsigned IOCTL_SIOCGIFBRDADDR;
+extern unsigned IOCTL_SIOCGIFCONF;
+extern unsigned IOCTL_SIOCGIFDSTADDR;
+extern unsigned IOCTL_SIOCGIFFLAGS;
+extern unsigned IOCTL_SIOCGIFMETRIC;
+extern unsigned IOCTL_SIOCGIFMTU;
+extern unsigned IOCTL_SIOCGIFNETMASK;
+extern unsigned IOCTL_SIOCGPGRP;
+extern unsigned IOCTL_SIOCSIFADDR;
+extern unsigned IOCTL_SIOCSIFBRDADDR;
+extern unsigned IOCTL_SIOCSIFDSTADDR;
+extern unsigned IOCTL_SIOCSIFFLAGS;
+extern unsigned IOCTL_SIOCSIFMETRIC;
+extern unsigned IOCTL_SIOCSIFMTU;
+extern unsigned IOCTL_SIOCSIFNETMASK;
+extern unsigned IOCTL_SIOCSPGRP;
+extern unsigned IOCTL_TIOCCONS;
+extern unsigned IOCTL_TIOCEXCL;
+extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCGPGRP;
+extern unsigned IOCTL_TIOCGWINSZ;
+extern unsigned IOCTL_TIOCMBIC;
+extern unsigned IOCTL_TIOCMBIS;
+extern unsigned IOCTL_TIOCMGET;
+extern unsigned IOCTL_TIOCMSET;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCNXCL;
+extern unsigned IOCTL_TIOCOUTQ;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSCTTY;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCSPGRP;
+extern unsigned IOCTL_TIOCSTI;
+extern unsigned IOCTL_TIOCSWINSZ;
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern unsigned IOCTL_SIOCGETSGCNT;
+extern unsigned IOCTL_SIOCGETVIFCNT;
+#endif
+#if SANITIZER_LINUX
+extern unsigned IOCTL_EVIOCGABS;
+extern unsigned IOCTL_EVIOCGBIT;
+extern unsigned IOCTL_EVIOCGEFFECTS;
+extern unsigned IOCTL_EVIOCGID;
+extern unsigned IOCTL_EVIOCGKEY;
+extern unsigned IOCTL_EVIOCGKEYCODE;
+extern unsigned IOCTL_EVIOCGLED;
+extern unsigned IOCTL_EVIOCGNAME;
+extern unsigned IOCTL_EVIOCGPHYS;
+extern unsigned IOCTL_EVIOCGRAB;
+extern unsigned IOCTL_EVIOCGREP;
+extern unsigned IOCTL_EVIOCGSND;
+extern unsigned IOCTL_EVIOCGSW;
+extern unsigned IOCTL_EVIOCGUNIQ;
+extern unsigned IOCTL_EVIOCGVERSION;
+extern unsigned IOCTL_EVIOCRMFF;
+extern unsigned IOCTL_EVIOCSABS;
+extern unsigned IOCTL_EVIOCSFF;
+extern unsigned IOCTL_EVIOCSKEYCODE;
+extern unsigned IOCTL_EVIOCSREP;
+extern unsigned IOCTL_BLKFLSBUF;
+extern unsigned IOCTL_BLKGETSIZE;
+extern unsigned IOCTL_BLKRAGET;
+extern unsigned IOCTL_BLKRASET;
+extern unsigned IOCTL_BLKROGET;
+extern unsigned IOCTL_BLKROSET;
+extern unsigned IOCTL_BLKRRPART;
+extern unsigned IOCTL_CDROMAUDIOBUFSIZ;
+extern unsigned IOCTL_CDROMEJECT;
+extern unsigned IOCTL_CDROMEJECT_SW;
+extern unsigned IOCTL_CDROMMULTISESSION;
+extern unsigned IOCTL_CDROMPAUSE;
+extern unsigned IOCTL_CDROMPLAYMSF;
+extern unsigned IOCTL_CDROMPLAYTRKIND;
+extern unsigned IOCTL_CDROMREADAUDIO;
+extern unsigned IOCTL_CDROMREADCOOKED;
+extern unsigned IOCTL_CDROMREADMODE1;
+extern unsigned IOCTL_CDROMREADMODE2;
+extern unsigned IOCTL_CDROMREADRAW;
+extern unsigned IOCTL_CDROMREADTOCENTRY;
+extern unsigned IOCTL_CDROMREADTOCHDR;
+extern unsigned IOCTL_CDROMRESET;
+extern unsigned IOCTL_CDROMRESUME;
+extern unsigned IOCTL_CDROMSEEK;
+extern unsigned IOCTL_CDROMSTART;
+extern unsigned IOCTL_CDROMSTOP;
+extern unsigned IOCTL_CDROMSUBCHNL;
+extern unsigned IOCTL_CDROMVOLCTRL;
+extern unsigned IOCTL_CDROMVOLREAD;
+extern unsigned IOCTL_CDROM_GET_UPC;
+extern unsigned IOCTL_FDCLRPRM;
+extern unsigned IOCTL_FDDEFPRM;
+extern unsigned IOCTL_FDFLUSH;
+extern unsigned IOCTL_FDFMTBEG;
+extern unsigned IOCTL_FDFMTEND;
+extern unsigned IOCTL_FDFMTTRK;
+extern unsigned IOCTL_FDGETDRVPRM;
+extern unsigned IOCTL_FDGETDRVSTAT;
+extern unsigned IOCTL_FDGETDRVTYP;
+extern unsigned IOCTL_FDGETFDCSTAT;
+extern unsigned IOCTL_FDGETMAXERRS;
+extern unsigned IOCTL_FDGETPRM;
+extern unsigned IOCTL_FDMSGOFF;
+extern unsigned IOCTL_FDMSGON;
+extern unsigned IOCTL_FDPOLLDRVSTAT;
+extern unsigned IOCTL_FDRAWCMD;
+extern unsigned IOCTL_FDRESET;
+extern unsigned IOCTL_FDSETDRVPRM;
+extern unsigned IOCTL_FDSETEMSGTRESH;
+extern unsigned IOCTL_FDSETMAXERRS;
+extern unsigned IOCTL_FDSETPRM;
+extern unsigned IOCTL_FDTWADDLE;
+extern unsigned IOCTL_FDWERRORCLR;
+extern unsigned IOCTL_FDWERRORGET;
+extern unsigned IOCTL_HDIO_DRIVE_CMD;
+extern unsigned IOCTL_HDIO_GETGEO;
+extern unsigned IOCTL_HDIO_GET_32BIT;
+extern unsigned IOCTL_HDIO_GET_DMA;
+extern unsigned IOCTL_HDIO_GET_IDENTITY;
+extern unsigned IOCTL_HDIO_GET_KEEPSETTINGS;
+extern unsigned IOCTL_HDIO_GET_MULTCOUNT;
+extern unsigned IOCTL_HDIO_GET_NOWERR;
+extern unsigned IOCTL_HDIO_GET_UNMASKINTR;
+extern unsigned IOCTL_HDIO_SET_32BIT;
+extern unsigned IOCTL_HDIO_SET_DMA;
+extern unsigned IOCTL_HDIO_SET_KEEPSETTINGS;
+extern unsigned IOCTL_HDIO_SET_MULTCOUNT;
+extern unsigned IOCTL_HDIO_SET_NOWERR;
+extern unsigned IOCTL_HDIO_SET_UNMASKINTR;
+extern unsigned IOCTL_MTIOCPOS;
+extern unsigned IOCTL_PPPIOCGASYNCMAP;
+extern unsigned IOCTL_PPPIOCGDEBUG;
+extern unsigned IOCTL_PPPIOCGFLAGS;
+extern unsigned IOCTL_PPPIOCGUNIT;
+extern unsigned IOCTL_PPPIOCGXASYNCMAP;
+extern unsigned IOCTL_PPPIOCSASYNCMAP;
+extern unsigned IOCTL_PPPIOCSDEBUG;
+extern unsigned IOCTL_PPPIOCSFLAGS;
+extern unsigned IOCTL_PPPIOCSMAXCID;
+extern unsigned IOCTL_PPPIOCSMRU;
+extern unsigned IOCTL_PPPIOCSXASYNCMAP;
+extern unsigned IOCTL_SIOCDARP;
+extern unsigned IOCTL_SIOCDRARP;
+extern unsigned IOCTL_SIOCGARP;
+extern unsigned IOCTL_SIOCGIFENCAP;
+extern unsigned IOCTL_SIOCGIFHWADDR;
+extern unsigned IOCTL_SIOCGIFMAP;
+extern unsigned IOCTL_SIOCGIFMEM;
+extern unsigned IOCTL_SIOCGIFNAME;
+extern unsigned IOCTL_SIOCGIFSLAVE;
+extern unsigned IOCTL_SIOCGRARP;
+extern unsigned IOCTL_SIOCGSTAMP;
+extern unsigned IOCTL_SIOCSARP;
+extern unsigned IOCTL_SIOCSIFENCAP;
+extern unsigned IOCTL_SIOCSIFHWADDR;
+extern unsigned IOCTL_SIOCSIFLINK;
+extern unsigned IOCTL_SIOCSIFMAP;
+extern unsigned IOCTL_SIOCSIFMEM;
+extern unsigned IOCTL_SIOCSIFSLAVE;
+extern unsigned IOCTL_SIOCSRARP;
+extern unsigned IOCTL_SNDCTL_COPR_HALT;
+extern unsigned IOCTL_SNDCTL_COPR_LOAD;
+extern unsigned IOCTL_SNDCTL_COPR_RCODE;
+extern unsigned IOCTL_SNDCTL_COPR_RCVMSG;
+extern unsigned IOCTL_SNDCTL_COPR_RDATA;
+extern unsigned IOCTL_SNDCTL_COPR_RESET;
+extern unsigned IOCTL_SNDCTL_COPR_RUN;
+extern unsigned IOCTL_SNDCTL_COPR_SENDMSG;
+extern unsigned IOCTL_SNDCTL_COPR_WCODE;
+extern unsigned IOCTL_SNDCTL_COPR_WDATA;
+extern unsigned IOCTL_TCFLSH;
+extern unsigned IOCTL_TCGETA;
+extern unsigned IOCTL_TCGETS;
+extern unsigned IOCTL_TCSBRK;
+extern unsigned IOCTL_TCSBRKP;
+extern unsigned IOCTL_TCSETA;
+extern unsigned IOCTL_TCSETAF;
+extern unsigned IOCTL_TCSETAW;
+extern unsigned IOCTL_TCSETS;
+extern unsigned IOCTL_TCSETSF;
+extern unsigned IOCTL_TCSETSW;
+extern unsigned IOCTL_TCXONC;
+extern unsigned IOCTL_TIOCGLCKTRMIOS;
+extern unsigned IOCTL_TIOCGSOFTCAR;
+extern unsigned IOCTL_TIOCINQ;
+extern unsigned IOCTL_TIOCLINUX;
+extern unsigned IOCTL_TIOCSERCONFIG;
+extern unsigned IOCTL_TIOCSERGETLSR;
+extern unsigned IOCTL_TIOCSERGWILD;
+extern unsigned IOCTL_TIOCSERSWILD;
+extern unsigned IOCTL_TIOCSLCKTRMIOS;
+extern unsigned IOCTL_TIOCSSOFTCAR;
+extern unsigned IOCTL_VT_DISALLOCATE;
+extern unsigned IOCTL_VT_GETSTATE;
+extern unsigned IOCTL_VT_RESIZE;
+extern unsigned IOCTL_VT_RESIZEX;
+extern unsigned IOCTL_VT_SENDSIG;
+extern unsigned IOCTL_MTIOCGET;
+extern unsigned IOCTL_MTIOCTOP;
+extern unsigned IOCTL_SIOCADDRT;
+extern unsigned IOCTL_SIOCDELRT;
+extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
+extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
+extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
+extern unsigned IOCTL_SNDCTL_DSP_POST;
+extern unsigned IOCTL_SNDCTL_DSP_RESET;
+extern unsigned IOCTL_SNDCTL_DSP_SETFMT;
+extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;
+extern unsigned IOCTL_SNDCTL_DSP_SPEED;
+extern unsigned IOCTL_SNDCTL_DSP_STEREO;
+extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;
+extern unsigned IOCTL_SNDCTL_DSP_SYNC;
+extern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE;
+extern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR;
+extern unsigned IOCTL_SNDCTL_MIDI_INFO;
+extern unsigned IOCTL_SNDCTL_MIDI_PRETIME;
+extern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE;
+extern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT;
+extern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT;
+extern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS;
+extern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS;
+extern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND;
+extern unsigned IOCTL_SNDCTL_SEQ_PANIC;
+extern unsigned IOCTL_SNDCTL_SEQ_PERCMODE;
+extern unsigned IOCTL_SNDCTL_SEQ_RESET;
+extern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES;
+extern unsigned IOCTL_SNDCTL_SEQ_SYNC;
+extern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI;
+extern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD;
+extern unsigned IOCTL_SNDCTL_SYNTH_INFO;
+extern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL;
+extern unsigned IOCTL_SNDCTL_TMR_CONTINUE;
+extern unsigned IOCTL_SNDCTL_TMR_METRONOME;
+extern unsigned IOCTL_SNDCTL_TMR_SELECT;
+extern unsigned IOCTL_SNDCTL_TMR_SOURCE;
+extern unsigned IOCTL_SNDCTL_TMR_START;
+extern unsigned IOCTL_SNDCTL_TMR_STOP;
+extern unsigned IOCTL_SNDCTL_TMR_TEMPO;
+extern unsigned IOCTL_SNDCTL_TMR_TIMEBASE;
+extern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM;
+extern unsigned IOCTL_SOUND_MIXER_READ_BASS;
+extern unsigned IOCTL_SOUND_MIXER_READ_CAPS;
+extern unsigned IOCTL_SOUND_MIXER_READ_CD;
+extern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK;
+extern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE;
+extern unsigned IOCTL_SOUND_MIXER_READ_IGAIN;
+extern unsigned IOCTL_SOUND_MIXER_READ_IMIX;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE1;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE2;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE3;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE;
+extern unsigned IOCTL_SOUND_MIXER_READ_LOUD;
+extern unsigned IOCTL_SOUND_MIXER_READ_MIC;
+extern unsigned IOCTL_SOUND_MIXER_READ_MUTE;
+extern unsigned IOCTL_SOUND_MIXER_READ_OGAIN;
+extern unsigned IOCTL_SOUND_MIXER_READ_PCM;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECLEV;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECMASK;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECSRC;
+extern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER;
+extern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS;
+extern unsigned IOCTL_SOUND_MIXER_READ_SYNTH;
+extern unsigned IOCTL_SOUND_MIXER_READ_TREBLE;
+extern unsigned IOCTL_SOUND_MIXER_READ_VOLUME;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_BASS;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_CD;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_MIC;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_PCM;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME;
+extern unsigned IOCTL_SOUND_PCM_READ_BITS;
+extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_READ_FILTER;
+extern unsigned IOCTL_SOUND_PCM_READ_RATE;
+extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
+extern unsigned IOCTL_VT_ACTIVATE;
+extern unsigned IOCTL_VT_GETMODE;
+extern unsigned IOCTL_VT_OPENQRY;
+extern unsigned IOCTL_VT_RELDISP;
+extern unsigned IOCTL_VT_SETMODE;
+extern unsigned IOCTL_VT_WAITACTIVE;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern unsigned IOCTL_CYGETDEFTHRESH;
+extern unsigned IOCTL_CYGETDEFTIMEOUT;
+extern unsigned IOCTL_CYGETMON;
+extern unsigned IOCTL_CYGETTHRESH;
+extern unsigned IOCTL_CYGETTIMEOUT;
+extern unsigned IOCTL_CYSETDEFTHRESH;
+extern unsigned IOCTL_CYSETDEFTIMEOUT;
+extern unsigned IOCTL_CYSETTHRESH;
+extern unsigned IOCTL_CYSETTIMEOUT;
+extern unsigned IOCTL_EQL_EMANCIPATE;
+extern unsigned IOCTL_EQL_ENSLAVE;
+extern unsigned IOCTL_EQL_GETMASTRCFG;
+extern unsigned IOCTL_EQL_GETSLAVECFG;
+extern unsigned IOCTL_EQL_SETMASTRCFG;
+extern unsigned IOCTL_EQL_SETSLAVECFG;
+extern unsigned IOCTL_EVIOCGKEYCODE_V2;
+extern unsigned IOCTL_EVIOCGPROP;
+extern unsigned IOCTL_EVIOCSKEYCODE_V2;
+extern unsigned IOCTL_FS_IOC_GETFLAGS;
+extern unsigned IOCTL_FS_IOC_GETVERSION;
+extern unsigned IOCTL_FS_IOC_SETFLAGS;
+extern unsigned IOCTL_FS_IOC_SETVERSION;
+extern unsigned IOCTL_GIO_CMAP;
+extern unsigned IOCTL_GIO_FONT;
+extern unsigned IOCTL_GIO_UNIMAP;
+extern unsigned IOCTL_GIO_UNISCRNMAP;
+extern unsigned IOCTL_KDADDIO;
+extern unsigned IOCTL_KDDELIO;
+extern unsigned IOCTL_KDGETKEYCODE;
+extern unsigned IOCTL_KDGKBDIACR;
+extern unsigned IOCTL_KDGKBENT;
+extern unsigned IOCTL_KDGKBLED;
+extern unsigned IOCTL_KDGKBMETA;
+extern unsigned IOCTL_KDGKBSENT;
+extern unsigned IOCTL_KDMAPDISP;
+extern unsigned IOCTL_KDSETKEYCODE;
+extern unsigned IOCTL_KDSIGACCEPT;
+extern unsigned IOCTL_KDSKBDIACR;
+extern unsigned IOCTL_KDSKBENT;
+extern unsigned IOCTL_KDSKBLED;
+extern unsigned IOCTL_KDSKBMETA;
+extern unsigned IOCTL_KDSKBSENT;
+extern unsigned IOCTL_KDUNMAPDISP;
+extern unsigned IOCTL_LPABORT;
+extern unsigned IOCTL_LPABORTOPEN;
+extern unsigned IOCTL_LPCAREFUL;
+extern unsigned IOCTL_LPCHAR;
+extern unsigned IOCTL_LPGETIRQ;
+extern unsigned IOCTL_LPGETSTATUS;
+extern unsigned IOCTL_LPRESET;
+extern unsigned IOCTL_LPSETIRQ;
+extern unsigned IOCTL_LPTIME;
+extern unsigned IOCTL_LPWAIT;
+extern unsigned IOCTL_MTIOCGETCONFIG;
+extern unsigned IOCTL_MTIOCSETCONFIG;
+extern unsigned IOCTL_PIO_CMAP;
+extern unsigned IOCTL_PIO_FONT;
+extern unsigned IOCTL_PIO_UNIMAP;
+extern unsigned IOCTL_PIO_UNIMAPCLR;
+extern unsigned IOCTL_PIO_UNISCRNMAP;
+extern unsigned IOCTL_SCSI_IOCTL_GET_IDLUN;
+extern unsigned IOCTL_SCSI_IOCTL_PROBE_HOST;
+extern unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE;
+extern unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE;
+extern unsigned IOCTL_SIOCAIPXITFCRT;
+extern unsigned IOCTL_SIOCAIPXPRISLT;
+extern unsigned IOCTL_SIOCAX25ADDUID;
+extern unsigned IOCTL_SIOCAX25DELUID;
+extern unsigned IOCTL_SIOCAX25GETPARMS;
+extern unsigned IOCTL_SIOCAX25GETUID;
+extern unsigned IOCTL_SIOCAX25NOUID;
+extern unsigned IOCTL_SIOCAX25SETPARMS;
+extern unsigned IOCTL_SIOCDEVPLIP;
+extern unsigned IOCTL_SIOCIPXCFGDATA;
+extern unsigned IOCTL_SIOCNRDECOBS;
+extern unsigned IOCTL_SIOCNRGETPARMS;
+extern unsigned IOCTL_SIOCNRRTCTL;
+extern unsigned IOCTL_SIOCNRSETPARMS;
+extern unsigned IOCTL_SNDCTL_DSP_GETISPACE;
+extern unsigned IOCTL_SNDCTL_DSP_GETOSPACE;
+extern unsigned IOCTL_TIOCGSERIAL;
+extern unsigned IOCTL_TIOCSERGETMULTI;
+extern unsigned IOCTL_TIOCSERSETMULTI;
+extern unsigned IOCTL_TIOCSSERIAL;
+extern unsigned IOCTL_GIO_SCRNMAP;
+extern unsigned IOCTL_KDDISABIO;
+extern unsigned IOCTL_KDENABIO;
+extern unsigned IOCTL_KDGETLED;
+extern unsigned IOCTL_KDGETMODE;
+extern unsigned IOCTL_KDGKBMODE;
+extern unsigned IOCTL_KDGKBTYPE;
+extern unsigned IOCTL_KDMKTONE;
+extern unsigned IOCTL_KDSETLED;
+extern unsigned IOCTL_KDSETMODE;
+extern unsigned IOCTL_KDSKBMODE;
+extern unsigned IOCTL_KIOCSOUND;
+extern unsigned IOCTL_PIO_SCRNMAP;
+#endif
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and struct at the same time,
+// and thus requires explicit "struct" in sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#define SIGACTION_SYMNAME sigaction
+
+#endif // SANITIZER_LINUX || SANITIZER_MAC
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.cpp b/lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.cpp
new file mode 100644
index 0000000000..6ec1a1bdd1
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.cpp
@@ -0,0 +1,366 @@
+//===-- sanitizer_platform_limits_solaris.cpp -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific Solaris data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_SOLARIS
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <link.h>
+#include <net/if.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/ip_mroute.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <rpc/xdr.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/ethernet.h>
+#include <sys/filio.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/mtio.h>
+#include <sys/ptyvar.h>
+#include <sys/resource.h>
+#include <sys/shm.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <termios.h>
+#include <time.h>
+#include <utmp.h>
+#include <utmpx.h>
+#include <wchar.h>
+#include <wordexp.h>
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_solaris.h"
+
+namespace __sanitizer {
+ unsigned struct_utsname_sz = sizeof(struct utsname);
+ unsigned struct_stat_sz = sizeof(struct stat);
+ unsigned struct_stat64_sz = sizeof(struct stat64);
+ unsigned struct_rusage_sz = sizeof(struct rusage);
+ unsigned struct_tm_sz = sizeof(struct tm);
+ unsigned struct_passwd_sz = sizeof(struct passwd);
+ unsigned struct_group_sz = sizeof(struct group);
+ unsigned siginfo_t_sz = sizeof(siginfo_t);
+ unsigned struct_sigaction_sz = sizeof(struct sigaction);
+ unsigned struct_stack_t_sz = sizeof(stack_t);
+ unsigned struct_itimerval_sz = sizeof(struct itimerval);
+ unsigned pthread_t_sz = sizeof(pthread_t);
+ unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+ unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+ unsigned pid_t_sz = sizeof(pid_t);
+ unsigned timeval_sz = sizeof(timeval);
+ unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
+ unsigned mbstate_t_sz = sizeof(mbstate_t);
+ unsigned sigset_t_sz = sizeof(sigset_t);
+ unsigned struct_timezone_sz = sizeof(struct timezone);
+ unsigned struct_tms_sz = sizeof(struct tms);
+ unsigned struct_sigevent_sz = sizeof(struct sigevent);
+ unsigned struct_sched_param_sz = sizeof(struct sched_param);
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+ unsigned ucontext_t_sz = sizeof(ucontext_t);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+#if SANITIZER_SOLARIS32
+ unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
+#endif
+ unsigned struct_statvfs_sz = sizeof(struct statvfs);
+
+ const uptr sig_ign = (uptr)SIG_IGN;
+ const uptr sig_dfl = (uptr)SIG_DFL;
+ const uptr sig_err = (uptr)SIG_ERR;
+ const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+ int shmctl_ipc_stat = (int)IPC_STAT;
+
+ unsigned struct_utmp_sz = sizeof(struct utmp);
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+
+ int map_fixed = MAP_FIXED;
+
+ int af_inet = (int)AF_INET;
+ int af_inet6 = (int)AF_INET6;
+
+ uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+ }
+
+ unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
+
+ int glob_nomatch = GLOB_NOMATCH;
+
+ unsigned path_max = PATH_MAX;
+
+ // ioctl arguments
+ unsigned struct_ifreq_sz = sizeof(struct ifreq);
+ unsigned struct_termios_sz = sizeof(struct termios);
+ unsigned struct_winsize_sz = sizeof(struct winsize);
+
+ unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
+ unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+
+ const unsigned IOCTL_NOT_PRESENT = 0;
+
+ unsigned IOCTL_FIOASYNC = FIOASYNC;
+ unsigned IOCTL_FIOCLEX = FIOCLEX;
+ unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+ unsigned IOCTL_FIONBIO = FIONBIO;
+ unsigned IOCTL_FIONCLEX = FIONCLEX;
+ unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+ unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+ unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+ unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+ unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+ unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+ unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+ unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+ unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+ unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+ unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+ unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+ unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+ unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+ unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+ unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+ unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+ unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+ unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+ unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+ unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+ unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+ unsigned IOCTL_TIOCGETD = TIOCGETD;
+ unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+ unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+ unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+ unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+ unsigned IOCTL_TIOCMGET = TIOCMGET;
+ unsigned IOCTL_TIOCMSET = TIOCMSET;
+ unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+ unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+ unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+ unsigned IOCTL_TIOCPKT = TIOCPKT;
+ unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+ unsigned IOCTL_TIOCSETD = TIOCSETD;
+ unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+ unsigned IOCTL_TIOCSTI = TIOCSTI;
+ unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent, d_off);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+#if SANITIZER_SOLARIS32
+COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
+CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent64, d_off);
+CHECK_SIZE_AND_OFFSET(dirent64, d_reclen);
+#endif
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
+
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+
+CHECK_TYPE_SIZE(ether_addr);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) ==
+ sizeof(((ifaddrs *)nullptr)->ifa_ifu));
+COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==
+ offsetof(ifaddrs, ifa_ifu));
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+
+CHECK_TYPE_SIZE(sem_t);
+
+#endif // SANITIZER_SOLARIS
diff --git a/lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.h b/lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.h
new file mode 100644
index 0000000000..85995e7979
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_platform_limits_solaris.h
@@ -0,0 +1,495 @@
+//===-- sanitizer_platform_limits_solaris.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific Solaris data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_SOLARIS_H
+#define SANITIZER_PLATFORM_LIMITS_SOLARIS_H
+
+#if SANITIZER_SOLARIS
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+extern unsigned struct_stat64_sz;
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_stack_t_sz;
+extern unsigned struct_sched_param_sz;
+extern unsigned struct_statfs64_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+extern unsigned ucontext_t_sz;
+
+extern unsigned struct_timespec_sz;
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+
+struct __sanitizer_sem_t {
+ //u64 data[6];
+ u32 sem_count;
+ u16 sem_type;
+ u16 sem_magic;
+ u64 sem_pad1[3];
+ u64 sem_pad2[2];
+};
+
+struct __sanitizer_ipc_perm {
+ unsigned int uid; // uid_t
+ unsigned int gid; // gid_t
+ unsigned int cuid; // uid_t
+ unsigned int cgid; // gid_t
+ unsigned int mode; // mode_t
+ unsigned int seq; // uint_t
+ int key; // key_t
+#if !defined(_LP64)
+ int pad[4];
+#endif
+};
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz; // size_t
+ unsigned long shm_flags; // uintptr_t
+ unsigned short shm_lkcnt; // ushort_t
+ int shm_lpid; // pid_t
+ int shm_cpid; // pid_t
+ unsigned long shm_nattch; // shmatt_t
+ unsigned long shm_cnattch; // ulong_t
+#if defined(_LP64)
+ long shm_atime; // time_t
+ long shm_dtime;
+ long shm_ctime;
+ void *shm_amp;
+ u64 shm_gransize; // uint64_t
+ u64 shm_allocated; // uint64_t
+ u64 shm_pad4[1]; // int64_t
+#else
+ long shm_atime; // time_t
+ int shm_pad1; // int32_t
+ long shm_dtime; // time_t
+ int shm_pad2; // int32_t
+ long shm_ctime; // time_t
+ void *shm_amp;
+ u64 shm_gransize; // uint64_t
+ u64 shm_allocated; // uint64_t
+#endif
+};
+
+extern unsigned struct_statvfs_sz;
+#if SANITIZER_SOLARIS32
+extern unsigned struct_statvfs64_sz;
+#endif
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ u64 ifa_flags; // uint64_t
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ // This is a union on Linux.
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+};
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+struct __sanitizer_XDR {
+ int x_op;
+ void *x_ops;
+ uptr x_public;
+ uptr x_private;
+ uptr x_base;
+ unsigned x_handy;
+};
+
+const int __sanitizer_XDR_ENCODE = 0;
+const int __sanitizer_XDR_DECODE = 1;
+const int __sanitizer_XDR_FREE = 2;
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ unsigned int pw_uid; // uid_t
+ unsigned int pw_gid; // gid_t
+ char *pw_age;
+ char *pw_comment;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+typedef long __sanitizer_time_t;
+
+typedef long __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+#if SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)
+struct __sanitizer_dirent {
+ unsigned long long d_ino;
+ long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#else
+struct __sanitizer_dirent {
+ unsigned long d_ino;
+ long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#endif
+
+struct __sanitizer_dirent64 {
+ unsigned long long d_ino;
+ unsigned long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+
+typedef long __sanitizer_clock_t;
+typedef int __sanitizer_clockid_t;
+
+// This thing depends on the platform. We are only interested in the upper
+// limit. Verified with a compiler assert in .cpp.
+union __sanitizer_pthread_attr_t {
+ char size[128];
+ void *align;
+};
+
+struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+};
+
+struct __sanitizer_siginfo {
+ // The size is determined by looking at sizeof of real siginfo_t on linux.
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr =
+ void (*)(int sig, __sanitizer_siginfo *siginfo, void *uctx);
+
+struct __sanitizer_sigaction {
+ int sa_flags;
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+#if !defined(_LP64)
+ int sa_resv[2];
+#endif
+};
+
+struct __sanitizer_kernel_sigset_t {
+ u8 sig[8];
+};
+
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+#if defined(__sparcv9)
+ int _ai_pad;
+#endif
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned long __sanitizer_nfds_t;
+
+struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ char **gl_pathv;
+ uptr gl_offs;
+ char **gl_pathp;
+ int gl_pathn;
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ char **we_wordp;
+ int we_wordn;
+};
+
+typedef void __sanitizer_FILE;
+#define SANITIZER_HAS_STRUCT_FILE 0
+
+// This simplifies generic code
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
+
+extern int shmctl_ipc_stat;
+
+extern unsigned struct_utmp_sz;
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+// <sys/ioccom.h>
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#define IOC_SIZEBITS 12
+#define IOC_DIRBITS 4
+#undef IOC_NONE
+#define IOC_NONE 2U // IOC_VOID
+#define IOC_READ 4U // IOC_OUT
+#define IOC_WRITE 8U // IOC_IN
+
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
+#if defined(__sparc__)
+// In sparc the 14 bits SIZE field overlaps with the
+// least significant bit of DIR, so either IOC_READ or
+// IOC_WRITE shall be 1 in order to get a non-zero SIZE.
+#define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
+#else
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+#endif
+
+extern unsigned struct_ifreq_sz;
+extern unsigned struct_termios_sz;
+extern unsigned struct_winsize_sz;
+
+extern unsigned struct_sioc_sg_req_sz;
+extern unsigned struct_sioc_vif_req_sz;
+
+// ioctl request identifiers
+
+// A special value to mark ioctls that are not present on the target platform,
+// when it can not be determined without including any system headers.
+extern const unsigned IOCTL_NOT_PRESENT;
+
+extern unsigned IOCTL_FIOASYNC;
+extern unsigned IOCTL_FIOCLEX;
+extern unsigned IOCTL_FIOGETOWN;
+extern unsigned IOCTL_FIONBIO;
+extern unsigned IOCTL_FIONCLEX;
+extern unsigned IOCTL_FIOSETOWN;
+extern unsigned IOCTL_SIOCADDMULTI;
+extern unsigned IOCTL_SIOCATMARK;
+extern unsigned IOCTL_SIOCDELMULTI;
+extern unsigned IOCTL_SIOCGIFADDR;
+extern unsigned IOCTL_SIOCGIFBRDADDR;
+extern unsigned IOCTL_SIOCGIFCONF;
+extern unsigned IOCTL_SIOCGIFDSTADDR;
+extern unsigned IOCTL_SIOCGIFFLAGS;
+extern unsigned IOCTL_SIOCGIFMETRIC;
+extern unsigned IOCTL_SIOCGIFMTU;
+extern unsigned IOCTL_SIOCGIFNETMASK;
+extern unsigned IOCTL_SIOCGPGRP;
+extern unsigned IOCTL_SIOCSIFADDR;
+extern unsigned IOCTL_SIOCSIFBRDADDR;
+extern unsigned IOCTL_SIOCSIFDSTADDR;
+extern unsigned IOCTL_SIOCSIFFLAGS;
+extern unsigned IOCTL_SIOCSIFMETRIC;
+extern unsigned IOCTL_SIOCSIFMTU;
+extern unsigned IOCTL_SIOCSIFNETMASK;
+extern unsigned IOCTL_SIOCSPGRP;
+extern unsigned IOCTL_TIOCEXCL;
+extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCGPGRP;
+extern unsigned IOCTL_TIOCGWINSZ;
+extern unsigned IOCTL_TIOCMBIC;
+extern unsigned IOCTL_TIOCMBIS;
+extern unsigned IOCTL_TIOCMGET;
+extern unsigned IOCTL_TIOCMSET;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCNXCL;
+extern unsigned IOCTL_TIOCOUTQ;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSCTTY;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCSPGRP;
+extern unsigned IOCTL_TIOCSTI;
+extern unsigned IOCTL_TIOCSWINSZ;
+extern unsigned IOCTL_MTIOCGET;
+extern unsigned IOCTL_MTIOCTOP;
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and struct at the same time,
+// and thus requires explicit "struct" in sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((struct CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#endif // SANITIZER_SOLARIS
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_posix.cpp b/lib/tsan/sanitizer_common/sanitizer_posix.cpp
new file mode 100644
index 0000000000..e21661b42f
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_posix.cpp
@@ -0,0 +1,398 @@
+//===-- sanitizer_posix.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements POSIX-specific functions from
+// sanitizer_posix.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_POSIX
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_procmaps.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+
+#if SANITIZER_FREEBSD
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
+namespace __sanitizer {
+
+// ------------- sanitizer_common.h
+uptr GetMmapGranularity() {
+ return GetPageSize();
+}
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, mem_type);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno)))
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
+ IncreaseTotalMmap(size);
+ return (void *)res;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!addr || !size) return;
+ uptr res = internal_munmap(addr, size);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+ DecreaseTotalMmap(size);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, mem_type);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno))) {
+ if (reserrno == ENOMEM)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
+ }
+ IncreaseTotalMmap(size);
+ return (void *)res;
+}
+
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping redundant pieces.
+// We probably can do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = size + alignment;
+ uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
+ if (UNLIKELY(!map_res))
+ return nullptr;
+ uptr map_end = map_res + map_size;
+ uptr res = map_res;
+ if (!IsAligned(res, alignment)) {
+ res = (map_res + alignment - 1) & ~(alignment - 1);
+ UnmapOrDie((void*)map_res, res - map_res);
+ }
+ uptr end = res + size;
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
+ return (void*)res;
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(p, &reserrno)))
+ ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
+ IncreaseTotalMmap(size);
+ return (void *)p;
+}
+
+static void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,
+ const char *name) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
+ uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(p, &reserrno))) {
+ if (tolerate_enomem && reserrno == ENOMEM)
+ return nullptr;
+ char mem_type[40];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
+ }
+ IncreaseTotalMmap(size);
+ return (void *)p;
+}
+
+void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
+ return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
+ return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);
+}
+
+bool MprotectNoAccess(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
+}
+
+bool MprotectReadOnly(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void *)addr, size, PROT_READ);
+}
+
+#if !SANITIZER_MAC
+void MprotectMallocZones(void *addr, int prot) {}
+#endif
+
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
+ if (ShouldMockFailureToOpen(filename))
+ return kInvalidFd;
+ int flags;
+ switch (mode) {
+ case RdOnly: flags = O_RDONLY; break;
+ case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
+ case RdWr: flags = O_RDWR | O_CREAT; break;
+ }
+ fd_t res = internal_open(filename, flags, 0660);
+ if (internal_iserror(res, errno_p))
+ return kInvalidFd;
+ return ReserveStandardFds(res);
+}
+
+void CloseFile(fd_t fd) {
+ internal_close(fd);
+}
+
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ uptr res = internal_read(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_read)
+ *bytes_read = res;
+ return true;
+}
+
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ uptr res = internal_write(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_written)
+ *bytes_written = res;
+ return true;
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ fd_t fd = OpenFile(file_name, RdOnly);
+ CHECK(fd != kInvalidFd);
+ uptr fsize = internal_filesize(fd);
+ CHECK_NE(fsize, (uptr)-1);
+ CHECK_GT(fsize, 0);
+ *buff_size = RoundUpTo(fsize, GetPageSizeCached());
+ uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ return internal_iserror(map) ? nullptr : (void *)map;
+}
+
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
+ uptr flags = MAP_SHARED;
+ if (addr) flags |= MAP_FIXED;
+ uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
+ int mmap_errno = 0;
+ if (internal_iserror(p, &mmap_errno)) {
+ Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
+ fd, (long long)offset, size, p, mmap_errno);
+ return nullptr;
+ }
+ return (void *)p;
+}
+
+static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
+ uptr start2, uptr end2) {
+ CHECK(start1 <= end1);
+ CHECK(start2 <= end2);
+ return (end1 < start2) || (end2 < start1);
+}
+
+// FIXME: this is thread-unsafe, but should not cause problems most of the time.
+// When the shadow is mapped only a single thread usually exists (plus maybe
+// several worker threads on Mac, which aren't expected to map big chunks of
+// memory).
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ if (proc_maps.Error())
+ return true; // and hope for the best
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (segment.start == segment.end) continue; // Empty range.
+ CHECK_NE(0, segment.end);
+ if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
+ range_end))
+ return false;
+ }
+ return true;
+}
+
+void DumpProcessMap() {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ const sptr kBufSize = 4095;
+ char *filename = (char*)MmapOrDie(kBufSize, __func__);
+ MemoryMappedSegment segment(filename, kBufSize);
+ Report("Process memory map follows:\n");
+ while (proc_maps.Next(&segment)) {
+ Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
+ segment.filename);
+ }
+ Report("End of process memory map.\n");
+ UnmapOrDie(filename, kBufSize);
+}
+
+const char *GetPwd() {
+ return GetEnv("PWD");
+}
+
+bool IsPathSeparator(const char c) {
+ return c == '/';
+}
+
+bool IsAbsolutePath(const char *path) {
+ return path != nullptr && IsPathSeparator(path[0]);
+}
+
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ internal_write(fd, buffer, length);
+}
+
+bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/false);
+ InternalScopedString buff(kMaxPathLength);
+ MemoryMappedSegment segment(buff.data(), kMaxPathLength);
+ while (proc_maps.Next(&segment)) {
+ if (segment.IsExecutable() &&
+ internal_strcmp(module, segment.filename) == 0) {
+ *start = segment.start;
+ *end = segment.end;
+ return true;
+ }
+ }
+ return false;
+}
+
+uptr SignalContext::GetAddress() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return (uptr)si->si_addr;
+}
+
+bool SignalContext::IsMemoryAccess() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return si->si_signo == SIGSEGV;
+}
+
+int SignalContext::GetType() const {
+ return static_cast<const siginfo_t *>(siginfo)->si_signo;
+}
+
+const char *SignalContext::Describe() const {
+ switch (GetType()) {
+ case SIGFPE:
+ return "FPE";
+ case SIGILL:
+ return "ILL";
+ case SIGABRT:
+ return "ABRT";
+ case SIGSEGV:
+ return "SEGV";
+ case SIGBUS:
+ return "BUS";
+ case SIGTRAP:
+ return "TRAP";
+ }
+ return "UNKNOWN SIGNAL";
+}
+
+fd_t ReserveStandardFds(fd_t fd) {
+ CHECK_GE(fd, 0);
+ if (fd > 2)
+ return fd;
+ bool used[3];
+ internal_memset(used, 0, sizeof(used));
+ while (fd <= 2) {
+ used[fd] = true;
+ fd = internal_dup(fd);
+ }
+ for (int i = 0; i <= 2; ++i)
+ if (used[i])
+ internal_close(i);
+ return fd;
+}
+
+bool ShouldMockFailureToOpen(const char *path) {
+ return common_flags()->test_only_emulate_no_memorymap &&
+ internal_strncmp(path, "/proc/", 6) == 0;
+}
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
+int GetNamedMappingFd(const char *name, uptr size, int *flags) {
+ if (!common_flags()->decorate_proc_maps || !name)
+ return -1;
+ char shmname[200];
+ CHECK(internal_strlen(name) < sizeof(shmname) - 10);
+ internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]",
+ internal_getpid(), name);
+ int o_cloexec = 0;
+#if defined(O_CLOEXEC)
+ o_cloexec = O_CLOEXEC;
+#endif
+ int fd = ReserveStandardFds(
+ internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
+ CHECK_GE(fd, 0);
+ if (!o_cloexec) {
+ int res = fcntl(fd, F_SETFD, FD_CLOEXEC);
+ CHECK_EQ(0, res);
+ }
+ int res = internal_ftruncate(fd, size);
+ CHECK_EQ(0, res);
+ res = internal_unlink(shmname);
+ CHECK_EQ(0, res);
+ *flags &= ~(MAP_ANON | MAP_ANONYMOUS);
+ return fd;
+}
+#else
+int GetNamedMappingFd(const char *name, uptr size, int *flags) {
+ return -1;
+}
+#endif
+
+#if SANITIZER_ANDROID
+#define PR_SET_VMA 0x53564d41
+#define PR_SET_VMA_ANON_NAME 0
+void DecorateMapping(uptr addr, uptr size, const char *name) {
+ if (!common_flags()->decorate_proc_maps || !name)
+ return;
+ internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, (uptr)name);
+}
+#else
+void DecorateMapping(uptr addr, uptr size, const char *name) {
+}
+#endif
+
+uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name) {
+ int fd = GetNamedMappingFd(name, length, &flags);
+ uptr res = internal_mmap(addr, length, prot, flags, fd, 0);
+ if (!internal_iserror(res))
+ DecorateMapping(res, length, name);
+ return res;
+}
+
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX
diff --git a/lib/tsan/sanitizer_common/sanitizer_posix.h b/lib/tsan/sanitizer_common/sanitizer_posix.h
new file mode 100644
index 0000000000..a1b49702da
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_posix.h
@@ -0,0 +1,125 @@
+//===-- sanitizer_posix.h -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and declares some useful POSIX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_POSIX_H
+#define SANITIZER_POSIX_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers from sanitizer runtime.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_freebsd.h"
+#include "sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_platform_limits_openbsd.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_platform_limits_solaris.h"
+
+#if !SANITIZER_POSIX
+// Make it hard to accidentally use any of functions declared in this file:
+#error This file should only be included on POSIX
+#endif
+
+namespace __sanitizer {
+
+// I/O
+// Don't use directly, use __sanitizer::OpenFile() instead.
+uptr internal_open(const char *filename, int flags);
+uptr internal_open(const char *filename, int flags, u32 mode);
+uptr internal_close(fd_t fd);
+
+uptr internal_read(fd_t fd, void *buf, uptr count);
+uptr internal_write(fd_t fd, const void *buf, uptr count);
+
+// Memory
+uptr internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset);
+uptr internal_munmap(void *addr, uptr length);
+int internal_mprotect(void *addr, uptr length, int prot);
+
+// OS
+uptr internal_filesize(fd_t fd); // -1 on error.
+uptr internal_stat(const char *path, void *buf);
+uptr internal_lstat(const char *path, void *buf);
+uptr internal_fstat(fd_t fd, void *buf);
+uptr internal_dup(int oldfd);
+uptr internal_dup2(int oldfd, int newfd);
+uptr internal_readlink(const char *path, char *buf, uptr bufsize);
+uptr internal_unlink(const char *path);
+uptr internal_rename(const char *oldpath, const char *newpath);
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
+
+#if SANITIZER_NETBSD
+uptr internal_ptrace(int request, int pid, void *addr, int data);
+#else
+uptr internal_ptrace(int request, int pid, void *addr, void *data);
+#endif
+uptr internal_waitpid(int pid, int *status, int options);
+
+int internal_fork();
+fd_t internal_spawn(const char *argv[], const char *envp[], pid_t *pid);
+
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen);
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen);
+
+// These functions call appropriate pthread_ functions directly, bypassing
+// the interceptor. They are weak and may not be present in some tools.
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_create(void *th, void *attr, void *(*callback)(void *),
+ void *param);
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_join(void *th, void **ret);
+
+#define DEFINE_REAL_PTHREAD_FUNCTIONS \
+ namespace __sanitizer { \
+ int real_pthread_create(void *th, void *attr, void *(*callback)(void *), \
+ void *param) { \
+ return REAL(pthread_create)(th, attr, callback, param); \
+ } \
+ int real_pthread_join(void *th, void **ret) { \
+ return REAL(pthread_join(th, ret)); \
+ } \
+ } // namespace __sanitizer
+
+int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
+
+// A routine named real_sigaction() must be implemented by each sanitizer in
+// order for internal_sigaction() to bypass interceptors.
+int internal_sigaction(int signum, const void *act, void *oldact);
+void internal_sigfillset(__sanitizer_sigset_t *set);
+void internal_sigemptyset(__sanitizer_sigset_t *set);
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum);
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]);
+
+bool IsStateDetached(int state);
+
+// Move the fd out of {0, 1, 2} range.
+fd_t ReserveStandardFds(fd_t fd);
+
+bool ShouldMockFailureToOpen(const char *path);
+
+// Create a non-file mapping with a given /proc/self/maps name.
+uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name);
+
+// Platforms should implement at most one of these.
+// 1. Provide a pre-decorated file descriptor to use instead of an anonymous
+// mapping.
+int GetNamedMappingFd(const char *name, uptr size, int *flags);
+// 2. Add name to an existing anonymous mapping. The caller must keep *name
+// alive at least as long as the mapping exists.
+void DecorateMapping(uptr addr, uptr size, const char *name);
+
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_printf.cpp b/lib/tsan/sanitizer_common/sanitizer_printf.cpp
new file mode 100644
index 0000000000..a032787114
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_printf.cpp
@@ -0,0 +1,358 @@
+//===-- sanitizer_printf.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Internal printf function, used inside run-time libraries.
+// We can't use libc printf because we intercept some of the functions used
+// inside it.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
+ !defined(va_copy)
+# define va_copy(dst, src) ((dst) = (src))
+#endif
+
+namespace __sanitizer {
+
+static int AppendChar(char **buff, const char *buff_end, char c) {
+ if (*buff < buff_end) {
+ **buff = c;
+ (*buff)++;
+ }
+ return 1;
+}
+
+// Appends number in a given base to buffer. If its length is less than
+// |minimal_num_length|, it is padded with leading zeroes or spaces, depending
+// on the value of |pad_with_zero|.
+static int AppendNumber(char **buff, const char *buff_end, u64 absolute_value,
+ u8 base, u8 minimal_num_length, bool pad_with_zero,
+ bool negative, bool uppercase) {
+ uptr const kMaxLen = 30;
+ RAW_CHECK(base == 10 || base == 16);
+ RAW_CHECK(base == 10 || !negative);
+ RAW_CHECK(absolute_value || !negative);
+ RAW_CHECK(minimal_num_length < kMaxLen);
+ int result = 0;
+ if (negative && minimal_num_length)
+ --minimal_num_length;
+ if (negative && pad_with_zero)
+ result += AppendChar(buff, buff_end, '-');
+ uptr num_buffer[kMaxLen];
+ int pos = 0;
+ do {
+ RAW_CHECK_MSG((uptr)pos < kMaxLen, "AppendNumber buffer overflow");
+ num_buffer[pos++] = absolute_value % base;
+ absolute_value /= base;
+ } while (absolute_value > 0);
+ if (pos < minimal_num_length) {
+ // Make sure compiler doesn't insert call to memset here.
+ internal_memset(&num_buffer[pos], 0,
+ sizeof(num_buffer[0]) * (minimal_num_length - pos));
+ pos = minimal_num_length;
+ }
+ RAW_CHECK(pos > 0);
+ pos--;
+ for (; pos >= 0 && num_buffer[pos] == 0; pos--) {
+ char c = (pad_with_zero || pos == 0) ? '0' : ' ';
+ result += AppendChar(buff, buff_end, c);
+ }
+ if (negative && !pad_with_zero) result += AppendChar(buff, buff_end, '-');
+ for (; pos >= 0; pos--) {
+ char digit = static_cast<char>(num_buffer[pos]);
+ digit = (digit < 10) ? '0' + digit : (uppercase ? 'A' : 'a') + digit - 10;
+ result += AppendChar(buff, buff_end, digit);
+ }
+ return result;
+}
+
+static int AppendUnsigned(char **buff, const char *buff_end, u64 num, u8 base,
+ u8 minimal_num_length, bool pad_with_zero,
+ bool uppercase) {
+ return AppendNumber(buff, buff_end, num, base, minimal_num_length,
+ pad_with_zero, false /* negative */, uppercase);
+}
+
+static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
+ u8 minimal_num_length, bool pad_with_zero) {
+ bool negative = (num < 0);
+ return AppendNumber(buff, buff_end, (u64)(negative ? -num : num), 10,
+ minimal_num_length, pad_with_zero, negative,
+ false /* uppercase */);
+}
+
+
+// Use the fact that explicitly requesting 0 width (%0s) results in UB and
+// interpret width == 0 as "no width requested":
+// width == 0 - no width requested
+// width < 0 - left-justify s within and pad it to -width chars, if necessary
+// width > 0 - right-justify s, not implemented yet
+static int AppendString(char **buff, const char *buff_end, int width,
+ int max_chars, const char *s) {
+ if (!s)
+ s = "<null>";
+ int result = 0;
+ for (; *s; s++) {
+ if (max_chars >= 0 && result >= max_chars)
+ break;
+ result += AppendChar(buff, buff_end, *s);
+ }
+ // Only the left justified strings are supported.
+ while (width < -result)
+ result += AppendChar(buff, buff_end, ' ');
+ return result;
+}
+
+static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
+ int result = 0;
+ result += AppendString(buff, buff_end, 0, -1, "0x");
+ result += AppendUnsigned(buff, buff_end, ptr_value, 16,
+ SANITIZER_POINTER_FORMAT_LENGTH,
+ true /* pad_with_zero */, false /* uppercase */);
+ return result;
+}
+
+int VSNPrintf(char *buff, int buff_length,
+ const char *format, va_list args) {
+ static const char *kPrintfFormatsHelp =
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ RAW_CHECK(format);
+ RAW_CHECK(buff_length > 0);
+ const char *buff_end = &buff[buff_length - 1];
+ const char *cur = format;
+ int result = 0;
+ for (; *cur; cur++) {
+ if (*cur != '%') {
+ result += AppendChar(&buff, buff_end, *cur);
+ continue;
+ }
+ cur++;
+ bool left_justified = *cur == '-';
+ if (left_justified)
+ cur++;
+ bool have_width = (*cur >= '0' && *cur <= '9');
+ bool pad_with_zero = (*cur == '0');
+ int width = 0;
+ if (have_width) {
+ while (*cur >= '0' && *cur <= '9') {
+ width = width * 10 + *cur++ - '0';
+ }
+ }
+ bool have_precision = (cur[0] == '.' && cur[1] == '*');
+ int precision = -1;
+ if (have_precision) {
+ cur += 2;
+ precision = va_arg(args, int);
+ }
+ bool have_z = (*cur == 'z');
+ cur += have_z;
+ bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
+ cur += have_ll * 2;
+ s64 dval;
+ u64 uval;
+ const bool have_length = have_z || have_ll;
+ const bool have_flags = have_width || have_length;
+ // At the moment only %s supports precision and left-justification.
+ CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
+ switch (*cur) {
+ case 'd': {
+ dval = have_ll ? va_arg(args, s64)
+ : have_z ? va_arg(args, sptr)
+ : va_arg(args, int);
+ result += AppendSignedDecimal(&buff, buff_end, dval, width,
+ pad_with_zero);
+ break;
+ }
+ case 'u':
+ case 'x':
+ case 'X': {
+ uval = have_ll ? va_arg(args, u64)
+ : have_z ? va_arg(args, uptr)
+ : va_arg(args, unsigned);
+ bool uppercase = (*cur == 'X');
+ result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
+ width, pad_with_zero, uppercase);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!have_length, kPrintfFormatsHelp);
+ // Only left-justified width is supported.
+ CHECK(!have_width || left_justified);
+ result += AppendString(&buff, buff_end, left_justified ? -width : width,
+ precision, va_arg(args, char*));
+ break;
+ }
+ case 'c': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, va_arg(args, int));
+ break;
+ }
+ case '%' : {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, '%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, kPrintfFormatsHelp);
+ }
+ }
+ }
+ RAW_CHECK(buff <= buff_end);
+ AppendChar(&buff, buff_end + 1, '\0');
+ return result;
+}
+
+static void (*PrintfAndReportCallback)(const char *);
+void SetPrintfAndReportCallback(void (*callback)(const char *)) {
+ PrintfAndReportCallback = callback;
+}
+
+// Can be overriden in frontend.
+#if SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
+// Implementation must be defined in frontend.
+extern "C" void __sanitizer_on_print(const char *str);
+#else
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_on_print, const char *str) {
+ (void)str;
+}
+#endif
+
+static void CallPrintfAndReportCallback(const char *str) {
+ __sanitizer_on_print(str);
+ if (PrintfAndReportCallback)
+ PrintfAndReportCallback(str);
+}
+
+static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
+ char *local_buffer,
+ int buffer_size,
+ const char *format,
+ va_list args) {
+ va_list args2;
+ va_copy(args2, args);
+ const int kLen = 16 * 1024;
+ int needed_length;
+ char *buffer = local_buffer;
+ // First try to print a message using a local buffer, and then fall back to
+ // mmaped buffer.
+ for (int use_mmap = 0; use_mmap < 2; use_mmap++) {
+ if (use_mmap) {
+ va_end(args);
+ va_copy(args, args2);
+ buffer = (char*)MmapOrDie(kLen, "Report");
+ buffer_size = kLen;
+ }
+ needed_length = 0;
+ // Check that data fits into the current buffer.
+# define CHECK_NEEDED_LENGTH \
+ if (needed_length >= buffer_size) { \
+ if (!use_mmap) continue; \
+ RAW_CHECK_MSG(needed_length < kLen, \
+ "Buffer in Report is too short!\n"); \
+ }
+ // Fuchsia's logging infrastructure always keeps track of the logging
+ // process, thread, and timestamp, so never prepend such information.
+ if (!SANITIZER_FUCHSIA && append_pid) {
+ int pid = internal_getpid();
+ const char *exe_name = GetProcessName();
+ if (common_flags()->log_exe_name && exe_name) {
+ needed_length += internal_snprintf(buffer, buffer_size,
+ "==%s", exe_name);
+ CHECK_NEEDED_LENGTH
+ }
+ needed_length += internal_snprintf(
+ buffer + needed_length, buffer_size - needed_length, "==%d==", pid);
+ CHECK_NEEDED_LENGTH
+ }
+ needed_length += VSNPrintf(buffer + needed_length,
+ buffer_size - needed_length, format, args);
+ CHECK_NEEDED_LENGTH
+ // If the message fit into the buffer, print it and exit.
+ break;
+# undef CHECK_NEEDED_LENGTH
+ }
+ RawWrite(buffer);
+
+ // Remove color sequences from the message.
+ RemoveANSIEscapeSequencesFromString(buffer);
+ CallPrintfAndReportCallback(buffer);
+ LogMessageOnPrintf(buffer);
+
+ // If we had mapped any memory, clean up.
+ if (buffer != local_buffer)
+ UnmapOrDie((void *)buffer, buffer_size);
+ va_end(args2);
+}
+
+static void NOINLINE SharedPrintfCode(bool append_pid, const char *format,
+ va_list args) {
+ // |local_buffer| is small enough not to overflow the stack and/or violate
+ // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other
+ // hand, the bigger the buffer is, the more the chance the error report will
+ // fit into it.
+ char local_buffer[400];
+ SharedPrintfCodeNoBuffer(append_pid, local_buffer, ARRAY_SIZE(local_buffer),
+ format, args);
+}
+
+FORMAT(1, 2)
+void Printf(const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ SharedPrintfCode(false, format, args);
+ va_end(args);
+}
+
+// Like Printf, but prints the current PID before the output string.
+FORMAT(1, 2)
+void Report(const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ SharedPrintfCode(true, format, args);
+ va_end(args);
+}
+
+// Writes at most "length" symbols to "buffer" (including trailing '\0').
+// Returns the number of symbols that should have been written to buffer
+// (not including trailing '\0'). Thus, the string is truncated
+// iff return value is not less than "length".
+FORMAT(3, 4)
+int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ int needed_length = VSNPrintf(buffer, length, format, args);
+ va_end(args);
+ return needed_length;
+}
+
+FORMAT(2, 3)
+void InternalScopedString::append(const char *format, ...) {
+ CHECK_LT(length_, size());
+ va_list args;
+ va_start(args, format);
+ VSNPrintf(data() + length_, size() - length_, format, args);
+ va_end(args);
+ length_ += internal_strlen(data() + length_);
+ CHECK_LT(length_, size());
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_procmaps.h b/lib/tsan/sanitizer_common/sanitizer_procmaps.h
new file mode 100644
index 0000000000..665ed45fa9
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_procmaps.h
@@ -0,0 +1,100 @@
+//===-- sanitizer_procmaps.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Information about the process mappings.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PROCMAPS_H
+#define SANITIZER_PROCMAPS_H
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_SOLARIS || \
+ SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_fuchsia.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mac.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// Memory protection masks.
+static const uptr kProtectionRead = 1;
+static const uptr kProtectionWrite = 2;
+static const uptr kProtectionExecute = 4;
+static const uptr kProtectionShared = 8;
+
+struct MemoryMappedSegmentData;
+
+class MemoryMappedSegment {
+ public:
+ explicit MemoryMappedSegment(char *buff = nullptr, uptr size = 0)
+ : filename(buff), filename_size(size), data_(nullptr) {}
+ ~MemoryMappedSegment() {}
+
+ bool IsReadable() const { return protection & kProtectionRead; }
+ bool IsWritable() const { return protection & kProtectionWrite; }
+ bool IsExecutable() const { return protection & kProtectionExecute; }
+ bool IsShared() const { return protection & kProtectionShared; }
+
+ void AddAddressRanges(LoadedModule *module);
+
+ uptr start;
+ uptr end;
+ uptr offset;
+ char *filename; // owned by caller
+ uptr filename_size;
+ uptr protection;
+ ModuleArch arch;
+ u8 uuid[kModuleUUIDSize];
+
+ private:
+ friend class MemoryMappingLayout;
+
+ // This field is assigned and owned by MemoryMappingLayout if needed
+ MemoryMappedSegmentData *data_;
+};
+
+class MemoryMappingLayout {
+ public:
+ explicit MemoryMappingLayout(bool cache_enabled);
+ ~MemoryMappingLayout();
+ bool Next(MemoryMappedSegment *segment);
+ bool Error() const;
+ void Reset();
+ // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+ // to obtain the memory mappings. It should fall back to pre-cached data
+ // instead of aborting.
+ static void CacheMemoryMappings();
+
+ // Adds all mapped objects into a vector.
+ void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);
+
+ private:
+ void LoadFromCache();
+
+ MemoryMappingLayoutData data_;
+};
+
+// Returns code range for the specified module.
+bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);
+
+bool IsDecimal(char c);
+uptr ParseDecimal(const char **p);
+bool IsHex(char c);
+uptr ParseHex(const char **p);
+
+} // namespace __sanitizer
+
+#endif
+#endif // SANITIZER_PROCMAPS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_procmaps_bsd.cpp b/lib/tsan/sanitizer_common/sanitizer_procmaps_bsd.cpp
new file mode 100644
index 0000000000..02ff7c0e91
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_procmaps_bsd.cpp
@@ -0,0 +1,139 @@
+//===-- sanitizer_procmaps_bsd.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings
+// (FreeBSD, OpenBSD and NetBSD-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+#include "sanitizer_common.h"
+#if SANITIZER_FREEBSD
+#include "sanitizer_freebsd.h"
+#endif
+#include "sanitizer_procmaps.h"
+
+// clang-format off
+#include <sys/types.h>
+#include <sys/sysctl.h>
+// clang-format on
+#include <unistd.h>
+#if SANITIZER_FREEBSD
+#include <sys/user.h>
+#endif
+
+#include <limits.h>
+#if SANITIZER_OPENBSD
+#define KVME_PROT_READ KVE_PROT_READ
+#define KVME_PROT_WRITE KVE_PROT_WRITE
+#define KVME_PROT_EXEC KVE_PROT_EXEC
+#endif
+
+// Fix 'kinfo_vmentry' definition on FreeBSD prior v9.2 in 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+#include <osreldate.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define kinfo_vmentry xkinfo_vmentry
+#endif
+#endif
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ const int Mib[] = {
+#if SANITIZER_FREEBSD
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_VMMAP,
+ getpid()
+#elif SANITIZER_OPENBSD
+ CTL_KERN,
+ KERN_PROC_VMMAP,
+ getpid()
+#elif SANITIZER_NETBSD
+ CTL_VM,
+ VM_PROC,
+ VM_PROC_MAP,
+ getpid(),
+ sizeof(struct kinfo_vmentry)
+#else
+#error "not supported"
+#endif
+ };
+
+ uptr Size = 0;
+ int Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), NULL, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ CHECK_GT(Size, 0);
+
+#if !SANITIZER_OPENBSD
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = MmapedSize;
+ Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), VmMap, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ proc_maps->data = (char *)VmMap;
+#else
+ size_t PageSize = GetPageSize();
+ size_t MmapedSize = Size;
+ MmapedSize = ((MmapedSize - 1) / PageSize + 1) * PageSize;
+ char *Mem = (char *)MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = 2 * Size + 10 * sizeof(struct kinfo_vmentry);
+ if (Size > 0x10000)
+ Size = 0x10000;
+ Size = (Size / sizeof(struct kinfo_vmentry)) * sizeof(struct kinfo_vmentry);
+ Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), Mem, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ MmapedSize = Size;
+ proc_maps->data = Mem;
+#endif
+
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ CHECK(!Error()); // can not fail
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last)
+ return false;
+ const struct kinfo_vmentry *VmEntry =
+ (const struct kinfo_vmentry *)data_.current;
+
+ segment->start = (uptr)VmEntry->kve_start;
+ segment->end = (uptr)VmEntry->kve_end;
+ segment->offset = (uptr)VmEntry->kve_offset;
+
+ segment->protection = 0;
+ if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
+ segment->protection |= kProtectionRead;
+ if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
+ segment->protection |= kProtectionWrite;
+ if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
+ segment->protection |= kProtectionExecute;
+
+#if !SANITIZER_OPENBSD
+ if (segment->filename != NULL && segment->filename_size > 0) {
+ internal_snprintf(segment->filename,
+ Min(segment->filename_size, (uptr)PATH_MAX), "%s",
+ VmEntry->kve_path);
+ }
+#endif
+
+#if SANITIZER_FREEBSD
+ data_.current += VmEntry->kve_structsize;
+#else
+ data_.current += sizeof(*VmEntry);
+#endif
+
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_procmaps_common.cpp b/lib/tsan/sanitizer_common/sanitizer_procmaps_common.cpp
new file mode 100644
index 0000000000..e0cb47f8ca
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_procmaps_common.cpp
@@ -0,0 +1,174 @@
+//===-- sanitizer_procmaps_common.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (common parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+static ProcSelfMapsBuff cached_proc_self_maps;
+static StaticSpinMutex cache_lock;
+
+static int TranslateDigit(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+// Parse a number and promote 'p' up to the first non-digit character.
+static uptr ParseNumber(const char **p, int base) {
+ uptr n = 0;
+ int d;
+ CHECK(base >= 2 && base <= 16);
+ while ((d = TranslateDigit(**p)) >= 0 && d < base) {
+ n = n * base + d;
+ (*p)++;
+ }
+ return n;
+}
+
+bool IsDecimal(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 10;
+}
+
+uptr ParseDecimal(const char **p) {
+ return ParseNumber(p, 10);
+}
+
+bool IsHex(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 16;
+}
+
+uptr ParseHex(const char **p) {
+ return ParseNumber(p, 16);
+}
+
+void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
+ // data_ should be unused on this platform
+ CHECK(!data_);
+ module->addAddressRange(start, end, IsExecutable(), IsWritable());
+}
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ if (cache_enabled)
+ CacheMemoryMappings();
+
+ // Read maps after the cache update to capture the maps/unmaps happening in
+ // the process of updating.
+ ReadProcMaps(&data_.proc_self_maps);
+ if (cache_enabled && data_.proc_self_maps.mmaped_size == 0)
+ LoadFromCache();
+
+ Reset();
+}
+
+bool MemoryMappingLayout::Error() const {
+ return data_.current == nullptr;
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (data_.proc_self_maps.data != cached_proc_self_maps.data)
+ UnmapOrDie(data_.proc_self_maps.data, data_.proc_self_maps.mmaped_size);
+}
+
+void MemoryMappingLayout::Reset() {
+ data_.current = data_.proc_self_maps.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ ProcSelfMapsBuff new_proc_self_maps;
+ ReadProcMaps(&new_proc_self_maps);
+ // Don't invalidate the cache if the mappings are unavailable.
+ if (new_proc_self_maps.mmaped_size == 0)
+ return;
+ SpinMutexLock l(&cache_lock);
+ if (cached_proc_self_maps.mmaped_size)
+ UnmapOrDie(cached_proc_self_maps.data, cached_proc_self_maps.mmaped_size);
+ cached_proc_self_maps = new_proc_self_maps;
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock);
+ if (cached_proc_self_maps.data)
+ data_.proc_self_maps = cached_proc_self_maps;
+}
+
+void MemoryMappingLayout::DumpListOfModules(
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
+ Reset();
+ InternalScopedString module_name(kMaxPathLength);
+ MemoryMappedSegment segment(module_name.data(), module_name.size());
+ for (uptr i = 0; Next(&segment); i++) {
+ const char *cur_name = segment.filename;
+ if (cur_name[0] == '\0')
+ continue;
+ // Don't subtract 'cur_beg' from the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in
+ // process maps is likely the binary itself (all dynamic libs
+ // are mapped higher in address space). For such a binary,
+ // instruction offset in binary coincides with the actual
+ // instruction address in virtual memory (as code section
+ // is mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are
+ // mapped high at address space (in particular, higher than
+ // shadow memory of the tool), so the module can't be the
+ // first entry.
+ uptr base_address = (i ? segment.start : 0) - segment.offset;
+ LoadedModule cur_module;
+ cur_module.set(cur_name, base_address);
+ segment.AddAddressRanges(&cur_module);
+ modules->push_back(cur_module);
+ }
+}
+
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+ char *smaps = nullptr;
+ uptr smaps_cap = 0;
+ uptr smaps_len = 0;
+ if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len))
+ return;
+ uptr start = 0;
+ bool file = false;
+ const char *pos = smaps;
+ while (pos < smaps + smaps_len) {
+ if (IsHex(pos[0])) {
+ start = ParseHex(&pos);
+ for (; *pos != '/' && *pos > '\n'; pos++) {}
+ file = *pos == '/';
+ } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+ while (!IsDecimal(*pos)) pos++;
+ uptr rss = ParseDecimal(&pos) * 1024;
+ cb(start, rss, file, stats, stats_size);
+ }
+ while (*pos++ != '\n') {}
+ }
+ UnmapOrDie(smaps, smaps_cap);
+}
+
+} // namespace __sanitizer
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_procmaps_fuchsia.cpp b/lib/tsan/sanitizer_common/sanitizer_procmaps_fuchsia.cpp
new file mode 100644
index 0000000000..cc3e9be064
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_procmaps_fuchsia.cpp
@@ -0,0 +1,80 @@
+//===-- sanitizer_procmaps_fuchsia.cpp
+//----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Fuchsia-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+// The cache flag is ignored on Fuchsia because a process can always get this
+// information via its process-self handle.
+MemoryMappingLayout::MemoryMappingLayout(bool) { Reset(); }
+
+void MemoryMappingLayout::Reset() {
+ data_.data.clear();
+ data_.current = 0;
+
+ size_t count;
+ zx_status_t status = _zx_object_get_info(
+ _zx_process_self(), ZX_INFO_PROCESS_MAPS, nullptr, 0, nullptr, &count);
+ if (status != ZX_OK) {
+ return;
+ }
+
+ size_t filled;
+ do {
+ data_.data.resize(count);
+ status = _zx_object_get_info(
+ _zx_process_self(), ZX_INFO_PROCESS_MAPS, data_.data.data(),
+ count * sizeof(zx_info_maps_t), &filled, &count);
+ if (status != ZX_OK) {
+ data_.data.clear();
+ return;
+ }
+ } while (filled < count);
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {}
+
+bool MemoryMappingLayout::Error() const { return data_.data.empty(); }
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ while (data_.current < data_.data.size()) {
+ const auto &entry = data_.data[data_.current++];
+ if (entry.type == ZX_INFO_MAPS_TYPE_MAPPING) {
+ segment->start = entry.base;
+ segment->end = entry.base + entry.size;
+ segment->offset = entry.u.mapping.vmo_offset;
+ const auto flags = entry.u.mapping.mmu_flags;
+ segment->protection =
+ ((flags & ZX_VM_PERM_READ) ? kProtectionRead : 0) |
+ ((flags & ZX_VM_PERM_WRITE) ? kProtectionWrite : 0) |
+ ((flags & ZX_VM_PERM_EXECUTE) ? kProtectionExecute : 0);
+ if (segment->filename && segment->filename_size > 0) {
+ uptr len = Min(sizeof(entry.name), segment->filename_size) - 1;
+ internal_strncpy(segment->filename, entry.name, len);
+ segment->filename[len] = 0;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
diff --git a/lib/tsan/sanitizer_common/sanitizer_procmaps_linux.cpp b/lib/tsan/sanitizer_common/sanitizer_procmaps_linux.cpp
new file mode 100644
index 0000000000..c7af57355b
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_procmaps_linux.cpp
@@ -0,0 +1,81 @@
+//===-- sanitizer_procmaps_linux.cpp --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Linux-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ if (!ReadFileToBuffer("/proc/self/maps", &proc_maps->data,
+ &proc_maps->mmaped_size, &proc_maps->len)) {
+ proc_maps->data = nullptr;
+ proc_maps->mmaped_size = 0;
+ proc_maps->len = 0;
+ }
+}
+
+static bool IsOneOf(char c, char c1, char c2) {
+ return c == c1 || c == c2;
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ if (Error()) return false; // simulate empty maps
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last) return false;
+ char *next_line =
+ (char *)internal_memchr(data_.current, '\n', last - data_.current);
+ if (next_line == 0)
+ next_line = last;
+ // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
+ segment->start = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, '-');
+ segment->end = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ CHECK(IsOneOf(*data_.current, '-', 'r'));
+ segment->protection = 0;
+ if (*data_.current++ == 'r') segment->protection |= kProtectionRead;
+ CHECK(IsOneOf(*data_.current, '-', 'w'));
+ if (*data_.current++ == 'w') segment->protection |= kProtectionWrite;
+ CHECK(IsOneOf(*data_.current, '-', 'x'));
+ if (*data_.current++ == 'x') segment->protection |= kProtectionExecute;
+ CHECK(IsOneOf(*data_.current, 's', 'p'));
+ if (*data_.current++ == 's') segment->protection |= kProtectionShared;
+ CHECK_EQ(*data_.current++, ' ');
+ segment->offset = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ':');
+ ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ while (IsDecimal(*data_.current)) data_.current++;
+ // Qemu may lack the trailing space.
+ // https://github.com/google/sanitizers/issues/160
+ // CHECK_EQ(*data_.current++, ' ');
+ // Skip spaces.
+ while (data_.current < next_line && *data_.current == ' ') data_.current++;
+ // Fill in the filename.
+ if (segment->filename) {
+ uptr len =
+ Min((uptr)(next_line - data_.current), segment->filename_size - 1);
+ internal_strncpy(segment->filename, data_.current, len);
+ segment->filename[len] = 0;
+ }
+
+ data_.current = next_line + 1;
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
diff --git a/lib/tsan/sanitizer_common/sanitizer_procmaps_mac.cpp b/lib/tsan/sanitizer_common/sanitizer_procmaps_mac.cpp
new file mode 100644
index 0000000000..d02afcfe87
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -0,0 +1,379 @@
+//===-- sanitizer_procmaps_mac.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Mac-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+#include <mach/mach.h>
+
+// These are not available in older macOS SDKs.
+#ifndef CPU_SUBTYPE_X86_64_H
+#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7S
+#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7K
+#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
+#endif
+#ifndef CPU_TYPE_ARM64
+#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
+#endif
+
+namespace __sanitizer {
+
+// Contains information used to iterate through sections.
+struct MemoryMappedSegmentData {
+ char name[kMaxSegName];
+ uptr nsects;
+ const char *current_load_cmd_addr;
+ u32 lc_type;
+ uptr base_virt_addr;
+ uptr addr_mask;
+};
+
+template <typename Section>
+static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
+ bool isWritable) {
+ const Section *sc = (const Section *)data->current_load_cmd_addr;
+ data->current_load_cmd_addr += sizeof(Section);
+
+ uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
+ uptr sec_end = sec_start + sc->size;
+ module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
+ sc->sectname);
+}
+
+void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
+ // Don't iterate over sections when the caller hasn't set up the
+ // data pointer, when there are no sections, or when the segment
+ // is executable. Avoid iterating over executable sections because
+ // it will confuse libignore, and because the extra granularity
+ // of information is not needed by any sanitizers.
+ if (!data_ || !data_->nsects || IsExecutable()) {
+ module->addAddressRange(start, end, IsExecutable(), IsWritable(),
+ data_ ? data_->name : nullptr);
+ return;
+ }
+
+ do {
+ if (data_->lc_type == LC_SEGMENT) {
+ NextSectionLoad<struct section>(module, data_, IsWritable());
+#ifdef MH_MAGIC_64
+ } else if (data_->lc_type == LC_SEGMENT_64) {
+ NextSectionLoad<struct section_64>(module, data_, IsWritable());
+#endif
+ }
+ } while (--data_->nsects);
+}
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ Reset();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+}
+
+bool MemoryMappingLayout::Error() const {
+ return false;
+}
+
+// More information about Mach-O headers can be found in mach-o/loader.h
+// Each Mach-O image has a header (mach_header or mach_header_64) starting with
+// a magic number, and a list of linker load commands directly following the
+// header.
+// A load command is at least two 32-bit words: the command type and the
+// command size in bytes. We're interested only in segment load commands
+// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
+// into the task's address space.
+// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
+// segment_command_64 correspond to the memory address, memory size and the
+// file offset of the current memory segment.
+// Because these fields are taken from the images as is, one needs to add
+// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
+
+void MemoryMappingLayout::Reset() {
+ // Count down from the top.
+ // TODO(glider): as per man 3 dyld, iterating over the headers with
+ // _dyld_image_count is thread-unsafe. We need to register callbacks for
+ // adding and removing images which will invalidate the MemoryMappingLayout
+ // state.
+ data_.current_image = _dyld_image_count();
+ data_.current_load_cmd_count = -1;
+ data_.current_load_cmd_addr = 0;
+ data_.current_magic = 0;
+ data_.current_filetype = 0;
+ data_.current_arch = kModuleArchUnknown;
+ internal_memset(data_.current_uuid, 0, kModuleUUIDSize);
+}
+
+// The dyld load address should be unchanged throughout process execution,
+// and it is expensive to compute once many libraries have been loaded,
+// so cache it here and do not reset.
+static mach_header *dyld_hdr = 0;
+static const char kDyldPath[] = "/usr/lib/dyld";
+static const int kDyldImageIdx = -1;
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
+}
+
+// _dyld_get_image_header() and related APIs don't report dyld itself.
+// We work around this by manually recursing through the memory map
+// until we hit a Mach header matching dyld instead. These recurse
+// calls are expensive, but the first memory map generation occurs
+// early in the process, when dyld is one of the only images loaded,
+// so it will be hit after only a few iterations.
+static mach_header *get_dyld_image_header() {
+ unsigned depth = 1;
+ vm_size_t size = 0;
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ while (true) {
+ struct vm_region_submap_info_64 info;
+ err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+ if (err != KERN_SUCCESS) return nullptr;
+
+ if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
+ mach_header *hdr = (mach_header *)address;
+ if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
+ hdr->filetype == MH_DYLINKER) {
+ return hdr;
+ }
+ }
+ address += size;
+ }
+}
+
+const mach_header *get_dyld_hdr() {
+ if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
+
+ return dyld_hdr;
+}
+
+// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
+// Google Perftools, https://github.com/gperftools/gperftools.
+
+// NextSegmentLoad scans the current image for the next segment load command
+// and returns the start and end addresses and file offset of the corresponding
+// segment.
+// Note that the segment addresses are not necessarily sorted.
+template <u32 kLCSegment, typename SegmentCommand>
+static bool NextSegmentLoad(MemoryMappedSegment *segment,
+ MemoryMappedSegmentData *seg_data,
+ MemoryMappingLayoutData *layout_data) {
+ const char *lc = layout_data->current_load_cmd_addr;
+ layout_data->current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
+ if (((const load_command *)lc)->cmd == kLCSegment) {
+ const SegmentCommand* sc = (const SegmentCommand *)lc;
+ uptr base_virt_addr, addr_mask;
+ if (layout_data->current_image == kDyldImageIdx) {
+ base_virt_addr = (uptr)get_dyld_hdr();
+ // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
+ // it contains an absolute address rather than an offset for dyld.
+ // To make matters even more complicated, this absolute address
+ // isn't actually the absolute segment address, but the offset portion
+ // of the address is accurate when combined with the dyld base address,
+ // and the mask will give just this offset.
+ addr_mask = 0xfffff;
+ } else {
+ base_virt_addr =
+ (uptr)_dyld_get_image_vmaddr_slide(layout_data->current_image);
+ addr_mask = ~0;
+ }
+
+ segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
+ segment->end = segment->start + sc->vmsize;
+ // Most callers don't need section information, so only fill this struct
+ // when required.
+ if (seg_data) {
+ seg_data->nsects = sc->nsects;
+ seg_data->current_load_cmd_addr =
+ (const char *)lc + sizeof(SegmentCommand);
+ seg_data->lc_type = kLCSegment;
+ seg_data->base_virt_addr = base_virt_addr;
+ seg_data->addr_mask = addr_mask;
+ internal_strncpy(seg_data->name, sc->segname,
+ ARRAY_SIZE(seg_data->name));
+ }
+
+ // Return the initial protection.
+ segment->protection = sc->initprot;
+ segment->offset = (layout_data->current_filetype ==
+ /*MH_EXECUTE*/ 0x2)
+ ? sc->vmaddr
+ : sc->fileoff;
+ if (segment->filename) {
+ const char *src = (layout_data->current_image == kDyldImageIdx)
+ ? kDyldPath
+ : _dyld_get_image_name(layout_data->current_image);
+ internal_strncpy(segment->filename, src, segment->filename_size);
+ }
+ segment->arch = layout_data->current_arch;
+ internal_memcpy(segment->uuid, layout_data->current_uuid, kModuleUUIDSize);
+ return true;
+ }
+ return false;
+}
+
+ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {
+ cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+ switch (cputype) {
+ case CPU_TYPE_I386:
+ return kModuleArchI386;
+ case CPU_TYPE_X86_64:
+ if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;
+ if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;
+ CHECK(0 && "Invalid subtype of x86_64");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM:
+ if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;
+ CHECK(0 && "Invalid subtype of ARM");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM64:
+ return kModuleArchARM64;
+ default:
+ CHECK(0 && "Invalid CPU type");
+ return kModuleArchUnknown;
+ }
+}
+
+static const load_command *NextCommand(const load_command *lc) {
+ return (const load_command *)((const char *)lc + lc->cmdsize);
+}
+
+static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
+ for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
+ if (lc->cmd != LC_UUID) continue;
+
+ const uuid_command *uuid_lc = (const uuid_command *)lc;
+ const uint8_t *uuid = &uuid_lc->uuid[0];
+ internal_memcpy(uuid_output, uuid, kModuleUUIDSize);
+ return;
+ }
+}
+
+static bool IsModuleInstrumented(const load_command *first_lc) {
+ for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
+ if (lc->cmd != LC_LOAD_DYLIB) continue;
+
+ const dylib_command *dylib_lc = (const dylib_command *)lc;
+ uint32_t dylib_name_offset = dylib_lc->dylib.name.offset;
+ const char *dylib_name = ((const char *)dylib_lc) + dylib_name_offset;
+ dylib_name = StripModuleName(dylib_name);
+ if (dylib_name != 0 && (internal_strstr(dylib_name, "libclang_rt."))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
+ const mach_header *hdr = (data_.current_image == kDyldImageIdx)
+ ? get_dyld_hdr()
+ : _dyld_get_image_header(data_.current_image);
+ if (!hdr) continue;
+ if (data_.current_load_cmd_count < 0) {
+ // Set up for this image;
+ data_.current_load_cmd_count = hdr->ncmds;
+ data_.current_magic = hdr->magic;
+ data_.current_filetype = hdr->filetype;
+ data_.current_arch = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
+ switch (data_.current_magic) {
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ data_.current_load_cmd_addr =
+ (const char *)hdr + sizeof(mach_header_64);
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ data_.current_load_cmd_addr = (const char *)hdr + sizeof(mach_header);
+ break;
+ }
+ default: {
+ continue;
+ }
+ }
+ FindUUID((const load_command *)data_.current_load_cmd_addr,
+ data_.current_uuid);
+ data_.current_instrumented = IsModuleInstrumented(
+ (const load_command *)data_.current_load_cmd_addr);
+ }
+
+ for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
+ switch (data_.current_magic) {
+ // data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
+ segment, segment->data_, &data_))
+ return true;
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ segment, segment->data_, &data_))
+ return true;
+ break;
+ }
+ }
+ }
+ // If we get here, no more load_cmd's in this image talk about
+ // segments. Go on to the next image.
+ }
+ return false;
+}
+
+void MemoryMappingLayout::DumpListOfModules(
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
+ Reset();
+ InternalScopedString module_name(kMaxPathLength);
+ MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
+ MemoryMappedSegmentData data;
+ segment.data_ = &data;
+ while (Next(&segment)) {
+ if (segment.filename[0] == '\0') continue;
+ LoadedModule *cur_module = nullptr;
+ if (!modules->empty() &&
+ 0 == internal_strcmp(segment.filename, modules->back().full_name())) {
+ cur_module = &modules->back();
+ } else {
+ modules->push_back(LoadedModule());
+ cur_module = &modules->back();
+ cur_module->set(segment.filename, segment.start, segment.arch,
+ segment.uuid, data_.current_instrumented);
+ }
+ segment.AddAddressRanges(cur_module);
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
diff --git a/lib/tsan/sanitizer_common/sanitizer_procmaps_solaris.cpp b/lib/tsan/sanitizer_common/sanitizer_procmaps_solaris.cpp
new file mode 100644
index 0000000000..8793423a60
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_procmaps_solaris.cpp
@@ -0,0 +1,67 @@
+//===-- sanitizer_procmaps_solaris.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Solaris-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_SOLARIS
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.
+#undef _FILE_OFFSET_BITS
+#include <procfs.h>
+#include <limits.h>
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ if (!ReadFileToBuffer("/proc/self/xmap", &proc_maps->data,
+ &proc_maps->mmaped_size, &proc_maps->len)) {
+ proc_maps->data = nullptr;
+ proc_maps->mmaped_size = 0;
+ proc_maps->len = 0;
+ }
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ if (Error()) return false; // simulate empty maps
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last) return false;
+
+ prxmap_t *xmapentry = (prxmap_t*)data_.current;
+
+ segment->start = (uptr)xmapentry->pr_vaddr;
+ segment->end = (uptr)(xmapentry->pr_vaddr + xmapentry->pr_size);
+ segment->offset = (uptr)xmapentry->pr_offset;
+
+ segment->protection = 0;
+ if ((xmapentry->pr_mflags & MA_READ) != 0)
+ segment->protection |= kProtectionRead;
+ if ((xmapentry->pr_mflags & MA_WRITE) != 0)
+ segment->protection |= kProtectionWrite;
+ if ((xmapentry->pr_mflags & MA_EXEC) != 0)
+ segment->protection |= kProtectionExecute;
+
+ if (segment->filename != NULL && segment->filename_size > 0) {
+ char proc_path[PATH_MAX + 1];
+
+ internal_snprintf(proc_path, sizeof(proc_path), "/proc/self/path/%s",
+ xmapentry->pr_mapname);
+ internal_readlink(proc_path, segment->filename, segment->filename_size);
+ }
+
+ data_.current += sizeof(prxmap_t);
+
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SOLARIS
diff --git a/lib/tsan/sanitizer_common/sanitizer_ptrauth.h b/lib/tsan/sanitizer_common/sanitizer_ptrauth.h
new file mode 100644
index 0000000000..4d0d96a64f
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_ptrauth.h
@@ -0,0 +1,21 @@
+//===-- sanitizer_ptrauth.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PTRAUTH_H
+#define SANITIZER_PTRAUTH_H
+
+#if __has_feature(ptrauth_calls)
+#include <ptrauth.h>
+#else
+// Copied from <ptrauth.h>
+#define ptrauth_strip(__value, __key) __value
+#define ptrauth_auth_data(__value, __old_key, __old_data) __value
+#define ptrauth_string_discriminator(__string) ((int)0)
+#endif
+
+#endif // SANITIZER_PTRAUTH_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_quarantine.h b/lib/tsan/sanitizer_common/sanitizer_quarantine.h
new file mode 100644
index 0000000000..992f23152c
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_quarantine.h
@@ -0,0 +1,317 @@
+//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Memory quarantine for AddressSanitizer and potentially other tools.
+// Quarantine caches some specified amount of memory in per-thread caches,
+// then evicts to global FIFO queue. When the queue reaches specified threshold,
+// oldest memory is recycled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_QUARANTINE_H
+#define SANITIZER_QUARANTINE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_list.h"
+
+namespace __sanitizer {
+
+template<typename Node> class QuarantineCache;
+
+struct QuarantineBatch {
+ static const uptr kSize = 1021;
+ QuarantineBatch *next;
+ uptr size;
+ uptr count;
+ void *batch[kSize];
+
+ void init(void *ptr, uptr size) {
+ count = 1;
+ batch[0] = ptr;
+ this->size = size + sizeof(QuarantineBatch); // Account for the batch size.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr quarantined_size() const {
+ return size - sizeof(QuarantineBatch);
+ }
+
+ void push_back(void *ptr, uptr size) {
+ CHECK_LT(count, kSize);
+ batch[count++] = ptr;
+ this->size += size;
+ }
+
+ bool can_merge(const QuarantineBatch* const from) const {
+ return count + from->count <= kSize;
+ }
+
+ void merge(QuarantineBatch* const from) {
+ CHECK_LE(count + from->count, kSize);
+ CHECK_GE(size, sizeof(QuarantineBatch));
+
+ for (uptr i = 0; i < from->count; ++i)
+ batch[count + i] = from->batch[i];
+ count += from->count;
+ size += from->quarantined_size();
+
+ from->count = 0;
+ from->size = sizeof(QuarantineBatch);
+ }
+};
+
+COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
+
+// The callback interface is:
+// void Callback::Recycle(Node *ptr);
+// void *cb.Allocate(uptr size);
+// void cb.Deallocate(void *ptr);
+template<typename Callback, typename Node>
+class Quarantine {
+ public:
+ typedef QuarantineCache<Callback> Cache;
+
+ explicit Quarantine(LinkerInitialized)
+ : cache_(LINKER_INITIALIZED) {
+ }
+
+ void Init(uptr size, uptr cache_size) {
+ // Thread local quarantine size can be zero only when global quarantine size
+ // is zero (it allows us to perform just one atomic read per Put() call).
+ CHECK((size == 0 && cache_size == 0) || cache_size != 0);
+
+ atomic_store_relaxed(&max_size_, size);
+ atomic_store_relaxed(&min_size_, size / 10 * 9); // 90% of max size.
+ atomic_store_relaxed(&max_cache_size_, cache_size);
+
+ cache_mutex_.Init();
+ recycle_mutex_.Init();
+ }
+
+ uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
+ uptr GetCacheSize() const {
+ return atomic_load_relaxed(&max_cache_size_);
+ }
+
+ void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
+ uptr cache_size = GetCacheSize();
+ if (cache_size) {
+ c->Enqueue(cb, ptr, size);
+ } else {
+ // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
+ cb.Recycle(ptr);
+ }
+ // Check cache size anyway to accommodate for runtime cache_size change.
+ if (c->Size() > cache_size)
+ Drain(c, cb);
+ }
+
+ void NOINLINE Drain(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
+ Recycle(atomic_load_relaxed(&min_size_), cb);
+ }
+
+ void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ recycle_mutex_.Lock();
+ Recycle(0, cb);
+ }
+
+ void PrintStats() const {
+ // It assumes that the world is stopped, just as the allocator's PrintStats.
+ Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
+ GetSize() >> 20, GetCacheSize() >> 10);
+ cache_.PrintStats();
+ }
+
+ private:
+ // Read-only data.
+ char pad0_[kCacheLineSize];
+ atomic_uintptr_t max_size_;
+ atomic_uintptr_t min_size_;
+ atomic_uintptr_t max_cache_size_;
+ char pad1_[kCacheLineSize];
+ StaticSpinMutex cache_mutex_;
+ StaticSpinMutex recycle_mutex_;
+ Cache cache_;
+ char pad2_[kCacheLineSize];
+
+ void NOINLINE Recycle(uptr min_size, Callback cb) {
+ Cache tmp;
+ {
+ SpinMutexLock l(&cache_mutex_);
+ // Go over the batches and merge partially filled ones to
+ // save some memory, otherwise batches themselves (since the memory used
+ // by them is counted against quarantine limit) can overcome the actual
+ // user's quarantined chunks, which diminishes the purpose of the
+ // quarantine.
+ uptr cache_size = cache_.Size();
+ uptr overhead_size = cache_.OverheadSize();
+ CHECK_GE(cache_size, overhead_size);
+ // Do the merge only when overhead exceeds this predefined limit (might
+ // require some tuning). It saves us merge attempt when the batch list
+ // quarantine is unlikely to contain batches suitable for merge.
+ const uptr kOverheadThresholdPercents = 100;
+ if (cache_size > overhead_size &&
+ overhead_size * (100 + kOverheadThresholdPercents) >
+ cache_size * kOverheadThresholdPercents) {
+ cache_.MergeBatches(&tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
+ while (cache_.Size() > min_size) {
+ tmp.EnqueueBatch(cache_.DequeueBatch());
+ }
+ }
+ recycle_mutex_.Unlock();
+ DoRecycle(&tmp, cb);
+ }
+
+ void NOINLINE DoRecycle(Cache *c, Callback cb) {
+ while (QuarantineBatch *b = c->DequeueBatch()) {
+ const uptr kPrefetch = 16;
+ CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
+ for (uptr i = 0; i < kPrefetch; i++)
+ PREFETCH(b->batch[i]);
+ for (uptr i = 0, count = b->count; i < count; i++) {
+ if (i + kPrefetch < count)
+ PREFETCH(b->batch[i + kPrefetch]);
+ cb.Recycle((Node*)b->batch[i]);
+ }
+ cb.Deallocate(b);
+ }
+ }
+};
+
+// Per-thread cache of memory blocks.
+template<typename Callback>
+class QuarantineCache {
+ public:
+ explicit QuarantineCache(LinkerInitialized) {
+ }
+
+ QuarantineCache()
+ : size_() {
+ list_.clear();
+ }
+
+ // Total memory used, including internal accounting.
+ uptr Size() const {
+ return atomic_load_relaxed(&size_);
+ }
+
+ // Memory used for internal accounting.
+ uptr OverheadSize() const {
+ return list_.size() * sizeof(QuarantineBatch);
+ }
+
+ void Enqueue(Callback cb, void *ptr, uptr size) {
+ if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
+ QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ CHECK(b);
+ b->init(ptr, size);
+ EnqueueBatch(b);
+ } else {
+ list_.back()->push_back(ptr, size);
+ SizeAdd(size);
+ }
+ }
+
+ void Transfer(QuarantineCache *from_cache) {
+ list_.append_back(&from_cache->list_);
+ SizeAdd(from_cache->Size());
+
+ atomic_store_relaxed(&from_cache->size_, 0);
+ }
+
+ void EnqueueBatch(QuarantineBatch *b) {
+ list_.push_back(b);
+ SizeAdd(b->size);
+ }
+
+ QuarantineBatch *DequeueBatch() {
+ if (list_.empty())
+ return nullptr;
+ QuarantineBatch *b = list_.front();
+ list_.pop_front();
+ SizeSub(b->size);
+ return b;
+ }
+
+ void MergeBatches(QuarantineCache *to_deallocate) {
+ uptr extracted_size = 0;
+ QuarantineBatch *current = list_.front();
+ while (current && current->next) {
+ if (current->can_merge(current->next)) {
+ QuarantineBatch *extracted = current->next;
+ // Move all the chunks into the current batch.
+ current->merge(extracted);
+ CHECK_EQ(extracted->count, 0);
+ CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
+ // Remove the next batch from the list and account for its size.
+ list_.extract(current, extracted);
+ extracted_size += extracted->size;
+ // Add it to deallocation list.
+ to_deallocate->EnqueueBatch(extracted);
+ } else {
+ current = current->next;
+ }
+ }
+ SizeSub(extracted_size);
+ }
+
+ void PrintStats() const {
+ uptr batch_count = 0;
+ uptr total_overhead_bytes = 0;
+ uptr total_bytes = 0;
+ uptr total_quarantine_chunks = 0;
+ for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
+ batch_count++;
+ total_bytes += (*it).size;
+ total_overhead_bytes += (*it).size - (*it).quarantined_size();
+ total_quarantine_chunks += (*it).count;
+ }
+ uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
+ int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
+ 0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
+ uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
+ int memory_overhead_percent = total_quarantined_bytes == 0 ?
+ 0 : total_overhead_bytes * 100 / total_quarantined_bytes;
+ Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
+ "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
+ "\n",
+ batch_count, total_bytes, total_quarantined_bytes,
+ total_quarantine_chunks, quarantine_chunks_capacity,
+ chunks_usage_percent, memory_overhead_percent);
+ }
+
+ private:
+ typedef IntrusiveList<QuarantineBatch> List;
+
+ List list_;
+ atomic_uintptr_t size_;
+
+ void SizeAdd(uptr add) {
+ atomic_store_relaxed(&size_, Size() + add);
+ }
+ void SizeSub(uptr sub) {
+ atomic_store_relaxed(&size_, Size() - sub);
+ }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_QUARANTINE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_report_decorator.h b/lib/tsan/sanitizer_common/sanitizer_report_decorator.h
new file mode 100644
index 0000000000..d276c2cdd8
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_report_decorator.h
@@ -0,0 +1,48 @@
+//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Tags to decorate the sanitizer reports.
+// Currently supported tags:
+// * None.
+// * ANSI color sequences.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_REPORT_DECORATOR_H
+#define SANITIZER_REPORT_DECORATOR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+class SanitizerCommonDecorator {
+ // FIXME: This is not portable. It assumes the special strings are printed to
+ // stdout, which is not the case on Windows (see SetConsoleTextAttribute()).
+ public:
+ SanitizerCommonDecorator() : ansi_(ColorizeReports()) {}
+ const char *Bold() const { return ansi_ ? "\033[1m" : ""; }
+ const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
+ const char *Warning() const { return Red(); }
+ const char *Error() const { return Red(); }
+ const char *MemoryByte() const { return Magenta(); }
+
+ protected:
+ const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; }
+ const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; }
+ const char *Green() const { return ansi_ ? "\033[1m\033[32m" : ""; }
+ const char *Yellow() const { return ansi_ ? "\033[1m\033[33m" : ""; }
+ const char *Blue() const { return ansi_ ? "\033[1m\033[34m" : ""; }
+ const char *Magenta() const { return ansi_ ? "\033[1m\033[35m" : ""; }
+ const char *Cyan() const { return ansi_ ? "\033[1m\033[36m" : ""; }
+ const char *White() const { return ansi_ ? "\033[1m\033[37m" : ""; }
+ private:
+ bool ansi_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_REPORT_DECORATOR_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_ring_buffer.h b/lib/tsan/sanitizer_common/sanitizer_ring_buffer.h
new file mode 100644
index 0000000000..2a46e933b7
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_ring_buffer.h
@@ -0,0 +1,161 @@
+//===-- sanitizer_ring_buffer.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Simple ring buffer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_RING_BUFFER_H
+#define SANITIZER_RING_BUFFER_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+// RingBuffer<T>: fixed-size ring buffer optimized for speed of push().
+// T should be a POD type and sizeof(T) should be divisible by sizeof(void*).
+// At creation, all elements are zero.
+template<class T>
+class RingBuffer {
+ public:
+ COMPILER_CHECK(sizeof(T) % sizeof(void *) == 0);
+ static RingBuffer *New(uptr Size) {
+ void *Ptr = MmapOrDie(SizeInBytes(Size), "RingBuffer");
+ RingBuffer *RB = reinterpret_cast<RingBuffer*>(Ptr);
+ uptr End = reinterpret_cast<uptr>(Ptr) + SizeInBytes(Size);
+ RB->last_ = RB->next_ = reinterpret_cast<T*>(End - sizeof(T));
+ return RB;
+ }
+ void Delete() {
+ UnmapOrDie(this, SizeInBytes(size()));
+ }
+ uptr size() const {
+ return last_ + 1 -
+ reinterpret_cast<T *>(reinterpret_cast<uptr>(this) +
+ 2 * sizeof(T *));
+ }
+
+ static uptr SizeInBytes(uptr Size) {
+ return Size * sizeof(T) + 2 * sizeof(T*);
+ }
+
+ uptr SizeInBytes() { return SizeInBytes(size()); }
+
+ void push(T t) {
+ *next_ = t;
+ next_--;
+ // The condition below works only if sizeof(T) is divisible by sizeof(T*).
+ if (next_ <= reinterpret_cast<T*>(&next_))
+ next_ = last_;
+ }
+
+ T operator[](uptr Idx) const {
+ CHECK_LT(Idx, size());
+ sptr IdxNext = Idx + 1;
+ if (IdxNext > last_ - next_)
+ IdxNext -= size();
+ return next_[IdxNext];
+ }
+
+ private:
+ RingBuffer() {}
+ ~RingBuffer() {}
+ RingBuffer(const RingBuffer&) = delete;
+
+ // Data layout:
+ // LNDDDDDDDD
+ // D: data elements.
+ // L: last_, always points to the last data element.
+ // N: next_, initially equals to last_, is decremented on every push,
+ // wraps around if it's less or equal than its own address.
+ T *last_;
+ T *next_;
+ T data_[1]; // flexible array.
+};
+
+// A ring buffer with externally provided storage that encodes its state in 8
+// bytes. Has significant constraints on size and alignment of storage.
+// See a comment in hwasan/hwasan_thread_list.h for the motivation behind this.
+#if SANITIZER_WORDSIZE == 64
+template <class T>
+class CompactRingBuffer {
+ // Top byte of long_ stores the buffer size in pages.
+ // Lower bytes store the address of the next buffer element.
+ static constexpr int kPageSizeBits = 12;
+ static constexpr int kSizeShift = 56;
+ static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1;
+
+ uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; }
+
+ void Init(void *storage, uptr size) {
+ CHECK_EQ(sizeof(CompactRingBuffer<T>), sizeof(void *));
+ CHECK(IsPowerOfTwo(size));
+ CHECK_GE(size, 1 << kPageSizeBits);
+ CHECK_LE(size, 128 << kPageSizeBits);
+ CHECK_EQ(size % 4096, 0);
+ CHECK_EQ(size % sizeof(T), 0);
+ CHECK_EQ((uptr)storage % (size * 2), 0);
+ long_ = (uptr)storage | ((size >> kPageSizeBits) << kSizeShift);
+ }
+
+ void SetNext(const T *next) {
+ long_ = (long_ & ~kNextMask) | (uptr)next;
+ }
+
+ public:
+ CompactRingBuffer(void *storage, uptr size) {
+ Init(storage, size);
+ }
+
+ // A copy constructor of sorts.
+ CompactRingBuffer(const CompactRingBuffer &other, void *storage) {
+ uptr size = other.GetStorageSize();
+ internal_memcpy(storage, other.StartOfStorage(), size);
+ Init(storage, size);
+ uptr Idx = other.Next() - (const T *)other.StartOfStorage();
+ SetNext((const T *)storage + Idx);
+ }
+
+ T *Next() const { return (T *)(long_ & kNextMask); }
+
+ void *StartOfStorage() const {
+ return (void *)((uptr)Next() & ~(GetStorageSize() - 1));
+ }
+
+ void *EndOfStorage() const {
+ return (void *)((uptr)StartOfStorage() + GetStorageSize());
+ }
+
+ uptr size() const { return GetStorageSize() / sizeof(T); }
+
+ void push(T t) {
+ T *next = Next();
+ *next = t;
+ next++;
+ next = (T *)((uptr)next & ~GetStorageSize());
+ SetNext(next);
+ }
+
+ const T &operator[](uptr Idx) const {
+ CHECK_LT(Idx, size());
+ const T *Begin = (const T *)StartOfStorage();
+ sptr StorageIdx = Next() - Begin;
+ StorageIdx -= (sptr)(Idx + 1);
+ if (StorageIdx < 0)
+ StorageIdx += size();
+ return Begin[StorageIdx];
+ }
+
+ public:
+ ~CompactRingBuffer() {}
+ CompactRingBuffer(const CompactRingBuffer &) = delete;
+
+ uptr long_;
+};
+#endif
+} // namespace __sanitizer
+
+#endif // SANITIZER_RING_BUFFER_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_rtems.cpp b/lib/tsan/sanitizer_common/sanitizer_rtems.cpp
new file mode 100644
index 0000000000..29bcfcfa6f
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_rtems.cpp
@@ -0,0 +1,283 @@
+//===-- sanitizer_rtems.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements RTEMS-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_rtems.h"
+#if SANITIZER_RTEMS
+
+#define posix_memalign __real_posix_memalign
+#define free __real_free
+#define memset __real_memset
+
+#include "sanitizer_file.h"
+#include "sanitizer_symbolizer.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+// There is no mmap on RTEMS. Use memalign, etc.
+#define __mmap_alloc_aligned posix_memalign
+#define __mmap_free free
+#define __mmap_memset memset
+
+namespace __sanitizer {
+
+#include "sanitizer_syscall_generic.inc"
+
+void NORETURN internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
+uptr internal_sched_yield() {
+ return sched_yield();
+}
+
+uptr internal_getpid() {
+ return getpid();
+}
+
+int internal_dlinfo(void *handle, int request, void *p) {
+ UNIMPLEMENTED();
+}
+
+bool FileExists(const char *filename) {
+ struct stat st;
+ if (stat(filename, &st))
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+uptr GetThreadSelf() { return static_cast<uptr>(pthread_self()); }
+
+tid_t GetTid() { return GetThreadSelf(); }
+
+void Abort() { abort(); }
+
+int Atexit(void (*function)(void)) { return atexit(function); }
+
+void SleepForSeconds(int seconds) { sleep(seconds); }
+
+void SleepForMillis(int millis) { usleep(millis * 1000); }
+
+bool SupportsColoredOutput(fd_t fd) { return false; }
+
+void GetThreadStackTopAndBottom(bool at_initialization,
+ uptr *stack_top, uptr *stack_bottom) {
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ void *base = nullptr;
+ size_t size = 0;
+ CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
+ CHECK_EQ(pthread_attr_destroy(&attr), 0);
+
+ *stack_bottom = reinterpret_cast<uptr>(base);
+ *stack_top = *stack_bottom + size;
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = *tls_size = 0;
+}
+
+void InitializePlatformEarly() {}
+void MaybeReexec() {}
+void CheckASLR() {}
+void CheckMPROTECT() {}
+void DisableCoreDumperIfNecessary() {}
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
+void SetAlternateSignalStack() {}
+void UnsetAlternateSignalStack() {}
+void InitTlsSize() {}
+
+void PrintModuleMap() {}
+
+void SignalContext::DumpAllRegisters(void *context) {}
+const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }
+
+enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
+
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+ internal_sched_yield();
+ }
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
+ CHECK_NE(v, MtxUnlocked);
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+
+uptr GetPageSize() { return getpagesize(); }
+
+uptr GetMmapGranularity() { return GetPageSize(); }
+
+uptr GetMaxVirtualAddress() {
+ return (1ULL << 32) - 1; // 0xffffffff
+}
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ void* ptr = 0;
+ int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
+ if (UNLIKELY(res))
+ ReportMmapFailureAndDie(size, mem_type, "allocate", res, raw_report);
+ __mmap_memset(ptr, 0, size);
+ IncreaseTotalMmap(size);
+ return ptr;
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ void* ptr = 0;
+ int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
+ if (UNLIKELY(res)) {
+ if (res == ENOMEM)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, "allocate", false);
+ }
+ __mmap_memset(ptr, 0, size);
+ IncreaseTotalMmap(size);
+ return ptr;
+}
+
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ void* ptr = 0;
+ int res = __mmap_alloc_aligned(&ptr, alignment, size);
+ if (res)
+ ReportMmapFailureAndDie(size, mem_type, "align allocate", res, false);
+ __mmap_memset(ptr, 0, size);
+ IncreaseTotalMmap(size);
+ return ptr;
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type, false);
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!addr || !size) return;
+ __mmap_free(addr);
+ DecreaseTotalMmap(size);
+}
+
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
+ int flags;
+ switch (mode) {
+ case RdOnly: flags = O_RDONLY; break;
+ case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
+ case RdWr: flags = O_RDWR | O_CREAT; break;
+ }
+ fd_t res = open(filename, flags, 0660);
+ if (internal_iserror(res, errno_p))
+ return kInvalidFd;
+ return res;
+}
+
+void CloseFile(fd_t fd) {
+ close(fd);
+}
+
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ uptr res = read(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_read)
+ *bytes_read = res;
+ return true;
+}
+
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ uptr res = write(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_written)
+ *bytes_written = res;
+ return true;
+}
+
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
+void DumpProcessMap() {}
+
+// There is no page protection so everything is "accessible."
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ return true;
+}
+
+char **GetArgv() { return nullptr; }
+char **GetEnviron() { return nullptr; }
+
+const char *GetEnv(const char *name) {
+ return getenv(name);
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ internal_strncpy(buf, "StubBinaryName", buf_len);
+ return internal_strlen(buf);
+}
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+ internal_strncpy(buf, "StubProcessName", buf_len);
+ return internal_strlen(buf);
+}
+
+bool IsPathSeparator(const char c) {
+ return c == '/';
+}
+
+bool IsAbsolutePath(const char *path) {
+ return path != nullptr && IsPathSeparator(path[0]);
+}
+
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ static const char *kWriteError =
+ "ReportFile::Write() can't output requested buffer!\n";
+ ReopenIfNecessary();
+ if (length != write(fd, buffer, length)) {
+ write(fd, kWriteError, internal_strlen(kWriteError));
+ Die();
+ }
+}
+
+uptr MainThreadStackBase, MainThreadStackSize;
+uptr MainThreadTlsBase, MainThreadTlsSize;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_RTEMS
diff --git a/lib/tsan/sanitizer_common/sanitizer_rtems.h b/lib/tsan/sanitizer_common/sanitizer_rtems.h
new file mode 100644
index 0000000000..e8adfd500d
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_rtems.h
@@ -0,0 +1,20 @@
+//===-- sanitizer_rtems.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// provides definitions for RTEMS-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_RTEMS_H
+#define SANITIZER_RTEMS_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_RTEMS
+#include "sanitizer_common.h"
+
+#endif // SANITIZER_RTEMS
+#endif // SANITIZER_RTEMS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_signal_interceptors.inc b/lib/tsan/sanitizer_common/sanitizer_signal_interceptors.inc
new file mode 100644
index 0000000000..68d9eb6596
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_signal_interceptors.inc
@@ -0,0 +1,86 @@
+//===-- sanitizer_signal_interceptors.inc -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Signal interceptors for sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_interceptors.h"
+
+using namespace __sanitizer;
+
+#if SANITIZER_NETBSD
+#define sigaction_symname __sigaction14
+#else
+#define sigaction_symname sigaction
+#endif
+
+#ifndef SIGNAL_INTERCEPTOR_SIGNAL_IMPL
+#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signum, handler) \
+ { return REAL(func)(signum, handler); }
+#endif
+
+#ifndef SIGNAL_INTERCEPTOR_SIGACTION_IMPL
+#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact) \
+ { return REAL(sigaction_symname)(signum, act, oldact); }
+#endif
+
+#if SANITIZER_INTERCEPT_BSD_SIGNAL
+INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler);
+}
+#define INIT_BSD_SIGNAL COMMON_INTERCEPT_FUNCTION(bsd_signal)
+#else // SANITIZER_INTERCEPT_BSD_SIGNAL
+#define INIT_BSD_SIGNAL
+#endif // SANITIZER_INTERCEPT_BSD_SIGNAL
+
+#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+INTERCEPTOR(uptr, signal, int signum, uptr handler) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive)
+ return (uptr) nullptr;
+ SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);
+}
+#define INIT_SIGNAL COMMON_INTERCEPT_FUNCTION(signal)
+
+INTERCEPTOR(int, sigaction_symname, int signum,
+ const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);
+}
+#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname)
+
+namespace __sanitizer {
+int real_sigaction(int signum, const void *act, void *oldact) {
+ return REAL(sigaction_symname)(signum, (const __sanitizer_sigaction *)act,
+ (__sanitizer_sigaction *)oldact);
+}
+} // namespace __sanitizer
+#else // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+#define INIT_SIGNAL
+#define INIT_SIGACTION
+// We need to have defined REAL(sigaction) on other systems.
+namespace __sanitizer {
+struct __sanitizer_sigaction;
+}
+DEFINE_REAL(int, sigaction, int signum, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *oldact)
+#endif // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+
+static void InitializeSignalInterceptors() {
+ static bool was_called_once;
+ CHECK(!was_called_once);
+ was_called_once = true;
+
+ INIT_BSD_SIGNAL;
+ INIT_SIGNAL;
+ INIT_SIGACTION;
+}
diff --git a/lib/tsan/sanitizer_common/sanitizer_solaris.cpp b/lib/tsan/sanitizer_common/sanitizer_solaris.cpp
new file mode 100644
index 0000000000..035f2d0ca2
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_solaris.cpp
@@ -0,0 +1,230 @@
+//===-- sanitizer_solaris.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements Solaris-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_SOLARIS
+
+#include <stdio.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_procmaps.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <thread.h>
+#include <synch.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdlib.h>
+
+namespace __sanitizer {
+
+//#include "sanitizer_syscall_generic.inc"
+
+#define _REAL(func) _ ## func
+#define DECLARE__REAL(ret_type, func, ...) \
+ extern "C" ret_type _REAL(func)(__VA_ARGS__)
+#define DECLARE__REAL_AND_INTERNAL(ret_type, func, ...) \
+ DECLARE__REAL(ret_type, func, __VA_ARGS__); \
+ ret_type internal_ ## func(__VA_ARGS__)
+
+#if !defined(_LP64) && _FILE_OFFSET_BITS == 64
+#define _REAL64(func) _ ## func ## 64
+#else
+#define _REAL64(func) _REAL(func)
+#endif
+#define DECLARE__REAL64(ret_type, func, ...) \
+ extern "C" ret_type _REAL64(func)(__VA_ARGS__)
+#define DECLARE__REAL_AND_INTERNAL64(ret_type, func, ...) \
+ DECLARE__REAL64(ret_type, func, __VA_ARGS__); \
+ ret_type internal_ ## func(__VA_ARGS__)
+
+// ---------------------- sanitizer_libc.h
+DECLARE__REAL_AND_INTERNAL64(uptr, mmap, void *addr, uptr /*size_t*/ length,
+ int prot, int flags, int fd, OFF_T offset) {
+ return (uptr)_REAL64(mmap)(addr, length, prot, flags, fd, offset);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, munmap, void *addr, uptr length) {
+ return _REAL(munmap)(addr, length);
+}
+
+DECLARE__REAL_AND_INTERNAL(int, mprotect, void *addr, uptr length, int prot) {
+ return _REAL(mprotect)(addr, length, prot);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, close, fd_t fd) {
+ return _REAL(close)(fd);
+}
+
+extern "C" int _REAL64(open)(const char *, int, ...);
+
+uptr internal_open(const char *filename, int flags) {
+ return _REAL64(open)(filename, flags);
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ return _REAL64(open)(filename, flags, mode);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, read, fd_t fd, void *buf, uptr count) {
+ return _REAL(read)(fd, buf, count);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, write, fd_t fd, const void *buf, uptr count) {
+ return _REAL(write)(fd, buf, count);
+}
+
+// FIXME: There's only _ftruncate64 beginning with Solaris 11.
+DECLARE__REAL_AND_INTERNAL(uptr, ftruncate, fd_t fd, uptr size) {
+ return ftruncate(fd, size);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, stat, const char *path, void *buf) {
+ return _REAL64(stat)(path, (struct stat *)buf);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, lstat, const char *path, void *buf) {
+ return _REAL64(lstat)(path, (struct stat *)buf);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, fstat, fd_t fd, void *buf) {
+ return _REAL64(fstat)(fd, (struct stat *)buf);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, dup, int oldfd) {
+ return _REAL(dup)(oldfd);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, dup2, int oldfd, int newfd) {
+ return _REAL(dup2)(oldfd, newfd);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, readlink, const char *path, char *buf,
+ uptr bufsize) {
+ return _REAL(readlink)(path, buf, bufsize);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, unlink, const char *path) {
+ return _REAL(unlink)(path);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, rename, const char *oldpath,
+ const char *newpath) {
+ return _REAL(rename)(oldpath, newpath);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {
+ return sched_yield();
+}
+
+DECLARE__REAL_AND_INTERNAL(void, _exit, int exitcode) {
+ _exit(exitcode);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
+ char *const argv[], char *const envp[]) {
+ return _REAL(execve)(filename, argv, envp);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, waitpid, int pid, int *status, int options) {
+ return _REAL(waitpid)(pid, status, options);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, getpid, void) {
+ return _REAL(getpid)();
+}
+
+// FIXME: This might be wrong: _getdents doesn't take a struct linux_dirent *.
+DECLARE__REAL_AND_INTERNAL64(uptr, getdents, fd_t fd, struct linux_dirent *dirp,
+ unsigned int count) {
+ return _REAL64(getdents)(fd, dirp, count);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, lseek, fd_t fd, OFF_T offset, int whence) {
+ return _REAL64(lseek)(fd, offset, whence);
+}
+
+// FIXME: This might be wrong: _sigfillset doesn't take a
+// __sanitizer_sigset_t *.
+DECLARE__REAL_AND_INTERNAL(void, sigfillset, __sanitizer_sigset_t *set) {
+ _REAL(sigfillset)(set);
+}
+
+// FIXME: This might be wrong: _sigprocmask doesn't take __sanitizer_sigset_t *.
+DECLARE__REAL_AND_INTERNAL(uptr, sigprocmask, int how,
+ __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ return _REAL(sigprocmask)(how, set, oldset);
+}
+
+DECLARE__REAL_AND_INTERNAL(int, fork, void) {
+ // TODO(glider): this may call user's pthread_atfork() handlers which is bad.
+ return _REAL(fork)();
+}
+
+u64 NanoTime() {
+ return gethrtime();
+}
+
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
+ // FIXME: No internal variant.
+ return clock_gettime(clk_id, (timespec *)tp);
+}
+
+// ----------------- sanitizer_common.h
+BlockingMutex::BlockingMutex() {
+ CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
+ internal_memset(this, 0, sizeof(*this));
+ CHECK_EQ(mutex_init((mutex_t *)&opaque_storage_, USYNC_THREAD, NULL), 0);
+}
+
+void BlockingMutex::Lock() {
+ CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
+ CHECK_NE(owner_, (uptr)thr_self());
+ CHECK_EQ(mutex_lock((mutex_t *)&opaque_storage_), 0);
+ CHECK(!owner_);
+ owner_ = (uptr)thr_self();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK(owner_ == (uptr)thr_self());
+ owner_ = 0;
+ CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
+}
+
+void BlockingMutex::CheckLocked() {
+ CHECK_EQ((uptr)thr_self(), owner_);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SOLARIS
diff --git a/lib/tsan/sanitizer_common/sanitizer_stackdepot.h b/lib/tsan/sanitizer_common/sanitizer_stackdepot.h
new file mode 100644
index 0000000000..bf29cb9a00
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_stackdepot.h
@@ -0,0 +1,71 @@
+//===-- sanitizer_stackdepot.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STACKDEPOT_H
+#define SANITIZER_STACKDEPOT_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+// StackDepot efficiently stores huge amounts of stack traces.
+struct StackDepotNode;
+struct StackDepotHandle {
+ StackDepotNode *node_;
+ StackDepotHandle() : node_(nullptr) {}
+ explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id();
+ int use_count();
+ void inc_use_count_unsafe();
+};
+
+const int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20);
+
+StackDepotStats *StackDepotGetStats();
+u32 StackDepotPut(StackTrace stack);
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
+// Retrieves a stored stack trace by the id.
+StackTrace StackDepotGet(u32 id);
+
+void StackDepotLockAll();
+void StackDepotUnlockAll();
+
+// Instantiating this class creates a snapshot of StackDepot which can be
+// efficiently queried with StackDepotGet(). You can use it concurrently with
+// StackDepot, but the snapshot is only guaranteed to contain those stack traces
+// which were stored before it was instantiated.
+class StackDepotReverseMap {
+ public:
+ StackDepotReverseMap();
+ StackTrace Get(u32 id);
+
+ private:
+ struct IdDescPair {
+ u32 id;
+ StackDepotNode *desc;
+
+ static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
+ };
+
+ InternalMmapVector<IdDescPair> map_;
+
+ // Disallow evil constructors.
+ StackDepotReverseMap(const StackDepotReverseMap&);
+ void operator=(const StackDepotReverseMap&);
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOT_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_stackdepotbase.h b/lib/tsan/sanitizer_common/sanitizer_stackdepotbase.h
new file mode 100644
index 0000000000..ef1b4f7f70
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_stackdepotbase.h
@@ -0,0 +1,177 @@
+//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a mapping from arbitrary values to unique 32-bit
+// identifiers.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+class StackDepotBase {
+ public:
+ typedef typename Node::args_type args_type;
+ typedef typename Node::handle_type handle_type;
+ // Maps stack trace to an unique id.
+ handle_type Put(args_type args, bool *inserted = nullptr);
+ // Retrieves a stored stack trace by the id.
+ args_type Get(u32 id);
+
+ StackDepotStats *GetStats() { return &stats; }
+
+ void LockAll();
+ void UnlockAll();
+
+ private:
+ static Node *find(Node *s, args_type args, u32 hash);
+ static Node *lock(atomic_uintptr_t *p);
+ static void unlock(atomic_uintptr_t *p, Node *s);
+
+ static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
+ static const int kPartBits = 8;
+ static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+ static const int kPartCount =
+ 1 << kPartBits; // Number of subparts in the table.
+ static const int kPartSize = kTabSize / kPartCount;
+ static const int kMaxId = 1 << kPartShift;
+
+ atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+
+ StackDepotStats stats;
+
+ friend class StackDepotReverseMap;
+};
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
+ args_type args,
+ u32 hash) {
+ // Searches linked list s for the stack, returns its id.
+ for (; s; s = s->link) {
+ if (s->eq(hash, args)) {
+ return s;
+ }
+ }
+ return nullptr;
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
+ atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+ return (Node *)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
+ atomic_uintptr_t *p, Node *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+ bool *inserted) {
+ if (inserted) *inserted = false;
+ if (!Node::is_valid(args)) return handle_type();
+ uptr h = Node::hash(args);
+ atomic_uintptr_t *p = &tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ // First, try to find the existing stack.
+ Node *node = find(s, args, h);
+ if (node) return node->get_handle();
+ // If failed, lock, retry and insert new.
+ Node *s2 = lock(p);
+ if (s2 != s) {
+ node = find(s2, args, h);
+ if (node) {
+ unlock(p, s2);
+ return node->get_handle();
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ uptr memsz = Node::storage_size(args);
+ s = (Node *)PersistentAlloc(memsz);
+ stats.allocated += memsz;
+ s->id = id;
+ s->store(args, h);
+ s->link = s2;
+ unlock(p, s);
+ if (inserted) *inserted = true;
+ return s->get_handle();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
+ if (id == 0) {
+ return args_type();
+ }
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ // High kPartBits contain part id, so we need to scan at most kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ return s->load();
+ }
+ }
+ }
+ return args_type();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ lock(&tab[i]);
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ atomic_uintptr_t *p = &tab[i];
+ uptr s = atomic_load(p, memory_order_relaxed);
+ unlock(p, (Node *)(s & ~1UL));
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOTBASE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_stacktrace.h b/lib/tsan/sanitizer_common/sanitizer_stacktrace.h
new file mode 100644
index 0000000000..f1f29e9f32
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_stacktrace.h
@@ -0,0 +1,176 @@
+//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_H
+#define SANITIZER_STACKTRACE_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+struct BufferedStackTrace;
+
+static const u32 kStackTraceMax = 256;
+
+#if SANITIZER_LINUX && defined(__mips__)
+# define SANITIZER_CAN_FAST_UNWIND 0
+#elif SANITIZER_WINDOWS
+# define SANITIZER_CAN_FAST_UNWIND 0
+#elif SANITIZER_OPENBSD
+# define SANITIZER_CAN_FAST_UNWIND 0
+#else
+# define SANITIZER_CAN_FAST_UNWIND 1
+#endif
+
+// Fast unwind is the only option on Mac for now; we will need to
+// revisit this macro when slow unwind works on Mac, see
+// https://github.com/google/sanitizers/issues/137
+#if SANITIZER_MAC || SANITIZER_OPENBSD || SANITIZER_RTEMS
+# define SANITIZER_CAN_SLOW_UNWIND 0
+#else
+# define SANITIZER_CAN_SLOW_UNWIND 1
+#endif
+
+struct StackTrace {
+ const uptr *trace;
+ u32 size;
+ u32 tag;
+
+ static const int TAG_UNKNOWN = 0;
+ static const int TAG_ALLOC = 1;
+ static const int TAG_DEALLOC = 2;
+ static const int TAG_CUSTOM = 100; // Tool specific tags start here.
+
+ StackTrace() : trace(nullptr), size(0), tag(0) {}
+ StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
+ StackTrace(const uptr *trace, u32 size, u32 tag)
+ : trace(trace), size(size), tag(tag) {}
+
+ // Prints a symbolized stacktrace, followed by an empty line.
+ void Print() const;
+
+ static bool WillUseFastUnwind(bool request_fast_unwind) {
+ if (!SANITIZER_CAN_FAST_UNWIND)
+ return false;
+ if (!SANITIZER_CAN_SLOW_UNWIND)
+ return true;
+ return request_fast_unwind;
+ }
+
+ static uptr GetCurrentPc();
+ static inline uptr GetPreviousInstructionPc(uptr pc);
+ static uptr GetNextInstructionPc(uptr pc);
+ typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
+ int out_size);
+};
+
+// Performance-critical, must be in the header.
+ALWAYS_INLINE
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
+#if defined(__arm__)
+ // T32 (Thumb) branch instructions might be 16 or 32 bit long,
+ // so we return (pc-2) in that case in order to be safe.
+ // For A32 mode we return (pc-4) because all instructions are 32 bit long.
+ return (pc - 3) & (~1);
+#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#elif defined(__sparc__) || defined(__mips__)
+ return pc - 8;
+#else
+ return pc - 1;
+#endif
+}
+
+// StackTrace that owns the buffer used to store the addresses.
+struct BufferedStackTrace : public StackTrace {
+ uptr trace_buffer[kStackTraceMax];
+ uptr top_frame_bp; // Optional bp of a top frame.
+
+ BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
+
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+ // Get the stack trace with the given pc and bp.
+ // The pc will be in the position 0 of the resulting stack trace.
+ // The bp may refer to the current frame or to the caller's frame.
+ void Unwind(uptr pc, uptr bp, void *context, bool request_fast,
+ u32 max_depth = kStackTraceMax) {
+ top_frame_bp = (max_depth > 0) ? bp : 0;
+ // Small max_depth optimization
+ if (max_depth <= 1) {
+ if (max_depth == 1)
+ trace_buffer[0] = pc;
+ size = max_depth;
+ return;
+ }
+ UnwindImpl(pc, bp, context, request_fast, max_depth);
+ }
+
+ void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
+ uptr stack_bottom, bool request_fast_unwind);
+
+ void Reset() {
+ *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
+ top_frame_bp = 0;
+ }
+
+ private:
+ // Every runtime defines its own implementation of this method
+ void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,
+ u32 max_depth);
+
+ // UnwindFast/Slow have platform-specific implementations
+ void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
+ u32 max_depth);
+ void UnwindSlow(uptr pc, u32 max_depth);
+ void UnwindSlow(uptr pc, void *context, u32 max_depth);
+
+ void PopStackFrames(uptr count);
+ uptr LocatePcInTrace(uptr pc);
+
+ BufferedStackTrace(const BufferedStackTrace &) = delete;
+ void operator=(const BufferedStackTrace &) = delete;
+
+ friend class FastUnwindTest;
+};
+
+// Check if given pointer points into allocated stack area.
+static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
+ return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
+}
+
+} // namespace __sanitizer
+
+// Use this macro if you want to print stack trace with the caller
+// of the current function in the top frame.
+#define GET_CALLER_PC_BP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC();
+
+#define GET_CALLER_PC_BP_SP \
+ GET_CALLER_PC_BP; \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+// Use this macro if you want to print stack trace with the current
+// function in the top frame.
+#define GET_CURRENT_PC_BP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = StackTrace::GetCurrentPc()
+
+#define GET_CURRENT_PC_BP_SP \
+ GET_CURRENT_PC_BP; \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+
+#endif // SANITIZER_STACKTRACE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_stacktrace_printer.h b/lib/tsan/sanitizer_common/sanitizer_stacktrace_printer.h
new file mode 100644
index 0000000000..f7f7629f77
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -0,0 +1,71 @@
+//===-- sanitizer_stacktrace_printer.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers' run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_PRINTER_H
+#define SANITIZER_STACKTRACE_PRINTER_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+// Render the contents of "info" structure, which represents the contents of
+// stack frame "frame_no" and appends it to the "buffer". "format" is a
+// string with placeholders, which is copied to the output with
+// placeholders substituted with the contents of "info". For example,
+// format string
+// " frame %n: function %F at %S"
+// will be turned into
+// " frame 10: function foo::bar() at my/file.cc:10"
+// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
+// source files and modules, and "strip_func_prefix" to strip prefixes of
+// function names.
+// Here's the full list of available placeholders:
+// %% - represents a '%' character;
+// %n - frame number (copy of frame_no);
+// %p - PC in hex format;
+// %m - path to module (binary or shared object);
+// %o - offset in the module in hex format;
+// %f - function name;
+// %q - offset in the function in hex format (*if available*);
+// %s - path to source file;
+// %l - line in the source file;
+// %c - column in the source file;
+// %F - if function is known to be <foo>, prints "in <foo>", possibly
+// followed by the offset in this function, but only if source file
+// is unknown;
+// %S - prints file/line/column information;
+// %L - prints location information: file/line/column, if it is known, or
+// module+offset if it is known, or (<unknown module>) string.
+// %M - prints module basename and offset, if it is known, or PC.
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, bool vs_style,
+ const char *strip_path_prefix = "",
+ const char *strip_func_prefix = "");
+
+void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, bool vs_style,
+ const char *strip_path_prefix);
+
+void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, ModuleArch arch,
+ const char *strip_path_prefix);
+
+// Same as RenderFrame, but for data section (global variables).
+// Accepts %s, %l from above.
+// Also accepts:
+// %g - name of the global variable.
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix = "");
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKTRACE_PRINTER_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_stoptheworld.h b/lib/tsan/sanitizer_common/sanitizer_stoptheworld.h
new file mode 100644
index 0000000000..4e42400571
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_stoptheworld.h
@@ -0,0 +1,64 @@
+//===-- sanitizer_stoptheworld.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the StopTheWorld function which suspends the execution of the current
+// process and runs the user-supplied callback in the same address space.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STOPTHEWORLD_H
+#define SANITIZER_STOPTHEWORLD_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+enum PtraceRegistersStatus {
+ REGISTERS_UNAVAILABLE_FATAL = -1,
+ REGISTERS_UNAVAILABLE = 0,
+ REGISTERS_AVAILABLE = 1
+};
+
+// Holds the list of suspended threads and provides an interface to dump their
+// register contexts.
+class SuspendedThreadsList {
+ public:
+ SuspendedThreadsList() = default;
+
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable. Use UNIMPLEMENTED() instead.
+ virtual PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const {
+ UNIMPLEMENTED();
+ }
+
+ // The buffer in GetRegistersAndSP should be at least this big.
+ virtual uptr RegisterCount() const { UNIMPLEMENTED(); }
+ virtual uptr ThreadCount() const { UNIMPLEMENTED(); }
+ virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }
+
+ private:
+ // Prohibit copy and assign.
+ SuspendedThreadsList(const SuspendedThreadsList&);
+ void operator=(const SuspendedThreadsList&);
+};
+
+typedef void (*StopTheWorldCallback)(
+ const SuspendedThreadsList &suspended_threads_list,
+ void *argument);
+
+// Suspend all threads in the current process and run the callback on the list
+// of suspended threads. This function will resume the threads before returning.
+// The callback should not call any libc functions. The callback must not call
+// exit() nor _exit() and instead return to the caller.
+// This function should NOT be called from multiple threads simultaneously.
+void StopTheWorld(StopTheWorldCallback callback, void *argument);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STOPTHEWORLD_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp b/lib/tsan/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp
new file mode 100644
index 0000000000..3a246443ed
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp
@@ -0,0 +1,42 @@
+//===-- sanitizer_stoptheworld_fuchsia.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FUCHSIA
+
+#include <zircon/sanitizer.h>
+
+#include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+
+// The Fuchsia implementation stops the world but doesn't offer a real
+// SuspendedThreadsList argument. This is enough for ASan's use case,
+// and LSan does not use this API on Fuchsia.
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ struct Params {
+ StopTheWorldCallback callback;
+ void *argument;
+ } params = {callback, argument};
+ __sanitizer_memory_snapshot(
+ nullptr, nullptr, nullptr, nullptr,
+ [](zx_status_t, void *data) {
+ auto params = reinterpret_cast<Params *>(data);
+ params->callback({}, params->argument);
+ },
+ &params);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
diff --git a/lib/tsan/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/lib/tsan/sanitizer_common/sanitizer_stoptheworld_mac.cpp
new file mode 100644
index 0000000000..6c577426ad
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_stoptheworld_mac.cpp
@@ -0,0 +1,182 @@
+//===-- sanitizer_stoptheworld_mac.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) || \
+ defined(__i386))
+
+#include <mach/mach.h>
+#include <mach/thread_info.h>
+#include <pthread.h>
+
+#include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+typedef struct {
+ tid_t tid;
+ thread_t thread;
+} SuspendedThreadInfo;
+
+class SuspendedThreadsListMac : public SuspendedThreadsList {
+ public:
+ SuspendedThreadsListMac() : threads_(1024) {}
+
+ tid_t GetThreadID(uptr index) const;
+ thread_t GetThread(uptr index) const;
+ uptr ThreadCount() const;
+ bool ContainsThread(thread_t thread) const;
+ void Append(thread_t thread);
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const;
+ uptr RegisterCount() const;
+
+ private:
+ InternalMmapVector<SuspendedThreadInfo> threads_;
+};
+
+struct RunThreadArgs {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+void *RunThread(void *arg) {
+ struct RunThreadArgs *run_args = (struct RunThreadArgs *)arg;
+ SuspendedThreadsListMac suspended_threads_list;
+
+ thread_array_t threads;
+ mach_msg_type_number_t num_threads;
+ kern_return_t err = task_threads(mach_task_self(), &threads, &num_threads);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Failed to get threads for task (errno %d).\n", err);
+ return nullptr;
+ }
+
+ thread_t thread_self = mach_thread_self();
+ for (unsigned int i = 0; i < num_threads; ++i) {
+ if (threads[i] == thread_self) continue;
+
+ thread_suspend(threads[i]);
+ suspended_threads_list.Append(threads[i]);
+ }
+
+ run_args->callback(suspended_threads_list, run_args->argument);
+
+ uptr num_suspended = suspended_threads_list.ThreadCount();
+ for (unsigned int i = 0; i < num_suspended; ++i) {
+ thread_resume(suspended_threads_list.GetThread(i));
+ }
+ return nullptr;
+}
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ struct RunThreadArgs arg = {callback, argument};
+ pthread_t run_thread = (pthread_t)internal_start_thread(RunThread, &arg);
+ internal_join_thread(run_thread);
+}
+
+#if defined(__x86_64__)
+typedef x86_thread_state64_t regs_struct;
+
+#define SP_REG __rsp
+
+#elif defined(__aarch64__)
+typedef arm_thread_state64_t regs_struct;
+
+# if __DARWIN_UNIX03
+# define SP_REG __sp
+# else
+# define SP_REG sp
+# endif
+
+#elif defined(__i386)
+typedef x86_thread_state32_t regs_struct;
+
+#define SP_REG __esp
+
+#else
+#error "Unsupported architecture"
+#endif
+
+tid_t SuspendedThreadsListMac::GetThreadID(uptr index) const {
+ CHECK_LT(index, threads_.size());
+ return threads_[index].tid;
+}
+
+thread_t SuspendedThreadsListMac::GetThread(uptr index) const {
+ CHECK_LT(index, threads_.size());
+ return threads_[index].thread;
+}
+
+uptr SuspendedThreadsListMac::ThreadCount() const {
+ return threads_.size();
+}
+
+bool SuspendedThreadsListMac::ContainsThread(thread_t thread) const {
+ for (uptr i = 0; i < threads_.size(); i++) {
+ if (threads_[i].thread == thread) return true;
+ }
+ return false;
+}
+
+void SuspendedThreadsListMac::Append(thread_t thread) {
+ thread_identifier_info_data_t info;
+ mach_msg_type_number_t info_count = THREAD_IDENTIFIER_INFO_COUNT;
+ kern_return_t err = thread_info(thread, THREAD_IDENTIFIER_INFO,
+ (thread_info_t)&info, &info_count);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Error - unable to get thread ident for a thread\n");
+ return;
+ }
+ threads_.push_back({info.thread_id, thread});
+}
+
+PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
+ uptr index, uptr *buffer, uptr *sp) const {
+ thread_t thread = GetThread(index);
+ regs_struct regs;
+ int err;
+ mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;
+ err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,
+ &reg_count);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Error - unable to get registers for a thread\n");
+ // KERN_INVALID_ARGUMENT indicates that either the flavor is invalid,
+ // or the thread does not exist. The other possible error case,
+ // MIG_ARRAY_TOO_LARGE, means that the state is too large, but it's
+ // still safe to proceed.
+ return err == KERN_INVALID_ARGUMENT ? REGISTERS_UNAVAILABLE_FATAL
+ : REGISTERS_UNAVAILABLE;
+ }
+
+ internal_memcpy(buffer, &regs, sizeof(regs));
+#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)
+ *sp = arm_thread_state64_get_sp(regs);
+#else
+ *sp = regs.SP_REG;
+#endif
+
+ // On x86_64 and aarch64, we must account for the stack redzone, which is 128
+ // bytes.
+ if (SANITIZER_WORDSIZE == 64) *sp -= 128;
+
+ return REGISTERS_AVAILABLE;
+}
+
+uptr SuspendedThreadsListMac::RegisterCount() const {
+ return MACHINE_THREAD_STATE_COUNT;
+}
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__)) ||
+ // defined(__i386))
diff --git a/lib/tsan/sanitizer_common/sanitizer_suppressions.cpp b/lib/tsan/sanitizer_common/sanitizer_suppressions.cpp
new file mode 100644
index 0000000000..44c83a66c5
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_suppressions.cpp
@@ -0,0 +1,181 @@
+//===-- sanitizer_suppressions.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Suppression parsing/matching code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_suppressions.h"
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_file.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+SuppressionContext::SuppressionContext(const char *suppression_types[],
+ int suppression_types_num)
+ : suppression_types_(suppression_types),
+ suppression_types_num_(suppression_types_num),
+ can_parse_(true) {
+ CHECK_LE(suppression_types_num_, kMaxSuppressionTypes);
+ internal_memset(has_suppression_type_, 0, suppression_types_num_);
+}
+
+#if !SANITIZER_FUCHSIA
+static bool GetPathAssumingFileIsRelativeToExec(const char *file_path,
+ /*out*/char *new_file_path,
+ uptr new_file_path_size) {
+ InternalScopedString exec(kMaxPathLength);
+ if (ReadBinaryNameCached(exec.data(), exec.size())) {
+ const char *file_name_pos = StripModuleName(exec.data());
+ uptr path_to_exec_len = file_name_pos - exec.data();
+ internal_strncat(new_file_path, exec.data(),
+ Min(path_to_exec_len, new_file_path_size - 1));
+ internal_strncat(new_file_path, file_path,
+ new_file_path_size - internal_strlen(new_file_path) - 1);
+ return true;
+ }
+ return false;
+}
+
+static const char *FindFile(const char *file_path,
+ /*out*/char *new_file_path,
+ uptr new_file_path_size) {
+ // If we cannot find the file, check if its location is relative to
+ // the location of the executable.
+ if (!FileExists(file_path) && !IsAbsolutePath(file_path) &&
+ GetPathAssumingFileIsRelativeToExec(file_path, new_file_path,
+ new_file_path_size)) {
+ return new_file_path;
+ }
+ return file_path;
+}
+#else
+static const char *FindFile(const char *file_path, char *, uptr) {
+ return file_path;
+}
+#endif
+
+void SuppressionContext::ParseFromFile(const char *filename) {
+ if (filename[0] == '\0')
+ return;
+
+ InternalScopedString new_file_path(kMaxPathLength);
+ filename = FindFile(filename, new_file_path.data(), new_file_path.size());
+
+ // Read the file.
+ VPrintf(1, "%s: reading suppressions file at %s\n",
+ SanitizerToolName, filename);
+ char *file_contents;
+ uptr buffer_size;
+ uptr contents_size;
+ if (!ReadFileToBuffer(filename, &file_contents, &buffer_size,
+ &contents_size)) {
+ Printf("%s: failed to read suppressions file '%s'\n", SanitizerToolName,
+ filename);
+ Die();
+ }
+
+ Parse(file_contents);
+}
+
+bool SuppressionContext::Match(const char *str, const char *type,
+ Suppression **s) {
+ can_parse_ = false;
+ if (!HasSuppressionType(type))
+ return false;
+ for (uptr i = 0; i < suppressions_.size(); i++) {
+ Suppression &cur = suppressions_[i];
+ if (0 == internal_strcmp(cur.type, type) && TemplateMatch(cur.templ, str)) {
+ *s = &cur;
+ return true;
+ }
+ }
+ return false;
+}
+
+static const char *StripPrefix(const char *str, const char *prefix) {
+ while (*str && *str == *prefix) {
+ str++;
+ prefix++;
+ }
+ if (!*prefix)
+ return str;
+ return 0;
+}
+
+void SuppressionContext::Parse(const char *str) {
+ // Context must not mutate once Match has been called.
+ CHECK(can_parse_);
+ const char *line = str;
+ while (line) {
+ while (line[0] == ' ' || line[0] == '\t')
+ line++;
+ const char *end = internal_strchr(line, '\n');
+ if (end == 0)
+ end = line + internal_strlen(line);
+ if (line != end && line[0] != '#') {
+ const char *end2 = end;
+ while (line != end2 &&
+ (end2[-1] == ' ' || end2[-1] == '\t' || end2[-1] == '\r'))
+ end2--;
+ int type;
+ for (type = 0; type < suppression_types_num_; type++) {
+ const char *next_char = StripPrefix(line, suppression_types_[type]);
+ if (next_char && *next_char == ':') {
+ line = ++next_char;
+ break;
+ }
+ }
+ if (type == suppression_types_num_) {
+ Printf("%s: failed to parse suppressions\n", SanitizerToolName);
+ Die();
+ }
+ Suppression s;
+ s.type = suppression_types_[type];
+ s.templ = (char*)InternalAlloc(end2 - line + 1);
+ internal_memcpy(s.templ, line, end2 - line);
+ s.templ[end2 - line] = 0;
+ suppressions_.push_back(s);
+ has_suppression_type_[type] = true;
+ }
+ if (end[0] == 0)
+ break;
+ line = end + 1;
+ }
+}
+
+uptr SuppressionContext::SuppressionCount() const {
+ return suppressions_.size();
+}
+
+bool SuppressionContext::HasSuppressionType(const char *type) const {
+ for (int i = 0; i < suppression_types_num_; i++) {
+ if (0 == internal_strcmp(type, suppression_types_[i]))
+ return has_suppression_type_[i];
+ }
+ return false;
+}
+
+const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
+ CHECK_LT(i, suppressions_.size());
+ return &suppressions_[i];
+}
+
+void SuppressionContext::GetMatched(
+ InternalMmapVector<Suppression *> *matched) {
+ for (uptr i = 0; i < suppressions_.size(); i++)
+ if (atomic_load_relaxed(&suppressions_[i].hit_count))
+ matched->push_back(&suppressions_[i]);
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_suppressions.h b/lib/tsan/sanitizer_common/sanitizer_suppressions.h
new file mode 100644
index 0000000000..2d88b1f72f
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_suppressions.h
@@ -0,0 +1,56 @@
+//===-- sanitizer_suppressions.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Suppression parsing/matching code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SUPPRESSIONS_H
+#define SANITIZER_SUPPRESSIONS_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+struct Suppression {
+ Suppression() { internal_memset(this, 0, sizeof(*this)); }
+ const char *type;
+ char *templ;
+ atomic_uint32_t hit_count;
+ uptr weight;
+};
+
+class SuppressionContext {
+ public:
+ // Create new SuppressionContext capable of parsing given suppression types.
+ SuppressionContext(const char *supprression_types[],
+ int suppression_types_num);
+
+ void ParseFromFile(const char *filename);
+ void Parse(const char *str);
+
+ bool Match(const char *str, const char *type, Suppression **s);
+ uptr SuppressionCount() const;
+ bool HasSuppressionType(const char *type) const;
+ const Suppression *SuppressionAt(uptr i) const;
+ void GetMatched(InternalMmapVector<Suppression *> *matched);
+
+ private:
+ static const int kMaxSuppressionTypes = 64;
+ const char **const suppression_types_;
+ const int suppression_types_num_;
+
+ InternalMmapVector<Suppression> suppressions_;
+ bool has_suppression_type_[kMaxSuppressionTypes];
+ bool can_parse_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SUPPRESSIONS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_symbolizer.h b/lib/tsan/sanitizer_common/sanitizer_symbolizer.h
new file mode 100644
index 0000000000..2476b0ea7b
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_symbolizer.h
@@ -0,0 +1,223 @@
+//===-- sanitizer_symbolizer.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Symbolizer is used by sanitizers to map instruction address to a location in
+// source code at run-time. Symbolizer either uses __sanitizer_symbolize_*
+// defined in the program, or (if they are missing) tries to find and
+// launch "llvm-symbolizer" commandline tool in a separate process and
+// communicate with it.
+//
+// Generally we should try to avoid calling system library functions during
+// symbolization (and use their replacements from sanitizer_libc.h instead).
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_H
+#define SANITIZER_SYMBOLIZER_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_vector.h"
+
+namespace __sanitizer {
+
+struct AddressInfo {
+ // Owns all the string members. Storage for them is
+ // (de)allocated using sanitizer internal allocator.
+ uptr address;
+
+ char *module;
+ uptr module_offset;
+ ModuleArch module_arch;
+
+ static const uptr kUnknown = ~(uptr)0;
+ char *function;
+ uptr function_offset;
+
+ char *file;
+ int line;
+ int column;
+
+ AddressInfo();
+ // Deletes all strings and resets all fields.
+ void Clear();
+ void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);
+};
+
+// Linked list of symbolized frames (each frame is described by AddressInfo).
+struct SymbolizedStack {
+ SymbolizedStack *next;
+ AddressInfo info;
+ static SymbolizedStack *New(uptr addr);
+ // Deletes current, and all subsequent frames in the linked list.
+ // The object cannot be accessed after the call to this function.
+ void ClearAll();
+
+ private:
+ SymbolizedStack();
+};
+
+// For now, DataInfo is used to describe global variable.
+struct DataInfo {
+ // Owns all the string members. Storage for them is
+ // (de)allocated using sanitizer internal allocator.
+ char *module;
+ uptr module_offset;
+ ModuleArch module_arch;
+
+ char *file;
+ uptr line;
+ char *name;
+ uptr start;
+ uptr size;
+
+ DataInfo();
+ void Clear();
+};
+
+struct LocalInfo {
+ char *function_name = nullptr;
+ char *name = nullptr;
+ char *decl_file = nullptr;
+ unsigned decl_line = 0;
+
+ bool has_frame_offset = false;
+ bool has_size = false;
+ bool has_tag_offset = false;
+
+ sptr frame_offset;
+ uptr size;
+ uptr tag_offset;
+
+ void Clear();
+};
+
+struct FrameInfo {
+ char *module;
+ uptr module_offset;
+ ModuleArch module_arch;
+
+ InternalMmapVector<LocalInfo> locals;
+ void Clear();
+};
+
+class SymbolizerTool;
+
+class Symbolizer final {
+ public:
+ /// Initialize and return platform-specific implementation of symbolizer
+ /// (if it wasn't already initialized).
+ static Symbolizer *GetOrInit();
+ static void LateInitialize();
+ // Returns a list of symbolized frames for a given address (containing
+ // all inlined functions, if necessary).
+ SymbolizedStack *SymbolizePC(uptr address);
+ bool SymbolizeData(uptr address, DataInfo *info);
+ bool SymbolizeFrame(uptr address, FrameInfo *info);
+
+ // The module names Symbolizer returns are stable and unique for every given
+ // module. It is safe to store and compare them as pointers.
+ bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address);
+ const char *GetModuleNameForPc(uptr pc) {
+ const char *module_name = nullptr;
+ uptr unused;
+ if (GetModuleNameAndOffsetForPC(pc, &module_name, &unused))
+ return module_name;
+ return nullptr;
+ }
+
+ // Release internal caches (if any).
+ void Flush();
+ // Attempts to demangle the provided C++ mangled name.
+ const char *Demangle(const char *name);
+
+ // Allow user to install hooks that would be called before/after Symbolizer
+ // does the actual file/line info fetching. Specific sanitizers may need this
+ // to distinguish system library calls made in user code from calls made
+ // during in-process symbolization.
+ typedef void (*StartSymbolizationHook)();
+ typedef void (*EndSymbolizationHook)();
+ // May be called at most once.
+ void AddHooks(StartSymbolizationHook start_hook,
+ EndSymbolizationHook end_hook);
+
+ void RefreshModules();
+ const LoadedModule *FindModuleForAddress(uptr address);
+
+ void InvalidateModuleList();
+
+ private:
+ // GetModuleNameAndOffsetForPC has to return a string to the caller.
+ // Since the corresponding module might get unloaded later, we should create
+ // our owned copies of the strings that we can safely return.
+ // ModuleNameOwner does not provide any synchronization, thus calls to
+ // its method should be protected by |mu_|.
+ class ModuleNameOwner {
+ public:
+ explicit ModuleNameOwner(BlockingMutex *synchronized_by)
+ : last_match_(nullptr), mu_(synchronized_by) {
+ storage_.reserve(kInitialCapacity);
+ }
+ const char *GetOwnedCopy(const char *str);
+
+ private:
+ static const uptr kInitialCapacity = 1000;
+ InternalMmapVector<const char*> storage_;
+ const char *last_match_;
+
+ BlockingMutex *mu_;
+ } module_names_;
+
+ /// Platform-specific function for creating a Symbolizer object.
+ static Symbolizer *PlatformInit();
+
+ bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
+ uptr *module_offset,
+ ModuleArch *module_arch);
+ ListOfModules modules_;
+ ListOfModules fallback_modules_;
+ // If stale, need to reload the modules before looking up addresses.
+ bool modules_fresh_;
+
+ // Platform-specific default demangler, must not return nullptr.
+ const char *PlatformDemangle(const char *name);
+
+ static Symbolizer *symbolizer_;
+ static StaticSpinMutex init_mu_;
+
+ // Mutex locked from public methods of |Symbolizer|, so that the internals
+ // (including individual symbolizer tools and platform-specific methods) are
+ // always synchronized.
+ BlockingMutex mu_;
+
+ IntrusiveList<SymbolizerTool> tools_;
+
+ explicit Symbolizer(IntrusiveList<SymbolizerTool> tools);
+
+ static LowLevelAllocator symbolizer_allocator_;
+
+ StartSymbolizationHook start_hook_;
+ EndSymbolizationHook end_hook_;
+ class SymbolizerScope {
+ public:
+ explicit SymbolizerScope(const Symbolizer *sym);
+ ~SymbolizerScope();
+ private:
+ const Symbolizer *sym_;
+ };
+
+ // Calls `LateInitialize()` on all items in `tools_`.
+ void LateInitializeTools();
+};
+
+#ifdef SANITIZER_WINDOWS
+void InitializeDbgHelpIfNeeded();
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_symbolizer_fuchsia.h b/lib/tsan/sanitizer_common/sanitizer_symbolizer_fuchsia.h
new file mode 100644
index 0000000000..c4061e38c6
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_symbolizer_fuchsia.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_symbolizer_fuchsia.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Define Fuchsia's string formats and limits for the markup symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_FUCHSIA_H
+#define SANITIZER_SYMBOLIZER_FUCHSIA_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// See the spec at:
+// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
+constexpr uptr kFormatDemangleMax = 1024; // Arbitrary.
+
+// Function name or equivalent from PC location.
+constexpr const char *kFormatFunction = "{{{pc:%p}}}";
+constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
+
+// Global variable name or equivalent from data memory address.
+constexpr const char *kFormatData = "{{{data:%p}}}";
+
+// One frame in a backtrace (printed on a line by itself).
+constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}";
+
+// Dump trigger element.
+#define FORMAT_DUMPFILE "{{{dumpfile:%s:%s}}}"
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_FUCHSIA_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_symbolizer_internal.h b/lib/tsan/sanitizer_common/sanitizer_symbolizer_internal.h
new file mode 100644
index 0000000000..e4c351e667
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -0,0 +1,165 @@
+//===-- sanitizer_symbolizer_internal.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Header for internal classes and functions to be used by implementations of
+// symbolizers.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H
+#define SANITIZER_SYMBOLIZER_INTERNAL_H
+
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_file.h"
+#include "sanitizer_vector.h"
+
+namespace __sanitizer {
+
+// Parsing helpers, 'str' is searched for delimiter(s) and a string or uptr
+// is extracted. When extracting a string, a newly allocated (using
+// InternalAlloc) and null-terminataed buffer is returned. They return a pointer
+// to the next characted after the found delimiter.
+const char *ExtractToken(const char *str, const char *delims, char **result);
+const char *ExtractInt(const char *str, const char *delims, int *result);
+const char *ExtractUptr(const char *str, const char *delims, uptr *result);
+const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
+ char **result);
+
+const char *DemangleSwiftAndCXX(const char *name);
+
+// SymbolizerTool is an interface that is implemented by individual "tools"
+// that can perform symbolication (external llvm-symbolizer, libbacktrace,
+// Windows DbgHelp symbolizer, etc.).
+class SymbolizerTool {
+ public:
+ // The main |Symbolizer| class implements a "fallback chain" of symbolizer
+ // tools. In a request to symbolize an address, if one tool returns false,
+ // the next tool in the chain will be tried.
+ SymbolizerTool *next;
+
+ SymbolizerTool() : next(nullptr) { }
+
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable.
+
+ // The |stack| parameter is inout. It is pre-filled with the address,
+ // module base and module offset values and is to be used to construct
+ // other stack frames.
+ virtual bool SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ UNIMPLEMENTED();
+ }
+
+ // The |info| parameter is inout. It is pre-filled with the module base
+ // and module offset values.
+ virtual bool SymbolizeData(uptr addr, DataInfo *info) {
+ UNIMPLEMENTED();
+ }
+
+ virtual bool SymbolizeFrame(uptr addr, FrameInfo *info) {
+ return false;
+ }
+
+ virtual void Flush() {}
+
+ // Return nullptr to fallback to the default platform-specific demangler.
+ virtual const char *Demangle(const char *name) {
+ return nullptr;
+ }
+
+ // Called during the LateInitialize phase of Sanitizer initialization.
+ // Usually this is a safe place to call code that might need to use user
+ // memory allocators.
+ virtual void LateInitialize() {}
+};
+
+// SymbolizerProcess encapsulates communication between the tool and
+// external symbolizer program, running in a different subprocess.
+// SymbolizerProcess may not be used from two threads simultaneously.
+class SymbolizerProcess {
+ public:
+ explicit SymbolizerProcess(const char *path, bool use_posix_spawn = false);
+ const char *SendCommand(const char *command);
+
+ protected:
+ /// The maximum number of arguments required to invoke a tool process.
+ static const unsigned kArgVMax = 6;
+
+ // Customizable by subclasses.
+ virtual bool StartSymbolizerSubprocess();
+ virtual bool ReadFromSymbolizer(char *buffer, uptr max_length);
+ // Return the environment to run the symbolizer in.
+ virtual char **GetEnvP() { return GetEnviron(); }
+
+ private:
+ virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ UNIMPLEMENTED();
+ }
+
+ /// Fill in an argv array to invoke the child process.
+ virtual void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const {
+ UNIMPLEMENTED();
+ }
+
+ bool Restart();
+ const char *SendCommandImpl(const char *command);
+ bool WriteToSymbolizer(const char *buffer, uptr length);
+
+ const char *path_;
+ fd_t input_fd_;
+ fd_t output_fd_;
+
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+
+ static const uptr kMaxTimesRestarted = 5;
+ static const int kSymbolizerStartupTimeMillis = 10;
+ uptr times_restarted_;
+ bool failed_to_start_;
+ bool reported_invalid_path_;
+ bool use_posix_spawn_;
+};
+
+class LLVMSymbolizerProcess;
+
+// This tool invokes llvm-symbolizer in a subprocess. It should be as portable
+// as the llvm-symbolizer tool is.
+class LLVMSymbolizer : public SymbolizerTool {
+ public:
+ explicit LLVMSymbolizer(const char *path, LowLevelAllocator *allocator);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+ bool SymbolizeFrame(uptr addr, FrameInfo *info) override;
+
+ private:
+ const char *FormatAndSendCommand(const char *command_prefix,
+ const char *module_name, uptr module_offset,
+ ModuleArch arch);
+
+ LLVMSymbolizerProcess *symbolizer_process_;
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+};
+
+// Parses one or more two-line strings in the following format:
+// <function_name>
+// <file_name>:<line_number>[:<column_number>]
+// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of
+// them use the same output format. Returns true if any useful debug
+// information was found.
+void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res);
+
+// Parses a two-line string in the following format:
+// <symbol_name>
+// <start_address> <size>
+// Used by LLVMSymbolizer and InternalSymbolizer.
+void ParseSymbolizeDataOutput(const char *str, DataInfo *info);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_INTERNAL_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_symbolizer_libbacktrace.h b/lib/tsan/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
new file mode 100644
index 0000000000..e2a0f71420
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
@@ -0,0 +1,49 @@
+//===-- sanitizer_symbolizer_libbacktrace.h ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Header for libbacktrace symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
+#define SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
+
+#include "sanitizer_platform.h"
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_symbolizer_internal.h"
+
+#ifndef SANITIZER_LIBBACKTRACE
+# define SANITIZER_LIBBACKTRACE 0
+#endif
+
+#ifndef SANITIZER_CP_DEMANGLE
+# define SANITIZER_CP_DEMANGLE 0
+#endif
+
+namespace __sanitizer {
+
+class LibbacktraceSymbolizer : public SymbolizerTool {
+ public:
+ static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+
+ // May return NULL if demangling failed.
+ const char *Demangle(const char *name) override;
+
+ private:
+ explicit LibbacktraceSymbolizer(void *state) : state_(state) {}
+
+ void *state_; // Leaked.
+};
+
+} // namespace __sanitizer
+#endif // SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_symbolizer_mac.h b/lib/tsan/sanitizer_common/sanitizer_symbolizer_mac.h
new file mode 100644
index 0000000000..8996131fc1
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_symbolizer_mac.h
@@ -0,0 +1,48 @@
+//===-- sanitizer_symbolizer_mac.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Header for Mac-specific "atos" symbolizer.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_MAC_H
+#define SANITIZER_SYMBOLIZER_MAC_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+class DlAddrSymbolizer : public SymbolizerTool {
+ public:
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+};
+
+class AtosSymbolizerProcess;
+
+class AtosSymbolizer : public SymbolizerTool {
+ public:
+ explicit AtosSymbolizer(const char *path, LowLevelAllocator *allocator);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+ void LateInitialize() override;
+
+ private:
+ AtosSymbolizerProcess *process_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+
+#endif // SANITIZER_SYMBOLIZER_MAC_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_symbolizer_rtems.h b/lib/tsan/sanitizer_common/sanitizer_symbolizer_rtems.h
new file mode 100644
index 0000000000..3371092e06
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_symbolizer_rtems.h
@@ -0,0 +1,40 @@
+//===-- sanitizer_symbolizer_rtems.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Define RTEMS's string formats and limits for the markup symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_RTEMS_H
+#define SANITIZER_SYMBOLIZER_RTEMS_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// The Myriad RTEMS symbolizer currently only parses backtrace lines,
+// so use a format that the symbolizer understands. For other
+// markups, keep them the same as the Fuchsia's.
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
+constexpr uptr kFormatDemangleMax = 1024; // Arbitrary.
+
+// Function name or equivalent from PC location.
+constexpr const char *kFormatFunction = "{{{pc:%p}}}";
+constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
+
+// Global variable name or equivalent from data memory address.
+constexpr const char *kFormatData = "{{{data:%p}}}";
+
+// One frame in a backtrace (printed on a line by itself).
+constexpr const char *kFormatFrame = " [%u] IP: %p";
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_RTEMS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_syscall_generic.inc b/lib/tsan/sanitizer_common/sanitizer_syscall_generic.inc
new file mode 100644
index 0000000000..a43ce3efab
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_syscall_generic.inc
@@ -0,0 +1,38 @@
+//===-- sanitizer_syscall_generic.inc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic implementations of internal_syscall* and internal_iserror.
+//
+//===----------------------------------------------------------------------===//
+
+// NetBSD uses libc calls directly
+#if !SANITIZER_NETBSD
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_OPENBSD || SANITIZER_SOLARIS
+# define SYSCALL(name) SYS_ ## name
+#else
+# define SYSCALL(name) __NR_ ## name
+#endif
+
+#if defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_MAC)
+# define internal_syscall __syscall
+# else
+# define internal_syscall syscall
+#endif
+
+#endif
+
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval == (uptr)-1) {
+ if (rverrno)
+ *rverrno = errno;
+ return true;
+ } else {
+ return false;
+ }
+}
diff --git a/lib/tsan/sanitizer_common/sanitizer_syscall_linux_aarch64.inc b/lib/tsan/sanitizer_common/sanitizer_syscall_linux_aarch64.inc
new file mode 100644
index 0000000000..56c5e99220
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_syscall_linux_aarch64.inc
@@ -0,0 +1,137 @@
+//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/aarch64.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_ ## name
+
+static uptr __internal_syscall(u64 nr) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0");
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall0(n) \
+ (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall1(n, a1) \
+ (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4, long arg5) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ register u64 x4 asm("x4") = arg5;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4, long arg5, long arg6) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ register u64 x4 asm("x4") = arg5;
+ register u64 x5 asm("x5") = arg6;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid cobbler errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
diff --git a/lib/tsan/sanitizer_common/sanitizer_syscall_linux_arm.inc b/lib/tsan/sanitizer_common/sanitizer_syscall_linux_arm.inc
new file mode 100644
index 0000000000..121a9445b4
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_syscall_linux_arm.inc
@@ -0,0 +1,137 @@
+//===-- sanitizer_syscall_linux_arm.inc -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/arm.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_ ## name
+
+static uptr __internal_syscall(u32 nr) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0");
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall0(n) \
+ (__internal_syscall)(n)
+
+static uptr __internal_syscall(u32 nr, u32 arg1) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall1(n, a1) \
+ (__internal_syscall)(n, (u32)(a1))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
+ u32 arg4) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ register u32 r3 asm("r3") = arg4;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
+ u32 arg4, long arg5) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ register u32 r3 asm("r3") = arg4;
+ register u32 r4 asm("r4") = arg5;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u32)(a5))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
+ u32 arg4, long arg5, long arg6) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ register u32 r3 asm("r3") = arg4;
+ register u32 r4 asm("r4") = arg5;
+ register u32 r5 asm("r5") = arg6;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u32)(a5), (long)(a6))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid cobbler errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
diff --git a/lib/tsan/sanitizer_common/sanitizer_syscall_linux_x86_64.inc b/lib/tsan/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
new file mode 100644
index 0000000000..67e8686d12
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
@@ -0,0 +1,90 @@
+//===-- sanitizer_syscall_linux_x86_64.inc ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/x86_64.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_ ## name
+
+static uptr internal_syscall(u64 nr) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11",
+ "memory", "cc");
+ return retval;
+}
+
+template <typename T1>
+static uptr internal_syscall(u64 nr, T1 arg1) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1) :
+ "rcx", "r11", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2) : "rcx", "r11", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3) : "rcx", "r11", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4) :
+ "rcx", "r11", "r10", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
+ T5 arg5) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "mov %6, %%r8;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5) :
+ "rcx", "r11", "r10", "r8", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
+ T5 arg5, T6 arg6) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "mov %6, %%r8;"
+ "mov %7, %%r9;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5),
+ "r"((u64)arg6) : "rcx", "r11", "r10", "r8", "r9",
+ "memory", "cc");
+ return retval;
+}
+
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
diff --git a/lib/tsan/sanitizer_common/sanitizer_syscalls_netbsd.inc b/lib/tsan/sanitizer_common/sanitizer_syscalls_netbsd.inc
new file mode 100644
index 0000000000..02b7e11b16
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_syscalls_netbsd.inc
@@ -0,0 +1,3837 @@
+//===-- sanitizer_syscalls_netbsd.inc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common syscalls handlers for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define it's own macros:
+// COMMON_SYSCALL_PRE_READ_RANGE
+// Called in prehook for regions that will be read by the kernel and
+// must be initialized.
+// COMMON_SYSCALL_PRE_WRITE_RANGE
+// Called in prehook for regions that will be written to by the kernel
+// and must be addressable. The actual write range may be smaller than
+// reported in the prehook. See POST_WRITE_RANGE.
+// COMMON_SYSCALL_POST_READ_RANGE
+// Called in posthook for regions that were read by the kernel. Does
+// not make much sense.
+// COMMON_SYSCALL_POST_WRITE_RANGE
+// Called in posthook for regions that were written to by the kernel
+// and are now initialized.
+// COMMON_SYSCALL_ACQUIRE(addr)
+// Acquire memory visibility from addr.
+// COMMON_SYSCALL_RELEASE(addr)
+// Release memory visibility to addr.
+// COMMON_SYSCALL_FD_CLOSE(fd)
+// Called before closing file descriptor fd.
+// COMMON_SYSCALL_FD_ACQUIRE(fd)
+// Acquire memory visibility from fd.
+// COMMON_SYSCALL_FD_RELEASE(fd)
+// Release memory visibility to fd.
+// COMMON_SYSCALL_PRE_FORK()
+// Called before fork syscall.
+// COMMON_SYSCALL_POST_FORK(long long res)
+// Called after fork syscall.
+//
+// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
+//
+// Generated with: generate_netbsd_syscalls.awk
+// Generated date: 2019-12-24
+// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_NETBSD
+
+#include "sanitizer_libc.h"
+
+#define PRE_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
+#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+
+#define POST_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
+#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+
+#ifndef COMMON_SYSCALL_ACQUIRE
+#define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_RELEASE
+#define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_CLOSE
+#define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_ACQUIRE
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_RELEASE
+#define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_PRE_FORK
+#define COMMON_SYSCALL_PRE_FORK() \
+ {}
+#endif
+
+#ifndef COMMON_SYSCALL_POST_FORK
+#define COMMON_SYSCALL_POST_FORK(res) \
+ {}
+#endif
+
+// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
+
+extern "C" {
+#define SYS_MAXSYSARGS 8
+PRE_SYSCALL(syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+POST_SYSCALL(syscall)
+(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(exit)(long long rval_) { /* Nothing to do */ }
+POST_SYSCALL(exit)(long long res, long long rval_) { /* Nothing to do */ }
+PRE_SYSCALL(fork)(void) { COMMON_SYSCALL_PRE_FORK(); }
+POST_SYSCALL(fork)(long long res) { COMMON_SYSCALL_POST_FORK(res); }
+PRE_SYSCALL(read)(long long fd_, void *buf_, long long nbyte_) {
+ if (buf_) {
+ PRE_WRITE(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(read)(long long res, long long fd_, void *buf_, long long nbyte_) {
+ if (res > 0) {
+ POST_WRITE(buf_, res);
+ }
+}
+PRE_SYSCALL(write)(long long fd_, void *buf_, long long nbyte_) {
+ if (buf_) {
+ PRE_READ(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(write)
+(long long res, long long fd_, void *buf_, long long nbyte_) {
+ if (res > 0) {
+ POST_READ(buf_, res);
+ }
+}
+PRE_SYSCALL(open)(void *path_, long long flags_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(open)
+(long long res, void *path_, long long flags_, long long mode_) {
+ if (res > 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(close)(long long fd_) { COMMON_SYSCALL_FD_CLOSE((int)fd_); }
+POST_SYSCALL(close)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_wait4)
+(long long pid_, void *status_, long long options_, void *rusage_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_wait4)
+(long long res, long long pid_, void *status_, long long options_,
+ void *rusage_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ocreat)(void *path_, long long mode_) { /* TODO */ }
+POST_SYSCALL(compat_43_ocreat)(long long res, void *path_, long long mode_) {
+ /* TODO */
+}
+PRE_SYSCALL(link)(void *path_, void *link_) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+ PRE_READ(path, __sanitizer::internal_strlen(link) + 1);
+ }
+}
+POST_SYSCALL(link)(long long res, void *path_, void *link_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+ POST_READ(path, __sanitizer::internal_strlen(link) + 1);
+ }
+ }
+}
+PRE_SYSCALL(unlink)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(unlink)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+/* syscall 11 has been skipped */
+PRE_SYSCALL(chdir)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chdir)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchdir)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fchdir)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_mknod)(void *path_, long long mode_, long long dev_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_mknod)
+(long long res, void *path_, long long mode_, long long dev_) {
+ /* TODO */
+}
+PRE_SYSCALL(chmod)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chmod)(long long res, void *path_, long long mode_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(chown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(break)(void *nsize_) { /* Nothing to do */ }
+POST_SYSCALL(break)(long long res, void *nsize_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_20_getfsstat)
+(void *buf_, long long bufsize_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_20_getfsstat)
+(long long res, void *buf_, long long bufsize_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_olseek)
+(long long fd_, long long offset_, long long whence_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_olseek)
+(long long res, long long fd_, long long offset_, long long whence_) {
+ /* TODO */
+}
+PRE_SYSCALL(getpid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getpid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_40_mount)
+(void *type_, void *path_, long long flags_, void *data_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_40_mount)
+(long long res, void *type_, void *path_, long long flags_, void *data_) {
+ /* TODO */
+}
+PRE_SYSCALL(unmount)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(unmount)(long long res, void *path_, long long flags_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(setuid)(long long uid_) { /* Nothing to do */ }
+POST_SYSCALL(setuid)(long long res, long long uid_) { /* Nothing to do */ }
+PRE_SYSCALL(getuid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getuid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(geteuid)(void) { /* Nothing to do */ }
+POST_SYSCALL(geteuid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(ptrace)
+(long long req_, long long pid_, void *addr_, long long data_) {
+ if (req_ == ptrace_pt_io) {
+ struct __sanitizer_ptrace_io_desc *addr =
+ (struct __sanitizer_ptrace_io_desc *)addr_;
+ PRE_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz);
+ if (addr->piod_op == ptrace_piod_write_d ||
+ addr->piod_op == ptrace_piod_write_i) {
+ PRE_READ(addr->piod_addr, addr->piod_len);
+ }
+ if (addr->piod_op == ptrace_piod_read_d ||
+ addr->piod_op == ptrace_piod_read_i ||
+ addr->piod_op == ptrace_piod_read_auxv) {
+ PRE_WRITE(addr->piod_addr, addr->piod_len);
+ }
+ } else if (req_ == ptrace_pt_lwpinfo) {
+ struct __sanitizer_ptrace_lwpinfo *addr =
+ (struct __sanitizer_ptrace_lwpinfo *)addr_;
+ PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ PRE_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz);
+ } else if (req_ == ptrace_pt_set_event_mask) {
+ PRE_READ(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_get_event_mask) {
+ PRE_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_set_siginfo) {
+ PRE_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_get_siginfo) {
+ PRE_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_lwpstatus) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ PRE_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
+ } else if (req_ == ptrace_pt_lwpnext) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ PRE_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
+ } else if (req_ == ptrace_pt_setregs) {
+ PRE_READ(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_getregs) {
+ PRE_WRITE(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_setfpregs) {
+ PRE_READ(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_getfpregs) {
+ PRE_WRITE(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_setdbregs) {
+ PRE_READ(addr_, struct_ptrace_dbreg_struct_sz);
+ } else if (req_ == ptrace_pt_getdbregs) {
+ PRE_WRITE(addr_, struct_ptrace_dbreg_struct_sz);
+ }
+}
+POST_SYSCALL(ptrace)
+(long long res, long long req_, long long pid_, void *addr_, long long data_) {
+ if (res == 0) {
+ if (req_ == ptrace_pt_io) {
+ struct __sanitizer_ptrace_io_desc *addr =
+ (struct __sanitizer_ptrace_io_desc *)addr_;
+ POST_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz);
+ if (addr->piod_op == ptrace_piod_write_d ||
+ addr->piod_op == ptrace_piod_write_i) {
+ POST_READ(addr->piod_addr, addr->piod_len);
+ }
+ if (addr->piod_op == ptrace_piod_read_d ||
+ addr->piod_op == ptrace_piod_read_i ||
+ addr->piod_op == ptrace_piod_read_auxv) {
+ POST_WRITE(addr->piod_addr, addr->piod_len);
+ }
+ } else if (req_ == ptrace_pt_lwpinfo) {
+ struct __sanitizer_ptrace_lwpinfo *addr =
+ (struct __sanitizer_ptrace_lwpinfo *)addr_;
+ POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ POST_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz);
+ } else if (req_ == ptrace_pt_set_event_mask) {
+ POST_READ(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_get_event_mask) {
+ POST_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_set_siginfo) {
+ POST_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_get_siginfo) {
+ POST_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_lwpstatus) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ POST_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
+ } else if (req_ == ptrace_pt_lwpnext) {
+ struct __sanitizer_ptrace_lwpstatus *addr =
+ (struct __sanitizer_ptrace_lwpstatus *)addr_;
+ POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ POST_WRITE(addr, struct_ptrace_ptrace_lwpstatus_struct_sz);
+ } else if (req_ == ptrace_pt_setregs) {
+ POST_READ(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_getregs) {
+ POST_WRITE(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_setfpregs) {
+ POST_READ(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_getfpregs) {
+ POST_WRITE(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_setdbregs) {
+ POST_READ(addr_, struct_ptrace_dbreg_struct_sz);
+ } else if (req_ == ptrace_pt_getdbregs) {
+ POST_WRITE(addr_, struct_ptrace_dbreg_struct_sz);
+ }
+ }
+}
+PRE_SYSCALL(recvmsg)(long long s_, void *msg_, long long flags_) {
+ PRE_WRITE(msg_, sizeof(__sanitizer_msghdr));
+}
+POST_SYSCALL(recvmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ if (res > 0) {
+ POST_WRITE(msg_, sizeof(__sanitizer_msghdr));
+ }
+}
+PRE_SYSCALL(sendmsg)(long long s_, void *msg_, long long flags_) {
+ PRE_READ(msg_, sizeof(__sanitizer_msghdr));
+}
+POST_SYSCALL(sendmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ if (res > 0) {
+ POST_READ(msg_, sizeof(__sanitizer_msghdr));
+ }
+}
+PRE_SYSCALL(recvfrom)
+(long long s_, void *buf_, long long len_, long long flags_, void *from_,
+ void *fromlenaddr_) {
+ PRE_WRITE(buf_, len_);
+ PRE_WRITE(from_, struct_sockaddr_sz);
+ PRE_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(recvfrom)
+(long long res, long long s_, void *buf_, long long len_, long long flags_,
+ void *from_, void *fromlenaddr_) {
+ if (res >= 0) {
+ POST_WRITE(buf_, res);
+ POST_WRITE(from_, struct_sockaddr_sz);
+ POST_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(accept)(long long s_, void *name_, void *anamelen_) {
+ PRE_WRITE(name_, struct_sockaddr_sz);
+ PRE_WRITE(anamelen_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(accept)
+(long long res, long long s_, void *name_, void *anamelen_) {
+ if (res == 0) {
+ POST_WRITE(name_, struct_sockaddr_sz);
+ POST_WRITE(anamelen_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(getpeername)(long long fdes_, void *asa_, void *alen_) {
+ PRE_WRITE(asa_, struct_sockaddr_sz);
+ PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(getpeername)
+(long long res, long long fdes_, void *asa_, void *alen_) {
+ if (res == 0) {
+ POST_WRITE(asa_, struct_sockaddr_sz);
+ POST_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(getsockname)(long long fdes_, void *asa_, void *alen_) {
+ PRE_WRITE(asa_, struct_sockaddr_sz);
+ PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(getsockname)
+(long long res, long long fdes_, void *asa_, void *alen_) {
+ if (res == 0) {
+ POST_WRITE(asa_, struct_sockaddr_sz);
+ POST_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(access)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(access)(long long res, void *path_, long long flags_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(chflags)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chflags)(long long res, void *path_, long long flags_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchflags)(long long fd_, long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(fchflags)(long long res, long long fd_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(sync)(void) { /* Nothing to do */ }
+POST_SYSCALL(sync)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(kill)(long long pid_, long long signum_) { /* Nothing to do */ }
+POST_SYSCALL(kill)(long long res, long long pid_, long long signum_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_stat43)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_43_stat43)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(getppid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getppid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_43_lstat43)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_43_lstat43)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(dup)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(dup)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(pipe)(void) {
+ /* pipe returns two descriptors through two returned values */
+}
+POST_SYSCALL(pipe)(long long res) {
+ /* pipe returns two descriptors through two returned values */
+}
+PRE_SYSCALL(getegid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getegid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(profil)
+(void *samples_, long long size_, long long offset_, long long scale_) {
+ if (samples_) {
+ PRE_WRITE(samples_, size_);
+ }
+}
+POST_SYSCALL(profil)
+(long long res, void *samples_, long long size_, long long offset_,
+ long long scale_) {
+ if (res == 0) {
+ if (samples_) {
+ POST_WRITE(samples_, size_);
+ }
+ }
+}
+PRE_SYSCALL(ktrace)
+(void *fname_, long long ops_, long long facs_, long long pid_) {
+ const char *fname = (const char *)fname_;
+ if (fname) {
+ PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+}
+POST_SYSCALL(ktrace)
+(long long res, void *fname_, long long ops_, long long facs_, long long pid_) {
+ const char *fname = (const char *)fname_;
+ if (res == 0) {
+ if (fname) {
+ POST_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_13_sigaction13)(long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_13_sigaction13)
+(long long res, long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+PRE_SYSCALL(getgid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getgid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_13_sigprocmask13)(long long how_, long long mask_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_13_sigprocmask13)
+(long long res, long long how_, long long mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(__getlogin)(void *namebuf_, long long namelen_) {
+ if (namebuf_) {
+ PRE_WRITE(namebuf_, namelen_);
+ }
+}
+POST_SYSCALL(__getlogin)(long long res, void *namebuf_, long long namelen_) {
+ if (res == 0) {
+ if (namebuf_) {
+ POST_WRITE(namebuf_, namelen_);
+ }
+ }
+}
+PRE_SYSCALL(__setlogin)(void *namebuf_) {
+ const char *namebuf = (const char *)namebuf_;
+ if (namebuf) {
+ PRE_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1);
+ }
+}
+POST_SYSCALL(__setlogin)(long long res, void *namebuf_) {
+ if (res == 0) {
+ const char *namebuf = (const char *)namebuf_;
+ if (namebuf) {
+ POST_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1);
+ }
+ }
+}
+PRE_SYSCALL(acct)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(acct)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_13_sigpending13)(void) { /* TODO */ }
+POST_SYSCALL(compat_13_sigpending13)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_13_sigaltstack13)(void *nss_, void *oss_) { /* TODO */ }
+POST_SYSCALL(compat_13_sigaltstack13)(long long res, void *nss_, void *oss_) {
+ /* TODO */
+}
+PRE_SYSCALL(ioctl)(long long fd_, long long com_, void *data_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(ioctl)(long long res, long long fd_, long long com_, void *data_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_12_oreboot)(long long opt_) { /* TODO */ }
+POST_SYSCALL(compat_12_oreboot)(long long res, long long opt_) { /* TODO */ }
+PRE_SYSCALL(revoke)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(revoke)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(symlink)(void *path_, void *link_) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+ PRE_READ(link, __sanitizer::internal_strlen(link) + 1);
+ }
+}
+POST_SYSCALL(symlink)(long long res, void *path_, void *link_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+ POST_READ(link, __sanitizer::internal_strlen(link) + 1);
+ }
+ }
+}
+PRE_SYSCALL(readlink)(void *path_, void *buf_, long long count_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (buf_) {
+ PRE_WRITE(buf_, count_);
+ }
+}
+POST_SYSCALL(readlink)
+(long long res, void *path_, void *buf_, long long count_) {
+ if (res > 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (buf_) {
+ PRE_WRITE(buf_, res);
+ }
+ }
+}
+PRE_SYSCALL(execve)(void *path_, void *argp_, void *envp_) {
+ const char *path = (const char *)path_;
+ char **argp = (char **)argp_;
+ char **envp = (char **)envp_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (argp && argp[0]) {
+ char *a = argp[0];
+ while (a++) {
+ PRE_READ(a, __sanitizer::internal_strlen(a) + 1);
+ }
+ }
+ if (envp && envp[0]) {
+ char *e = envp[0];
+ while (e++) {
+ PRE_READ(e, __sanitizer::internal_strlen(e) + 1);
+ }
+ }
+}
+POST_SYSCALL(execve)(long long res, void *path_, void *argp_, void *envp_) {
+ /* If we are here, something went wrong */
+ const char *path = (const char *)path_;
+ char **argp = (char **)argp_;
+ char **envp = (char **)envp_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (argp && argp[0]) {
+ char *a = argp[0];
+ while (a++) {
+ POST_READ(a, __sanitizer::internal_strlen(a) + 1);
+ }
+ }
+ if (envp && envp[0]) {
+ char *e = envp[0];
+ while (e++) {
+ POST_READ(e, __sanitizer::internal_strlen(e) + 1);
+ }
+ }
+}
+PRE_SYSCALL(umask)(long long newmask_) { /* Nothing to do */ }
+POST_SYSCALL(umask)(long long res, long long newmask_) { /* Nothing to do */ }
+PRE_SYSCALL(chroot)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chroot)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_43_fstat43)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_43_fstat43)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetkerninfo)
+(long long op_, void *where_, void *size_, long long arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetkerninfo)
+(long long res, long long op_, void *where_, void *size_, long long arg_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetpagesize)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_ogetpagesize)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_12_msync)(void *addr_, long long len_) { /* TODO */ }
+POST_SYSCALL(compat_12_msync)(long long res, void *addr_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(vfork)(void) { /* Nothing to do */ }
+POST_SYSCALL(vfork)(long long res) { /* Nothing to do */ }
+/* syscall 67 has been skipped */
+/* syscall 68 has been skipped */
+/* syscall 69 has been skipped */
+/* syscall 70 has been skipped */
+PRE_SYSCALL(compat_43_ommap)
+(void *addr_, long long len_, long long prot_, long long flags_, long long fd_,
+ long long pos_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ommap)
+(long long res, void *addr_, long long len_, long long prot_, long long flags_,
+ long long fd_, long long pos_) {
+ /* TODO */
+}
+PRE_SYSCALL(vadvise)(long long anom_) { /* Nothing to do */ }
+POST_SYSCALL(vadvise)(long long res, long long anom_) { /* Nothing to do */ }
+PRE_SYSCALL(munmap)(void *addr_, long long len_) { /* Nothing to do */ }
+POST_SYSCALL(munmap)(long long res, void *addr_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(mprotect)(void *addr_, long long len_, long long prot_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mprotect)
+(long long res, void *addr_, long long len_, long long prot_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(madvise)(void *addr_, long long len_, long long behav_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(madvise)
+(long long res, void *addr_, long long len_, long long behav_) {
+ /* Nothing to do */
+}
+/* syscall 76 has been skipped */
+/* syscall 77 has been skipped */
+PRE_SYSCALL(mincore)(void *addr_, long long len_, void *vec_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mincore)(long long res, void *addr_, long long len_, void *vec_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(getgroups)(long long gidsetsize_, void *gidset_) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ PRE_WRITE(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+}
+POST_SYSCALL(getgroups)(long long res, long long gidsetsize_, void *gidset_) {
+ if (res == 0) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ POST_WRITE(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+ }
+}
+PRE_SYSCALL(setgroups)(long long gidsetsize_, void *gidset_) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ PRE_READ(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+}
+POST_SYSCALL(setgroups)(long long res, long long gidsetsize_, void *gidset_) {
+ if (res == 0) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ POST_READ(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+ }
+}
+PRE_SYSCALL(getpgrp)(void) { /* Nothing to do */ }
+POST_SYSCALL(getpgrp)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(setpgid)(long long pid_, long long pgid_) { /* Nothing to do */ }
+POST_SYSCALL(setpgid)(long long res, long long pid_, long long pgid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_setitimer)(long long which_, void *itv_, void *oitv_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_setitimer)
+(long long res, long long which_, void *itv_, void *oitv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_owait)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_owait)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_12_oswapon)(void *name_) { /* TODO */ }
+POST_SYSCALL(compat_12_oswapon)(long long res, void *name_) { /* TODO */ }
+PRE_SYSCALL(compat_50_getitimer)(long long which_, void *itv_) { /* TODO */ }
+POST_SYSCALL(compat_50_getitimer)(long long res, long long which_, void *itv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogethostname)(void *hostname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogethostname)
+(long long res, void *hostname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osethostname)(void *hostname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osethostname)
+(long long res, void *hostname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetdtablesize)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_ogetdtablesize)(long long res) { /* TODO */ }
+PRE_SYSCALL(dup2)(long long from_, long long to_) { /* Nothing to do */ }
+POST_SYSCALL(dup2)(long long res, long long from_, long long to_) {
+ /* Nothing to do */
+}
+/* syscall 91 has been skipped */
+PRE_SYSCALL(fcntl)(long long fd_, long long cmd_, void *arg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fcntl)(long long res, long long fd_, long long cmd_, void *arg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_select)
+(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_select)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* TODO */
+}
+/* syscall 94 has been skipped */
+PRE_SYSCALL(fsync)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fsync)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(setpriority)(long long which_, long long who_, long long prio_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(setpriority)
+(long long res, long long which_, long long who_, long long prio_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_30_socket)
+(long long domain_, long long type_, long long protocol_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_30_socket)
+(long long res, long long domain_, long long type_, long long protocol_) {
+ /* TODO */
+}
+PRE_SYSCALL(connect)(long long s_, void *name_, long long namelen_) {
+ PRE_READ(name_, namelen_);
+}
+POST_SYSCALL(connect)
+(long long res, long long s_, void *name_, long long namelen_) {
+ if (res == 0) {
+ POST_READ(name_, namelen_);
+ }
+}
+PRE_SYSCALL(compat_43_oaccept)(long long s_, void *name_, void *anamelen_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_oaccept)
+(long long res, long long s_, void *name_, void *anamelen_) {
+ /* TODO */
+}
+PRE_SYSCALL(getpriority)(long long which_, long long who_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(getpriority)(long long res, long long which_, long long who_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_osend)
+(long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osend)
+(long long res, long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_orecv)
+(long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_orecv)
+(long long res, long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_13_sigreturn13)(void *sigcntxp_) { /* TODO */ }
+POST_SYSCALL(compat_13_sigreturn13)(long long res, void *sigcntxp_) {
+ /* TODO */
+}
+PRE_SYSCALL(bind)(long long s_, void *name_, long long namelen_) {
+ PRE_READ(name_, namelen_);
+}
+POST_SYSCALL(bind)
+(long long res, long long s_, void *name_, long long namelen_) {
+ if (res == 0) {
+ PRE_READ(name_, namelen_);
+ }
+}
+PRE_SYSCALL(setsockopt)
+(long long s_, long long level_, long long name_, void *val_,
+ long long valsize_) {
+ if (val_) {
+ PRE_READ(val_, valsize_);
+ }
+}
+POST_SYSCALL(setsockopt)
+(long long res, long long s_, long long level_, long long name_, void *val_,
+ long long valsize_) {
+ if (res == 0) {
+ if (val_) {
+ POST_READ(val_, valsize_);
+ }
+ }
+}
+PRE_SYSCALL(listen)(long long s_, long long backlog_) { /* Nothing to do */ }
+POST_SYSCALL(listen)(long long res, long long s_, long long backlog_) {
+ /* Nothing to do */
+}
+/* syscall 107 has been skipped */
+PRE_SYSCALL(compat_43_osigvec)(long long signum_, void *nsv_, void *osv_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osigvec)
+(long long res, long long signum_, void *nsv_, void *osv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osigblock)(long long mask_) { /* TODO */ }
+POST_SYSCALL(compat_43_osigblock)(long long res, long long mask_) { /* TODO */ }
+PRE_SYSCALL(compat_43_osigsetmask)(long long mask_) { /* TODO */ }
+POST_SYSCALL(compat_43_osigsetmask)(long long res, long long mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_13_sigsuspend13)(long long mask_) { /* TODO */ }
+POST_SYSCALL(compat_13_sigsuspend13)(long long res, long long mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osigstack)(void *nss_, void *oss_) { /* TODO */ }
+POST_SYSCALL(compat_43_osigstack)(long long res, void *nss_, void *oss_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_orecvmsg)(long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_orecvmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osendmsg)(long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osendmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+/* syscall 115 has been skipped */
+PRE_SYSCALL(compat_50_gettimeofday)(void *tp_, void *tzp_) { /* TODO */ }
+POST_SYSCALL(compat_50_gettimeofday)(long long res, void *tp_, void *tzp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_getrusage)(long long who_, void *rusage_) { /* TODO */ }
+POST_SYSCALL(compat_50_getrusage)
+(long long res, long long who_, void *rusage_) {
+ /* TODO */
+}
+PRE_SYSCALL(getsockopt)
+(long long s_, long long level_, long long name_, void *val_, void *avalsize_) {
+ /* TODO */
+}
+POST_SYSCALL(getsockopt)
+(long long res, long long s_, long long level_, long long name_, void *val_,
+ void *avalsize_) {
+ /* TODO */
+}
+/* syscall 119 has been skipped */
+PRE_SYSCALL(readv)(long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ if (iovp) {
+ PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
+ for (i = 0; i < iovcnt_; i++) {
+ PRE_WRITE(iovp[i].iov_base, iovp[i].iov_len);
+ }
+ }
+}
+POST_SYSCALL(readv)
+(long long res, long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ uptr m, n = res;
+ if (res > 0) {
+ if (iovp) {
+ POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
+ for (i = 0; i < iovcnt_ && n > 0; i++) {
+ m = n > iovp[i].iov_len ? iovp[i].iov_len : n;
+ POST_WRITE(iovp[i].iov_base, m);
+ n -= m;
+ }
+ }
+ }
+}
+PRE_SYSCALL(writev)(long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ if (iovp) {
+ PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
+ for (i = 0; i < iovcnt_; i++) {
+ PRE_READ(iovp[i].iov_base, iovp[i].iov_len);
+ }
+ }
+}
+POST_SYSCALL(writev)
+(long long res, long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ uptr m, n = res;
+ if (res > 0) {
+ if (iovp) {
+ POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
+ for (i = 0; i < iovcnt_ && n > 0; i++) {
+ m = n > iovp[i].iov_len ? iovp[i].iov_len : n;
+ POST_READ(iovp[i].iov_base, m);
+ n -= m;
+ }
+ }
+ }
+}
+PRE_SYSCALL(compat_50_settimeofday)(void *tv_, void *tzp_) { /* TODO */ }
+POST_SYSCALL(compat_50_settimeofday)(long long res, void *tv_, void *tzp_) {
+ /* TODO */
+}
+PRE_SYSCALL(fchown)(long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fchown)
+(long long res, long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fchmod)(long long fd_, long long mode_) { /* Nothing to do */ }
+POST_SYSCALL(fchmod)(long long res, long long fd_, long long mode_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_orecvfrom)
+(long long s_, void *buf_, long long len_, long long flags_, void *from_,
+ void *fromlenaddr_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_orecvfrom)
+(long long res, long long s_, void *buf_, long long len_, long long flags_,
+ void *from_, void *fromlenaddr_) {
+ /* TODO */
+}
+PRE_SYSCALL(setreuid)(long long ruid_, long long euid_) { /* Nothing to do */ }
+POST_SYSCALL(setreuid)(long long res, long long ruid_, long long euid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(setregid)(long long rgid_, long long egid_) { /* Nothing to do */ }
+POST_SYSCALL(setregid)(long long res, long long rgid_, long long egid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(rename)(void *from_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ PRE_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ PRE_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+POST_SYSCALL(rename)(long long res, void *from_, void *to_) {
+ if (res == 0) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ POST_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ POST_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_43_otruncate)(void *path_, long long length_) { /* TODO */ }
+POST_SYSCALL(compat_43_otruncate)
+(long long res, void *path_, long long length_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_oftruncate)(long long fd_, long long length_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_oftruncate)
+(long long res, long long fd_, long long length_) {
+ /* TODO */
+}
+PRE_SYSCALL(flock)(long long fd_, long long how_) { /* Nothing to do */ }
+POST_SYSCALL(flock)(long long res, long long fd_, long long how_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(mkfifo)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkfifo)(long long res, void *path_, long long mode_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(sendto)
+(long long s_, void *buf_, long long len_, long long flags_, void *to_,
+ long long tolen_) {
+ PRE_READ(buf_, len_);
+ PRE_READ(to_, tolen_);
+}
+POST_SYSCALL(sendto)
+(long long res, long long s_, void *buf_, long long len_, long long flags_,
+ void *to_, long long tolen_) {
+ if (res >= 0) {
+ POST_READ(buf_, len_);
+ POST_READ(to_, tolen_);
+ }
+}
+PRE_SYSCALL(shutdown)(long long s_, long long how_) { /* Nothing to do */ }
+POST_SYSCALL(shutdown)(long long res, long long s_, long long how_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(socketpair)
+(long long domain_, long long type_, long long protocol_, void *rsv_) {
+ PRE_WRITE(rsv_, 2 * sizeof(int));
+}
+POST_SYSCALL(socketpair)
+(long long res, long long domain_, long long type_, long long protocol_,
+ void *rsv_) {
+ if (res == 0) {
+ POST_WRITE(rsv_, 2 * sizeof(int));
+ }
+}
+PRE_SYSCALL(mkdir)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkdir)(long long res, void *path_, long long mode_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(rmdir)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(rmdir)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_50_utimes)(void *path_, void *tptr_) { /* TODO */ }
+POST_SYSCALL(compat_50_utimes)(long long res, void *path_, void *tptr_) {
+ /* TODO */
+}
+/* syscall 139 has been skipped */
+PRE_SYSCALL(compat_50_adjtime)(void *delta_, void *olddelta_) { /* TODO */ }
+POST_SYSCALL(compat_50_adjtime)(long long res, void *delta_, void *olddelta_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetpeername)(long long fdes_, void *asa_, void *alen_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetpeername)
+(long long res, long long fdes_, void *asa_, void *alen_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogethostid)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_ogethostid)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_43_osethostid)(long long hostid_) { /* TODO */ }
+POST_SYSCALL(compat_43_osethostid)(long long res, long long hostid_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetrlimit)(long long which_, void *rlp_) { /* TODO */ }
+POST_SYSCALL(compat_43_ogetrlimit)
+(long long res, long long which_, void *rlp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osetrlimit)(long long which_, void *rlp_) { /* TODO */ }
+POST_SYSCALL(compat_43_osetrlimit)
+(long long res, long long which_, void *rlp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_okillpg)(long long pgid_, long long signum_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_okillpg)
+(long long res, long long pgid_, long long signum_) {
+ /* TODO */
+}
+PRE_SYSCALL(setsid)(void) { /* Nothing to do */ }
+POST_SYSCALL(setsid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_quotactl)
+(void *path_, long long cmd_, long long uid_, void *arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_quotactl)
+(long long res, void *path_, long long cmd_, long long uid_, void *arg_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_oquota)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_oquota)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_43_ogetsockname)(long long fdec_, void *asa_, void *alen_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetsockname)
+(long long res, long long fdec_, void *asa_, void *alen_) {
+ /* TODO */
+}
+/* syscall 151 has been skipped */
+/* syscall 152 has been skipped */
+/* syscall 153 has been skipped */
+/* syscall 154 has been skipped */
+PRE_SYSCALL(nfssvc)(long long flag_, void *argp_) { /* Nothing to do */ }
+POST_SYSCALL(nfssvc)(long long res, long long flag_, void *argp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_ogetdirentries)
+(long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetdirentries)
+(long long res, long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_20_statfs)(void *path_, void *buf_) { /* TODO */ }
+POST_SYSCALL(compat_20_statfs)(long long res, void *path_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_20_fstatfs)(long long fd_, void *buf_) { /* TODO */ }
+POST_SYSCALL(compat_20_fstatfs)(long long res, long long fd_, void *buf_) {
+ /* TODO */
+}
+/* syscall 159 has been skipped */
+/* syscall 160 has been skipped */
+PRE_SYSCALL(compat_30_getfh)(void *fname_, void *fhp_) { /* TODO */ }
+POST_SYSCALL(compat_30_getfh)(long long res, void *fname_, void *fhp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_09_ogetdomainname)(void *domainname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_09_ogetdomainname)
+(long long res, void *domainname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_09_osetdomainname)(void *domainname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_09_osetdomainname)
+(long long res, void *domainname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_09_ouname)(void *name_) { /* TODO */ }
+POST_SYSCALL(compat_09_ouname)(long long res, void *name_) { /* TODO */ }
+PRE_SYSCALL(sysarch)(long long op_, void *parms_) { /* TODO */ }
+POST_SYSCALL(sysarch)(long long res, long long op_, void *parms_) { /* TODO */ }
+/* syscall 166 has been skipped */
+/* syscall 167 has been skipped */
+/* syscall 168 has been skipped */
+#if !defined(_LP64)
+PRE_SYSCALL(compat_10_osemsys)
+(long long which_, long long a2_, long long a3_, long long a4_, long long a5_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_10_osemsys)
+(long long res, long long which_, long long a2_, long long a3_, long long a4_,
+ long long a5_) {
+ /* TODO */
+}
+#else
+/* syscall 169 has been skipped */
+#endif
+#if !defined(_LP64)
+PRE_SYSCALL(compat_10_omsgsys)
+(long long which_, long long a2_, long long a3_, long long a4_, long long a5_,
+ long long a6_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_10_omsgsys)
+(long long res, long long which_, long long a2_, long long a3_, long long a4_,
+ long long a5_, long long a6_) {
+ /* TODO */
+}
+#else
+/* syscall 170 has been skipped */
+#endif
+#if !defined(_LP64)
+PRE_SYSCALL(compat_10_oshmsys)
+(long long which_, long long a2_, long long a3_, long long a4_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_10_oshmsys)
+(long long res, long long which_, long long a2_, long long a3_, long long a4_) {
+ /* TODO */
+}
+#else
+/* syscall 171 has been skipped */
+#endif
+/* syscall 172 has been skipped */
+PRE_SYSCALL(pread)
+(long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (buf_) {
+ PRE_WRITE(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(pread)
+(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (res > 0) {
+ POST_WRITE(buf_, res);
+ }
+}
+PRE_SYSCALL(pwrite)
+(long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (buf_) {
+ PRE_READ(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(pwrite)
+(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (res > 0) {
+ POST_READ(buf_, res);
+ }
+}
+PRE_SYSCALL(compat_30_ntp_gettime)(void *ntvp_) { /* TODO */ }
+POST_SYSCALL(compat_30_ntp_gettime)(long long res, void *ntvp_) { /* TODO */ }
+#if defined(NTP) || !defined(_KERNEL_OPT)
+PRE_SYSCALL(ntp_adjtime)(void *tp_) { /* Nothing to do */ }
+POST_SYSCALL(ntp_adjtime)(long long res, void *tp_) { /* Nothing to do */ }
+#else
+/* syscall 176 has been skipped */
+#endif
+/* syscall 177 has been skipped */
+/* syscall 178 has been skipped */
+/* syscall 179 has been skipped */
+/* syscall 180 has been skipped */
+PRE_SYSCALL(setgid)(long long gid_) { /* Nothing to do */ }
+POST_SYSCALL(setgid)(long long res, long long gid_) { /* Nothing to do */ }
+PRE_SYSCALL(setegid)(long long egid_) { /* Nothing to do */ }
+POST_SYSCALL(setegid)(long long res, long long egid_) { /* Nothing to do */ }
+PRE_SYSCALL(seteuid)(long long euid_) { /* Nothing to do */ }
+POST_SYSCALL(seteuid)(long long res, long long euid_) { /* Nothing to do */ }
+PRE_SYSCALL(lfs_bmapv)(void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+POST_SYSCALL(lfs_bmapv)
+(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+PRE_SYSCALL(lfs_markv)(void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+POST_SYSCALL(lfs_markv)
+(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+PRE_SYSCALL(lfs_segclean)(void *fsidp_, long long segment_) { /* TODO */ }
+POST_SYSCALL(lfs_segclean)(long long res, void *fsidp_, long long segment_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_lfs_segwait)(void *fsidp_, void *tv_) { /* TODO */ }
+POST_SYSCALL(compat_50_lfs_segwait)(long long res, void *fsidp_, void *tv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_12_stat12)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_12_stat12)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_12_fstat12)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_12_fstat12)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_12_lstat12)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_12_lstat12)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(pathconf)(void *path_, long long name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(pathconf)(long long res, void *path_, long long name_) {
+ if (res != -1) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fpathconf)(long long fd_, long long name_) { /* Nothing to do */ }
+POST_SYSCALL(fpathconf)(long long res, long long fd_, long long name_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(getsockopt2)
+(long long s_, long long level_, long long name_, void *val_, void *avalsize_) {
+ /* TODO */
+}
+POST_SYSCALL(getsockopt2)
+(long long res, long long s_, long long level_, long long name_, void *val_,
+ void *avalsize_) {
+ /* TODO */
+}
+PRE_SYSCALL(getrlimit)(long long which_, void *rlp_) {
+ PRE_WRITE(rlp_, struct_rlimit_sz);
+}
+POST_SYSCALL(getrlimit)(long long res, long long which_, void *rlp_) {
+ if (res == 0) {
+ POST_WRITE(rlp_, struct_rlimit_sz);
+ }
+}
+PRE_SYSCALL(setrlimit)(long long which_, void *rlp_) {
+ PRE_READ(rlp_, struct_rlimit_sz);
+}
+POST_SYSCALL(setrlimit)(long long res, long long which_, void *rlp_) {
+ if (res == 0) {
+ POST_READ(rlp_, struct_rlimit_sz);
+ }
+}
+PRE_SYSCALL(compat_12_getdirentries)
+(long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_12_getdirentries)
+(long long res, long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+PRE_SYSCALL(mmap)
+(void *addr_, long long len_, long long prot_, long long flags_, long long fd_,
+ long long PAD_, long long pos_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mmap)
+(long long res, void *addr_, long long len_, long long prot_, long long flags_,
+ long long fd_, long long PAD_, long long pos_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__syscall)
+(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(lseek)
+(long long fd_, long long PAD_, long long offset_, long long whence_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(lseek)
+(long long res, long long fd_, long long PAD_, long long offset_,
+ long long whence_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(truncate)(void *path_, long long PAD_, long long length_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(truncate)
+(long long res, void *path_, long long PAD_, long long length_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(ftruncate)(long long fd_, long long PAD_, long long length_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(ftruncate)
+(long long res, long long fd_, long long PAD_, long long length_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__sysctl)
+(void *name_, long long namelen_, void *oldv_, void *oldlenp_, void *newv_,
+ long long newlen_) {
+ const int *name = (const int *)name_;
+ if (name) {
+ PRE_READ(name, namelen_ * sizeof(*name));
+ }
+ if (newv_) {
+ PRE_READ(name, newlen_);
+ }
+}
+POST_SYSCALL(__sysctl)
+(long long res, void *name_, long long namelen_, void *oldv_, void *oldlenp_,
+ void *newv_, long long newlen_) {
+ if (res == 0) {
+ const int *name = (const int *)name_;
+ if (name) {
+ POST_READ(name, namelen_ * sizeof(*name));
+ }
+ if (newv_) {
+ POST_READ(name, newlen_);
+ }
+ }
+}
+PRE_SYSCALL(mlock)(void *addr_, long long len_) { /* Nothing to do */ }
+POST_SYSCALL(mlock)(long long res, void *addr_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(munlock)(void *addr_, long long len_) { /* Nothing to do */ }
+POST_SYSCALL(munlock)(long long res, void *addr_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(undelete)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(undelete)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_50_futimes)(long long fd_, void *tptr_) { /* TODO */ }
+POST_SYSCALL(compat_50_futimes)(long long res, long long fd_, void *tptr_) {
+ /* TODO */
+}
+PRE_SYSCALL(getpgid)(long long pid_) { /* Nothing to do */ }
+POST_SYSCALL(getpgid)(long long res, long long pid_) { /* Nothing to do */ }
+PRE_SYSCALL(reboot)(long long opt_, void *bootstr_) {
+ const char *bootstr = (const char *)bootstr_;
+ if (bootstr) {
+ PRE_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1);
+ }
+}
+POST_SYSCALL(reboot)(long long res, long long opt_, void *bootstr_) {
+ /* This call should never return */
+ const char *bootstr = (const char *)bootstr_;
+ if (bootstr) {
+ POST_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1);
+ }
+}
+PRE_SYSCALL(poll)(void *fds_, long long nfds_, long long timeout_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(poll)
+(long long res, void *fds_, long long nfds_, long long timeout_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(afssys)
+(long long id_, long long a1_, long long a2_, long long a3_, long long a4_,
+ long long a5_, long long a6_) {
+ /* TODO */
+}
+POST_SYSCALL(afssys)
+(long long res, long long id_, long long a1_, long long a2_, long long a3_,
+ long long a4_, long long a5_, long long a6_) {
+ /* TODO */
+}
+/* syscall 211 has been skipped */
+/* syscall 212 has been skipped */
+/* syscall 213 has been skipped */
+/* syscall 214 has been skipped */
+/* syscall 215 has been skipped */
+/* syscall 216 has been skipped */
+/* syscall 217 has been skipped */
+/* syscall 218 has been skipped */
+/* syscall 219 has been skipped */
+PRE_SYSCALL(compat_14___semctl)
+(long long semid_, long long semnum_, long long cmd_, void *arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_14___semctl)
+(long long res, long long semid_, long long semnum_, long long cmd_,
+ void *arg_) {
+ /* TODO */
+}
+PRE_SYSCALL(semget)(long long key_, long long nsems_, long long semflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(semget)
+(long long res, long long key_, long long nsems_, long long semflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(semop)(long long semid_, void *sops_, long long nsops_) {
+ if (sops_) {
+ PRE_READ(sops_, nsops_ * struct_sembuf_sz);
+ }
+}
+POST_SYSCALL(semop)
+(long long res, long long semid_, void *sops_, long long nsops_) {
+ if (res == 0) {
+ if (sops_) {
+ POST_READ(sops_, nsops_ * struct_sembuf_sz);
+ }
+ }
+}
+PRE_SYSCALL(semconfig)(long long flag_) { /* Nothing to do */ }
+POST_SYSCALL(semconfig)(long long res, long long flag_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_14_msgctl)(long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_14_msgctl)
+(long long res, long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(msgget)(long long key_, long long msgflg_) { /* Nothing to do */ }
+POST_SYSCALL(msgget)(long long res, long long key_, long long msgflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(msgsnd)
+(long long msqid_, void *msgp_, long long msgsz_, long long msgflg_) {
+ if (msgp_) {
+ PRE_READ(msgp_, msgsz_);
+ }
+}
+POST_SYSCALL(msgsnd)
+(long long res, long long msqid_, void *msgp_, long long msgsz_,
+ long long msgflg_) {
+ if (res == 0) {
+ if (msgp_) {
+ POST_READ(msgp_, msgsz_);
+ }
+ }
+}
+PRE_SYSCALL(msgrcv)
+(long long msqid_, void *msgp_, long long msgsz_, long long msgtyp_,
+ long long msgflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(msgrcv)
+(long long res, long long msqid_, void *msgp_, long long msgsz_,
+ long long msgtyp_, long long msgflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(shmat)(long long shmid_, void *shmaddr_, long long shmflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(shmat)
+(long long res, long long shmid_, void *shmaddr_, long long shmflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_14_shmctl)(long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_14_shmctl)
+(long long res, long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(shmdt)(void *shmaddr_) { /* Nothing to do */ }
+POST_SYSCALL(shmdt)(long long res, void *shmaddr_) { /* Nothing to do */ }
+PRE_SYSCALL(shmget)(long long key_, long long size_, long long shmflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(shmget)
+(long long res, long long key_, long long size_, long long shmflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_clock_gettime)(long long clock_id_, void *tp_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_clock_gettime)
+(long long res, long long clock_id_, void *tp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_clock_settime)(long long clock_id_, void *tp_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_clock_settime)
+(long long res, long long clock_id_, void *tp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_clock_getres)(long long clock_id_, void *tp_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_clock_getres)
+(long long res, long long clock_id_, void *tp_) {
+ /* TODO */
+}
+PRE_SYSCALL(timer_create)(long long clock_id_, void *evp_, void *timerid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(timer_create)
+(long long res, long long clock_id_, void *evp_, void *timerid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(timer_delete)(long long timerid_) { /* Nothing to do */ }
+POST_SYSCALL(timer_delete)(long long res, long long timerid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_timer_settime)
+(long long timerid_, long long flags_, void *value_, void *ovalue_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_timer_settime)
+(long long res, long long timerid_, long long flags_, void *value_,
+ void *ovalue_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_timer_gettime)(long long timerid_, void *value_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_timer_gettime)
+(long long res, long long timerid_, void *value_) {
+ /* TODO */
+}
+PRE_SYSCALL(timer_getoverrun)(long long timerid_) { /* Nothing to do */ }
+POST_SYSCALL(timer_getoverrun)(long long res, long long timerid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_nanosleep)(void *rqtp_, void *rmtp_) { /* TODO */ }
+POST_SYSCALL(compat_50_nanosleep)(long long res, void *rqtp_, void *rmtp_) {
+ /* TODO */
+}
+PRE_SYSCALL(fdatasync)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fdatasync)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(mlockall)(long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(mlockall)(long long res, long long flags_) { /* Nothing to do */ }
+PRE_SYSCALL(munlockall)(void) { /* Nothing to do */ }
+POST_SYSCALL(munlockall)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50___sigtimedwait)(void *set_, void *info_, void *timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50___sigtimedwait)
+(long long res, void *set_, void *info_, void *timeout_) {
+ /* TODO */
+}
+PRE_SYSCALL(sigqueueinfo)(long long pid_, void *info_) {
+ if (info_) {
+ PRE_READ(info_, siginfo_t_sz);
+ }
+}
+POST_SYSCALL(sigqueueinfo)(long long res, long long pid_, void *info_) {}
+PRE_SYSCALL(modctl)(long long cmd_, void *arg_) { /* TODO */ }
+POST_SYSCALL(modctl)(long long res, long long cmd_, void *arg_) { /* TODO */ }
+PRE_SYSCALL(_ksem_init)(long long value_, void *idp_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_init)(long long res, long long value_, void *idp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_open)
+(void *name_, long long oflag_, long long mode_, long long value_, void *idp_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(_ksem_open)
+(long long res, void *name_, long long oflag_, long long mode_,
+ long long value_, void *idp_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(_ksem_unlink)(void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(_ksem_unlink)(long long res, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(_ksem_close)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_close)(long long res, long long id_) { /* Nothing to do */ }
+PRE_SYSCALL(_ksem_post)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_post)(long long res, long long id_) { /* Nothing to do */ }
+PRE_SYSCALL(_ksem_wait)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_wait)(long long res, long long id_) { /* Nothing to do */ }
+PRE_SYSCALL(_ksem_trywait)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_trywait)(long long res, long long id_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_getvalue)(long long id_, void *value_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_getvalue)(long long res, long long id_, void *value_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_destroy)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_destroy)(long long res, long long id_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_timedwait)(long long id_, void *abstime_) {
+ if (abstime_) {
+ PRE_READ(abstime_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(_ksem_timedwait)(long long res, long long id_, void *abstime_) {}
+PRE_SYSCALL(mq_open)
+(void *name_, long long oflag_, long long mode_, void *attr_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(mq_open)
+(long long res, void *name_, long long oflag_, long long mode_, void *attr_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(mq_close)(long long mqdes_) { /* Nothing to do */ }
+POST_SYSCALL(mq_close)(long long res, long long mqdes_) { /* Nothing to do */ }
+PRE_SYSCALL(mq_unlink)(void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(mq_unlink)(long long res, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(mq_getattr)(long long mqdes_, void *mqstat_) { /* Nothing to do */ }
+POST_SYSCALL(mq_getattr)(long long res, long long mqdes_, void *mqstat_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(mq_setattr)(long long mqdes_, void *mqstat_, void *omqstat_) {
+ if (mqstat_) {
+ PRE_READ(mqstat_, struct_mq_attr_sz);
+ }
+}
+POST_SYSCALL(mq_setattr)
+(long long res, long long mqdes_, void *mqstat_, void *omqstat_) {}
+PRE_SYSCALL(mq_notify)(long long mqdes_, void *notification_) {
+ if (notification_) {
+ PRE_READ(notification_, struct_sigevent_sz);
+ }
+}
+POST_SYSCALL(mq_notify)(long long res, long long mqdes_, void *notification_) {}
+PRE_SYSCALL(mq_send)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_) {
+ if (msg_ptr_) {
+ PRE_READ(msg_ptr_, msg_len_);
+ }
+}
+POST_SYSCALL(mq_send)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ long long msg_prio_) {}
+PRE_SYSCALL(mq_receive)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mq_receive)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ void *msg_prio_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_mq_timedsend)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_,
+ void *abs_timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_mq_timedsend)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ long long msg_prio_, void *abs_timeout_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_mq_timedreceive)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_,
+ void *abs_timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_mq_timedreceive)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ void *msg_prio_, void *abs_timeout_) {
+ /* TODO */
+}
+/* syscall 267 has been skipped */
+/* syscall 268 has been skipped */
+/* syscall 269 has been skipped */
+PRE_SYSCALL(__posix_rename)(void *from_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from_) {
+ PRE_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ PRE_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+POST_SYSCALL(__posix_rename)(long long res, void *from_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ POST_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ POST_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+PRE_SYSCALL(swapctl)(long long cmd_, void *arg_, long long misc_) { /* TODO */ }
+POST_SYSCALL(swapctl)
+(long long res, long long cmd_, void *arg_, long long misc_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30_getdents)(long long fd_, void *buf_, long long count_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_30_getdents)
+(long long res, long long fd_, void *buf_, long long count_) {
+ /* TODO */
+}
+PRE_SYSCALL(minherit)(void *addr_, long long len_, long long inherit_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(minherit)
+(long long res, void *addr_, long long len_, long long inherit_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(lchmod)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lchmod)(long long res, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lchown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lchown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(compat_50_lutimes)(void *path_, void *tptr_) { /* TODO */ }
+POST_SYSCALL(compat_50_lutimes)(long long res, void *path_, void *tptr_) {
+ /* TODO */
+}
+PRE_SYSCALL(__msync13)(void *addr_, long long len_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__msync13)
+(long long res, void *addr_, long long len_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_30___stat13)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_30___stat13)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30___fstat13)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_30___fstat13)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30___lstat13)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_30___lstat13)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(__sigaltstack14)(void *nss_, void *oss_) {
+ if (nss_) {
+ PRE_READ(nss_, struct_sigaltstack_sz);
+ }
+ if (oss_) {
+ PRE_READ(oss_, struct_sigaltstack_sz);
+ }
+}
+POST_SYSCALL(__sigaltstack14)(long long res, void *nss_, void *oss_) {}
+PRE_SYSCALL(__vfork14)(void) { /* Nothing to do */ }
+POST_SYSCALL(__vfork14)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(__posix_chown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__posix_chown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(__posix_fchown)(long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__posix_fchown)
+(long long res, long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__posix_lchown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__posix_lchown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(getsid)(long long pid_) { /* Nothing to do */ }
+POST_SYSCALL(getsid)(long long res, long long pid_) { /* Nothing to do */ }
+PRE_SYSCALL(__clone)(long long flags_, void *stack_) { /* Nothing to do */ }
+POST_SYSCALL(__clone)(long long res, long long flags_, void *stack_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fktrace)
+(long long fd_, long long ops_, long long facs_, long long pid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fktrace)
+(long long res, long long fd_, long long ops_, long long facs_,
+ long long pid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(preadv)
+(long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(preadv)
+(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(pwritev)
+(long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(pwritev)
+(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_16___sigaction14)
+(long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_16___sigaction14)
+(long long res, long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+PRE_SYSCALL(__sigpending14)(void *set_) { /* Nothing to do */ }
+POST_SYSCALL(__sigpending14)(long long res, void *set_) { /* Nothing to do */ }
+PRE_SYSCALL(__sigprocmask14)(long long how_, void *set_, void *oset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__sigprocmask14)
+(long long res, long long how_, void *set_, void *oset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__sigsuspend14)(void *set_) {
+ if (set_) {
+ PRE_READ(set_, sizeof(__sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(__sigsuspend14)(long long res, void *set_) {
+ if (set_) {
+ PRE_READ(set_, sizeof(__sanitizer_sigset_t));
+ }
+}
+PRE_SYSCALL(compat_16___sigreturn14)(void *sigcntxp_) { /* TODO */ }
+POST_SYSCALL(compat_16___sigreturn14)(long long res, void *sigcntxp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__getcwd)(void *bufp_, long long length_) { /* Nothing to do */ }
+POST_SYSCALL(__getcwd)(long long res, void *bufp_, long long length_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fchroot)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fchroot)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_30_fhopen)(void *fhp_, long long flags_) { /* TODO */ }
+POST_SYSCALL(compat_30_fhopen)(long long res, void *fhp_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30_fhstat)(void *fhp_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_30_fhstat)(long long res, void *fhp_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_20_fhstatfs)(void *fhp_, void *buf_) { /* TODO */ }
+POST_SYSCALL(compat_20_fhstatfs)(long long res, void *fhp_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_____semctl13)
+(long long semid_, long long semnum_, long long cmd_, void *arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_____semctl13)
+(long long res, long long semid_, long long semnum_, long long cmd_,
+ void *arg_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___msgctl13)
+(long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50___msgctl13)
+(long long res, long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___shmctl13)
+(long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50___shmctl13)
+(long long res, long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(lchflags)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lchflags)(long long res, void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(issetugid)(void) { /* Nothing to do */ }
+POST_SYSCALL(issetugid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(utrace)(void *label_, void *addr_, long long len_) {
+ const char *label = (const char *)label_;
+ if (label) {
+ PRE_READ(label, __sanitizer::internal_strlen(label) + 1);
+ }
+ if (addr_) {
+ PRE_READ(addr_, len_);
+ }
+}
+POST_SYSCALL(utrace)(long long res, void *label_, void *addr_, long long len_) {
+ const char *label = (const char *)label_;
+ if (label) {
+ POST_READ(label, __sanitizer::internal_strlen(label) + 1);
+ }
+ if (addr_) {
+ POST_READ(addr_, len_);
+ }
+}
+PRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ }
+POST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ }
+PRE_SYSCALL(setcontext)(void *ucp_) {
+ if (ucp_) {
+ PRE_READ(ucp_, ucontext_t_sz);
+ }
+}
+POST_SYSCALL(setcontext)(long long res, void *ucp_) {}
+PRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) {
+ if (ucp_) {
+ PRE_READ(ucp_, ucontext_t_sz);
+ }
+}
+POST_SYSCALL(_lwp_create)
+(long long res, void *ucp_, long long flags_, void *new_lwp_) {}
+PRE_SYSCALL(_lwp_exit)(void) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_exit)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_self)(void) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_self)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_wait)(long long wait_for_, void *departed_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_wait)(long long res, long long wait_for_, void *departed_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_suspend)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_suspend)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_continue)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_continue)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_wakeup)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_wakeup)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_getprivate)(void) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_getprivate)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_setprivate)(void *ptr_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_setprivate)(long long res, void *ptr_) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_kill)(long long target_, long long signo_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_kill)(long long res, long long target_, long long signo_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_detach)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_detach)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50__lwp_park)
+(void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50__lwp_park)
+(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+PRE_SYSCALL(_lwp_unpark)(long long target_, void *hint_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_unpark)(long long res, long long target_, void *hint_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_unpark_all)(void *targets_, long long ntargets_, void *hint_) {
+ if (targets_) {
+ PRE_READ(targets_, ntargets_ * sizeof(__sanitizer_lwpid_t));
+ }
+}
+POST_SYSCALL(_lwp_unpark_all)
+(long long res, void *targets_, long long ntargets_, void *hint_) {}
+PRE_SYSCALL(_lwp_setname)(long long target_, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(_lwp_setname)(long long res, long long target_, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(_lwp_getname)(long long target_, void *name_, long long len_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_getname)
+(long long res, long long target_, void *name_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_ctl)(long long features_, void **address_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_ctl)(long long res, long long features_, void **address_) {
+ /* Nothing to do */
+}
+/* syscall 326 has been skipped */
+/* syscall 327 has been skipped */
+/* syscall 328 has been skipped */
+/* syscall 329 has been skipped */
+PRE_SYSCALL(compat_60_sa_register)
+(void *newv_, void **oldv_, long long flags_, long long stackinfo_offset_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_60_sa_register)
+(long long res, void *newv_, void **oldv_, long long flags_,
+ long long stackinfo_offset_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_60_sa_stacks)(long long num_, void *stacks_) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_stacks)
+(long long res, long long num_, void *stacks_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_60_sa_enable)(void) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_enable)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_60_sa_setconcurrency)(long long concurrency_) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_setconcurrency)
+(long long res, long long concurrency_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_60_sa_yield)(void) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_yield)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_60_sa_preempt)(long long sa_id_) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_preempt)(long long res, long long sa_id_) {
+ /* TODO */
+}
+/* syscall 336 has been skipped */
+/* syscall 337 has been skipped */
+/* syscall 338 has been skipped */
+/* syscall 339 has been skipped */
+PRE_SYSCALL(__sigaction_sigtramp)
+(long long signum_, void *nsa_, void *osa_, void *tramp_, long long vers_) {
+ if (nsa_) {
+ PRE_READ(nsa_, sizeof(__sanitizer_sigaction));
+ }
+}
+POST_SYSCALL(__sigaction_sigtramp)
+(long long res, long long signum_, void *nsa_, void *osa_, void *tramp_,
+ long long vers_) {
+ if (nsa_) {
+ PRE_READ(nsa_, sizeof(__sanitizer_sigaction));
+ }
+}
+/* syscall 341 has been skipped */
+/* syscall 342 has been skipped */
+PRE_SYSCALL(rasctl)(void *addr_, long long len_, long long op_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(rasctl)
+(long long res, void *addr_, long long len_, long long op_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(kqueue)(void) { /* Nothing to do */ }
+POST_SYSCALL(kqueue)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_kevent)
+(long long fd_, void *changelist_, long long nchanges_, void *eventlist_,
+ long long nevents_, void *timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_kevent)
+(long long res, long long fd_, void *changelist_, long long nchanges_,
+ void *eventlist_, long long nevents_, void *timeout_) {
+ /* TODO */
+}
+PRE_SYSCALL(_sched_setparam)
+(long long pid_, long long lid_, long long policy_, void *params_) {
+ if (params_) {
+ PRE_READ(params_, struct_sched_param_sz);
+ }
+}
+POST_SYSCALL(_sched_setparam)
+(long long res, long long pid_, long long lid_, long long policy_,
+ void *params_) {
+ if (params_) {
+ PRE_READ(params_, struct_sched_param_sz);
+ }
+}
+PRE_SYSCALL(_sched_getparam)
+(long long pid_, long long lid_, void *policy_, void *params_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_sched_getparam)
+(long long res, long long pid_, long long lid_, void *policy_, void *params_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_sched_setaffinity)
+(long long pid_, long long lid_, long long size_, void *cpuset_) {
+ if (cpuset_) {
+ PRE_READ(cpuset_, size_);
+ }
+}
+POST_SYSCALL(_sched_setaffinity)
+(long long res, long long pid_, long long lid_, long long size_,
+ void *cpuset_) {
+ if (cpuset_) {
+ PRE_READ(cpuset_, size_);
+ }
+}
+PRE_SYSCALL(_sched_getaffinity)
+(long long pid_, long long lid_, long long size_, void *cpuset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_sched_getaffinity)
+(long long res, long long pid_, long long lid_, long long size_,
+ void *cpuset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(sched_yield)(void) { /* Nothing to do */ }
+POST_SYSCALL(sched_yield)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_sched_protect)(long long priority_) { /* Nothing to do */ }
+POST_SYSCALL(_sched_protect)(long long res, long long priority_) {
+ /* Nothing to do */
+}
+/* syscall 352 has been skipped */
+/* syscall 353 has been skipped */
+PRE_SYSCALL(fsync_range)
+(long long fd_, long long flags_, long long start_, long long length_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fsync_range)
+(long long res, long long fd_, long long flags_, long long start_,
+ long long length_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(uuidgen)(void *store_, long long count_) { /* Nothing to do */ }
+POST_SYSCALL(uuidgen)(long long res, void *store_, long long count_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_90_getvfsstat)
+(void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(compat_90_getvfsstat)
+(long long res, void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_90_statvfs1)(void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(compat_90_statvfs1)
+(long long res, void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(compat_90_fstatvfs1)(long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(compat_90_fstatvfs1)
+(long long res, long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_30_fhstatvfs1)(void *fhp_, void *buf_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_30_fhstatvfs1)
+(long long res, void *fhp_, void *buf_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattrctl)
+(void *path_, long long cmd_, void *filename_, long long attrnamespace_,
+ void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattrctl)
+(long long res, void *path_, long long cmd_, void *filename_,
+ long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_set_file)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_set_file)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_get_file)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_get_file)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_delete_file)
+(void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_delete_file)
+(long long res, void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_set_fd)
+(long long fd_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_set_fd)
+(long long res, long long fd_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_get_fd)
+(long long fd_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_get_fd)
+(long long res, long long fd_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_delete_fd)
+(long long fd_, long long attrnamespace_, void *attrname_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_delete_fd)
+(long long res, long long fd_, long long attrnamespace_, void *attrname_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_set_link)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_set_link)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_get_link)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_get_link)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_delete_link)
+(void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_delete_link)
+(long long res, void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_list_fd)
+(long long fd_, long long attrnamespace_, void *data_, long long nbytes_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_list_fd)
+(long long res, long long fd_, long long attrnamespace_, void *data_,
+ long long nbytes_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_list_file)
+(void *path_, long long attrnamespace_, void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_list_file)
+(long long res, void *path_, long long attrnamespace_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_list_link)
+(void *path_, long long attrnamespace_, void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_list_link)
+(long long res, void *path_, long long attrnamespace_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(compat_50_pselect)
+(long long nd_, void *in_, void *ou_, void *ex_, void *ts_, void *mask_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_pselect)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_,
+ void *mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_pollts)
+(void *fds_, long long nfds_, void *ts_, void *mask_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_pollts)
+(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(setxattr)
+(void *path_, void *name_, void *value_, long long size_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(setxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_,
+ long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lsetxattr)
+(void *path_, void *name_, void *value_, long long size_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lsetxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_,
+ long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(fsetxattr)
+(long long fd_, void *name_, void *value_, long long size_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fsetxattr)
+(long long res, long long fd_, void *name_, void *value_, long long size_,
+ long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(getxattr)(void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(getxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lgetxattr)
+(void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lgetxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(fgetxattr)
+(long long fd_, void *name_, void *value_, long long size_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fgetxattr)
+(long long res, long long fd_, void *name_, void *value_, long long size_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(listxattr)(void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(listxattr)
+(long long res, void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(llistxattr)(void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(llistxattr)
+(long long res, void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(flistxattr)(long long fd_, void *list_, long long size_) {
+ /* TODO */
+}
+POST_SYSCALL(flistxattr)
+(long long res, long long fd_, void *list_, long long size_) {
+ /* TODO */
+}
+PRE_SYSCALL(removexattr)(void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(removexattr)(long long res, void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lremovexattr)(void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lremovexattr)(long long res, void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(fremovexattr)(long long fd_, void *name_) { /* TODO */ }
+POST_SYSCALL(fremovexattr)(long long res, long long fd_, void *name_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___stat30)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_50___stat30)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___fstat30)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_50___fstat30)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___lstat30)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_50___lstat30)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(__getdents30)(long long fd_, void *buf_, long long count_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__getdents30)
+(long long res, long long fd_, void *buf_, long long count_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(posix_fadvise)(long long) { /* Nothing to do */ }
+POST_SYSCALL(posix_fadvise)(long long res, long long) { /* Nothing to do */ }
+PRE_SYSCALL(compat_30___fhstat30)(void *fhp_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_30___fhstat30)(long long res, void *fhp_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___ntp_gettime30)(void *ntvp_) { /* TODO */ }
+POST_SYSCALL(compat_50___ntp_gettime30)(long long res, void *ntvp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__socket30)
+(long long domain_, long long type_, long long protocol_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__socket30)
+(long long res, long long domain_, long long type_, long long protocol_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__getfh30)(void *fname_, void *fhp_, void *fh_size_) {
+ const char *fname = (const char *)fname_;
+ if (fname) {
+ PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+}
+POST_SYSCALL(__getfh30)
+(long long res, void *fname_, void *fhp_, void *fh_size_) {
+ const char *fname = (const char *)fname_;
+ if (res == 0) {
+ if (fname) {
+ POST_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+ }
+}
+PRE_SYSCALL(__fhopen40)(void *fhp_, long long fh_size_, long long flags_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(__fhopen40)
+(long long res, void *fhp_, long long fh_size_, long long flags_) {}
+PRE_SYSCALL(compat_90_fhstatvfs1)
+(void *fhp_, long long fh_size_, void *buf_, long long flags_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(compat_90_fhstatvfs1)
+(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}
+PRE_SYSCALL(compat_50___fhstat40)(void *fhp_, long long fh_size_, void *sb_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(compat_50___fhstat40)
+(long long res, void *fhp_, long long fh_size_, void *sb_) {}
+PRE_SYSCALL(aio_cancel)(long long fildes_, void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_cancel)(long long res, long long fildes_, void *aiocbp_) {}
+PRE_SYSCALL(aio_error)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_error)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(aio_fsync)(long long op_, void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_fsync)(long long res, long long op_, void *aiocbp_) {}
+PRE_SYSCALL(aio_read)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_read)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(aio_return)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_return)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(compat_50_aio_suspend)
+(void *list_, long long nent_, void *timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_aio_suspend)
+(long long res, void *list_, long long nent_, void *timeout_) {
+ /* TODO */
+}
+PRE_SYSCALL(aio_write)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_write)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(lio_listio)
+(long long mode_, void *list_, long long nent_, void *sig_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(lio_listio)
+(long long res, long long mode_, void *list_, long long nent_, void *sig_) {
+ /* Nothing to do */
+}
+/* syscall 407 has been skipped */
+/* syscall 408 has been skipped */
+/* syscall 409 has been skipped */
+PRE_SYSCALL(__mount50)
+(void *type_, void *path_, long long flags_, void *data_, long long data_len_) {
+ const char *type = (const char *)type_;
+ const char *path = (const char *)path_;
+ if (type) {
+ PRE_READ(type, __sanitizer::internal_strlen(type) + 1);
+ }
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (data_) {
+ PRE_READ(data_, data_len_);
+ }
+}
+POST_SYSCALL(__mount50)
+(long long res, void *type_, void *path_, long long flags_, void *data_,
+ long long data_len_) {
+ const char *type = (const char *)type_;
+ const char *path = (const char *)path_;
+ if (type) {
+ POST_READ(type, __sanitizer::internal_strlen(type) + 1);
+ }
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (data_) {
+ POST_READ(data_, data_len_);
+ }
+}
+PRE_SYSCALL(mremap)
+(void *old_address_, long long old_size_, void *new_address_,
+ long long new_size_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mremap)
+(long long res, void *old_address_, long long old_size_, void *new_address_,
+ long long new_size_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(pset_create)(void *psid_) { /* Nothing to do */ }
+POST_SYSCALL(pset_create)(long long res, void *psid_) { /* Nothing to do */ }
+PRE_SYSCALL(pset_destroy)(long long psid_) { /* Nothing to do */ }
+POST_SYSCALL(pset_destroy)(long long res, long long psid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(pset_assign)(long long psid_, long long cpuid_, void *opsid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(pset_assign)
+(long long res, long long psid_, long long cpuid_, void *opsid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_pset_bind)
+(long long idtype_, long long first_id_, long long second_id_, long long psid_,
+ void *opsid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_pset_bind)
+(long long res, long long idtype_, long long first_id_, long long second_id_,
+ long long psid_, void *opsid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__posix_fadvise50)
+(long long fd_, long long PAD_, long long offset_, long long len_,
+ long long advice_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__posix_fadvise50)
+(long long res, long long fd_, long long PAD_, long long offset_,
+ long long len_, long long advice_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__select50)
+(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__select50)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__gettimeofday50)(void *tp_, void *tzp_) { /* Nothing to do */ }
+POST_SYSCALL(__gettimeofday50)(long long res, void *tp_, void *tzp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__settimeofday50)(void *tv_, void *tzp_) {
+ if (tv_) {
+ PRE_READ(tv_, timeval_sz);
+ }
+ if (tzp_) {
+ PRE_READ(tzp_, struct_timezone_sz);
+ }
+}
+POST_SYSCALL(__settimeofday50)(long long res, void *tv_, void *tzp_) {}
+PRE_SYSCALL(__utimes50)(void *path_, void *tptr_) {
+ struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (tptr) {
+ PRE_READ(tptr[0], struct_timespec_sz);
+ PRE_READ(tptr[1], struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__utimes50)(long long res, void *path_, void *tptr_) {}
+PRE_SYSCALL(__adjtime50)(void *delta_, void *olddelta_) {
+ if (delta_) {
+ PRE_READ(delta_, timeval_sz);
+ }
+}
+POST_SYSCALL(__adjtime50)(long long res, void *delta_, void *olddelta_) {}
+PRE_SYSCALL(__lfs_segwait50)(void *fsidp_, void *tv_) { /* TODO */ }
+POST_SYSCALL(__lfs_segwait50)(long long res, void *fsidp_, void *tv_) {
+ /* TODO */
+}
+PRE_SYSCALL(__futimes50)(long long fd_, void *tptr_) {
+ struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;
+ if (tptr) {
+ PRE_READ(tptr[0], struct_timespec_sz);
+ PRE_READ(tptr[1], struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__futimes50)(long long res, long long fd_, void *tptr_) {}
+PRE_SYSCALL(__lutimes50)(void *path_, void *tptr_) {
+ struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (tptr) {
+ PRE_READ(tptr[0], struct_timespec_sz);
+ PRE_READ(tptr[1], struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__lutimes50)(long long res, void *path_, void *tptr_) {
+ struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (tptr) {
+ POST_READ(tptr[0], struct_timespec_sz);
+ POST_READ(tptr[1], struct_timespec_sz);
+ }
+}
+PRE_SYSCALL(__setitimer50)(long long which_, void *itv_, void *oitv_) {
+ struct __sanitizer_itimerval *itv = (struct __sanitizer_itimerval *)itv_;
+ if (itv) {
+ PRE_READ(&itv->it_interval.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&itv->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));
+ PRE_READ(&itv->it_value.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&itv->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));
+ }
+}
+POST_SYSCALL(__setitimer50)
+(long long res, long long which_, void *itv_, void *oitv_) {}
+PRE_SYSCALL(__getitimer50)(long long which_, void *itv_) { /* Nothing to do */ }
+POST_SYSCALL(__getitimer50)(long long res, long long which_, void *itv_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__clock_gettime50)(long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__clock_gettime50)(long long res, long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__clock_settime50)(long long clock_id_, void *tp_) {
+ if (tp_) {
+ PRE_READ(tp_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__clock_settime50)
+(long long res, long long clock_id_, void *tp_) {}
+PRE_SYSCALL(__clock_getres50)(long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__clock_getres50)(long long res, long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__nanosleep50)(void *rqtp_, void *rmtp_) {
+ if (rqtp_) {
+ PRE_READ(rqtp_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__nanosleep50)(long long res, void *rqtp_, void *rmtp_) {}
+PRE_SYSCALL(____sigtimedwait50)(void *set_, void *info_, void *timeout_) {
+ if (set_) {
+ PRE_READ(set_, sizeof(__sanitizer_sigset_t));
+ }
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(____sigtimedwait50)
+(long long res, void *set_, void *info_, void *timeout_) {}
+PRE_SYSCALL(__mq_timedsend50)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_,
+ void *abs_timeout_) {
+ if (msg_ptr_) {
+ PRE_READ(msg_ptr_, msg_len_);
+ }
+ if (abs_timeout_) {
+ PRE_READ(abs_timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__mq_timedsend50)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ long long msg_prio_, void *abs_timeout_) {}
+PRE_SYSCALL(__mq_timedreceive50)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_,
+ void *abs_timeout_) {
+ if (msg_ptr_) {
+ PRE_READ(msg_ptr_, msg_len_);
+ }
+ if (abs_timeout_) {
+ PRE_READ(abs_timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__mq_timedreceive50)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ void *msg_prio_, void *abs_timeout_) {}
+PRE_SYSCALL(compat_60__lwp_park)
+(void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_60__lwp_park)
+(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+PRE_SYSCALL(__kevent50)
+(long long fd_, void *changelist_, long long nchanges_, void *eventlist_,
+ long long nevents_, void *timeout_) {
+ if (changelist_) {
+ PRE_READ(changelist_, nchanges_ * struct_kevent_sz);
+ }
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__kevent50)
+(long long res, long long fd_, void *changelist_, long long nchanges_,
+ void *eventlist_, long long nevents_, void *timeout_) {}
+PRE_SYSCALL(__pselect50)
+(long long nd_, void *in_, void *ou_, void *ex_, void *ts_, void *mask_) {
+ if (ts_) {
+ PRE_READ(ts_, struct_timespec_sz);
+ }
+ if (mask_) {
+ PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(__pselect50)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_,
+ void *mask_) {}
+PRE_SYSCALL(__pollts50)(void *fds_, long long nfds_, void *ts_, void *mask_) {
+ if (ts_) {
+ PRE_READ(ts_, struct_timespec_sz);
+ }
+ if (mask_) {
+ PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(__pollts50)
+(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) {}
+PRE_SYSCALL(__aio_suspend50)(void *list_, long long nent_, void *timeout_) {
+ int i;
+ const struct aiocb *const *list = (const struct aiocb *const *)list_;
+ if (list) {
+ for (i = 0; i < nent_; i++) {
+ if (list[i]) {
+ PRE_READ(list[i], sizeof(struct __sanitizer_aiocb));
+ }
+ }
+ }
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__aio_suspend50)
+(long long res, void *list_, long long nent_, void *timeout_) {}
+PRE_SYSCALL(__stat50)(void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__stat50)(long long res, void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(__fstat50)(long long fd_, void *sb_) { /* Nothing to do */ }
+POST_SYSCALL(__fstat50)(long long res, long long fd_, void *sb_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__lstat50)(void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__lstat50)(long long res, void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(____semctl50)
+(long long semid_, long long semnum_, long long cmd_, void *arg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(____semctl50)
+(long long res, long long semid_, long long semnum_, long long cmd_,
+ void *arg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__shmctl50)(long long shmid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__shmctl50)
+(long long res, long long shmid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__msgctl50)(long long msqid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__msgctl50)
+(long long res, long long msqid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__getrusage50)(long long who_, void *rusage_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__getrusage50)(long long res, long long who_, void *rusage_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__timer_settime50)
+(long long timerid_, long long flags_, void *value_, void *ovalue_) {
+ struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_;
+ if (value) {
+ PRE_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));
+ PRE_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));
+ }
+}
+POST_SYSCALL(__timer_settime50)
+(long long res, long long timerid_, long long flags_, void *value_,
+ void *ovalue_) {
+ struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_;
+ if (res == 0) {
+ if (value) {
+ POST_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t));
+ POST_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));
+ POST_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t));
+ POST_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));
+ }
+ }
+}
+PRE_SYSCALL(__timer_gettime50)(long long timerid_, void *value_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__timer_gettime50)
+(long long res, long long timerid_, void *value_) {
+ /* Nothing to do */
+}
+#if defined(NTP) || !defined(_KERNEL_OPT)
+PRE_SYSCALL(__ntp_gettime50)(void *ntvp_) { /* Nothing to do */ }
+POST_SYSCALL(__ntp_gettime50)(long long res, void *ntvp_) {
+ /* Nothing to do */
+}
+#else
+/* syscall 448 has been skipped */
+#endif
+PRE_SYSCALL(__wait450)
+(long long pid_, void *status_, long long options_, void *rusage_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__wait450)
+(long long res, long long pid_, void *status_, long long options_,
+ void *rusage_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__mknod50)(void *path_, long long mode_, long long dev_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__mknod50)
+(long long res, void *path_, long long mode_, long long dev_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(__fhstat50)(void *fhp_, long long fh_size_, void *sb_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(__fhstat50)
+(long long res, void *fhp_, long long fh_size_, void *sb_) {
+ if (res == 0) {
+ if (fhp_) {
+ POST_READ(fhp_, fh_size_);
+ }
+ }
+}
+/* syscall 452 has been skipped */
+PRE_SYSCALL(pipe2)(void *fildes_, long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(pipe2)(long long res, void *fildes_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(dup3)(long long from_, long long to_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(dup3)
+(long long res, long long from_, long long to_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(kqueue1)(long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(kqueue1)(long long res, long long flags_) { /* Nothing to do */ }
+PRE_SYSCALL(paccept)
+(long long s_, void *name_, void *anamelen_, void *mask_, long long flags_) {
+ if (mask_) {
+ PRE_READ(mask_, sizeof(__sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(paccept)
+(long long res, long long s_, void *name_, void *anamelen_, void *mask_,
+ long long flags_) {
+ if (res >= 0) {
+ if (mask_) {
+ PRE_READ(mask_, sizeof(__sanitizer_sigset_t));
+ }
+ }
+}
+PRE_SYSCALL(linkat)
+(long long fd1_, void *name1_, long long fd2_, void *name2_, long long flags_) {
+ const char *name1 = (const char *)name1_;
+ const char *name2 = (const char *)name2_;
+ if (name1) {
+ PRE_READ(name1, __sanitizer::internal_strlen(name1) + 1);
+ }
+ if (name2) {
+ PRE_READ(name2, __sanitizer::internal_strlen(name2) + 1);
+ }
+}
+POST_SYSCALL(linkat)
+(long long res, long long fd1_, void *name1_, long long fd2_, void *name2_,
+ long long flags_) {
+ const char *name1 = (const char *)name1_;
+ const char *name2 = (const char *)name2_;
+ if (res == 0) {
+ if (name1) {
+ POST_READ(name1, __sanitizer::internal_strlen(name1) + 1);
+ }
+ if (name2) {
+ POST_READ(name2, __sanitizer::internal_strlen(name2) + 1);
+ }
+ }
+}
+PRE_SYSCALL(renameat)
+(long long fromfd_, void *from_, long long tofd_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ PRE_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ PRE_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+POST_SYSCALL(renameat)
+(long long res, long long fromfd_, void *from_, long long tofd_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (res == 0) {
+ if (from) {
+ POST_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ POST_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+ }
+}
+PRE_SYSCALL(mkfifoat)(long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkfifoat)
+(long long res, long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(mknodat)
+(long long fd_, void *path_, long long mode_, long long PAD_, long long dev_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mknodat)
+(long long res, long long fd_, void *path_, long long mode_, long long PAD_,
+ long long dev_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(mkdirat)(long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkdirat)
+(long long res, long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(faccessat)
+(long long fd_, void *path_, long long amode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(faccessat)
+(long long res, long long fd_, void *path_, long long amode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchmodat)
+(long long fd_, void *path_, long long mode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(fchmodat)
+(long long res, long long fd_, void *path_, long long mode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchownat)
+(long long fd_, void *path_, long long owner_, long long group_,
+ long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(fchownat)
+(long long res, long long fd_, void *path_, long long owner_, long long group_,
+ long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fexecve)(long long fd_, void *argp_, void *envp_) { /* TODO */ }
+POST_SYSCALL(fexecve)(long long res, long long fd_, void *argp_, void *envp_) {
+ /* TODO */
+}
+PRE_SYSCALL(fstatat)(long long fd_, void *path_, void *buf_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(fstatat)
+(long long res, long long fd_, void *path_, void *buf_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(utimensat)
+(long long fd_, void *path_, void *tptr_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (tptr_) {
+ PRE_READ(tptr_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(utimensat)
+(long long res, long long fd_, void *path_, void *tptr_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (res > 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (tptr_) {
+ POST_READ(tptr_, struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(openat)
+(long long fd_, void *path_, long long oflags_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(openat)
+(long long res, long long fd_, void *path_, long long oflags_,
+ long long mode_) {
+ const char *path = (const char *)path_;
+ if (res > 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(readlinkat)
+(long long fd_, void *path_, void *buf_, long long bufsize_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(readlinkat)
+(long long res, long long fd_, void *path_, void *buf_, long long bufsize_) {
+ const char *path = (const char *)path_;
+ if (res > 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(symlinkat)(void *path1_, long long fd_, void *path2_) {
+ const char *path1 = (const char *)path1_;
+ const char *path2 = (const char *)path2_;
+ if (path1) {
+ PRE_READ(path1, __sanitizer::internal_strlen(path1) + 1);
+ }
+ if (path2) {
+ PRE_READ(path2, __sanitizer::internal_strlen(path2) + 1);
+ }
+}
+POST_SYSCALL(symlinkat)
+(long long res, void *path1_, long long fd_, void *path2_) {
+ const char *path1 = (const char *)path1_;
+ const char *path2 = (const char *)path2_;
+ if (res == 0) {
+ if (path1) {
+ POST_READ(path1, __sanitizer::internal_strlen(path1) + 1);
+ }
+ if (path2) {
+ POST_READ(path2, __sanitizer::internal_strlen(path2) + 1);
+ }
+ }
+}
+PRE_SYSCALL(unlinkat)(long long fd_, void *path_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(unlinkat)
+(long long res, long long fd_, void *path_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(futimens)(long long fd_, void *tptr_) {
+ struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;
+ if (tptr) {
+ PRE_READ(tptr[0], struct_timespec_sz);
+ PRE_READ(tptr[1], struct_timespec_sz);
+ }
+}
+POST_SYSCALL(futimens)(long long res, long long fd_, void *tptr_) {
+ struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_;
+ if (res == 0) {
+ if (tptr) {
+ POST_READ(tptr[0], struct_timespec_sz);
+ POST_READ(tptr[1], struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(__quotactl)(void *path_, void *args_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__quotactl)(long long res, void *path_, void *args_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(posix_spawn)
+(void *pid_, void *path_, void *file_actions_, void *attrp_, void *argv_,
+ void *envp_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(posix_spawn)
+(long long res, void *pid_, void *path_, void *file_actions_, void *attrp_,
+ void *argv_, void *envp_) {
+ const char *path = (const char *)path_;
+ if (pid_) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(recvmmsg)
+(long long s_, void *mmsg_, long long vlen_, long long flags_, void *timeout_) {
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(recvmmsg)
+(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_,
+ void *timeout_) {
+ if (res >= 0) {
+ if (timeout_) {
+ POST_READ(timeout_, struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(sendmmsg)
+(long long s_, void *mmsg_, long long vlen_, long long flags_) {
+ struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;
+ if (mmsg) {
+ PRE_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) *
+ (vlen_ > 1024 ? 1024 : vlen_));
+ }
+}
+POST_SYSCALL(sendmmsg)
+(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_) {
+ struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;
+ if (res >= 0) {
+ if (mmsg) {
+ POST_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) *
+ (vlen_ > 1024 ? 1024 : vlen_));
+ }
+ }
+}
+PRE_SYSCALL(clock_nanosleep)
+(long long clock_id_, long long flags_, void *rqtp_, void *rmtp_) {
+ if (rqtp_) {
+ PRE_READ(rqtp_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(clock_nanosleep)
+(long long res, long long clock_id_, long long flags_, void *rqtp_,
+ void *rmtp_) {
+ if (rqtp_) {
+ POST_READ(rqtp_, struct_timespec_sz);
+ }
+}
+PRE_SYSCALL(___lwp_park60)
+(long long clock_id_, long long flags_, void *ts_, long long unpark_,
+ void *hint_, void *unparkhint_) {
+ if (ts_) {
+ PRE_READ(ts_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(___lwp_park60)
+(long long res, long long clock_id_, long long flags_, void *ts_,
+ long long unpark_, void *hint_, void *unparkhint_) {
+ if (res == 0) {
+ if (ts_) {
+ POST_READ(ts_, struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(posix_fallocate)
+(long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(posix_fallocate)
+(long long res, long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fdiscard)
+(long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fdiscard)
+(long long res, long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(wait6)
+(long long idtype_, long long id_, void *status_, long long options_,
+ void *wru_, void *info_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(wait6)
+(long long res, long long idtype_, long long id_, void *status_,
+ long long options_, void *wru_, void *info_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(clock_getcpuclockid2)
+(long long idtype_, long long id_, void *clock_id_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(clock_getcpuclockid2)
+(long long res, long long idtype_, long long id_, void *clock_id_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__getvfsstat90)(void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__getvfsstat90)
+(long long res, void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__statvfs190)(void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__statvfs190)
+(long long res, void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(__fstatvfs190)(long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__fstatvfs190)
+(long long res, long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__fhstatvfs190)
+(void *fhp_, long long fh_size_, void *buf_, long long flags_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(__fhstatvfs190)
+(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}
+#undef SYS_MAXSYSARGS
+} // extern "C"
+
+#undef PRE_SYSCALL
+#undef PRE_READ
+#undef PRE_WRITE
+#undef POST_SYSCALL
+#undef POST_READ
+#undef POST_WRITE
+
+#endif // SANITIZER_NETBSD
diff --git a/lib/tsan/sanitizer_common/sanitizer_termination.cpp b/lib/tsan/sanitizer_common/sanitizer_termination.cpp
new file mode 100644
index 0000000000..84be6fc323
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_termination.cpp
@@ -0,0 +1,94 @@
+//===-- sanitizer_termination.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains the Sanitizer termination functions CheckFailed and Die,
+/// and the callback functionalities associated with them.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+static const int kMaxNumOfInternalDieCallbacks = 5;
+static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];
+
+bool AddDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == nullptr) {
+ InternalDieCallbacks[i] = callback;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool RemoveDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == callback) {
+ internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
+ sizeof(InternalDieCallbacks[0]) *
+ (kMaxNumOfInternalDieCallbacks - i - 1));
+ InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
+ return true;
+ }
+ }
+ return false;
+}
+
+static DieCallbackType UserDieCallback;
+void SetUserDieCallback(DieCallbackType callback) {
+ UserDieCallback = callback;
+}
+
+void NORETURN Die() {
+ if (UserDieCallback)
+ UserDieCallback();
+ for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
+ if (InternalDieCallbacks[i])
+ InternalDieCallbacks[i]();
+ }
+ if (common_flags()->abort_on_error)
+ Abort();
+ internal__exit(common_flags()->exitcode);
+}
+
+static CheckFailedCallbackType CheckFailedCallback;
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {
+ CheckFailedCallback = callback;
+}
+
+const int kSecondsToSleepWhenRecursiveCheckFailed = 2;
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) > 10) {
+ SleepForSeconds(kSecondsToSleepWhenRecursiveCheckFailed);
+ Trap();
+ }
+
+ if (CheckFailedCallback) {
+ CheckFailedCallback(file, line, cond, v1, v2);
+ }
+ Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
+ v1, v2);
+ Die();
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_death_callback(void (*callback)(void)) {
+ SetUserDieCallback(callback);
+}
+} // extern "C"
diff --git a/lib/tsan/sanitizer_common/sanitizer_thread_registry.cpp b/lib/tsan/sanitizer_common/sanitizer_thread_registry.cpp
new file mode 100644
index 0000000000..f2c6f27993
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_thread_registry.cpp
@@ -0,0 +1,351 @@
+//===-- sanitizer_thread_registry.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// General thread bookkeeping functionality.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_thread_registry.h"
+
+namespace __sanitizer {
+
+ThreadContextBase::ThreadContextBase(u32 tid)
+ : tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),
+ status(ThreadStatusInvalid), detached(false),
+ thread_type(ThreadType::Regular), parent_tid(0), next(0) {
+ name[0] = '\0';
+ atomic_store(&thread_destroyed, 0, memory_order_release);
+}
+
+ThreadContextBase::~ThreadContextBase() {
+ // ThreadContextBase should never be deleted.
+ CHECK(0);
+}
+
+void ThreadContextBase::SetName(const char *new_name) {
+ name[0] = '\0';
+ if (new_name) {
+ internal_strncpy(name, new_name, sizeof(name));
+ name[sizeof(name) - 1] = '\0';
+ }
+}
+
+void ThreadContextBase::SetDead() {
+ CHECK(status == ThreadStatusRunning ||
+ status == ThreadStatusFinished);
+ status = ThreadStatusDead;
+ user_id = 0;
+ OnDead();
+}
+
+void ThreadContextBase::SetDestroyed() {
+ atomic_store(&thread_destroyed, 1, memory_order_release);
+}
+
+bool ThreadContextBase::GetDestroyed() {
+ return !!atomic_load(&thread_destroyed, memory_order_acquire);
+}
+
+void ThreadContextBase::SetJoined(void *arg) {
+ // FIXME(dvyukov): print message and continue (it's user error).
+ CHECK_EQ(false, detached);
+ CHECK_EQ(ThreadStatusFinished, status);
+ status = ThreadStatusDead;
+ user_id = 0;
+ OnJoined(arg);
+}
+
+void ThreadContextBase::SetFinished() {
+ // ThreadRegistry::FinishThread calls here in ThreadStatusCreated state
+ // for a thread that never actually started. In that case the thread
+ // should go to ThreadStatusFinished regardless of whether it was created
+ // as detached.
+ if (!detached || status == ThreadStatusCreated) status = ThreadStatusFinished;
+ OnFinished();
+}
+
+void ThreadContextBase::SetStarted(tid_t _os_id, ThreadType _thread_type,
+ void *arg) {
+ status = ThreadStatusRunning;
+ os_id = _os_id;
+ thread_type = _thread_type;
+ OnStarted(arg);
+}
+
+void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,
+ bool _detached, u32 _parent_tid, void *arg) {
+ status = ThreadStatusCreated;
+ user_id = _user_id;
+ unique_id = _unique_id;
+ detached = _detached;
+ // Parent tid makes no sense for the main thread.
+ if (tid != 0)
+ parent_tid = _parent_tid;
+ OnCreated(arg);
+}
+
+void ThreadContextBase::Reset() {
+ status = ThreadStatusInvalid;
+ SetName(0);
+ atomic_store(&thread_destroyed, 0, memory_order_release);
+ OnReset();
+}
+
+// ThreadRegistry implementation.
+
+const u32 ThreadRegistry::kUnknownTid = ~0U;
+
+ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
+ u32 thread_quarantine_size, u32 max_reuse)
+ : context_factory_(factory),
+ max_threads_(max_threads),
+ thread_quarantine_size_(thread_quarantine_size),
+ max_reuse_(max_reuse),
+ mtx_(),
+ n_contexts_(0),
+ total_threads_(0),
+ alive_threads_(0),
+ max_alive_threads_(0),
+ running_threads_(0) {
+ threads_ = (ThreadContextBase **)MmapOrDie(max_threads_ * sizeof(threads_[0]),
+ "ThreadRegistry");
+ dead_threads_.clear();
+ invalid_threads_.clear();
+}
+
+void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
+ uptr *alive) {
+ BlockingMutexLock l(&mtx_);
+ if (total) *total = n_contexts_;
+ if (running) *running = running_threads_;
+ if (alive) *alive = alive_threads_;
+}
+
+uptr ThreadRegistry::GetMaxAliveThreads() {
+ BlockingMutexLock l(&mtx_);
+ return max_alive_threads_;
+}
+
+u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
+ void *arg) {
+ BlockingMutexLock l(&mtx_);
+ u32 tid = kUnknownTid;
+ ThreadContextBase *tctx = QuarantinePop();
+ if (tctx) {
+ tid = tctx->tid;
+ } else if (n_contexts_ < max_threads_) {
+ // Allocate new thread context and tid.
+ tid = n_contexts_++;
+ tctx = context_factory_(tid);
+ threads_[tid] = tctx;
+ } else {
+#if !SANITIZER_GO
+ Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
+ SanitizerToolName, max_threads_);
+#else
+ Printf("race: limit on %u simultaneously alive goroutines is exceeded,"
+ " dying\n", max_threads_);
+#endif
+ Die();
+ }
+ CHECK_NE(tctx, 0);
+ CHECK_NE(tid, kUnknownTid);
+ CHECK_LT(tid, max_threads_);
+ CHECK_EQ(tctx->status, ThreadStatusInvalid);
+ alive_threads_++;
+ if (max_alive_threads_ < alive_threads_) {
+ max_alive_threads_++;
+ CHECK_EQ(alive_threads_, max_alive_threads_);
+ }
+ tctx->SetCreated(user_id, total_threads_++, detached,
+ parent_tid, arg);
+ return tid;
+}
+
+void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
+ void *arg) {
+ CheckLocked();
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx == 0)
+ continue;
+ cb(tctx, arg);
+ }
+}
+
+u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
+ BlockingMutexLock l(&mtx_);
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && cb(tctx, arg))
+ return tctx->tid;
+ }
+ return kUnknownTid;
+}
+
+ThreadContextBase *
+ThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) {
+ CheckLocked();
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && cb(tctx, arg))
+ return tctx;
+ }
+ return 0;
+}
+
+static bool FindThreadContextByOsIdCallback(ThreadContextBase *tctx,
+ void *arg) {
+ return (tctx->os_id == (uptr)arg && tctx->status != ThreadStatusInvalid &&
+ tctx->status != ThreadStatusDead);
+}
+
+ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
+ return FindThreadContextLocked(FindThreadContextByOsIdCallback,
+ (void *)os_id);
+}
+
+void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
+ tctx->status);
+ tctx->SetName(name);
+}
+
+void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
+ BlockingMutexLock l(&mtx_);
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && tctx->user_id == user_id &&
+ tctx->status != ThreadStatusInvalid) {
+ tctx->SetName(name);
+ return;
+ }
+ }
+}
+
+void ThreadRegistry::DetachThread(u32 tid, void *arg) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ if (tctx->status == ThreadStatusInvalid) {
+ Report("%s: Detach of non-existent thread\n", SanitizerToolName);
+ return;
+ }
+ tctx->OnDetached(arg);
+ if (tctx->status == ThreadStatusFinished) {
+ tctx->SetDead();
+ QuarantinePush(tctx);
+ } else {
+ tctx->detached = true;
+ }
+}
+
+void ThreadRegistry::JoinThread(u32 tid, void *arg) {
+ bool destroyed = false;
+ do {
+ {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ if (tctx->status == ThreadStatusInvalid) {
+ Report("%s: Join of non-existent thread\n", SanitizerToolName);
+ return;
+ }
+ if ((destroyed = tctx->GetDestroyed())) {
+ tctx->SetJoined(arg);
+ QuarantinePush(tctx);
+ }
+ }
+ if (!destroyed)
+ internal_sched_yield();
+ } while (!destroyed);
+}
+
+// Normally this is called when the thread is about to exit. If
+// called in ThreadStatusCreated state, then this thread was never
+// really started. We just did CreateThread for a prospective new
+// thread before trying to create it, and then failed to actually
+// create it, and so never called StartThread.
+void ThreadRegistry::FinishThread(u32 tid) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_GT(alive_threads_, 0);
+ alive_threads_--;
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ bool dead = tctx->detached;
+ if (tctx->status == ThreadStatusRunning) {
+ CHECK_GT(running_threads_, 0);
+ running_threads_--;
+ } else {
+ // The thread never really existed.
+ CHECK_EQ(tctx->status, ThreadStatusCreated);
+ dead = true;
+ }
+ tctx->SetFinished();
+ if (dead) {
+ tctx->SetDead();
+ QuarantinePush(tctx);
+ }
+ tctx->SetDestroyed();
+}
+
+void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
+ void *arg) {
+ BlockingMutexLock l(&mtx_);
+ running_threads_++;
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(ThreadStatusCreated, tctx->status);
+ tctx->SetStarted(os_id, thread_type, arg);
+}
+
+void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
+ if (tctx->tid == 0)
+ return; // Don't reuse the main thread. It's a special snowflake.
+ dead_threads_.push_back(tctx);
+ if (dead_threads_.size() <= thread_quarantine_size_)
+ return;
+ tctx = dead_threads_.front();
+ dead_threads_.pop_front();
+ CHECK_EQ(tctx->status, ThreadStatusDead);
+ tctx->Reset();
+ tctx->reuse_count++;
+ if (max_reuse_ > 0 && tctx->reuse_count >= max_reuse_)
+ return;
+ invalid_threads_.push_back(tctx);
+}
+
+ThreadContextBase *ThreadRegistry::QuarantinePop() {
+ if (invalid_threads_.size() == 0)
+ return 0;
+ ThreadContextBase *tctx = invalid_threads_.front();
+ invalid_threads_.pop_front();
+ return tctx;
+}
+
+void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_NE(tctx->status, ThreadStatusInvalid);
+ CHECK_NE(tctx->status, ThreadStatusDead);
+ CHECK_EQ(tctx->user_id, 0);
+ tctx->user_id = user_id;
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_thread_registry.h b/lib/tsan/sanitizer_common/sanitizer_thread_registry.h
new file mode 100644
index 0000000000..493aa988f7
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_thread_registry.h
@@ -0,0 +1,160 @@
+//===-- sanitizer_thread_registry.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// General thread bookkeeping functionality.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_REGISTRY_H
+#define SANITIZER_THREAD_REGISTRY_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+enum ThreadStatus {
+ ThreadStatusInvalid, // Non-existent thread, data is invalid.
+ ThreadStatusCreated, // Created but not yet running.
+ ThreadStatusRunning, // The thread is currently running.
+ ThreadStatusFinished, // Joinable thread is finished but not yet joined.
+ ThreadStatusDead // Joined, but some info is still available.
+};
+
+enum class ThreadType {
+ Regular, // Normal thread
+ Worker, // macOS Grand Central Dispatch (GCD) worker thread
+ Fiber, // Fiber
+};
+
+// Generic thread context. Specific sanitizer tools may inherit from it.
+// If thread is dead, context may optionally be reused for a new thread.
+class ThreadContextBase {
+ public:
+ explicit ThreadContextBase(u32 tid);
+ ~ThreadContextBase(); // Should never be called.
+
+ const u32 tid; // Thread ID. Main thread should have tid = 0.
+ u64 unique_id; // Unique thread ID.
+ u32 reuse_count; // Number of times this tid was reused.
+ tid_t os_id; // PID (used for reporting).
+ uptr user_id; // Some opaque user thread id (e.g. pthread_t).
+ char name[64]; // As annotated by user.
+
+ ThreadStatus status;
+ bool detached;
+ ThreadType thread_type;
+
+ u32 parent_tid;
+ ThreadContextBase *next; // For storing thread contexts in a list.
+
+ atomic_uint32_t thread_destroyed; // To address race of Joined vs Finished
+
+ void SetName(const char *new_name);
+
+ void SetDead();
+ void SetJoined(void *arg);
+ void SetFinished();
+ void SetStarted(tid_t _os_id, ThreadType _thread_type, void *arg);
+ void SetCreated(uptr _user_id, u64 _unique_id, bool _detached,
+ u32 _parent_tid, void *arg);
+ void Reset();
+
+ void SetDestroyed();
+ bool GetDestroyed();
+
+ // The following methods may be overriden by subclasses.
+ // Some of them take opaque arg that may be optionally be used
+ // by subclasses.
+ virtual void OnDead() {}
+ virtual void OnJoined(void *arg) {}
+ virtual void OnFinished() {}
+ virtual void OnStarted(void *arg) {}
+ virtual void OnCreated(void *arg) {}
+ virtual void OnReset() {}
+ virtual void OnDetached(void *arg) {}
+};
+
+typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
+
+class ThreadRegistry {
+ public:
+ static const u32 kUnknownTid;
+
+ ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
+ u32 thread_quarantine_size, u32 max_reuse = 0);
+ void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,
+ uptr *alive = nullptr);
+ uptr GetMaxAliveThreads();
+
+ void Lock() { mtx_.Lock(); }
+ void CheckLocked() { mtx_.CheckLocked(); }
+ void Unlock() { mtx_.Unlock(); }
+
+ // Should be guarded by ThreadRegistryLock.
+ ThreadContextBase *GetThreadLocked(u32 tid) {
+ DCHECK_LT(tid, n_contexts_);
+ return threads_[tid];
+ }
+
+ u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
+
+ typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg);
+ // Invokes callback with a specified arg for each thread context.
+ // Should be guarded by ThreadRegistryLock.
+ void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg);
+
+ typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg);
+ // Finds a thread using the provided callback. Returns kUnknownTid if no
+ // thread is found.
+ u32 FindThread(FindThreadCallback cb, void *arg);
+ // Should be guarded by ThreadRegistryLock. Return 0 if no thread
+ // is found.
+ ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb,
+ void *arg);
+ ThreadContextBase *FindThreadContextByOsIDLocked(tid_t os_id);
+
+ void SetThreadName(u32 tid, const char *name);
+ void SetThreadNameByUserId(uptr user_id, const char *name);
+ void DetachThread(u32 tid, void *arg);
+ void JoinThread(u32 tid, void *arg);
+ void FinishThread(u32 tid);
+ void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);
+ void SetThreadUserId(u32 tid, uptr user_id);
+
+ private:
+ const ThreadContextFactory context_factory_;
+ const u32 max_threads_;
+ const u32 thread_quarantine_size_;
+ const u32 max_reuse_;
+
+ BlockingMutex mtx_;
+
+ u32 n_contexts_; // Number of created thread contexts,
+ // at most max_threads_.
+ u64 total_threads_; // Total number of created threads. May be greater than
+ // max_threads_ if contexts were reused.
+ uptr alive_threads_; // Created or running.
+ uptr max_alive_threads_;
+ uptr running_threads_;
+
+ ThreadContextBase **threads_; // Array of thread contexts is leaked.
+ IntrusiveList<ThreadContextBase> dead_threads_;
+ IntrusiveList<ThreadContextBase> invalid_threads_;
+
+ void QuarantinePush(ThreadContextBase *tctx);
+ ThreadContextBase *QuarantinePop();
+};
+
+typedef GenericScopedLock<ThreadRegistry> ThreadRegistryLock;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_THREAD_REGISTRY_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_tls_get_addr.cpp b/lib/tsan/sanitizer_common/sanitizer_tls_get_addr.cpp
new file mode 100644
index 0000000000..9ca898a306
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_tls_get_addr.cpp
@@ -0,0 +1,154 @@
+//===-- sanitizer_tls_get_addr.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_tls_get_addr.h"
+
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_interceptors.h"
+
+namespace __sanitizer {
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+// The actual parameter that comes to __tls_get_addr
+// is a pointer to a struct with two words in it:
+struct TlsGetAddrParam {
+ uptr dso_id;
+ uptr offset;
+};
+
+// Glibc starting from 2.19 allocates tls using __signal_safe_memalign,
+// which has such header.
+struct Glibc_2_19_tls_header {
+ uptr size;
+ uptr start;
+};
+
+// This must be static TLS
+__attribute__((tls_model("initial-exec")))
+static __thread DTLS dtls;
+
+// Make sure we properly destroy the DTLS objects:
+// this counter should never get too large.
+static atomic_uintptr_t number_of_live_dtls;
+
+static const uptr kDestroyedThread = -1;
+
+static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
+ if (!size) return;
+ VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
+ UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
+ atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
+}
+
+static inline void DTLS_Resize(uptr new_size) {
+ if (dtls.dtv_size >= new_size) return;
+ new_size = RoundUpToPowerOfTwo(new_size);
+ new_size = Max(new_size, 4096UL / sizeof(DTLS::DTV));
+ DTLS::DTV *new_dtv =
+ (DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
+ uptr num_live_dtls =
+ atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
+ VReport(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
+ CHECK_LT(num_live_dtls, 1 << 20);
+ uptr old_dtv_size = dtls.dtv_size;
+ DTLS::DTV *old_dtv = dtls.dtv;
+ if (old_dtv_size)
+ internal_memcpy(new_dtv, dtls.dtv, dtls.dtv_size * sizeof(DTLS::DTV));
+ dtls.dtv = new_dtv;
+ dtls.dtv_size = new_size;
+ if (old_dtv_size)
+ DTLS_Deallocate(old_dtv, old_dtv_size);
+}
+
+void DTLS_Destroy() {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VReport(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
+ uptr s = dtls.dtv_size;
+ dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
+ DTLS_Deallocate(dtls.dtv, s);
+}
+
+#if defined(__powerpc64__) || defined(__mips__)
+// This is glibc's TLS_DTV_OFFSET:
+// "Dynamic thread vector pointers point 0x8000 past the start of each
+// TLS block."
+static const uptr kDtvOffset = 0x8000;
+#else
+static const uptr kDtvOffset = 0;
+#endif
+
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
+ uptr static_tls_begin, uptr static_tls_end) {
+ if (!common_flags()->intercept_tls_get_addr) return 0;
+ TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
+ uptr dso_id = arg->dso_id;
+ if (dtls.dtv_size == kDestroyedThread) return 0;
+ DTLS_Resize(dso_id + 1);
+ if (dtls.dtv[dso_id].beg) return 0;
+ uptr tls_size = 0;
+ uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
+ VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
+ "num_live_dtls %zd\n",
+ arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
+ atomic_load(&number_of_live_dtls, memory_order_relaxed));
+ if (dtls.last_memalign_ptr == tls_beg) {
+ tls_size = dtls.last_memalign_size;
+ VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
+ tls_beg, tls_size);
+ } else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
+ // This is the static TLS block which was initialized / unpoisoned at thread
+ // creation.
+ VReport(2, "__tls_get_addr: static tls: %p\n", tls_beg);
+ tls_size = 0;
+ } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
+ // We may want to check gnu_get_libc_version().
+ Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
+ tls_size = header->size;
+ tls_beg = header->start;
+ VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
+ tls_beg, tls_size);
+ } else {
+ VReport(2, "__tls_get_addr: Can't guess glibc version\n");
+ // This may happen inside the DTOR of main thread, so just ignore it.
+ tls_size = 0;
+ }
+ dtls.dtv[dso_id].beg = tls_beg;
+ dtls.dtv[dso_id].size = tls_size;
+ return dtls.dtv + dso_id;
+}
+
+void DTLS_on_libc_memalign(void *ptr, uptr size) {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VReport(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
+ dtls.last_memalign_size = size;
+}
+
+DTLS *DTLS_Get() { return &dtls; }
+
+bool DTLSInDestruction(DTLS *dtls) {
+ return dtls->dtv_size == kDestroyedThread;
+}
+
+#else
+void DTLS_on_libc_memalign(void *ptr, uptr size) {}
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res,
+ unsigned long, unsigned long) { return 0; }
+DTLS *DTLS_Get() { return 0; }
+void DTLS_Destroy() {}
+bool DTLSInDestruction(DTLS *dtls) {
+ UNREACHABLE("dtls is unsupported on this platform!");
+}
+
+#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_tls_get_addr.h b/lib/tsan/sanitizer_common/sanitizer_tls_get_addr.h
new file mode 100644
index 0000000000..c7cd5a8bff
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_tls_get_addr.h
@@ -0,0 +1,62 @@
+//===-- sanitizer_tls_get_addr.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+// All this magic is specific to glibc and is required to workaround
+// the lack of interface that would tell us about the Dynamic TLS (DTLS).
+// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
+//
+// The matters get worse because the glibc implementation changed between
+// 2.18 and 2.19:
+// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
+//
+// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// which we intercept and thus know where is the DTLS.
+// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
+// which is an internal function that wraps a mmap call, neither of which
+// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
+// header which we can use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_TLS_GET_ADDR_H
+#define SANITIZER_TLS_GET_ADDR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+struct DTLS {
+ // Array of DTLS chunks for the current Thread.
+ // If beg == 0, the chunk is unused.
+ struct DTV {
+ uptr beg, size;
+ };
+
+ uptr dtv_size;
+ DTV *dtv; // dtv_size elements, allocated by MmapOrDie.
+
+ // Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cpp
+ uptr last_memalign_size;
+ uptr last_memalign_ptr;
+};
+
+// Returns pointer and size of a linker-allocated TLS block.
+// Each block is returned exactly once.
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin,
+ uptr static_tls_end);
+void DTLS_on_libc_memalign(void *ptr, uptr size);
+DTLS *DTLS_Get();
+void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
+// Returns true if DTLS of suspended thread is in destruction process.
+bool DTLSInDestruction(DTLS *dtls);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_TLS_GET_ADDR_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_type_traits.cpp b/lib/tsan/sanitizer_common/sanitizer_type_traits.cpp
new file mode 100644
index 0000000000..5ee37d7376
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_type_traits.cpp
@@ -0,0 +1,20 @@
+//===-- sanitizer_type_traits.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements a subset of C++ type traits. This is so we can avoid depending
+// on system C++ headers.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+const bool true_type::value;
+const bool false_type::value;
+
+} // namespace __sanitizer
diff --git a/lib/tsan/sanitizer_common/sanitizer_type_traits.h b/lib/tsan/sanitizer_common/sanitizer_type_traits.h
new file mode 100644
index 0000000000..2a58d9874d
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_type_traits.h
@@ -0,0 +1,62 @@
+//===-- sanitizer_type_traits.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements a subset of C++ type traits. This is so we can avoid depending
+// on system C++ headers.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_TYPE_TRAITS_H
+#define SANITIZER_TYPE_TRAITS_H
+
+namespace __sanitizer {
+
+struct true_type {
+ static const bool value = true;
+};
+
+struct false_type {
+ static const bool value = false;
+};
+
+// is_same<T, U>
+//
+// Type trait to compare if types are the same.
+// E.g.
+//
+// ```
+// is_same<int,int>::value - True
+// is_same<int,char>::value - False
+// ```
+template <typename T, typename U>
+struct is_same : public false_type {};
+
+template <typename T>
+struct is_same<T, T> : public true_type {};
+
+// conditional<B, T, F>
+//
+// Defines type as T if B is true or as F otherwise.
+// E.g. the following is true
+//
+// ```
+// is_same<int, conditional<true, int, double>::type>::value
+// is_same<double, conditional<false, int, double>::type>::value
+// ```
+template <bool B, class T, class F>
+struct conditional {
+ using type = T;
+};
+
+template <class T, class F>
+struct conditional<false, T, F> {
+ using type = F;
+};
+
+} // namespace __sanitizer
+
+#endif
diff --git a/lib/tsan/sanitizer_common/sanitizer_vector.h b/lib/tsan/sanitizer_common/sanitizer_vector.h
new file mode 100644
index 0000000000..31216f3ec3
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_vector.h
@@ -0,0 +1,124 @@
+//===-- sanitizer_vector.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+
+// Low-fat STL-like vector container.
+
+#ifndef SANITIZER_VECTOR_H
+#define SANITIZER_VECTOR_H
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+namespace __sanitizer {
+
+template<typename T>
+class Vector {
+ public:
+ Vector() : begin_(), end_(), last_() {}
+
+ ~Vector() {
+ if (begin_)
+ InternalFree(begin_);
+ }
+
+ void Reset() {
+ if (begin_)
+ InternalFree(begin_);
+ begin_ = 0;
+ end_ = 0;
+ last_ = 0;
+ }
+
+ uptr Size() const {
+ return end_ - begin_;
+ }
+
+ T &operator[](uptr i) {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ const T &operator[](uptr i) const {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ T *PushBack() {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memset(p, 0, sizeof(*p));
+ return p;
+ }
+
+ T *PushBack(const T& v) {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memcpy(p, &v, sizeof(*p));
+ return p;
+ }
+
+ void PopBack() {
+ DCHECK_GT(end_, begin_);
+ end_--;
+ }
+
+ void Resize(uptr size) {
+ if (size == 0) {
+ end_ = begin_;
+ return;
+ }
+ uptr old_size = Size();
+ if (size <= old_size) {
+ end_ = begin_ + size;
+ return;
+ }
+ EnsureSize(size);
+ if (old_size < size) {
+ for (uptr i = old_size; i < size; i++)
+ internal_memset(&begin_[i], 0, sizeof(begin_[i]));
+ }
+ }
+
+ private:
+ T *begin_;
+ T *end_;
+ T *last_;
+
+ void EnsureSize(uptr size) {
+ if (size <= Size())
+ return;
+ if (size <= (uptr)(last_ - begin_)) {
+ end_ = begin_ + size;
+ return;
+ }
+ uptr cap0 = last_ - begin_;
+ uptr cap = cap0 * 5 / 4; // 25% growth
+ if (cap == 0)
+ cap = 16;
+ if (cap < size)
+ cap = size;
+ T *p = (T*)InternalAlloc(cap * sizeof(T));
+ if (cap0) {
+ internal_memcpy(p, begin_, cap0 * sizeof(T));
+ InternalFree(begin_);
+ }
+ begin_ = p;
+ end_ = begin_ + size;
+ last_ = begin_ + cap;
+ }
+
+ Vector(const Vector&);
+ void operator=(const Vector&);
+};
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_VECTOR_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_win.cpp b/lib/tsan/sanitizer_common/sanitizer_win.cpp
new file mode 100644
index 0000000000..fca15beb61
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_win.cpp
@@ -0,0 +1,1129 @@
+//===-- sanitizer_win.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements windows-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#define NOGDI
+#include <windows.h>
+#include <io.h>
+#include <psapi.h>
+#include <stdlib.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_win_defs.h"
+
+#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
+#pragma comment(lib, "psapi")
+#endif
+#if SANITIZER_WIN_TRACE
+#include <traceloggingprovider.h>
+// Windows trace logging provider init
+#pragma comment(lib, "advapi32.lib")
+TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
+// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
+TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
+ (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
+ 0x53, 0x0b, 0xd0, 0xf3, 0xfa));
+#else
+#define TraceLoggingUnregister(x)
+#endif
+
+// A macro to tell the compiler that this part of the code cannot be reached,
+// if the compiler supports this feature. Since we're using this in
+// code that is called when terminating the process, the expansion of the
+// macro should not terminate the process to avoid infinite recursion.
+#if defined(__clang__)
+# define BUILTIN_UNREACHABLE() __builtin_unreachable()
+#elif defined(__GNUC__) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+# define BUILTIN_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+# define BUILTIN_UNREACHABLE() __assume(0)
+#else
+# define BUILTIN_UNREACHABLE()
+#endif
+
+namespace __sanitizer {
+
+#include "sanitizer_syscall_generic.inc"
+
+// --------------------- sanitizer_common.h
+uptr GetPageSize() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+}
+
+uptr GetMmapGranularity() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwAllocationGranularity;
+}
+
+uptr GetMaxUserVirtualAddress() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return (uptr)si.lpMaximumApplicationAddress;
+}
+
+uptr GetMaxVirtualAddress() {
+ return GetMaxUserVirtualAddress();
+}
+
+bool FileExists(const char *filename) {
+ return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
+}
+
+uptr internal_getpid() {
+ return GetProcessId(GetCurrentProcess());
+}
+
+int internal_dlinfo(void *handle, int request, void *p) {
+ UNIMPLEMENTED();
+}
+
+// In contrast to POSIX, on Windows GetCurrentThreadId()
+// returns a system-unique identifier.
+tid_t GetTid() {
+ return GetCurrentThreadId();
+}
+
+uptr GetThreadSelf() {
+ return GetTid();
+}
+
+#if !SANITIZER_GO
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
+ // FIXME: is it possible for the stack to not be a single allocation?
+ // Are these values what ASan expects to get (reserved, not committed;
+ // including stack guard page) ?
+ *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
+ *stack_bottom = (uptr)mbi.AllocationBase;
+}
+#endif // #if !SANITIZER_GO
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (rv == 0)
+ ReportMmapFailureAndDie(size, mem_type, "allocate",
+ GetLastError(), raw_report);
+ return rv;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!size || !addr)
+ return;
+
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));
+
+ // MEM_RELEASE can only be used to unmap whole regions previously mapped with
+ // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
+ // fails try MEM_DECOMMIT.
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+ if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
+ Report("ERROR: %s failed to "
+ "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
+ SanitizerToolName, size, size, addr, GetLastError());
+ CHECK("unable to unmap" && 0);
+ }
+ }
+}
+
+static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
+ const char *mmap_type) {
+ error_t last_error = GetLastError();
+ if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (rv == 0)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+ return rv;
+}
+
+// We want to map a chunk of address space aligned to 'alignment'.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+
+ // Windows will align our allocations to at least 64K.
+ alignment = Max(alignment, GetMmapGranularity());
+
+ uptr mapped_addr =
+ (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (!mapped_addr)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
+
+ // If we got it right on the first try, return. Otherwise, unmap it and go to
+ // the slow path.
+ if (IsAligned(mapped_addr, alignment))
+ return (void*)mapped_addr;
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // If we didn't get an aligned address, overallocate, find an aligned address,
+ // unmap, and try to allocate at that aligned address.
+ int retries = 0;
+ const int kMaxRetries = 10;
+ for (; retries < kMaxRetries &&
+ (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
+ retries++) {
+ // Overallocate size + alignment bytes.
+ mapped_addr =
+ (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
+ if (!mapped_addr)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
+
+ // Find the aligned address.
+ uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
+
+ // Free the overallocation.
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // Attempt to allocate exactly the number of bytes we need at the aligned
+ // address. This may fail for a number of reasons, in which case we continue
+ // the loop.
+ mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ }
+
+ // Fail if we can't make this work quickly.
+ if (retries == kMaxRetries && mapped_addr == 0)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
+
+ return (void *)mapped_addr;
+}
+
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
+ // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
+ // but on Win64 it does.
+ (void)name; // unsupported
+#if !SANITIZER_GO && SANITIZER_WINDOWS64
+ // On asan/Windows64, use MEM_COMMIT would result in error
+ // 1455:ERROR_COMMITMENT_LIMIT.
+ // Asan uses exception handler to commit page on demand.
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
+#else
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
+ PAGE_READWRITE);
+#endif
+ if (p == 0) {
+ Report("ERROR: %s failed to "
+ "allocate %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
+ return false;
+ }
+ return true;
+}
+
+bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
+ // FIXME: Windows support large pages too. Might be worth checking
+ return MmapFixedNoReserve(fixed_addr, size, name);
+}
+
+// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
+// 'MmapFixedNoAccess'.
+void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0) {
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
+ }
+ return p;
+}
+
+// Uses fixed_addr for now.
+// Will use offset instead once we've implemented this function for real.
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
+ return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
+}
+
+uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
+ const char *name) {
+ return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ // Only unmap if it covers the entire range.
+ CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
+ // We unmap the whole range, just null out the base.
+ base_ = nullptr;
+ size_ = 0;
+ UnmapOrDie(reinterpret_cast<void*>(addr), size);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0) {
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+ }
+ return p;
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ // FIXME: make this really NoReserve?
+ return MmapOrDie(size, mem_type);
+}
+
+uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
+ base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
+ size_ = size;
+ name_ = name;
+ (void)os_handle_; // unsupported
+ return reinterpret_cast<uptr>(base_);
+}
+
+
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ (void)name; // unsupported
+ void *res = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
+ return res;
+}
+
+void *MmapNoAccess(uptr size) {
+ void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes (error code: %d)\n",
+ SanitizerToolName, size, size, GetLastError());
+ return res;
+}
+
+bool MprotectNoAccess(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
+}
+
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ // This is almost useless on 32-bits.
+ // FIXME: add madvise-analog when we move to 64-bits.
+}
+
+void SetShadowRegionHugePageMode(uptr addr, uptr size) {
+ // FIXME: probably similar to ReleaseMemoryToOS.
+}
+
+bool DontDumpShadowMemory(uptr addr, uptr length) {
+ // This is almost useless on 32-bits.
+ // FIXME: add madvise-analog when we move to 64-bits.
+ return true;
+}
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found,
+ uptr *max_occupied_addr) {
+ uptr address = 0;
+ while (true) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return 0;
+
+ if (info.State == MEM_FREE) {
+ uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
+ alignment);
+ if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
+ return shadow_address;
+ }
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ }
+ return 0;
+}
+
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
+ return mbi.Protect == PAGE_NOACCESS &&
+ (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ UNIMPLEMENTED();
+}
+
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
+ UNIMPLEMENTED();
+}
+
+static const int kMaxEnvNameLength = 128;
+static const DWORD kMaxEnvValueLength = 32767;
+
+namespace {
+
+struct EnvVariable {
+ char name[kMaxEnvNameLength];
+ char value[kMaxEnvValueLength];
+};
+
+} // namespace
+
+static const int kEnvVariables = 5;
+static EnvVariable env_vars[kEnvVariables];
+static int num_env_vars;
+
+const char *GetEnv(const char *name) {
+ // Note: this implementation caches the values of the environment variables
+ // and limits their quantity.
+ for (int i = 0; i < num_env_vars; i++) {
+ if (0 == internal_strcmp(name, env_vars[i].name))
+ return env_vars[i].value;
+ }
+ CHECK_LT(num_env_vars, kEnvVariables);
+ DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
+ kMaxEnvValueLength);
+ if (rv > 0 && rv < kMaxEnvValueLength) {
+ CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
+ internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
+ num_env_vars++;
+ return env_vars[num_env_vars - 1].value;
+ }
+ return 0;
+}
+
+const char *GetPwd() {
+ UNIMPLEMENTED();
+}
+
+u32 GetUid() {
+ UNIMPLEMENTED();
+}
+
+namespace {
+struct ModuleInfo {
+ const char *filepath;
+ uptr base_address;
+ uptr end_address;
+};
+
+#if !SANITIZER_GO
+int CompareModulesBase(const void *pl, const void *pr) {
+ const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
+ if (l->base_address < r->base_address)
+ return -1;
+ return l->base_address > r->base_address;
+}
+#endif
+} // namespace
+
+#if !SANITIZER_GO
+void DumpProcessMap() {
+ Report("Dumping process modules:\n");
+ ListOfModules modules;
+ modules.init();
+ uptr num_modules = modules.size();
+
+ InternalMmapVector<ModuleInfo> module_infos(num_modules);
+ for (size_t i = 0; i < num_modules; ++i) {
+ module_infos[i].filepath = modules[i].full_name();
+ module_infos[i].base_address = modules[i].ranges().front()->beg;
+ module_infos[i].end_address = modules[i].ranges().back()->end;
+ }
+ qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
+ CompareModulesBase);
+
+ for (size_t i = 0; i < num_modules; ++i) {
+ const ModuleInfo &mi = module_infos[i];
+ if (mi.end_address != 0) {
+ Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
+ mi.filepath[0] ? mi.filepath : "[no name]");
+ } else if (mi.filepath[0]) {
+ Printf("\t??\?-??? %s\n", mi.filepath);
+ } else {
+ Printf("\t???\n");
+ }
+ }
+}
+#endif
+
+void PrintModuleMap() { }
+
+void DisableCoreDumperIfNecessary() {
+ // Do nothing.
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+
+bool StackSizeIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ UNIMPLEMENTED();
+}
+
+bool AddressSpaceIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetAddressSpaceUnlimited() {
+ UNIMPLEMENTED();
+}
+
+bool IsPathSeparator(const char c) {
+ return c == '\\' || c == '/';
+}
+
+static bool IsAlpha(char c) {
+ c = ToLower(c);
+ return c >= 'a' && c <= 'z';
+}
+
+bool IsAbsolutePath(const char *path) {
+ return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
+ IsPathSeparator(path[2]);
+}
+
+void SleepForSeconds(int seconds) {
+ Sleep(seconds * 1000);
+}
+
+void SleepForMillis(int millis) {
+ Sleep(millis);
+}
+
+u64 NanoTime() {
+ static LARGE_INTEGER frequency = {};
+ LARGE_INTEGER counter;
+ if (UNLIKELY(frequency.QuadPart == 0)) {
+ QueryPerformanceFrequency(&frequency);
+ CHECK_NE(frequency.QuadPart, 0);
+ }
+ QueryPerformanceCounter(&counter);
+ counter.QuadPart *= 1000ULL * 1000000ULL;
+ counter.QuadPart /= frequency.QuadPart;
+ return counter.QuadPart;
+}
+
+u64 MonotonicNanoTime() { return NanoTime(); }
+
+void Abort() {
+ internal__exit(3);
+}
+
+#if !SANITIZER_GO
+// Read the file to extract the ImageBase field from the PE header. If ASLR is
+// disabled and this virtual address is available, the loader will typically
+// load the image at this address. Therefore, we call it the preferred base. Any
+// addresses in the DWARF typically assume that the object has been loaded at
+// this address.
+static uptr GetPreferredBase(const char *modname) {
+ fd_t fd = OpenFile(modname, RdOnly, nullptr);
+ if (fd == kInvalidFd)
+ return 0;
+ FileCloser closer(fd);
+
+ // Read just the DOS header.
+ IMAGE_DOS_HEADER dos_header;
+ uptr bytes_read;
+ if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
+ bytes_read != sizeof(dos_header))
+ return 0;
+
+ // The file should start with the right signature.
+ if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
+ return 0;
+
+ // The layout at e_lfanew is:
+ // "PE\0\0"
+ // IMAGE_FILE_HEADER
+ // IMAGE_OPTIONAL_HEADER
+ // Seek to e_lfanew and read all that data.
+ char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
+ if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
+ INVALID_SET_FILE_POINTER)
+ return 0;
+ if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
+ bytes_read != sizeof(buf))
+ return 0;
+
+ // Check for "PE\0\0" before the PE header.
+ char *pe_sig = &buf[0];
+ if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
+ return 0;
+
+ // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
+ IMAGE_OPTIONAL_HEADER *pe_header =
+ (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));
+
+ // Check for more magic in the PE header.
+ if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
+ return 0;
+
+ // Finally, return the ImageBase.
+ return (uptr)pe_header->ImageBase;
+}
+
+void ListOfModules::init() {
+ clearOrInit();
+ HANDLE cur_process = GetCurrentProcess();
+
+ // Query the list of modules. Start by assuming there are no more than 256
+ // modules and retry if that's not sufficient.
+ HMODULE *hmodules = 0;
+ uptr modules_buffer_size = sizeof(HMODULE) * 256;
+ DWORD bytes_required;
+ while (!hmodules) {
+ hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
+ CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
+ &bytes_required));
+ if (bytes_required > modules_buffer_size) {
+ // Either there turned out to be more than 256 hmodules, or new hmodules
+ // could have loaded since the last try. Retry.
+ UnmapOrDie(hmodules, modules_buffer_size);
+ hmodules = 0;
+ modules_buffer_size = bytes_required;
+ }
+ }
+
+ // |num_modules| is the number of modules actually present,
+ size_t num_modules = bytes_required / sizeof(HMODULE);
+ for (size_t i = 0; i < num_modules; ++i) {
+ HMODULE handle = hmodules[i];
+ MODULEINFO mi;
+ if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
+ continue;
+
+ // Get the UTF-16 path and convert to UTF-8.
+ wchar_t modname_utf16[kMaxPathLength];
+ int modname_utf16_len =
+ GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
+ if (modname_utf16_len == 0)
+ modname_utf16[0] = '\0';
+ char module_name[kMaxPathLength];
+ int module_name_len =
+ ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
+ &module_name[0], kMaxPathLength, NULL, NULL);
+ module_name[module_name_len] = '\0';
+
+ uptr base_address = (uptr)mi.lpBaseOfDll;
+ uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;
+
+ // Adjust the base address of the module so that we get a VA instead of an
+ // RVA when computing the module offset. This helps llvm-symbolizer find the
+ // right DWARF CU. In the common case that the image is loaded at it's
+ // preferred address, we will now print normal virtual addresses.
+ uptr preferred_base = GetPreferredBase(&module_name[0]);
+ uptr adjusted_base = base_address - preferred_base;
+
+ LoadedModule cur_module;
+ cur_module.set(module_name, adjusted_base);
+ // We add the whole module as one single address range.
+ cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
+ /*writable*/ true);
+ modules_.push_back(cur_module);
+ }
+ UnmapOrDie(hmodules, modules_buffer_size);
+}
+
+void ListOfModules::fallbackInit() { clear(); }
+
+// We can't use atexit() directly at __asan_init time as the CRT is not fully
+// initialized at this point. Place the functions into a vector and use
+// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
+InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
+
+int Atexit(void (*function)(void)) {
+ atexit_functions.push_back(function);
+ return 0;
+}
+
+static int RunAtexit() {
+ TraceLoggingUnregister(g_asan_provider);
+ int ret = 0;
+ for (uptr i = 0; i < atexit_functions.size(); ++i) {
+ ret |= atexit(atexit_functions[i]);
+ }
+ return ret;
+}
+
+#pragma section(".CRT$XID", long, read)
+__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
+#endif
+
+// ------------------ sanitizer_libc.h
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
+ // FIXME: Use the wide variants to handle Unicode filenames.
+ fd_t res;
+ if (mode == RdOnly) {
+ res = CreateFileA(filename, GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+ } else if (mode == WrOnly) {
+ res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, nullptr);
+ } else {
+ UNIMPLEMENTED();
+ }
+ CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
+ CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
+ if (res == kInvalidFd && last_error)
+ *last_error = GetLastError();
+ return res;
+}
+
+void CloseFile(fd_t fd) {
+ CloseHandle(fd);
+}
+
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ CHECK(fd != kInvalidFd);
+
+ // bytes_read can't be passed directly to ReadFile:
+ // uptr is unsigned long long on 64-bit Windows.
+ unsigned long num_read_long;
+
+ bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
+ if (!success && error_p)
+ *error_p = GetLastError();
+ if (bytes_read)
+ *bytes_read = num_read_long;
+ return success;
+}
+
+bool SupportsColoredOutput(fd_t fd) {
+ // FIXME: support colored output.
+ return false;
+}
+
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ CHECK(fd != kInvalidFd);
+
+ // Handle null optional parameters.
+ error_t dummy_error;
+ error_p = error_p ? error_p : &dummy_error;
+ uptr dummy_bytes_written;
+ bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;
+
+ // Initialize output parameters in case we fail.
+ *error_p = 0;
+ *bytes_written = 0;
+
+ // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
+ // closed, in which case this will fail.
+ if (fd == kStdoutFd || fd == kStderrFd) {
+ fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
+ if (fd == 0) {
+ *error_p = ERROR_INVALID_HANDLE;
+ return false;
+ }
+ }
+
+ DWORD bytes_written_32;
+ if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
+ *error_p = GetLastError();
+ return false;
+ } else {
+ *bytes_written = bytes_written_32;
+ return true;
+ }
+}
+
+uptr internal_sched_yield() {
+ Sleep(0);
+ return 0;
+}
+
+void internal__exit(int exitcode) {
+ TraceLoggingUnregister(g_asan_provider);
+ // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
+ // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
+ // so add our own breakpoint here.
+ if (::IsDebuggerPresent())
+ __debugbreak();
+ TerminateProcess(GetCurrentProcess(), exitcode);
+ BUILTIN_UNREACHABLE();
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ UNIMPLEMENTED();
+}
+
+uptr GetRSS() {
+ PROCESS_MEMORY_COUNTERS counters;
+ if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
+ return 0;
+ return counters.WorkingSetSize;
+}
+
+void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) { }
+
+// ---------------------- BlockingMutex ---------------- {{{1
+
+BlockingMutex::BlockingMutex() {
+ CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
+ CHECK_EQ(owner_, 0);
+ owner_ = GetThreadSelf();
+}
+
+void BlockingMutex::Unlock() {
+ CheckLocked();
+ owner_ = 0;
+ ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
+}
+
+void BlockingMutex::CheckLocked() {
+ CHECK_EQ(owner_, GetThreadSelf());
+}
+
+uptr GetTlsSize() {
+ return 0;
+}
+
+void InitTlsSize() {
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+#if SANITIZER_GO
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+#else
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = 0;
+ *tls_size = 0;
+#endif
+}
+
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ if (!WriteToFile(fd, buffer, length)) {
+ // stderr may be closed, but we may be able to print to the debugger
+ // instead. This is the case when launching a program from Visual Studio,
+ // and the following routine should write to its console.
+ OutputDebugStringA(buffer);
+ }
+}
+
+void SetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void UnsetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ (void)handler;
+ // FIXME: Decide what to do on Windows.
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ // FIXME: Decide what to do on Windows.
+ return kHandleSignalNo;
+}
+
+// Check based on flags if we should handle this exception.
+bool IsHandledDeadlyException(DWORD exceptionCode) {
+ switch (exceptionCode) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ case EXCEPTION_STACK_OVERFLOW:
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ case EXCEPTION_IN_PAGE_ERROR:
+ return common_flags()->handle_segv;
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ case EXCEPTION_PRIV_INSTRUCTION:
+ case EXCEPTION_BREAKPOINT:
+ return common_flags()->handle_sigill;
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ case EXCEPTION_FLT_OVERFLOW:
+ case EXCEPTION_FLT_STACK_CHECK:
+ case EXCEPTION_FLT_UNDERFLOW:
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case EXCEPTION_INT_OVERFLOW:
+ return common_flags()->handle_sigfpe;
+ }
+ return false;
+}
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ SYSTEM_INFO si;
+ GetNativeSystemInfo(&si);
+ uptr page_size = si.dwPageSize;
+ uptr page_mask = ~(page_size - 1);
+
+ for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
+ page <= end;) {
+ MEMORY_BASIC_INFORMATION info;
+ if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
+ return false;
+
+ if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
+ info.Protect == PAGE_EXECUTE)
+ return false;
+
+ if (info.RegionSize == 0)
+ return false;
+
+ page += info.RegionSize;
+ }
+
+ return true;
+}
+
+bool SignalContext::IsStackOverflow() const {
+ return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
+}
+
+void SignalContext::InitPcSpBp() {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ CONTEXT *context_record = (CONTEXT *)context;
+
+ pc = (uptr)exception_record->ExceptionAddress;
+#ifdef _WIN64
+ bp = (uptr)context_record->Rbp;
+ sp = (uptr)context_record->Rsp;
+#else
+ bp = (uptr)context_record->Ebp;
+ sp = (uptr)context_record->Esp;
+#endif
+}
+
+uptr SignalContext::GetAddress() const {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ return exception_record->ExceptionInformation[1];
+}
+
+bool SignalContext::IsMemoryAccess() const {
+ return GetWriteFlag() != SignalContext::UNKNOWN;
+}
+
+bool SignalContext::IsTrueFaultingAddress() const {
+ // FIXME: Provide real implementation for this. See Linux and Mac variants.
+ return IsMemoryAccess();
+}
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ // The contents of this array are documented at
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
+ // The first element indicates read as 0, write as 1, or execute as 8. The
+ // second element is the faulting address.
+ switch (exception_record->ExceptionInformation[0]) {
+ case 0:
+ return SignalContext::READ;
+ case 1:
+ return SignalContext::WRITE;
+ case 8:
+ return SignalContext::UNKNOWN;
+ }
+ return SignalContext::UNKNOWN;
+}
+
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
+int SignalContext::GetType() const {
+ return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
+}
+
+const char *SignalContext::Describe() const {
+ unsigned code = GetType();
+ // Get the string description of the exception if this is a known deadly
+ // exception.
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ return "access-violation";
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ return "array-bounds-exceeded";
+ case EXCEPTION_STACK_OVERFLOW:
+ return "stack-overflow";
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ return "datatype-misalignment";
+ case EXCEPTION_IN_PAGE_ERROR:
+ return "in-page-error";
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ return "illegal-instruction";
+ case EXCEPTION_PRIV_INSTRUCTION:
+ return "priv-instruction";
+ case EXCEPTION_BREAKPOINT:
+ return "breakpoint";
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ return "flt-denormal-operand";
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ return "flt-divide-by-zero";
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ return "flt-inexact-result";
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ return "flt-invalid-operation";
+ case EXCEPTION_FLT_OVERFLOW:
+ return "flt-overflow";
+ case EXCEPTION_FLT_STACK_CHECK:
+ return "flt-stack-check";
+ case EXCEPTION_FLT_UNDERFLOW:
+ return "flt-underflow";
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ return "int-divide-by-zero";
+ case EXCEPTION_INT_OVERFLOW:
+ return "int-overflow";
+ }
+ return "unknown exception";
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ // FIXME: Actually implement this function.
+ CHECK_GT(buf_len, 0);
+ buf[0] = 0;
+ return 0;
+}
+
+uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+void CheckVMASize() {
+ // Do nothing.
+}
+
+void InitializePlatformEarly() {
+ // Do nothing.
+}
+
+void MaybeReexec() {
+ // No need to re-exec on Windows.
+}
+
+void CheckASLR() {
+ // Do nothing
+}
+
+void CheckMPROTECT() {
+ // Do nothing
+}
+
+char **GetArgv() {
+ // FIXME: Actually implement this function.
+ return 0;
+}
+
+char **GetEnviron() {
+ // FIXME: Actually implement this function.
+ return 0;
+}
+
+pid_t StartSubprocess(const char *program, const char *const argv[],
+ const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
+ fd_t stderr_fd) {
+ // FIXME: implement on this platform
+ // Should be implemented based on
+ // SymbolizerProcess::StarAtSymbolizerSubprocess
+ // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
+ return -1;
+}
+
+bool IsProcessRunning(pid_t pid) {
+ // FIXME: implement on this platform.
+ return false;
+}
+
+int WaitForProcess(pid_t pid) { return -1; }
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
+
+// FIXME: implement on this platform.
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ UNIMPLEMENTED();
+}
+
+u32 GetNumberOfCPUs() {
+ SYSTEM_INFO sysinfo = {};
+ GetNativeSystemInfo(&sysinfo);
+ return sysinfo.dwNumberOfProcessors;
+}
+
+#if SANITIZER_WIN_TRACE
+// TODO(mcgov): Rename this project-wide to PlatformLogInit
+void AndroidLogInit(void) {
+ HRESULT hr = TraceLoggingRegister(g_asan_provider);
+ if (!SUCCEEDED(hr))
+ return;
+}
+
+void SetAbortMessage(const char *) {}
+
+void LogFullErrorReport(const char *buffer) {
+ if (common_flags()->log_to_syslog) {
+ InternalMmapVector<wchar_t> filename;
+ DWORD filename_length = 0;
+ do {
+ filename.resize(filename.size() + 0x100);
+ filename_length =
+ GetModuleFileNameW(NULL, filename.begin(), filename.size());
+ } while (filename_length >= filename.size());
+ TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
+ TraceLoggingValue(filename.begin(), "ExecutableName"),
+ TraceLoggingValue(buffer, "AsanReportContents"));
+ }
+}
+#endif // SANITIZER_WIN_TRACE
+
+} // namespace __sanitizer
+
+#endif // _WIN32
diff --git a/lib/tsan/sanitizer_common/sanitizer_win.h b/lib/tsan/sanitizer_common/sanitizer_win.h
new file mode 100644
index 0000000000..ff8939ca5e
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_win.h
@@ -0,0 +1,25 @@
+//===-- sanitizer_win.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Windows-specific declarations.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_H
+#define SANITIZER_WIN_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+// Check based on flags if we should handle the exception.
+bool IsHandledDeadlyException(DWORD exceptionCode);
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_win_defs.h b/lib/tsan/sanitizer_common/sanitizer_win_defs.h
new file mode 100644
index 0000000000..bfe38a3323
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_win_defs.h
@@ -0,0 +1,174 @@
+//===-- sanitizer_win_defs.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common definitions for Windows-specific code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DEFS_H
+#define SANITIZER_WIN_DEFS_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#ifndef WINAPI
+#if defined(_M_IX86) || defined(__i386__)
+#define WINAPI __stdcall
+#else
+#define WINAPI
+#endif
+#endif
+
+#if defined(_M_IX86) || defined(__i386__)
+#define WIN_SYM_PREFIX "_"
+#else
+#define WIN_SYM_PREFIX
+#endif
+
+// For MinGW, the /export: directives contain undecorated symbols, contrary to
+// link/lld-link. The GNU linker doesn't support /alternatename and /include
+// though, thus lld-link in MinGW mode interprets them in the same way as
+// in the default mode.
+#ifdef __MINGW32__
+#define WIN_EXPORT_PREFIX
+#else
+#define WIN_EXPORT_PREFIX WIN_SYM_PREFIX
+#endif
+
+// Intermediate macro to ensure the parameter is expanded before stringified.
+#define STRINGIFY_(A) #A
+#define STRINGIFY(A) STRINGIFY_(A)
+
+#if !SANITIZER_GO
+
+// ----------------- A workaround for the absence of weak symbols --------------
+// We don't have a direct equivalent of weak symbols when using MSVC, but we can
+// use the /alternatename directive to tell the linker to default a specific
+// symbol to a specific value.
+// Take into account that this is a pragma directive for the linker, so it will
+// be ignored by the compiler and the function will be marked as UNDEF in the
+// symbol table of the resulting object file. The linker won't find the default
+// implementation until it links with that object file.
+// So, suppose we provide a default implementation "fundef" for "fun", and this
+// is compiled into the object file "test.obj" including the pragma directive.
+// If we have some code with references to "fun" and we link that code with
+// "test.obj", it will work because the linker always link object files.
+// But, if "test.obj" is included in a static library, like "test.lib", then the
+// liker will only link to "test.obj" if necessary. If we only included the
+// definition of "fun", it won't link to "test.obj" (from test.lib) because
+// "fun" appears as UNDEF, so it doesn't resolve the symbol "fun", and will
+// result in a link error (the linker doesn't find the pragma directive).
+// So, a workaround is to force linkage with the modules that include weak
+// definitions, with the following macro: WIN_FORCE_LINK()
+
+#define WIN_WEAK_ALIAS(Name, Default) \
+ __pragma(comment(linker, "/alternatename:" WIN_SYM_PREFIX STRINGIFY(Name) "="\
+ WIN_SYM_PREFIX STRINGIFY(Default)))
+
+#define WIN_FORCE_LINK(Name) \
+ __pragma(comment(linker, "/include:" WIN_SYM_PREFIX STRINGIFY(Name)))
+
+#define WIN_EXPORT(ExportedName, Name) \
+ __pragma(comment(linker, "/export:" WIN_EXPORT_PREFIX STRINGIFY(ExportedName)\
+ "=" WIN_EXPORT_PREFIX STRINGIFY(Name)))
+
+// We cannot define weak functions on Windows, but we can use WIN_WEAK_ALIAS()
+// which defines an alias to a default implementation, and only works when
+// linking statically.
+// So, to define a weak function "fun", we define a default implementation with
+// a different name "fun__def" and we create a "weak alias" fun = fun__def.
+// Then, users can override it just defining "fun".
+// We impose "extern "C"" because otherwise WIN_WEAK_ALIAS() will fail because
+// of name mangling.
+
+// Dummy name for default implementation of weak function.
+# define WEAK_DEFAULT_NAME(Name) Name##__def
+// Name for exported implementation of weak function.
+# define WEAK_EXPORT_NAME(Name) Name##__dll
+
+// Use this macro when you need to define and export a weak function from a
+// library. For example:
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+# define WIN_WEAK_EXPORT_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_ALIAS(Name, WEAK_DEFAULT_NAME(Name)) \
+ WIN_EXPORT(WEAK_EXPORT_NAME(Name), Name) \
+ extern "C" ReturnType Name(__VA_ARGS__); \
+ extern "C" ReturnType WEAK_DEFAULT_NAME(Name)(__VA_ARGS__)
+
+// Use this macro when you need to import a weak function from a library. It
+// defines a weak alias to the imported function from the dll. For example:
+// WIN_WEAK_IMPORT_DEF(compare)
+# define WIN_WEAK_IMPORT_DEF(Name) \
+ WIN_WEAK_ALIAS(Name, WEAK_EXPORT_NAME(Name))
+
+// So, for Windows we provide something similar to weak symbols in Linux, with
+// some differences:
+// + A default implementation must always be provided.
+//
+// + When linking statically it works quite similarly. For example:
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// And it will work fine. If we don't override the function, we need to ensure
+// that the linker includes the object file with the default implementation.
+// We can do so with the linker option "-wholearchive:".
+//
+// + When linking dynamically with a library (dll), weak functions are exported
+// with "__dll" suffix. Clients can use the macro WIN_WEAK_IMPORT_DEF(fun)
+// which defines a "weak alias" fun = fun__dll.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// But if we override the function, the dlls don't have access to it (which
+// is different in linux). If that is desired, the strong definition must be
+// exported and interception can be used from the rest of the dlls.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+// // When initialized, check if the main executable defined "compare".
+// int libExample_init() {
+// uptr fnptr = __interception::InternalGetProcAddress(
+// (void *)GetModuleHandleA(0), "compare");
+// if (fnptr && !__interception::OverrideFunction((uptr)compare, fnptr, 0))
+// abort();
+// return 0;
+// }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We override and export compare:
+// extern "C" __declspec(dllexport) bool compare (int a, int b) {
+// return a >= b;
+// }
+//
+
+#else // SANITIZER_GO
+
+// Go neither needs nor wants weak references.
+// The shenanigans above don't work for gcc.
+# define WIN_WEAK_EXPORT_DEF(ReturnType, Name, ...) \
+ extern "C" ReturnType Name(__VA_ARGS__)
+
+#endif // SANITIZER_GO
+
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_DEFS_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_win_dll_thunk.h b/lib/tsan/sanitizer_common/sanitizer_win_dll_thunk.h
new file mode 100644
index 0000000000..48c73c4c98
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_win_dll_thunk.h
@@ -0,0 +1,181 @@
+//===-- sanitizer_win_dll_thunk.h -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This header provide helper macros to delegate calls to the shared runtime
+// that lives in the main executable. It should be included to dll_thunks that
+// will be linked to the dlls, when the sanitizer is a static library included
+// in the main executable.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DLL_THUNK_H
+#define SANITIZER_WIN_DLL_THUNK_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name);
+
+int dllThunkIntercept(const char* main_function, uptr dll_function);
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function);
+}
+
+extern "C" int __dll_thunk_init();
+
+// ----------------- Function interception helper macros -------------------- //
+// Override dll_function with main_function from main executable.
+#define INTERCEPT_OR_DIE(main_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkIntercept(main_function, (__sanitizer::uptr) \
+ dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// Try to override dll_function with main_function from main executable.
+// If main_function is not present, override dll_function with default_function.
+#define INTERCEPT_WHEN_POSSIBLE(main_function, default_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkInterceptWhenPossible(main_function, \
+ default_function, (__sanitizer::uptr)dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// -------------------- Function interception macros ------------------------ //
+// Special case of hooks -- ASan own interface functions. Those are only called
+// after __asan_init, thus an empty implementation is sufficient.
+#define INTERCEPT_SANITIZER_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name)
+
+// Special case of hooks -- Weak functions, could be redefined in the main
+// executable, but that is not necessary, so we shouldn't die if we can not find
+// a reference. Instead, when the function is not present in the main executable
+// we consider the default impl provided by asan library.
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, STRINGIFY(WEAK_EXPORT_NAME(name)), name)
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. Me may miss the first few calls to the
+// functions since it can be called before __dll_thunk_init, but that would lead
+// to false negatives in the startup code before user's global initializers,
+// which isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name) \
+ extern "C" void name(); \
+ INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)
+
+// Use these macros for functions that could be called before __dll_thunk_init()
+// is executed and don't lead to errors if defined (free, malloc, etc).
+#define INTERCEPT_WRAP_V_V(name) \
+ extern "C" void name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_W(name) \
+ extern "C" void name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WW(name) \
+ extern "C" void name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WWW(name) \
+ extern "C" void name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_V(name) \
+ extern "C" void *name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_W(name) \
+ extern "C" void *name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WW(name) \
+ extern "C" void *name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5, void *arg6) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#endif // SANITIZER_WIN_DLL_THUNK_H
diff --git a/lib/tsan/sanitizer_common/sanitizer_win_weak_interception.h b/lib/tsan/sanitizer_common/sanitizer_win_weak_interception.h
new file mode 100644
index 0000000000..5e4d8b8def
--- /dev/null
+++ b/lib/tsan/sanitizer_common/sanitizer_win_weak_interception.h
@@ -0,0 +1,32 @@
+//===-- sanitizer_win_weak_interception.h ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This header provide helper macros to delegate calls of weak functions to the
+// implementation in the main executable when a strong definition is present.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_WEAK_INTERCEPTION_H
+#define SANITIZER_WIN_WEAK_INTERCEPTION_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+int interceptWhenPossible(uptr dll_function, const char *real_function);
+}
+
+// ----------------- Function interception helper macros -------------------- //
+// Weak functions, could be redefined in the main executable, but that is not
+// necessary, so we shouldn't die if we can not find a reference.
+#define INTERCEPT_WEAK(Name) interceptWhenPossible((uptr) Name, #Name);
+
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) \
+ static int intercept_##Name() { \
+ return __sanitizer::interceptWhenPossible((__sanitizer::uptr) Name, #Name);\
+ } \
+ __pragma(section(".WEAK$M", long, read)) \
+ __declspec(allocate(".WEAK$M")) int (*__weak_intercept_##Name)() = \
+ intercept_##Name;
+
+#endif // SANITIZER_WIN_WEAK_INTERCEPTION_H
diff --git a/lib/tsan/tsan_clock.cpp b/lib/tsan/tsan_clock.cpp
new file mode 100644
index 0000000000..c91b29cb22
--- /dev/null
+++ b/lib/tsan/tsan_clock.cpp
@@ -0,0 +1,655 @@
+//===-- tsan_clock.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_clock.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+// SyncClock and ThreadClock implement vector clocks for sync variables
+// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
+// ThreadClock contains fixed-size vector clock for maximum number of threads.
+// SyncClock contains growable vector clock for currently necessary number of
+// threads.
+// Together they implement very simple model of operations, namely:
+//
+// void ThreadClock::acquire(const SyncClock *src) {
+// for (int i = 0; i < kMaxThreads; i++)
+// clock[i] = max(clock[i], src->clock[i]);
+// }
+//
+// void ThreadClock::release(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = max(dst->clock[i], clock[i]);
+// }
+//
+// void ThreadClock::releaseStoreAcquire(SyncClock *sc) const {
+// for (int i = 0; i < kMaxThreads; i++) {
+// tmp = clock[i];
+// clock[i] = max(clock[i], sc->clock[i]);
+// sc->clock[i] = tmp;
+// }
+// }
+//
+// void ThreadClock::ReleaseStore(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = clock[i];
+// }
+//
+// void ThreadClock::acq_rel(SyncClock *dst) {
+// acquire(dst);
+// release(dst);
+// }
+//
+// Conformance to this model is extensively verified in tsan_clock_test.cpp.
+// However, the implementation is significantly more complex. The complexity
+// allows to implement important classes of use cases in O(1) instead of O(N).
+//
+// The use cases are:
+// 1. Singleton/once atomic that has a single release-store operation followed
+// by zillions of acquire-loads (the acquire-load is O(1)).
+// 2. Thread-local mutex (both lock and unlock can be O(1)).
+// 3. Leaf mutex (unlock is O(1)).
+// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
+// 5. An atomic with a single writer (writes can be O(1)).
+// The implementation dynamically adopts to workload. So if an atomic is in
+// read-only phase, these reads will be O(1); if it later switches to read/write
+// phase, the implementation will correctly handle that by switching to O(N).
+//
+// Thread-safety note: all const operations on SyncClock's are conducted under
+// a shared lock; all non-const operations on SyncClock's are conducted under
+// an exclusive lock; ThreadClock's are private to respective threads and so
+// do not need any protection.
+//
+// Description of SyncClock state:
+// clk_ - variable size vector clock, low kClkBits hold timestamp,
+// the remaining bits hold "acquired" flag (the actual value is thread's
+// reused counter);
+// if acquried == thr->reused_, then the respective thread has already
+// acquired this clock (except possibly for dirty elements).
+// dirty_ - holds up to two indeces in the vector clock that other threads
+// need to acquire regardless of "acquired" flag value;
+// release_store_tid_ - denotes that the clock state is a result of
+// release-store operation by the thread with release_store_tid_ index.
+// release_store_reused_ - reuse count of release_store_tid_.
+
+// We don't have ThreadState in these methods, so this is an ugly hack that
+// works only in C++.
+#if !SANITIZER_GO
+# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
+#else
+# define CPP_STAT_INC(typ) (void)0
+#endif
+
+namespace __tsan {
+
+static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
+ return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]);
+}
+
+// Drop reference to the first level block idx.
+static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ atomic_uint32_t *ref = ref_ptr(cb);
+ u32 v = atomic_load(ref, memory_order_acquire);
+ for (;;) {
+ CHECK_GT(v, 0);
+ if (v == 1)
+ break;
+ if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel))
+ return;
+ }
+ // First level block owns second level blocks, so them as well.
+ for (uptr i = 0; i < blocks; i++)
+ ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]);
+ ctx->clock_alloc.Free(c, idx);
+}
+
+ThreadClock::ThreadClock(unsigned tid, unsigned reused)
+ : tid_(tid)
+ , reused_(reused + 1) // 0 has special meaning
+ , last_acquire_()
+ , global_acquire_()
+ , cached_idx_()
+ , cached_size_()
+ , cached_blocks_() {
+ CHECK_LT(tid, kMaxTidInClock);
+ CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
+ nclk_ = tid_ + 1;
+ internal_memset(clk_, 0, sizeof(clk_));
+}
+
+void ThreadClock::ResetCached(ClockCache *c) {
+ if (cached_idx_) {
+ UnrefClockBlock(c, cached_idx_, cached_blocks_);
+ cached_idx_ = 0;
+ cached_size_ = 0;
+ cached_blocks_ = 0;
+ }
+}
+
+void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(src->size_, kMaxTid);
+ CPP_STAT_INC(StatClockAcquire);
+
+ // Check if it's empty -> no need to do anything.
+ const uptr nclk = src->size_;
+ if (nclk == 0) {
+ CPP_STAT_INC(StatClockAcquireEmpty);
+ return;
+ }
+
+ bool acquired = false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ SyncClock::Dirty dirty = src->dirty_[i];
+ unsigned tid = dirty.tid;
+ if (tid != kInvalidTid) {
+ if (clk_[tid] < dirty.epoch) {
+ clk_[tid] = dirty.epoch;
+ acquired = true;
+ }
+ }
+ }
+
+ // Check if we've already acquired src after the last release operation on src
+ if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
+ // O(N) acquire.
+ CPP_STAT_INC(StatClockAcquireFull);
+ nclk_ = max(nclk_, nclk);
+ u64 *dst_pos = &clk_[0];
+ for (ClockElem &src_elem : *src) {
+ u64 epoch = src_elem.epoch;
+ if (*dst_pos < epoch) {
+ *dst_pos = epoch;
+ acquired = true;
+ }
+ dst_pos++;
+ }
+
+ // Remember that this thread has acquired this clock.
+ if (nclk > tid_)
+ src->elem(tid_).reused = reused_;
+ }
+
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_];
+ ResetCached(c);
+ }
+}
+
+void ThreadClock::releaseStoreAcquire(ClockCache *c, SyncClock *sc) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(sc->size_, kMaxTid);
+
+ if (sc->size_ == 0) {
+ // ReleaseStore will correctly set release_store_tid_,
+ // which can be important for future operations.
+ ReleaseStore(c, sc);
+ return;
+ }
+
+ nclk_ = max(nclk_, (uptr) sc->size_);
+
+ // Check if we need to resize sc.
+ if (sc->size_ < nclk_)
+ sc->Resize(c, nclk_);
+
+ bool acquired = false;
+
+ sc->Unshare(c);
+ // Update sc->clk_.
+ sc->FlushDirty();
+ uptr i = 0;
+ for (ClockElem &ce : *sc) {
+ u64 tmp = clk_[i];
+ if (clk_[i] < ce.epoch) {
+ clk_[i] = ce.epoch;
+ acquired = true;
+ }
+ ce.epoch = tmp;
+ ce.reused = 0;
+ i++;
+ }
+ sc->release_store_tid_ = kInvalidTid;
+ sc->release_store_reused_ = 0;
+
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_];
+ ResetCached(c);
+ }
+}
+
+void ThreadClock::release(ClockCache *c, SyncClock *dst) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+
+ if (dst->size_ == 0) {
+ // ReleaseStore will correctly set release_store_tid_,
+ // which can be important for future operations.
+ ReleaseStore(c, dst);
+ return;
+ }
+
+ CPP_STAT_INC(StatClockRelease);
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ // Check if we had not acquired anything from other threads
+ // since the last release on dst. If so, we need to update
+ // only dst->elem(tid_).
+ if (!HasAcquiredAfterRelease(dst)) {
+ UpdateCurrentThread(c, dst);
+ if (dst->release_store_tid_ != tid_ ||
+ dst->release_store_reused_ != reused_)
+ dst->release_store_tid_ = kInvalidTid;
+ return;
+ }
+
+ // O(N) release.
+ CPP_STAT_INC(StatClockReleaseFull);
+ dst->Unshare(c);
+ // First, remember whether we've acquired dst.
+ bool acquired = IsAlreadyAcquired(dst);
+ if (acquired)
+ CPP_STAT_INC(StatClockReleaseAcquired);
+ // Update dst->clk_.
+ dst->FlushDirty();
+ uptr i = 0;
+ for (ClockElem &ce : *dst) {
+ ce.epoch = max(ce.epoch, clk_[i]);
+ ce.reused = 0;
+ i++;
+ }
+ // Clear 'acquired' flag in the remaining elements.
+ if (nclk_ < dst->size_)
+ CPP_STAT_INC(StatClockReleaseClearTail);
+ dst->release_store_tid_ = kInvalidTid;
+ dst->release_store_reused_ = 0;
+ // If we've acquired dst, remember this fact,
+ // so that we don't need to acquire it on next acquire.
+ if (acquired)
+ dst->elem(tid_).reused = reused_;
+}
+
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+ CPP_STAT_INC(StatClockStore);
+
+ if (dst->size_ == 0 && cached_idx_ != 0) {
+ // Reuse the cached clock.
+ // Note: we could reuse/cache the cached clock in more cases:
+ // we could update the existing clock and cache it, or replace it with the
+ // currently cached clock and release the old one. And for a shared
+ // existing clock, we could replace it with the currently cached;
+ // or unshare, update and cache. But, for simplicity, we currnetly reuse
+ // cached clock only when the target clock is empty.
+ dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
+ dst->tab_idx_ = cached_idx_;
+ dst->size_ = cached_size_;
+ dst->blocks_ = cached_blocks_;
+ CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
+ // The cached clock is shared (immutable),
+ // so this is where we store the current clock.
+ dst->dirty_[0].tid = tid_;
+ dst->dirty_[0].epoch = clk_[tid_];
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Rememeber that we don't need to acquire it in future.
+ dst->elem(tid_).reused = reused_;
+ // Grab a reference.
+ atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
+ return;
+ }
+
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ if (dst->release_store_tid_ == tid_ &&
+ dst->release_store_reused_ == reused_ &&
+ !HasAcquiredAfterRelease(dst)) {
+ CPP_STAT_INC(StatClockStoreFast);
+ UpdateCurrentThread(c, dst);
+ return;
+ }
+
+ // O(N) release-store.
+ CPP_STAT_INC(StatClockStoreFull);
+ dst->Unshare(c);
+ // Note: dst can be larger than this ThreadClock.
+ // This is fine since clk_ beyond size is all zeros.
+ uptr i = 0;
+ for (ClockElem &ce : *dst) {
+ ce.epoch = clk_[i];
+ ce.reused = 0;
+ i++;
+ }
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dst->dirty_[i].tid = kInvalidTid;
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Rememeber that we don't need to acquire it in future.
+ dst->elem(tid_).reused = reused_;
+
+ // If the resulting clock is cachable, cache it for future release operations.
+ // The clock is always cachable if we released to an empty sync object.
+ if (cached_idx_ == 0 && dst->Cachable()) {
+ // Grab a reference to the ClockBlock.
+ atomic_uint32_t *ref = ref_ptr(dst->tab_);
+ if (atomic_load(ref, memory_order_acquire) == 1)
+ atomic_store_relaxed(ref, 2);
+ else
+ atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
+ cached_idx_ = dst->tab_idx_;
+ cached_size_ = dst->size_;
+ cached_blocks_ = dst->blocks_;
+ }
+}
+
+void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
+ CPP_STAT_INC(StatClockAcquireRelease);
+ acquire(c, dst);
+ ReleaseStore(c, dst);
+}
+
+// Updates only single element related to the current thread in dst->clk_.
+void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
+ // Update the threads time, but preserve 'acquired' flag.
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ SyncClock::Dirty *dirty = &dst->dirty_[i];
+ const unsigned tid = dirty->tid;
+ if (tid == tid_ || tid == kInvalidTid) {
+ CPP_STAT_INC(StatClockReleaseFast);
+ dirty->tid = tid_;
+ dirty->epoch = clk_[tid_];
+ return;
+ }
+ }
+ // Reset all 'acquired' flags, O(N).
+ // We are going to touch dst elements, so we need to unshare it.
+ dst->Unshare(c);
+ CPP_STAT_INC(StatClockReleaseSlow);
+ dst->elem(tid_).epoch = clk_[tid_];
+ for (uptr i = 0; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ dst->FlushDirty();
+}
+
+// Checks whether the current thread has already acquired src.
+bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
+ if (src->elem(tid_).reused != reused_)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ SyncClock::Dirty dirty = src->dirty_[i];
+ if (dirty.tid != kInvalidTid) {
+ if (clk_[dirty.tid] < dirty.epoch)
+ return false;
+ }
+ }
+ return true;
+}
+
+// Checks whether the current thread has acquired anything
+// from other clocks after releasing to dst (directly or indirectly).
+bool ThreadClock::HasAcquiredAfterRelease(const SyncClock *dst) const {
+ const u64 my_epoch = dst->elem(tid_).epoch;
+ return my_epoch <= last_acquire_ ||
+ my_epoch <= atomic_load_relaxed(&global_acquire_);
+}
+
+// Sets a single element in the vector clock.
+// This function is called only from weird places like AcquireGlobal.
+void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid]);
+ clk_[tid] = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ last_acquire_ = clk_[tid_];
+ ResetCached(c);
+}
+
+void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i]);
+ printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_);
+}
+
+SyncClock::SyncClock() {
+ ResetImpl();
+}
+
+SyncClock::~SyncClock() {
+ // Reset must be called before dtor.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(blocks_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+}
+
+void SyncClock::Reset(ClockCache *c) {
+ if (size_)
+ UnrefClockBlock(c, tab_idx_, blocks_);
+ ResetImpl();
+}
+
+void SyncClock::ResetImpl() {
+ tab_ = 0;
+ tab_idx_ = 0;
+ size_ = 0;
+ blocks_ = 0;
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_[i].tid = kInvalidTid;
+}
+
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ Unshare(c);
+ if (nclk <= capacity()) {
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (size_ == 0) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(blocks_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ atomic_store_relaxed(ref_ptr(tab_), 1);
+ size_ = 1;
+ } else if (size_ > blocks_ * ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *new_cb = ctx->clock_alloc.Map(idx);
+ uptr top = size_ - blocks_ * ClockBlock::kClockCount;
+ CHECK_LT(top, ClockBlock::kClockCount);
+ const uptr move = top * sizeof(tab_->clock[0]);
+ internal_memcpy(&new_cb->clock[0], tab_->clock, move);
+ internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move);
+ internal_memset(tab_->clock, 0, move);
+ append_block(idx);
+ }
+ // At this point we have first level table allocated and all clock elements
+ // are evacuated from it to a second level block.
+ // Add second level tables as necessary.
+ while (nclk > capacity()) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ append_block(idx);
+ }
+ size_ = nclk;
+}
+
+// Flushes all dirty elements into the main clock array.
+void SyncClock::FlushDirty() {
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ Dirty *dirty = &dirty_[i];
+ if (dirty->tid != kInvalidTid) {
+ CHECK_LT(dirty->tid, size_);
+ elem(dirty->tid).epoch = dirty->epoch;
+ dirty->tid = kInvalidTid;
+ }
+ }
+}
+
+bool SyncClock::IsShared() const {
+ if (size_ == 0)
+ return false;
+ atomic_uint32_t *ref = ref_ptr(tab_);
+ u32 v = atomic_load(ref, memory_order_acquire);
+ CHECK_GT(v, 0);
+ return v > 1;
+}
+
+// Unshares the current clock if it's shared.
+// Shared clocks are immutable, so they need to be unshared before any updates.
+// Note: this does not apply to dirty entries as they are not shared.
+void SyncClock::Unshare(ClockCache *c) {
+ if (!IsShared())
+ return;
+ // First, copy current state into old.
+ SyncClock old;
+ old.tab_ = tab_;
+ old.tab_idx_ = tab_idx_;
+ old.size_ = size_;
+ old.blocks_ = blocks_;
+ old.release_store_tid_ = release_store_tid_;
+ old.release_store_reused_ = release_store_reused_;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ old.dirty_[i] = dirty_[i];
+ // Then, clear current object.
+ ResetImpl();
+ // Allocate brand new clock in the current object.
+ Resize(c, old.size_);
+ // Now copy state back into this object.
+ Iter old_iter(&old);
+ for (ClockElem &ce : *this) {
+ ce = *old_iter;
+ ++old_iter;
+ }
+ release_store_tid_ = old.release_store_tid_;
+ release_store_reused_ = old.release_store_reused_;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dirty_[i] = old.dirty_[i];
+ // Drop reference to old and delete if necessary.
+ old.Reset(c);
+}
+
+// Can we cache this clock for future release operations?
+ALWAYS_INLINE bool SyncClock::Cachable() const {
+ if (size_ == 0)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ if (dirty_[i].tid != kInvalidTid)
+ return false;
+ }
+ return atomic_load_relaxed(ref_ptr(tab_)) == 1;
+}
+
+// elem linearizes the two-level structure into linear array.
+// Note: this is used only for one time accesses, vector operations use
+// the iterator as it is much faster.
+ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const {
+ DCHECK_LT(tid, size_);
+ const uptr block = tid / ClockBlock::kClockCount;
+ DCHECK_LE(block, blocks_);
+ tid %= ClockBlock::kClockCount;
+ if (block == blocks_)
+ return tab_->clock[tid];
+ u32 idx = get_block(block);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ return cb->clock[tid];
+}
+
+ALWAYS_INLINE uptr SyncClock::capacity() const {
+ if (size_ == 0)
+ return 0;
+ uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]);
+ // How many clock elements we can fit into the first level block.
+ // +1 for ref counter.
+ uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio;
+ return blocks_ * ClockBlock::kClockCount + top;
+}
+
+ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const {
+ DCHECK(size_);
+ DCHECK_LT(bi, blocks_);
+ return tab_->table[ClockBlock::kBlockIdx - bi];
+}
+
+ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
+ uptr bi = blocks_++;
+ CHECK_EQ(get_block(bi), 0);
+ tab_->table[ClockBlock::kBlockIdx - bi] = idx;
+}
+
+// Used only by tests.
+u64 SyncClock::get(unsigned tid) const {
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ Dirty dirty = dirty_[i];
+ if (dirty.tid == tid)
+ return dirty.epoch;
+ }
+ return elem(tid).epoch;
+}
+
+// Used only by Iter test.
+u64 SyncClock::get_clean(unsigned tid) const {
+ return elem(tid).epoch;
+}
+
+void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
+ printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
+ release_store_tid_, release_store_reused_,
+ dirty_[0].tid, dirty_[0].epoch,
+ dirty_[1].tid, dirty_[1].epoch);
+}
+
+void SyncClock::Iter::Next() {
+ // Finished with the current block, move on to the next one.
+ block_++;
+ if (block_ < parent_->blocks_) {
+ // Iterate over the next second level block.
+ u32 idx = parent_->get_block(block_);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ pos_ = &cb->clock[0];
+ end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
+ ClockBlock::kClockCount);
+ return;
+ }
+ if (block_ == parent_->blocks_ &&
+ parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) {
+ // Iterate over elements in the first level block.
+ pos_ = &parent_->tab_->clock[0];
+ end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
+ ClockBlock::kClockCount);
+ return;
+ }
+ parent_ = nullptr; // denotes end
+}
+} // namespace __tsan
diff --git a/lib/tsan/tsan_clock.h b/lib/tsan/tsan_clock.h
new file mode 100644
index 0000000000..736cdae06b
--- /dev/null
+++ b/lib/tsan/tsan_clock.h
@@ -0,0 +1,283 @@
+//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_CLOCK_H
+#define TSAN_CLOCK_H
+
+#include "tsan_defs.h"
+#include "tsan_dense_alloc.h"
+
+namespace __tsan {
+
+typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAllocCache ClockCache;
+
+// The clock that lives in sync variables (mutexes, atomics, etc).
+class SyncClock {
+ public:
+ SyncClock();
+ ~SyncClock();
+
+ uptr size() const;
+
+ // These are used only in tests.
+ u64 get(unsigned tid) const;
+ u64 get_clean(unsigned tid) const;
+
+ void Resize(ClockCache *c, uptr nclk);
+ void Reset(ClockCache *c);
+
+ void DebugDump(int(*printf)(const char *s, ...));
+
+ // Clock element iterator.
+ // Note: it iterates only over the table without regard to dirty entries.
+ class Iter {
+ public:
+ explicit Iter(SyncClock* parent);
+ Iter& operator++();
+ bool operator!=(const Iter& other);
+ ClockElem &operator*();
+
+ private:
+ SyncClock *parent_;
+ // [pos_, end_) is the current continuous range of clock elements.
+ ClockElem *pos_;
+ ClockElem *end_;
+ int block_; // Current number of second level block.
+
+ NOINLINE void Next();
+ };
+
+ Iter begin();
+ Iter end();
+
+ private:
+ friend class ThreadClock;
+ friend class Iter;
+ static const uptr kDirtyTids = 2;
+
+ struct Dirty {
+ u64 epoch : kClkBits;
+ u64 tid : 64 - kClkBits; // kInvalidId if not active
+ };
+
+ unsigned release_store_tid_;
+ unsigned release_store_reused_;
+ Dirty dirty_[kDirtyTids];
+ // If size_ is 0, tab_ is nullptr.
+ // If size <= 64 (kClockCount), tab_ contains pointer to an array with
+ // 64 ClockElem's (ClockBlock::clock).
+ // Otherwise, tab_ points to an array with up to 127 u32 elements,
+ // each pointing to the second-level 512b block with 64 ClockElem's.
+ // Unused space in the first level ClockBlock is used to store additional
+ // clock elements.
+ // The last u32 element in the first level ClockBlock is always used as
+ // reference counter.
+ //
+ // See the following scheme for details.
+ // All memory blocks are 512 bytes (allocated from ClockAlloc).
+ // Clock (clk) elements are 64 bits.
+ // Idx and ref are 32 bits.
+ //
+ // tab_
+ // |
+ // \/
+ // +----------------------------------------------------+
+ // | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
+ // +----------------------------------------------------+
+ // | |
+ // | \/
+ // | +----------------+
+ // | | clk0 ... clk63 |
+ // | +----------------+
+ // \/
+ // +------------------+
+ // | clk64 ... clk127 |
+ // +------------------+
+ //
+ // Note: dirty entries, if active, always override what's stored in the clock.
+ ClockBlock *tab_;
+ u32 tab_idx_;
+ u16 size_;
+ u16 blocks_; // Number of second level blocks.
+
+ void Unshare(ClockCache *c);
+ bool IsShared() const;
+ bool Cachable() const;
+ void ResetImpl();
+ void FlushDirty();
+ uptr capacity() const;
+ u32 get_block(uptr bi) const;
+ void append_block(u32 idx);
+ ClockElem &elem(unsigned tid) const;
+};
+
+// The clock that lives in threads.
+class ThreadClock {
+ public:
+ typedef DenseSlabAllocCache Cache;
+
+ explicit ThreadClock(unsigned tid, unsigned reused = 0);
+
+ u64 get(unsigned tid) const;
+ void set(ClockCache *c, unsigned tid, u64 v);
+ void set(u64 v);
+ void tick();
+ uptr size() const;
+
+ void acquire(ClockCache *c, SyncClock *src);
+ void releaseStoreAcquire(ClockCache *c, SyncClock *src);
+ void release(ClockCache *c, SyncClock *dst);
+ void acq_rel(ClockCache *c, SyncClock *dst);
+ void ReleaseStore(ClockCache *c, SyncClock *dst);
+ void ResetCached(ClockCache *c);
+ void NoteGlobalAcquire(u64 v);
+
+ void DebugReset();
+ void DebugDump(int(*printf)(const char *s, ...));
+
+ private:
+ static const uptr kDirtyTids = SyncClock::kDirtyTids;
+ // Index of the thread associated with he clock ("current thread").
+ const unsigned tid_;
+ const unsigned reused_; // tid_ reuse count.
+ // Current thread time when it acquired something from other threads.
+ u64 last_acquire_;
+
+ // Last time another thread has done a global acquire of this thread's clock.
+ // It helps to avoid problem described in:
+ // https://github.com/golang/go/issues/39186
+ // See test/tsan/java_finalizer2.cpp for a regression test.
+ // Note the failuire is _extremely_ hard to hit, so if you are trying
+ // to reproduce it, you may want to run something like:
+ // $ go get golang.org/x/tools/cmd/stress
+ // $ stress -p=64 ./a.out
+ //
+ // The crux of the problem is roughly as follows.
+ // A number of O(1) optimizations in the clocks algorithm assume proper
+ // transitive cumulative propagation of clock values. The AcquireGlobal
+ // operation may produce an inconsistent non-linearazable view of
+ // thread clocks. Namely, it may acquire a later value from a thread
+ // with a higher ID, but fail to acquire an earlier value from a thread
+ // with a lower ID. If a thread that executed AcquireGlobal then releases
+ // to a sync clock, it will spoil the sync clock with the inconsistent
+ // values. If another thread later releases to the sync clock, the optimized
+ // algorithm may break.
+ //
+ // The exact sequence of events that leads to the failure.
+ // - thread 1 executes AcquireGlobal
+ // - thread 1 acquires value 1 for thread 2
+ // - thread 2 increments clock to 2
+ // - thread 2 releases to sync object 1
+ // - thread 3 at time 1
+ // - thread 3 acquires from sync object 1
+ // - thread 3 increments clock to 2
+ // - thread 1 acquires value 2 for thread 3
+ // - thread 1 releases to sync object 2
+ // - sync object 2 clock has 1 for thread 2 and 2 for thread 3
+ // - thread 3 releases to sync object 2
+ // - thread 3 sees value 2 in the clock for itself
+ // and decides that it has already released to the clock
+ // and did not acquire anything from other threads after that
+ // (the last_acquire_ check in release operation)
+ // - thread 3 does not update the value for thread 2 in the clock from 1 to 2
+ // - thread 4 acquires from sync object 2
+ // - thread 4 detects a false race with thread 2
+ // as it should have been synchronized with thread 2 up to time 2,
+ // but because of the broken clock it is now synchronized only up to time 1
+ //
+ // The global_acquire_ value helps to prevent this scenario.
+ // Namely, thread 3 will not trust any own clock values up to global_acquire_
+ // for the purposes of the last_acquire_ optimization.
+ atomic_uint64_t global_acquire_;
+
+ // Cached SyncClock (without dirty entries and release_store_tid_).
+ // We reuse it for subsequent store-release operations without intervening
+ // acquire operations. Since it is shared (and thus constant), clock value
+ // for the current thread is then stored in dirty entries in the SyncClock.
+ // We host a refernece to the table while it is cached here.
+ u32 cached_idx_;
+ u16 cached_size_;
+ u16 cached_blocks_;
+
+ // Number of active elements in the clk_ table (the rest is zeros).
+ uptr nclk_;
+ u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
+
+ bool IsAlreadyAcquired(const SyncClock *src) const;
+ bool HasAcquiredAfterRelease(const SyncClock *dst) const;
+ void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
+};
+
+ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
+ DCHECK_LT(tid, kMaxTidInClock);
+ return clk_[tid];
+}
+
+ALWAYS_INLINE void ThreadClock::set(u64 v) {
+ DCHECK_GE(v, clk_[tid_]);
+ clk_[tid_] = v;
+}
+
+ALWAYS_INLINE void ThreadClock::tick() {
+ clk_[tid_]++;
+}
+
+ALWAYS_INLINE uptr ThreadClock::size() const {
+ return nclk_;
+}
+
+ALWAYS_INLINE void ThreadClock::NoteGlobalAcquire(u64 v) {
+ // Here we rely on the fact that AcquireGlobal is protected by
+ // ThreadRegistryLock, thus only one thread at a time executes it
+ // and values passed to this function should not go backwards.
+ CHECK_LE(atomic_load_relaxed(&global_acquire_), v);
+ atomic_store_relaxed(&global_acquire_, v);
+}
+
+ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
+ return Iter(this);
+}
+
+ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
+ return Iter(nullptr);
+}
+
+ALWAYS_INLINE uptr SyncClock::size() const {
+ return size_;
+}
+
+ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
+ : parent_(parent)
+ , pos_(nullptr)
+ , end_(nullptr)
+ , block_(-1) {
+ if (parent)
+ Next();
+}
+
+ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
+ pos_++;
+ if (UNLIKELY(pos_ >= end_))
+ Next();
+ return *this;
+}
+
+ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
+ return parent_ != other.parent_;
+}
+
+ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
+ return *pos_;
+}
+} // namespace __tsan
+
+#endif // TSAN_CLOCK_H
diff --git a/lib/tsan/tsan_debugging.cpp b/lib/tsan/tsan_debugging.cpp
new file mode 100644
index 0000000000..d3d6255090
--- /dev/null
+++ b/lib/tsan/tsan_debugging.cpp
@@ -0,0 +1,262 @@
+//===-- tsan_debugging.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// TSan debugging API implementation.
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+using namespace __tsan;
+
+static const char *ReportTypeDescription(ReportType typ) {
+ switch (typ) {
+ case ReportTypeRace: return "data-race";
+ case ReportTypeVptrRace: return "data-race-vptr";
+ case ReportTypeUseAfterFree: return "heap-use-after-free";
+ case ReportTypeVptrUseAfterFree: return "heap-use-after-free-vptr";
+ case ReportTypeExternalRace: return "external-race";
+ case ReportTypeThreadLeak: return "thread-leak";
+ case ReportTypeMutexDestroyLocked: return "locked-mutex-destroy";
+ case ReportTypeMutexDoubleLock: return "mutex-double-lock";
+ case ReportTypeMutexInvalidAccess: return "mutex-invalid-access";
+ case ReportTypeMutexBadUnlock: return "mutex-bad-unlock";
+ case ReportTypeMutexBadReadLock: return "mutex-bad-read-lock";
+ case ReportTypeMutexBadReadUnlock: return "mutex-bad-read-unlock";
+ case ReportTypeSignalUnsafe: return "signal-unsafe-call";
+ case ReportTypeErrnoInSignal: return "errno-in-signal-handler";
+ case ReportTypeDeadlock: return "lock-order-inversion";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static const char *ReportLocationTypeDescription(ReportLocationType typ) {
+ switch (typ) {
+ case ReportLocationGlobal: return "global";
+ case ReportLocationHeap: return "heap";
+ case ReportLocationStack: return "stack";
+ case ReportLocationTLS: return "tls";
+ case ReportLocationFD: return "fd";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static void CopyTrace(SymbolizedStack *first_frame, void **trace,
+ uptr trace_size) {
+ uptr i = 0;
+ for (SymbolizedStack *frame = first_frame; frame != nullptr;
+ frame = frame->next) {
+ trace[i++] = (void *)frame->info.address;
+ if (i >= trace_size) break;
+ }
+}
+
+// Meant to be called by the debugger.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report() {
+ return const_cast<ReportDesc*>(cur_thread()->current_report);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *description = ReportTypeDescription(rep->typ);
+ *count = rep->count;
+ *stack_count = rep->stacks.Size();
+ *mop_count = rep->mops.Size();
+ *loc_count = rep->locs.Size();
+ *mutex_count = rep->mutexes.Size();
+ *thread_count = rep->threads.Size();
+ *unique_tid_count = rep->unique_tids.Size();
+ if (rep->sleep) CopyTrace(rep->sleep->frames, sleep_trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *tag = rep->tag;
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->stacks.Size());
+ ReportStack *stack = rep->stacks[idx];
+ if (stack) CopyTrace(stack->frames, trace, trace_size);
+ return stack ? 1 : 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mops.Size());
+ ReportMop *mop = rep->mops[idx];
+ *tid = mop->tid;
+ *addr = (void *)mop->addr;
+ *size = mop->size;
+ *write = mop->write ? 1 : 0;
+ *atomic = mop->atomic ? 1 : 0;
+ if (mop->stack) CopyTrace(mop->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *type = ReportLocationTypeDescription(loc->type);
+ *addr = (void *)loc->global.start;
+ *start = loc->heap_chunk_start;
+ *size = loc->heap_chunk_size;
+ *tid = loc->tid;
+ *fd = loc->fd;
+ *suppressable = loc->suppressable;
+ if (loc->stack) CopyTrace(loc->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *object_type = GetObjectTypeFromTag(loc->external_tag);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mutexes.Size());
+ ReportMutex *mutex = rep->mutexes[idx];
+ *mutex_id = mutex->id;
+ *addr = (void *)mutex->addr;
+ *destroyed = mutex->destroyed;
+ if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->threads.Size());
+ ReportThread *thread = rep->threads[idx];
+ *tid = thread->id;
+ *os_id = thread->os_id;
+ *running = thread->running;
+ *name = thread->name;
+ *parent_tid = thread->parent_tid;
+ if (thread->stack) CopyTrace(thread->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->unique_tids.Size());
+ *tid = rep->unique_tids[idx];
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (IsMetaMem(addr)) {
+ region_kind = "meta shadow";
+ } else if (IsShadowMem(addr)) {
+ region_kind = "shadow";
+ } else {
+ bool is_stack = false;
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+
+ if (b != 0) {
+ region_address = (uptr)allocator()->GetBlockBegin((void *)addr);
+ region_size = b->siz;
+ region_kind = "heap";
+ } else {
+ // TODO(kuba.brecka): We should not lock. This is supposed to be called
+ // from within the debugger when other threads are stopped.
+ ctx->thread_registry->Lock();
+ ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
+ ctx->thread_registry->Unlock();
+ if (tctx) {
+ region_kind = is_stack ? "stack" : "tls";
+ } else {
+ region_kind = "global";
+ DataInfo info;
+ if (Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) {
+ internal_strncpy(name, info.name, name_size);
+ region_address = info.start;
+ region_size = info.size;
+ }
+ }
+ }
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id) {
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b == 0) return 0;
+
+ *thread_id = b->tid;
+ // No locking. This is supposed to be called from within the debugger when
+ // other threads are stopped.
+ ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+ *os_id = tctx->os_id;
+
+ StackTrace stack = StackDepotGet(b->stk);
+ size = Min(size, (uptr)stack.size);
+ for (uptr i = 0; i < size; i++) trace[i] = stack.trace[stack.size - i - 1];
+ return size;
+}
diff --git a/lib/tsan/tsan_defs.h b/lib/tsan/tsan_defs.h
new file mode 100644
index 0000000000..293d7deccc
--- /dev/null
+++ b/lib/tsan/tsan_defs.h
@@ -0,0 +1,195 @@
+//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_DEFS_H
+#define TSAN_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_stat.h"
+#include "ubsan/ubsan_platform.h"
+
+// Setup defaults for compile definitions.
+#ifndef TSAN_NO_HISTORY
+# define TSAN_NO_HISTORY 0
+#endif
+
+#ifndef TSAN_COLLECT_STATS
+# define TSAN_COLLECT_STATS 0
+#endif
+
+#ifndef TSAN_CONTAINS_UBSAN
+# if CAN_SANITIZE_UB && !SANITIZER_GO
+# define TSAN_CONTAINS_UBSAN 1
+# else
+# define TSAN_CONTAINS_UBSAN 0
+# endif
+#endif
+
+namespace __tsan {
+
+const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
+
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits; // tid reuse count
+};
+
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+ static const uptr kRefIdx = kTableSize - 1;
+ static const uptr kBlockIdx = kTableSize - 2;
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
+
+const int kTidBits = 13;
+// Reduce kMaxTid by kClockCount because one slot in ClockBlock table is
+// occupied by reference counter, so total number of elements we can store
+// in SyncClock is kClockCount * (kTableSize - 1).
+const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
+#if !SANITIZER_GO
+const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
+#else
+const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
+#endif
+const uptr kShadowStackSize = 64 * 1024;
+
+// Count of shadow values in a shadow cell.
+const uptr kShadowCnt = 4;
+
+// That many user bytes are mapped onto a single shadow cell.
+const uptr kShadowCell = 8;
+
+// Size of a single shadow value (u64).
+const uptr kShadowSize = 8;
+
+// Shadow memory is kShadowMultiplier times larger than user memory.
+const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+
+// That many user bytes are mapped onto a single meta shadow cell.
+// Must be less or equal to minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
+
+#if TSAN_NO_HISTORY
+const bool kCollectHistory = false;
+#else
+const bool kCollectHistory = true;
+#endif
+
+const u16 kInvalidTid = kMaxTid + 1;
+
+// The following "build consistency" machinery ensures that all source files
+// are built in the same configuration. Inconsistent builds lead to
+// hard to debug crashes.
+#if SANITIZER_DEBUG
+void build_consistency_debug();
+#else
+void build_consistency_release();
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats();
+#else
+void build_consistency_nostats();
+#endif
+
+static inline void USED build_consistency() {
+#if SANITIZER_DEBUG
+ build_consistency_debug();
+#else
+ build_consistency_release();
+#endif
+#if TSAN_COLLECT_STATS
+ build_consistency_stats();
+#else
+ build_consistency_nostats();
+#endif
+}
+
+template<typename T>
+T min(T a, T b) {
+ return a < b ? a : b;
+}
+
+template<typename T>
+T max(T a, T b) {
+ return a > b ? a : b;
+}
+
+template<typename T>
+T RoundUp(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)(((u64)p + align - 1) & ~(align - 1));
+}
+
+template<typename T>
+T RoundDown(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)((u64)p & ~(align - 1));
+}
+
+// Zeroizes high part, returns 'bits' lsb bits.
+template<typename T>
+T GetLsb(T v, int bits) {
+ return (T)((u64)v & ((1ull << bits) - 1));
+}
+
+struct MD5Hash {
+ u64 hash[2];
+ bool operator==(const MD5Hash &other) const;
+};
+
+MD5Hash md5_hash(const void *data, uptr size);
+
+struct Processor;
+struct ThreadState;
+class ThreadContext;
+struct Context;
+struct ReportStack;
+class ReportDesc;
+class RegionAlloc;
+
+// Descriptor of user's memory block.
+struct MBlock {
+ u64 siz : 48;
+ u64 tag : 16;
+ u32 stk;
+ u16 tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+enum ExternalTag : uptr {
+ kExternalTagNone = 0,
+ kExternalTagSwiftModifyingAccess = 1,
+ kExternalTagFirstUserAvailable = 2,
+ kExternalTagMax = 1024,
+ // Don't set kExternalTagMax over 65,536, since MBlock only stores tags
+ // as 16-bit values, see tsan_defs.h.
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DEFS_H
diff --git a/lib/tsan/tsan_dense_alloc.h b/lib/tsan/tsan_dense_alloc.h
new file mode 100644
index 0000000000..64fc50e95c
--- /dev/null
+++ b/lib/tsan/tsan_dense_alloc.h
@@ -0,0 +1,141 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference with traditional slab allocators is that DenseSlabAlloc
+// allocates/free indices of objects and provide a functionality to map
+// the index onto the real pointer. The index is u32, that is, 2 times smaller
+// than uptr (hense the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+ static const uptr kSize = 128;
+ typedef u32 IndexT;
+ uptr pos;
+ IndexT cache[kSize];
+ template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+ typedef DenseSlabAllocCache Cache;
+ typedef typename Cache::IndexT IndexT;
+
+ explicit DenseSlabAlloc(const char *name) {
+ // Check that kL1Size and kL2Size are sane.
+ CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+ CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+ CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+ // Check that it makes sense to use the dense alloc.
+ CHECK_GE(sizeof(T), sizeof(IndexT));
+ internal_memset(map_, 0, sizeof(map_));
+ freelist_ = 0;
+ fillpos_ = 0;
+ name_ = name;
+ }
+
+ ~DenseSlabAlloc() {
+ for (uptr i = 0; i < kL1Size; i++) {
+ if (map_[i] != 0)
+ UnmapOrDie(map_[i], kL2Size * sizeof(T));
+ }
+ }
+
+ IndexT Alloc(Cache *c) {
+ if (c->pos == 0)
+ Refill(c);
+ return c->cache[--c->pos];
+ }
+
+ void Free(Cache *c, IndexT idx) {
+ DCHECK_NE(idx, 0);
+ if (c->pos == Cache::kSize)
+ Drain(c);
+ c->cache[c->pos++] = idx;
+ }
+
+ T *Map(IndexT idx) {
+ DCHECK_NE(idx, 0);
+ DCHECK_LE(idx, kL1Size * kL2Size);
+ return &map_[idx / kL2Size][idx % kL2Size];
+ }
+
+ void FlushCache(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ while (c->pos) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+
+ void InitCache(Cache *c) {
+ c->pos = 0;
+ internal_memset(c->cache, 0, sizeof(c->cache));
+ }
+
+ private:
+ T *map_[kL1Size];
+ SpinMutex mtx_;
+ IndexT freelist_;
+ uptr fillpos_;
+ const char *name_;
+
+ void Refill(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ if (freelist_ == 0) {
+ if (fillpos_ == kL1Size) {
+ Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
+ name_, kL1Size, kL2Size);
+ Die();
+ }
+ VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
+ name_, fillpos_, kL1Size, kL2Size);
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
+ // Reserve 0 as invalid index.
+ IndexT start = fillpos_ == 0 ? 1 : 0;
+ for (IndexT i = start; i < kL2Size; i++) {
+ new(batch + i) T;
+ *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+ }
+ *(IndexT*)(batch + kL2Size - 1) = 0;
+ freelist_ = fillpos_ * kL2Size + start;
+ map_[fillpos_++] = batch;
+ }
+ for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+ IndexT idx = freelist_;
+ c->cache[c->pos++] = idx;
+ freelist_ = *(IndexT*)Map(idx);
+ }
+ }
+
+ void Drain(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DENSE_ALLOC_H
diff --git a/lib/tsan/tsan_external.cpp b/lib/tsan/tsan_external.cpp
new file mode 100644
index 0000000000..0faa1ee93a
--- /dev/null
+++ b/lib/tsan/tsan_external.cpp
@@ -0,0 +1,124 @@
+//===-- tsan_external.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+#include "tsan_interceptors.h"
+
+namespace __tsan {
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+struct TagData {
+ const char *object_type;
+ const char *header;
+};
+
+static TagData registered_tags[kExternalTagMax] = {
+ {},
+ {"Swift variable", "Swift access race"},
+};
+static atomic_uint32_t used_tags{kExternalTagFirstUserAvailable};
+static TagData *GetTagData(uptr tag) {
+ // Invalid/corrupted tag? Better return NULL and let the caller deal with it.
+ if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr;
+ return &registered_tags[tag];
+}
+
+const char *GetObjectTypeFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->object_type : nullptr;
+}
+
+const char *GetReportHeaderFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->header : nullptr;
+}
+
+void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
+ FuncEntry(thr, (uptr)&registered_tags[tag]);
+}
+
+uptr TagFromShadowStackFrame(uptr pc) {
+ uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
+ void *pc_ptr = (void *)pc;
+ if (pc_ptr < GetTagData(0) || pc_ptr > GetTagData(tag_count - 1))
+ return 0;
+ return (TagData *)pc_ptr - GetTagData(0);
+}
+
+#if !SANITIZER_GO
+
+typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
+void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ ThreadState *thr = cur_thread();
+ if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
+ InsertShadowStackFrameForTag(thr, (uptr)tag);
+ bool in_ignored_lib;
+ if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
+ access(thr, CALLERPC, (uptr)addr, kSizeLog1);
+ }
+ FuncExit(thr);
+ if (caller_pc) FuncExit(thr);
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type) {
+ uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed);
+ CHECK_LT(new_tag, kExternalTagMax);
+ GetTagData(new_tag)->object_type = internal_strdup(object_type);
+ char header[127] = {0};
+ internal_snprintf(header, sizeof(header), "race on %s", object_type);
+ GetTagData(new_tag)->header = internal_strdup(header);
+ return (void *)new_tag;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header) {
+ CHECK_GE((uptr)tag, kExternalTagFirstUserAvailable);
+ CHECK_LT((uptr)tag, kExternalTagMax);
+ atomic_uintptr_t *header_ptr =
+ (atomic_uintptr_t *)&GetTagData((uptr)tag)->header;
+ header = internal_strdup(header);
+ char *old_header =
+ (char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
+ if (old_header) internal_free(old_header);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ Allocator *a = allocator();
+ MBlock *b = nullptr;
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b) {
+ b->tag = (uptr)tag;
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, caller_pc, tag, MemoryRead);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, caller_pc, tag, MemoryWrite);
+}
+} // extern "C"
+
+#endif // !SANITIZER_GO
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_fd.cpp b/lib/tsan/tsan_fd.cpp
new file mode 100644
index 0000000000..50a6b56916
--- /dev/null
+++ b/lib/tsan/tsan_fd.cpp
@@ -0,0 +1,316 @@
+//===-- tsan_fd.cpp -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_fd.h"
+#include "tsan_rtl.h"
+#include <sanitizer_common/sanitizer_atomic.h>
+
+namespace __tsan {
+
+const int kTableSizeL1 = 1024;
+const int kTableSizeL2 = 1024;
+const int kTableSize = kTableSizeL1 * kTableSizeL2;
+
+struct FdSync {
+ atomic_uint64_t rc;
+};
+
+struct FdDesc {
+ FdSync *sync;
+ int creation_tid;
+ u32 creation_stack;
+};
+
+struct FdContext {
+ atomic_uintptr_t tab[kTableSizeL1];
+ // Addresses used for synchronization.
+ FdSync globsync;
+ FdSync filesync;
+ FdSync socksync;
+ u64 connectsync;
+};
+
+static FdContext fdctx;
+
+static bool bogusfd(int fd) {
+ // Apparently a bogus fd value.
+ return fd < 0 || fd >= kTableSize;
+}
+
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+ FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
+ kDefaultAlignment, false);
+ atomic_store(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static FdSync *ref(FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
+ atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static void unref(ThreadState *thr, uptr pc, FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
+ if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
+ CHECK_NE(s, &fdctx.globsync);
+ CHECK_NE(s, &fdctx.filesync);
+ CHECK_NE(s, &fdctx.socksync);
+ user_free(thr, pc, s, false);
+ }
+ }
+}
+
+static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
+ CHECK_GE(fd, 0);
+ CHECK_LT(fd, kTableSize);
+ atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
+ uptr l1 = atomic_load(pl1, memory_order_consume);
+ if (l1 == 0) {
+ uptr size = kTableSizeL2 * sizeof(FdDesc);
+ // We need this to reside in user memory to properly catch races on it.
+ void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
+ internal_memset(p, 0, size);
+ MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
+ if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
+ l1 = (uptr)p;
+ else
+ user_free(thr, pc, p, false);
+ }
+ FdDesc *fds = reinterpret_cast<FdDesc *>(l1);
+ return &fds[fd % kTableSizeL2];
+}
+
+// pd must be already ref'ed.
+static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
+ bool write = true) {
+ FdDesc *d = fddesc(thr, pc, fd);
+ // As a matter of fact, we don't intercept all close calls.
+ // See e.g. libc __res_iclose().
+ if (d->sync) {
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ }
+ if (flags()->io_sync == 0) {
+ unref(thr, pc, s);
+ } else if (flags()->io_sync == 1) {
+ d->sync = s;
+ } else if (flags()->io_sync == 2) {
+ unref(thr, pc, s);
+ d->sync = &fdctx.globsync;
+ }
+ d->creation_tid = thr->tid;
+ d->creation_stack = CurrentStackId(thr, pc);
+ if (write) {
+ // To catch races between fd usage and open.
+ MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+ } else {
+ // See the dup-related comment in FdClose.
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ }
+}
+
+void FdInit() {
+ atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
+}
+
+void FdOnFork(ThreadState *thr, uptr pc) {
+ // On fork() we need to reset all fd's, because the child is going
+ // close all them, and that will cause races between previous read/write
+ // and the close.
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ for (int l2 = 0; l2 < kTableSizeL2; l2++) {
+ FdDesc *d = &tab[l2];
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ }
+ }
+}
+
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
+ int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
+ FdDesc *d = &tab[l2];
+ *fd = l1 * kTableSizeL1 + l2;
+ *tid = d->creation_tid;
+ *stack = d->creation_stack;
+ return true;
+ }
+ }
+ return false;
+}
+
+void FdAcquire(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ if (s)
+ Acquire(thr, pc, (uptr)s);
+}
+
+void FdRelease(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ if (s)
+ Release(thr, pc, (uptr)s);
+}
+
+void FdAccess(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+}
+
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
+ DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ if (write) {
+ // To catch races between fd usage and close.
+ MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+ } else {
+ // This path is used only by dup2/dup3 calls.
+ // We do read instead of write because there is a number of legitimate
+ // cases where write would lead to false positives:
+ // 1. Some software dups a closed pipe in place of a socket before closing
+ // the socket (to prevent races actually).
+ // 2. Some daemons dup /dev/null in place of stdin/stdout.
+ // On the other hand we have not seen cases when write here catches real
+ // bugs.
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ }
+ // We need to clear it, because if we do not intercept any call out there
+ // that creates fd, we will hit false postives.
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ d->creation_tid = 0;
+ d->creation_stack = 0;
+}
+
+void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.filesync);
+}
+
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
+ DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
+ if (bogusfd(oldfd) || bogusfd(newfd))
+ return;
+ // Ignore the case when user dups not yet connected socket.
+ FdDesc *od = fddesc(thr, pc, oldfd);
+ MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+ FdClose(thr, pc, newfd, write);
+ init(thr, pc, newfd, ref(od->sync), write);
+}
+
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
+ DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
+ FdSync *s = allocsync(thr, pc);
+ init(thr, pc, rfd, ref(s));
+ init(thr, pc, wfd, ref(s));
+ unref(thr, pc, s);
+}
+
+void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync(thr, pc));
+}
+
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync(thr, pc));
+}
+
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // It can be a UDP socket.
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
+ DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Acquire(thr, pc, (uptr)&fdctx.connectsync);
+ init(thr, pc, newfd, &fdctx.socksync);
+}
+
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Release(thr, pc, (uptr)&fdctx.connectsync);
+}
+
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+uptr File2addr(const char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+uptr Dir2addr(const char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_fd.h b/lib/tsan/tsan_fd.h
new file mode 100644
index 0000000000..ce4f2f73ba
--- /dev/null
+++ b/lib/tsan/tsan_fd.h
@@ -0,0 +1,64 @@
+//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// This file handles synchronization via IO.
+// People use IO for synchronization along the lines of:
+//
+// int X;
+// int client_socket; // initialized elsewhere
+// int server_socket; // initialized elsewhere
+//
+// Thread 1:
+// X = 42;
+// send(client_socket, ...);
+//
+// Thread 2:
+// if (recv(server_socket, ...) > 0)
+// assert(X == 42);
+//
+// This file determines the scope of the file descriptor (pipe, socket,
+// all local files, etc) and executes acquire and release operations on
+// the scope as necessary. Some scopes are very fine grained (e.g. pipe
+// operations synchronize only with operations on the same pipe), while
+// others are corse-grained (e.g. all operations on local files synchronize
+// with each other).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FD_H
+#define TSAN_FD_H
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void FdInit();
+void FdAcquire(ThreadState *thr, uptr pc, int fd);
+void FdRelease(ThreadState *thr, uptr pc, int fd);
+void FdAccess(ThreadState *thr, uptr pc, int fd);
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write = true);
+void FdFileCreate(ThreadState *thr, uptr pc, int fd);
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write);
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
+void FdEventCreate(ThreadState *thr, uptr pc, int fd);
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+void FdOnFork(ThreadState *thr, uptr pc);
+
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
+
+} // namespace __tsan
+
+#endif // TSAN_INTERFACE_H
diff --git a/lib/tsan/tsan_flags.cpp b/lib/tsan/tsan_flags.cpp
new file mode 100644
index 0000000000..44bf325cd3
--- /dev/null
+++ b/lib/tsan/tsan_flags.cpp
@@ -0,0 +1,125 @@
+//===-- tsan_flags.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "ubsan/ubsan_flags.h"
+
+namespace __tsan {
+
+// Can be overriden in frontend.
+#ifdef TSAN_EXTERNAL_HOOKS
+extern "C" const char* __tsan_default_options();
+#else
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_options() {
+ return "";
+}
+#endif
+
+void Flags::SetDefaults() {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ second_deadlock_stack = false;
+}
+
+void RegisterTsanFlags(FlagParser *parser, Flags *f) {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ RegisterFlag(parser, "second_deadlock_stack",
+ "Report where each mutex is locked in deadlock reports",
+ &f->second_deadlock_stack);
+}
+
+void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
+ SetCommonFlagsDefaults();
+ {
+ // Override some common flags defaults.
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.allow_addr2line = true;
+ if (SANITIZER_GO) {
+ // Does not work as expected for Go: runtime handles SIGABRT and crashes.
+ cf.abort_on_error = false;
+ // Go does not have mutexes.
+ cf.detect_deadlocks = false;
+ }
+ cf.print_suppressions = false;
+ cf.stack_trace_format = " #%n %f %S %M";
+ cf.exitcode = 66;
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
+
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterTsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ // Let a frontend override.
+ parser.ParseString(__tsan_default_options());
+#if TSAN_CONTAINS_UBSAN
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+ // Override from command line.
+ parser.ParseString(env, env_option_name);
+#if TSAN_CONTAINS_UBSAN
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
+
+ // Sanity check.
+ if (!f->report_bugs) {
+ f->report_thread_leaks = false;
+ f->report_destroy_locked = false;
+ f->report_signal_unsafe = false;
+ }
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ if (f->history_size < 0 || f->history_size > 7) {
+ Printf("ThreadSanitizer: incorrect value for history_size"
+ " (must be [0..7])\n");
+ Die();
+ }
+
+ if (f->io_sync < 0 || f->io_sync > 2) {
+ Printf("ThreadSanitizer: incorrect value for io_sync"
+ " (must be [0..2])\n");
+ Die();
+ }
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_flags.h b/lib/tsan/tsan_flags.h
new file mode 100644
index 0000000000..da27d5b992
--- /dev/null
+++ b/lib/tsan/tsan_flags.h
@@ -0,0 +1,34 @@
+//===-- tsan_flags.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+// NOTE: This file may be included into user code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_FLAGS_H
+#define TSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+
+namespace __tsan {
+
+struct Flags : DDFlags {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+
+ void SetDefaults();
+ void ParseFromString(const char *str);
+};
+
+void InitializeFlags(Flags *flags, const char *env,
+ const char *env_option_name = nullptr);
+} // namespace __tsan
+
+#endif // TSAN_FLAGS_H
diff --git a/lib/tsan/tsan_flags.inc b/lib/tsan/tsan_flags.inc
new file mode 100644
index 0000000000..2105c75448
--- /dev/null
+++ b/lib/tsan/tsan_flags.inc
@@ -0,0 +1,85 @@
+//===-- tsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// TSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FLAG
+# error "Define TSAN_FLAG prior to including this file!"
+#endif
+
+// TSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+TSAN_FLAG(bool, enable_annotations, true,
+ "Enable dynamic annotations, otherwise they are no-ops.")
+// Suppress a race report if we've already output another race report
+// with the same stack.
+TSAN_FLAG(bool, suppress_equal_stacks, true,
+ "Suppress a race report if we've already output another race report "
+ "with the same stack.")
+TSAN_FLAG(bool, suppress_equal_addresses, true,
+ "Suppress a race report if we've already output another race report "
+ "on the same address.")
+
+TSAN_FLAG(bool, report_bugs, true,
+ "Turns off bug reporting entirely (useful for benchmarking).")
+TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
+TSAN_FLAG(bool, report_destroy_locked, true,
+ "Report destruction of a locked mutex?")
+TSAN_FLAG(bool, report_mutex_bugs, true,
+ "Report incorrect usages of mutexes and mutex annotations?")
+TSAN_FLAG(bool, report_signal_unsafe, true,
+ "Report violations of async signal-safety "
+ "(e.g. malloc() call from a signal handler).")
+TSAN_FLAG(bool, report_atomic_races, true,
+ "Report races between atomic and plain memory accesses.")
+TSAN_FLAG(
+ bool, force_seq_cst_atomics, false,
+ "If set, all atomics are effectively sequentially consistent (seq_cst), "
+ "regardless of what user actually specified.")
+TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
+TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
+TSAN_FLAG(int, atexit_sleep_ms, 1000,
+ "Sleep in main thread before exiting for that many ms "
+ "(useful to catch \"at exit\" races).")
+TSAN_FLAG(const char *, profile_memory, "",
+ "If set, periodically write memory profile to that file.")
+TSAN_FLAG(int, flush_memory_ms, 0, "Flush shadow memory every X ms.")
+TSAN_FLAG(int, flush_symbolizer_ms, 5000, "Flush symbolizer caches every X ms.")
+TSAN_FLAG(
+ int, memory_limit_mb, 0,
+ "Resident memory limit in MB to aim at."
+ "If the process consumes more memory, then TSan will flush shadow memory.")
+TSAN_FLAG(bool, stop_on_start, false,
+ "Stops on start until __tsan_resume() is called (for debugging).")
+TSAN_FLAG(bool, running_on_valgrind, false,
+ "Controls whether RunningOnValgrind() returns true or false.")
+// There are a lot of goroutines in Go, so we use smaller history.
+TSAN_FLAG(
+ int, history_size, SANITIZER_GO ? 1 : 3,
+ "Per-thread history size, controls how many previous memory accesses "
+ "are remembered per thread. Possible values are [0..7]. "
+ "history_size=0 amounts to 32K memory accesses. Each next value doubles "
+ "the amount of memory accesses, up to history_size=7 that amounts to "
+ "4M memory accesses. The default value is 2 (128K memory accesses).")
+TSAN_FLAG(int, io_sync, 1,
+ "Controls level of synchronization implied by IO operations. "
+ "0 - no synchronization "
+ "1 - reasonable level of synchronization (write->read)"
+ "2 - global synchronization of all IO operations.")
+TSAN_FLAG(bool, die_after_fork, true,
+ "Die after multi-threaded fork if the child creates new threads.")
+TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_MAC ? true : false,
+ "Ignore reads and writes from all interceptors.")
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
+ "Interceptors should only detect races when called from instrumented "
+ "modules.")
+TSAN_FLAG(bool, shared_ptr_interceptor, true,
+ "Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
diff --git a/lib/tsan/tsan_ignoreset.cpp b/lib/tsan/tsan_ignoreset.cpp
new file mode 100644
index 0000000000..f6e41f6686
--- /dev/null
+++ b/lib/tsan/tsan_ignoreset.cpp
@@ -0,0 +1,46 @@
+//===-- tsan_ignoreset.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_ignoreset.h"
+
+namespace __tsan {
+
+const uptr IgnoreSet::kMaxSize;
+
+IgnoreSet::IgnoreSet()
+ : size_() {
+}
+
+void IgnoreSet::Add(u32 stack_id) {
+ if (size_ == kMaxSize)
+ return;
+ for (uptr i = 0; i < size_; i++) {
+ if (stacks_[i] == stack_id)
+ return;
+ }
+ stacks_[size_++] = stack_id;
+}
+
+void IgnoreSet::Reset() {
+ size_ = 0;
+}
+
+uptr IgnoreSet::Size() const {
+ return size_;
+}
+
+u32 IgnoreSet::At(uptr i) const {
+ CHECK_LT(i, size_);
+ CHECK_LE(size_, kMaxSize);
+ return stacks_[i];
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_ignoreset.h b/lib/tsan/tsan_ignoreset.h
new file mode 100644
index 0000000000..3e318bd674
--- /dev/null
+++ b/lib/tsan/tsan_ignoreset.h
@@ -0,0 +1,37 @@
+//===-- tsan_ignoreset.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// IgnoreSet holds a set of stack traces where ignores were enabled.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_IGNORESET_H
+#define TSAN_IGNORESET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class IgnoreSet {
+ public:
+ static const uptr kMaxSize = 16;
+
+ IgnoreSet();
+ void Add(u32 stack_id);
+ void Reset();
+ uptr Size() const;
+ u32 At(uptr i) const;
+
+ private:
+ uptr size_;
+ u32 stacks_[kMaxSize];
+};
+
+} // namespace __tsan
+
+#endif // TSAN_IGNORESET_H
diff --git a/lib/tsan/tsan_interceptors.h b/lib/tsan/tsan_interceptors.h
new file mode 100644
index 0000000000..88d1edd775
--- /dev/null
+++ b/lib/tsan/tsan_interceptors.h
@@ -0,0 +1,76 @@
+#ifndef TSAN_INTERCEPTORS_H
+#define TSAN_INTERCEPTORS_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+class ScopedInterceptor {
+ public:
+ ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+ ~ScopedInterceptor();
+ void DisableIgnores();
+ void EnableIgnores();
+ private:
+ ThreadState *const thr_;
+ const uptr pc_;
+ bool in_ignored_lib_;
+ bool ignoring_;
+};
+
+LibIgnore *libignore();
+
+#if !SANITIZER_GO
+INLINE bool in_symbolizer() {
+ cur_thread_init();
+ return UNLIKELY(cur_thread()->in_symbolizer);
+}
+#endif
+
+} // namespace __tsan
+
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ cur_thread_init(); \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ ScopedInterceptor si(thr, #func, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ if (REAL(func) == 0) { \
+ Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Die(); \
+ } \
+ if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
+ return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
+ si.DisableIgnores();
+
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \
+ si.EnableIgnores();
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+
+#if SANITIZER_NETBSD
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func2));
+#else
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...)
+#endif
+
+#endif // TSAN_INTERCEPTORS_H
diff --git a/lib/tsan/tsan_interceptors_posix.cpp b/lib/tsan/tsan_interceptors_posix.cpp
new file mode 100644
index 0000000000..9c3e0369bc
--- /dev/null
+++ b/lib/tsan/tsan_interceptors_posix.cpp
@@ -0,0 +1,2854 @@
+//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.inc
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+#include "tsan_platform.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_fd.h"
+
+using namespace __tsan;
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+#define stdout __stdoutp
+#define stderr __stderrp
+#endif
+
+#if SANITIZER_NETBSD
+#define dirfd(dirp) (*(int *)(dirp))
+#define fileno_unlocked(fp) \
+ (((__sanitizer_FILE *)fp)->_file == -1 \
+ ? -1 \
+ : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
+
+#define stdout ((__sanitizer_FILE*)&__sF[1])
+#define stderr ((__sanitizer_FILE*)&__sF[2])
+
+#define nanosleep __nanosleep50
+#define vfork __vfork14
+#endif
+
+#if SANITIZER_ANDROID
+#define mallopt(a, b)
+#endif
+
+#ifdef __mips__
+const int kSigCount = 129;
+#else
+const int kSigCount = 65;
+#endif
+
+#ifdef __mips__
+struct ucontext_t {
+ u64 opaque[768 / sizeof(u64) + 1];
+};
+#else
+struct ucontext_t {
+ // The size is determined by looking at sizeof of real ucontext_t on linux.
+ u64 opaque[936 / sizeof(u64) + 1];
+};
+#endif
+
+#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
+#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
+#elif defined(__aarch64__) || SANITIZER_PPC64V2
+#define PTHREAD_ABI_BASE "GLIBC_2.17"
+#endif
+
+extern "C" int pthread_attr_init(void *attr);
+extern "C" int pthread_attr_destroy(void *attr);
+DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
+extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
+extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+extern "C" int pthread_setspecific(unsigned key, const void *v);
+DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
+DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
+DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+extern "C" void *pthread_self();
+extern "C" void _exit(int status);
+#if !SANITIZER_NETBSD
+extern "C" int fileno_unlocked(void *stream);
+extern "C" int dirfd(void *dirp);
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
+extern "C" int mallopt(int param, int value);
+#endif
+#if SANITIZER_NETBSD
+extern __sanitizer_FILE __sF[];
+#else
+extern __sanitizer_FILE *stdout, *stderr;
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+const int PTHREAD_MUTEX_RECURSIVE = 1;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
+#else
+const int PTHREAD_MUTEX_RECURSIVE = 2;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+const int EPOLL_CTL_ADD = 1;
+#endif
+const int SIGILL = 4;
+const int SIGTRAP = 5;
+const int SIGABRT = 6;
+const int SIGFPE = 8;
+const int SIGSEGV = 11;
+const int SIGPIPE = 13;
+const int SIGTERM = 15;
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+const int SIGBUS = 10;
+const int SIGSYS = 12;
+#else
+const int SIGBUS = 7;
+const int SIGSYS = 31;
+#endif
+void *const MAP_FAILED = (void*)-1;
+#if SANITIZER_NETBSD
+const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
+#elif !SANITIZER_MAC
+const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
+#endif
+const int MAP_FIXED = 0x10;
+typedef long long_t;
+
+// From /usr/include/unistd.h
+# define F_ULOCK 0 /* Unlock a previously locked region. */
+# define F_LOCK 1 /* Lock a region for exclusive use. */
+# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
+# define F_TEST 3 /* Test a region for other processes locks. */
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+const int SA_SIGINFO = 0x40;
+const int SIG_SETMASK = 3;
+#elif defined(__mips__)
+const int SA_SIGINFO = 8;
+const int SIG_SETMASK = 3;
+#else
+const int SA_SIGINFO = 4;
+const int SIG_SETMASK = 2;
+#endif
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
+ (cur_thread_init(), !cur_thread()->is_inited)
+
+namespace __tsan {
+struct SignalDesc {
+ bool armed;
+ bool sigaction;
+ __sanitizer_siginfo siginfo;
+ ucontext_t ctx;
+};
+
+struct ThreadSignalContext {
+ int int_signal_send;
+ atomic_uintptr_t in_blocking_func;
+ atomic_uintptr_t have_pending_signals;
+ SignalDesc pending_signals[kSigCount];
+ // emptyset and oldset are too big for stack.
+ __sanitizer_sigset_t emptyset;
+ __sanitizer_sigset_t oldset;
+};
+
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
+};
+
+// InterceptorContext holds all global data required for interceptors.
+// It's explicitly constructed in InitializeInterceptors with placement new
+// and is never destroyed. This allows usage of members with non-trivial
+// constructors and destructors.
+struct InterceptorContext {
+ // The object is 64-byte aligned, because we want hot data to be located
+ // in a single cache line if possible (it's accessed in every interceptor).
+ ALIGNED(64) LibIgnore libignore;
+ __sanitizer_sigaction sigactions[kSigCount];
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+ unsigned finalize_key;
+#endif
+
+ BlockingMutex atexit_mu;
+ Vector<struct AtExitCtx *> AtExitStack;
+
+ InterceptorContext()
+ : libignore(LINKER_INITIALIZED), AtExitStack() {
+ }
+};
+
+static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
+InterceptorContext *interceptor_ctx() {
+ return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
+}
+
+LibIgnore *libignore() {
+ return &interceptor_ctx()->libignore;
+}
+
+void InitializeLibIgnore() {
+ const SuppressionContext &supp = *Suppressions();
+ const uptr n = supp.SuppressionCount();
+ for (uptr i = 0; i < n; i++) {
+ const Suppression *s = supp.SuppressionAt(i);
+ if (0 == internal_strcmp(s->type, kSuppressionLib))
+ libignore()->AddIgnoredLibrary(s->templ);
+ }
+ if (flags()->ignore_noninstrumented_modules)
+ libignore()->IgnoreNoninstrumentedModules(true);
+ libignore()->OnLibraryLoaded(0);
+}
+
+// The following two hooks can be used by for cooperative scheduling when
+// locking.
+#ifdef TSAN_EXTERNAL_HOOKS
+void OnPotentiallyBlockingRegionBegin();
+void OnPotentiallyBlockingRegionEnd();
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
+SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
+#endif
+
+} // namespace __tsan
+
+static ThreadSignalContext *SigCtx(ThreadState *thr) {
+ ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
+ if (ctx == 0 && !thr->is_dead) {
+ ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
+ MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
+ thr->signal_ctx = ctx;
+ }
+ return ctx;
+}
+
+ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
+ uptr pc)
+ : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
+ Initialize(thr);
+ if (!thr_->is_inited) return;
+ if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
+ ignoring_ =
+ !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
+ libignore()->IsIgnored(pc, &in_ignored_lib_));
+ EnableIgnores();
+}
+
+ScopedInterceptor::~ScopedInterceptor() {
+ if (!thr_->is_inited) return;
+ DisableIgnores();
+ if (!thr_->ignore_interceptors) {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ CheckNoLocks(thr_);
+ }
+}
+
+void ScopedInterceptor::EnableIgnores() {
+ if (ignoring_) {
+ ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
+ if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
+ if (in_ignored_lib_) {
+ DCHECK(!thr_->in_ignored_lib);
+ thr_->in_ignored_lib = true;
+ }
+ }
+}
+
+void ScopedInterceptor::DisableIgnores() {
+ if (ignoring_) {
+ ThreadIgnoreEnd(thr_, pc_);
+ if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
+ if (in_ignored_lib_) {
+ DCHECK(thr_->in_ignored_lib);
+ thr_->in_ignored_lib = false;
+ }
+ }
+}
+
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+#elif SANITIZER_NETBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(__libc_##func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
+ INTERCEPT_FUNCTION(__libc_thr_##func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+#endif
+
+#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
+ MemoryAccessRange((thr), (pc), (uptr)(s), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n), false)
+
+#define READ_STRING(thr, pc, s, n) \
+ READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
+
+#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
+
+struct BlockingCall {
+ explicit BlockingCall(ThreadState *thr)
+ : thr(thr)
+ , ctx(SigCtx(thr)) {
+ for (;;) {
+ atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
+ }
+
+ ~BlockingCall() {
+ thr->ignore_interceptors--;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ }
+
+ ThreadState *thr;
+ ThreadSignalContext *ctx;
+};
+
+TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
+ SCOPED_TSAN_INTERCEPTOR(sleep, sec);
+ unsigned res = BLOCK_REAL(sleep)(sec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, usleep, long_t usec) {
+ SCOPED_TSAN_INTERCEPTOR(usleep, usec);
+ int res = BLOCK_REAL(usleep)(usec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
+ SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
+ int res = BLOCK_REAL(nanosleep)(req, rem);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pause, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(pause, fake);
+ return BLOCK_REAL(pause)(fake);
+}
+
+static void at_exit_wrapper() {
+ AtExitCtx *ctx;
+ {
+ // Ensure thread-safety.
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ // Pop AtExitCtx from the top of the stack of callback functions
+ uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
+ ctx = interceptor_ctx()->AtExitStack[element];
+ interceptor_ctx()->AtExitStack.PopBack();
+ }
+
+ Acquire(cur_thread(), (uptr)0, (uptr)ctx);
+ ((void(*)())ctx->f)();
+ InternalFree(ctx);
+}
+
+static void cxa_at_exit_wrapper(void *arg) {
+ Acquire(cur_thread(), 0, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(void *arg))ctx->f)(ctx->arg);
+ InternalFree(ctx);
+}
+
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso);
+
+#if !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
+ if (in_symbolizer())
+ return 0;
+ // We want to setup the atexit callback even if we are in ignored lib
+ // or after fork.
+ SCOPED_INTERCEPTOR_RAW(atexit, f);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
+}
+#endif
+
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+ if (in_symbolizer())
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
+}
+
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso) {
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ ctx->f = f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res;
+ if (!dso) {
+ // NetBSD does not preserve the 2nd argument if dso is equal to 0
+ // Store ctx in a local stack-like structure
+
+ // Ensure thread-safety.
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
+ // Push AtExitCtx on the top of the stack of callback functions
+ if (!res) {
+ interceptor_ctx()->AtExitStack.PushBack(ctx);
+ }
+ } else {
+ res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
+ }
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+static void on_exit_wrapper(int status, void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ InternalFree(ctx);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+ if (in_symbolizer())
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ ctx->f = (void(*)())f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(on_exit)(on_exit_wrapper, ctx);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT
+#endif
+
+// Cleanup old bufs.
+static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp <= sp) {
+ uptr sz = thr->jmp_bufs.Size();
+ internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
+ thr->jmp_bufs.PopBack();
+ i--;
+ }
+ }
+}
+
+static void SetJmp(ThreadState *thr, uptr sp) {
+ if (!thr->is_inited) // called from libc guts during bootstrap
+ return;
+ // Cleanup old bufs.
+ JmpBufGarbageCollect(thr, sp);
+ // Remember the buf.
+ JmpBuf *buf = thr->jmp_bufs.PushBack();
+ buf->sp = sp;
+ buf->shadow_stack_pos = thr->shadow_stack_pos;
+ ThreadSignalContext *sctx = SigCtx(thr);
+ buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->in_blocking_func = sctx ?
+ atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
+ false;
+ buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
+ memory_order_relaxed);
+}
+
+static void LongJmp(ThreadState *thr, uptr *env) {
+ uptr sp = ExtractLongJmpSp(env);
+ // Find the saved buf with matching sp.
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp == sp) {
+ CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
+ // Unwind the stack.
+ while (thr->shadow_stack_pos > buf->shadow_stack_pos)
+ FuncExit(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx) {
+ sctx->int_signal_send = buf->int_signal_send;
+ atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
+ }
+ atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
+ memory_order_relaxed);
+ JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
+ return;
+ }
+ }
+ Printf("ThreadSanitizer: can't find longjmp buf\n");
+ CHECK(0);
+}
+
+// FIXME: put everything below into a common extern "C" block?
+extern "C" void __tsan_setjmp(uptr sp) {
+ cur_thread_init();
+ SetJmp(cur_thread(), sp);
+}
+
+#if SANITIZER_MAC
+TSAN_INTERCEPTOR(int, setjmp, void *env);
+TSAN_INTERCEPTOR(int, _setjmp, void *env);
+TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
+#else // SANITIZER_MAC
+
+#if SANITIZER_NETBSD
+#define setjmp_symname __setjmp14
+#define sigsetjmp_symname __sigsetjmp14
+#else
+#define setjmp_symname setjmp
+#define sigsetjmp_symname sigsetjmp
+#endif
+
+#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
+#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
+#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
+#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
+
+#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
+#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
+
+// Not called. Merely to satisfy TSAN_INTERCEPT().
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int TSAN_INTERCEPTOR_SETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+// FIXME: any reason to have a separate declaration?
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor__setjmp(void *env);
+extern "C" int __interceptor__setjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+#if !SANITIZER_NETBSD
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor___sigsetjmp(void *env);
+extern "C" int __interceptor___sigsetjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+#endif
+
+extern "C" int setjmp_symname(void *env);
+extern "C" int _setjmp(void *env);
+extern "C" int sigsetjmp_symname(void *env);
+#if !SANITIZER_NETBSD
+extern "C" int __sigsetjmp(void *env);
+#endif
+DEFINE_REAL(int, setjmp_symname, void *env)
+DEFINE_REAL(int, _setjmp, void *env)
+DEFINE_REAL(int, sigsetjmp_symname, void *env)
+#if !SANITIZER_NETBSD
+DEFINE_REAL(int, __sigsetjmp, void *env)
+#endif
+#endif // SANITIZER_MAC
+
+#if SANITIZER_NETBSD
+#define longjmp_symname __longjmp14
+#define siglongjmp_symname __siglongjmp14
+#else
+#define longjmp_symname longjmp
+#define siglongjmp_symname siglongjmp
+#endif
+
+TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
+ // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
+ // bad things will happen. We will jump over ScopedInterceptor dtor and can
+ // leave thr->in_ignored_lib set.
+ {
+ SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(longjmp_symname)(env, val);
+}
+
+TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
+ {
+ SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(siglongjmp_symname)(env, val);
+}
+
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
+ {
+ SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(_longjmp)(env, val);
+}
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(void*, malloc, uptr size) {
+ if (in_symbolizer())
+ return InternalAlloc(size);
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(malloc, size);
+ p = user_alloc(thr, pc, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+ return user_memalign(thr, pc, align, sz);
+}
+
+TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
+ if (in_symbolizer())
+ return InternalCalloc(size, n);
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(calloc, size, n);
+ p = user_calloc(thr, pc, size, n);
+ }
+ invoke_malloc_hook(p, n * size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
+ if (in_symbolizer())
+ return InternalRealloc(p, size);
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(realloc, p, size);
+ p = user_realloc(thr, pc, p, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
+ if (in_symbolizer())
+ return InternalReallocArray(p, size, n);
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
+ p = user_reallocarray(thr, pc, p, size, n);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void, free, void *p) {
+ if (p == 0)
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(free, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(void, cfree, void *p) {
+ if (p == 0)
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(cfree, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
+ SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
+ return user_alloc_usable_size(p);
+}
+#endif
+
+TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
+ SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
+ uptr srclen = internal_strlen(src);
+ MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
+ MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
+ return REAL(strcpy)(dst, src);
+}
+
+TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
+ uptr srclen = internal_strnlen(src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
+ return REAL(strncpy)(dst, src, n);
+}
+
+TSAN_INTERCEPTOR(char*, strdup, const char *str) {
+ SCOPED_TSAN_INTERCEPTOR(strdup, str);
+ // strdup will call malloc, so no instrumentation is required here.
+ return REAL(strdup)(str);
+}
+
+// Zero out addr if it points into shadow memory and was provided as a hint
+// only, i.e., MAP_FIXED is not set.
+static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
+ if (*addr) {
+ if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
+ if (flags & MAP_FIXED) {
+ errno = errno_EINVAL;
+ return false;
+ } else {
+ *addr = 0;
+ }
+ }
+ }
+ return true;
+}
+
+template <class Mmap>
+static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
+ void *addr, SIZE_T sz, int prot, int flags,
+ int fd, OFF64_T off) {
+ if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
+ void *res = real_mmap(addr, sz, prot, flags, fd, off);
+ if (res != MAP_FAILED) {
+ if (fd > 0) FdAccess(thr, pc, fd);
+ MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
+ SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+ UnmapShadow(thr, (uptr)addr, sz);
+ int res = REAL(munmap)(addr, sz);
+ return res;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_memalign(thr, pc, align, sz);
+}
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
+#else
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
+ if (in_symbolizer())
+ return InternalAlloc(sz, nullptr, align);
+ SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
+ return user_aligned_alloc(thr, pc, align, sz);
+}
+
+TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
+ if (in_symbolizer())
+ return InternalAlloc(sz, nullptr, GetPageSizeCached());
+ SCOPED_INTERCEPTOR_RAW(valloc, sz);
+ return user_valloc(thr, pc, sz);
+}
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
+ if (in_symbolizer()) {
+ uptr PageSize = GetPageSizeCached();
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return InternalAlloc(sz, nullptr, PageSize);
+ }
+ SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
+ return user_pvalloc(thr, pc, sz);
+}
+#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
+#else
+#define TSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
+ if (in_symbolizer()) {
+ void *p = InternalAlloc(sz, nullptr, align);
+ if (!p)
+ return errno_ENOMEM;
+ *memptr = p;
+ return 0;
+ }
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
+ return user_posix_memalign(thr, pc, memptr, align, sz);
+}
+#endif
+
+// __cxa_guard_acquire and friends need to be intercepted in a special way -
+// regular interceptors will break statically-linked libstdc++. Linux
+// interceptors are especially defined as weak functions (so that they don't
+// cause link errors when user defines them as well). So they silently
+// auto-disable themselves when such symbol is already present in the binary. If
+// we link libstdc++ statically, it will bring own __cxa_guard_acquire which
+// will silently replace our interceptor. That's why on Linux we simply export
+// these interceptors with INTERFACE_ATTRIBUTE.
+// On OS X, we don't support statically linking, so we just use a regular
+// interceptor.
+#if SANITIZER_MAC
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+#else
+#define STDCXX_INTERCEPTOR(rettype, name, ...) \
+ extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
+#endif
+
+// Used in thread-safe function static initialization.
+STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
+ OnPotentiallyBlockingRegionBegin();
+ auto on_exit = at_scope_exit(&OnPotentiallyBlockingRegionEnd);
+ for (;;) {
+ u32 cmp = atomic_load(g, memory_order_acquire);
+ if (cmp == 0) {
+ if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
+ return 1;
+ } else if (cmp == 1) {
+ Acquire(thr, pc, (uptr)g);
+ return 0;
+ } else {
+ internal_sched_yield();
+ }
+ }
+}
+
+STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
+ Release(thr, pc, (uptr)g);
+ atomic_store(g, 1, memory_order_release);
+}
+
+STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
+ atomic_store(g, 0, memory_order_relaxed);
+}
+
+namespace __tsan {
+void DestroyThreadState() {
+ ThreadState *thr = cur_thread();
+ Processor *proc = thr->proc();
+ ThreadFinish(thr);
+ ProcUnwire(proc, thr);
+ ProcDestroy(proc);
+ DTLS_Destroy();
+ cur_thread_finalize();
+}
+
+void PlatformCleanUpThreadState(ThreadState *thr) {
+ ThreadSignalContext *sctx = thr->signal_ctx;
+ if (sctx) {
+ thr->signal_ctx = 0;
+ UnmapOrDie(sctx, sizeof(*sctx));
+ }
+}
+} // namespace __tsan
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
+ (void*)(iter - 1))) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ return;
+ }
+ DestroyThreadState();
+}
+#endif
+
+
+struct ThreadParam {
+ void* (*callback)(void *arg);
+ void *param;
+ atomic_uintptr_t tid;
+};
+
+extern "C" void *__tsan_thread_start_func(void *arg) {
+ ThreadParam *p = (ThreadParam*)arg;
+ void* (*callback)(void *arg) = p->callback;
+ void *param = p->param;
+ int tid = 0;
+ {
+ cur_thread_init();
+ ThreadState *thr = cur_thread();
+ // Thread-local state is not initialized yet.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ ThreadIgnoreBegin(thr, 0);
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
+ (void *)GetPthreadDestructorIterations())) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ ThreadIgnoreEnd(thr, 0);
+#endif
+ while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+ internal_sched_yield();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
+ atomic_store(&p->tid, 0, memory_order_release);
+ }
+ void *res = callback(param);
+ // Prevent the callback from being tail called,
+ // it mixes up stack traces.
+ volatile int foo = 42;
+ foo++;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_create,
+ void *th, void *attr, void *(*callback)(void*), void * param) {
+ SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+
+ MaybeSpawnBackgroundThread();
+
+ if (ctx->after_multithreaded_fork) {
+ if (flags()->die_after_fork) {
+ Report("ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported. Dying (set die_after_fork=0 to override)\n");
+ Die();
+ } else {
+ VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %d). Continuing because of "
+ "die_after_fork=0, but you are on your own\n", internal_getpid());
+ }
+ }
+ __sanitizer_pthread_attr_t myattr;
+ if (attr == 0) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ int detached = 0;
+ REAL(pthread_attr_getdetachstate)(attr, &detached);
+ AdjustStackSize(attr);
+
+ ThreadParam p;
+ p.callback = callback;
+ p.param = param;
+ atomic_store(&p.tid, 0, memory_order_relaxed);
+ int res = -1;
+ {
+ // Otherwise we see false positives in pthread stack manipulation.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, pc);
+ res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ ThreadIgnoreEnd(thr, pc);
+ }
+ if (res == 0) {
+ int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
+ CHECK_NE(tid, 0);
+ // Synchronization on p.tid serves two purposes:
+ // 1. ThreadCreate must finish before the new thread starts.
+ // Otherwise the new thread can call pthread_detach, but the pthread_t
+ // identifier is not yet registered in ThreadRegistry by ThreadCreate.
+ // 2. ThreadStart must finish before this thread continues.
+ // Otherwise, this thread can call pthread_detach and reset thr->sync
+ // before the new thread got a chance to acquire from it in ThreadStart.
+ atomic_store(&p.tid, tid, memory_order_release);
+ while (atomic_load(&p.tid, memory_order_acquire) != 0)
+ internal_sched_yield();
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+ if (res == 0) {
+ ThreadJoin(thr, pc, tid);
+ }
+ return res;
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
+TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+ SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
+ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0) {
+ ThreadDetach(thr, pc, tid);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
+ {
+ SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+ CHECK_EQ(thr, &cur_thread_placeholder);
+#endif
+ }
+ REAL(pthread_exit)(retval);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
+ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(pthread_tryjoin_np)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+ if (res == 0)
+ ThreadJoin(thr, pc, tid);
+ else
+ ThreadNotJoined(thr, pc, tid, (uptr)th);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
+ const struct timespec *abstime) {
+ SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
+ int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
+ ThreadIgnoreEnd(thr, pc);
+ if (res == 0)
+ ThreadJoin(thr, pc, tid);
+ else
+ ThreadNotJoined(thr, pc, tid, (uptr)th);
+ return res;
+}
+#endif
+
+// Problem:
+// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
+// pthread_cond_t has different size in the different versions.
+// If call new REAL functions for old pthread_cond_t, they will corrupt memory
+// after pthread_cond_t (old cond is smaller).
+// If we call old REAL functions for new pthread_cond_t, we will lose some
+// functionality (e.g. old functions do not support waiting against
+// CLOCK_REALTIME).
+// Proper handling would require to have 2 versions of interceptors as well.
+// But this is messy, in particular requires linker scripts when sanitizer
+// runtime is linked into a shared library.
+// Instead we assume we don't have dynamic libraries built against old
+// pthread (2.2.5 is dated by 2002). And provide legacy_pthread_cond flag
+// that allows to work with old libraries (but this mode does not support
+// some features, e.g. pthread_condattr_getpshared).
+static void *init_cond(void *c, bool force = false) {
+ // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+ // So we allocate additional memory on the side large enough to hold
+ // any pthread_cond_t object. Always call new REAL functions, but pass
+ // the aux object to them.
+ // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
+ // first word of pthread_cond_t to zero.
+ // It's all relevant only for linux.
+ if (!common_flags()->legacy_pthread_cond)
+ return c;
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (void*)cond;
+ void *newcond = WRAP(malloc)(pthread_cond_t_sz);
+ internal_memset(newcond, 0, pthread_cond_t_sz);
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return newcond;
+ WRAP(free)(newcond);
+ return (void*)cond;
+}
+
+struct CondMutexUnlockCtx {
+ ScopedInterceptor *si;
+ ThreadState *thr;
+ uptr pc;
+ void *m;
+};
+
+static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+ // pthread_cond_wait interceptor has enabled async signal delivery
+ // (see BlockingCall below). Disable async signals since we are running
+ // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
+ // since the thread is cancelled, so we have to manually execute them
+ // (the thread still can run some user code due to pthread_cleanup_push).
+ ThreadSignalContext *ctx = SigCtx(arg->thr);
+ CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
+ // Undo BlockingCall ctor effects.
+ arg->thr->ignore_interceptors--;
+ arg->si->~ScopedInterceptor();
+}
+
+INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ void *cond = init_cond(c, true);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
+ int (*fn)(void *c, void *m, void *abstime), void *c,
+ void *m, void *t) {
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ MutexUnlock(thr, pc, (uptr)m);
+ CondMutexUnlockCtx arg = {si, thr, pc, m};
+ int res = 0;
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cpp.
+ {
+ // Enable signal delivery while the thread is blocked.
+ BlockingCall bc(thr);
+ res = call_pthread_cancel_with_cleanup(
+ fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
+ }
+ if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
+ pthread_cond_wait),
+ cond, m, 0);
+}
+
+INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
+ abstime);
+}
+
+#if SANITIZER_MAC
+INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
+ void *reltime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
+ m, reltime);
+}
+#endif
+
+INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ int res = REAL(pthread_cond_destroy)(cond);
+ if (common_flags()->legacy_pthread_cond) {
+ // Free our aux cond and zero the pointer to not leave dangling pointers.
+ WRAP(free)(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
+ int res = REAL(pthread_mutex_init)(m, a);
+ if (res == 0) {
+ u32 flagz = 0;
+ if (a) {
+ int type = 0;
+ if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
+ if (type == PTHREAD_MUTEX_RECURSIVE ||
+ type == PTHREAD_MUTEX_RECURSIVE_NP)
+ flagz |= MutexFlagWriteReentrant;
+ }
+ MutexCreate(thr, pc, (uptr)m, flagz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
+ int res = REAL(pthread_mutex_destroy)(m);
+ if (res == 0 || res == errno_EBUSY) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
+ int res = REAL(pthread_mutex_trylock)(m);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
+ int res = REAL(pthread_mutex_timedlock)(m, abstime);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
+ int res = REAL(pthread_spin_init)(m, pshared);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
+ int res = REAL(pthread_spin_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_spin_lock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
+ int res = REAL(pthread_spin_trylock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_spin_unlock)(m);
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
+ int res = REAL(pthread_rwlock_init)(m, a);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
+ int res = REAL(pthread_rwlock_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
+ MutexPreReadLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_rdlock)(m);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
+ int res = REAL(pthread_rwlock_tryrdlock)(m);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_wrlock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
+ int res = REAL(pthread_rwlock_trywrlock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
+ MutexReadOrWriteUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_unlock)(m);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_init)(b, a, count);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_destroy)(b);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
+ Release(thr, pc, (uptr)b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_wait)(b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
+ Acquire(thr, pc, (uptr)b);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
+ SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
+ if (o == 0 || f == 0)
+ return errno_EINVAL;
+ atomic_uint32_t *a;
+
+ if (SANITIZER_MAC)
+ a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
+ else if (SANITIZER_NETBSD)
+ a = static_cast<atomic_uint32_t*>
+ ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
+ else
+ a = static_cast<atomic_uint32_t*>(o);
+
+ u32 v = atomic_load(a, memory_order_acquire);
+ if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
+ memory_order_relaxed)) {
+ (*f)();
+ if (!thr->in_ignored_lib)
+ Release(thr, pc, (uptr)o);
+ atomic_store(a, 2, memory_order_release);
+ } else {
+ while (v != 2) {
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_acquire);
+ }
+ if (!thr->in_ignored_lib)
+ Acquire(thr, pc, (uptr)o);
+ }
+ return 0;
+}
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
+
+TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(fstat)(fd, buf);
+#else
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(0, fd, buf);
+#endif
+}
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(0, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
+
+TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(open)(name, flags, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(open64)(name, flags, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
+#else
+#define TSAN_MAYBE_INTERCEPT_OPEN64
+#endif
+
+TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(creat)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(creat64)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_CREAT64
+#endif
+
+TSAN_INTERCEPTOR(int, dup, int oldfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
+ int newfd = REAL(dup)(oldfd);
+ if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
+ FdDup(thr, pc, oldfd, newfd, true);
+ return newfd;
+}
+
+TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
+ int newfd2 = REAL(dup2)(oldfd, newfd);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2, false);
+ return newfd2;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
+ int newfd2 = REAL(dup3)(oldfd, newfd, flags);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2, false);
+ return newfd2;
+}
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
+ int fd = REAL(eventfd)(initval, flags);
+ if (fd >= 0)
+ FdEventCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_EVENTFD
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ fd = REAL(signalfd)(fd, mask, flags);
+ if (fd >= 0)
+ FdSignalCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, inotify_init, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
+ int fd = REAL(inotify_init)(fake);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
+ int fd = REAL(inotify_init1)(flags);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
+#endif
+
+TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
+ SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
+ int fd = REAL(socket)(domain, type, protocol);
+ if (fd >= 0)
+ FdSocketCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
+ SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
+ int res = REAL(socketpair)(domain, type, protocol, fd);
+ if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
+ FdPipeCreate(thr, pc, fd[0], fd[1]);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
+ FdSocketConnecting(thr, pc, fd);
+ int res = REAL(connect)(fd, addr, addrlen);
+ if (res == 0 && fd >= 0)
+ FdSocketConnect(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
+ int res = REAL(bind)(fd, addr, addrlen);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
+ SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
+ int res = REAL(listen)(fd, backlog);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(close)(fd);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, __close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(__close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(__close)(fd);
+}
+#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
+#else
+#define TSAN_MAYBE_INTERCEPT___CLOSE
+#endif
+
+// glibc guts
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
+ SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ int fds[64];
+ int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++) {
+ if (fds[i] > 0)
+ FdClose(thr, pc, fds[i]);
+ }
+ REAL(__res_iclose)(state, free_addr);
+}
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
+#else
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
+#endif
+
+TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
+ SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
+ int res = REAL(pipe)(pipefd);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
+ int res = REAL(pipe2)(pipefd, flags);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, unlink, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(unlink, path);
+ Release(thr, pc, File2addr(path));
+ int res = REAL(unlink)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
+ void *res = REAL(tmpfile)(fake);
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
+ void *res = REAL(tmpfile64)(fake);
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
+#else
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64
+#endif
+
+static void FlushStreams() {
+ // Flushing all the streams here may freeze the process if a child thread is
+ // performing file stream operations at the same time.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
+}
+
+TSAN_INTERCEPTOR(void, abort, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(abort, fake);
+ FlushStreams();
+ REAL(abort)(fake);
+}
+
+TSAN_INTERCEPTOR(int, rmdir, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(rmdir, path);
+ Release(thr, pc, Dir2addr(path));
+ int res = REAL(rmdir)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, closedir, void *dirp) {
+ SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ if (dirp) {
+ int fd = dirfd(dirp);
+ FdClose(thr, pc, fd);
+ }
+ return REAL(closedir)(dirp);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, epoll_create, int size) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
+ int fd = REAL(epoll_create)(size);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
+ int fd = REAL(epoll_create1)(flags);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ if (epfd >= 0 && fd >= 0)
+ FdAccess(thr, pc, fd);
+ if (op == EPOLL_CTL_ADD && epfd >= 0)
+ FdRelease(thr, pc, epfd);
+ int res = REAL(epoll_ctl)(epfd, op, fd, ev);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
+ void *sigmask) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+#define TSAN_MAYBE_INTERCEPT_EPOLL \
+ TSAN_INTERCEPT(epoll_create); \
+ TSAN_INTERCEPT(epoll_create1); \
+ TSAN_INTERCEPT(epoll_ctl); \
+ TSAN_INTERCEPT(epoll_wait); \
+ TSAN_INTERCEPT(epoll_pwait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL
+#endif
+
+// The following functions are intercepted merely to process pending signals.
+// If program blocks signal X, we must deliver the signal before the function
+// returns. Similarly, if program unblocks a signal (or returns from sigsuspend)
+// it's better to deliver the signal straight away.
+TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
+ return REAL(sigsuspend)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigblock, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
+ return REAL(sigblock)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
+ return REAL(sigsetmask)(mask);
+}
+
+TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+ return REAL(pthread_sigmask)(how, set, oldset);
+}
+
+namespace __tsan {
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+ bool sigact, int sig,
+ __sanitizer_siginfo *info, void *uctx) {
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
+ if (acquire)
+ Acquire(thr, 0, (uptr)&sigactions[sig]);
+ // Signals are generally asynchronous, so if we receive a signals when
+ // ignores are enabled we should disable ignores. This is critical for sync
+ // and interceptors, because otherwise we can miss syncronization and report
+ // false races.
+ int ignore_reads_and_writes = thr->ignore_reads_and_writes;
+ int ignore_interceptors = thr->ignore_interceptors;
+ int ignore_sync = thr->ignore_sync;
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = 0;
+ thr->fast_state.ClearIgnoreBit();
+ thr->ignore_interceptors = 0;
+ thr->ignore_sync = 0;
+ }
+ // Ensure that the handler does not spoil errno.
+ const int saved_errno = errno;
+ errno = 99;
+ // This code races with sigaction. Be careful to not read sa_sigaction twice.
+ // Also need to remember pc for reporting before the call,
+ // because the handler can reset it.
+ volatile uptr pc =
+ sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
+ if (pc != sig_dfl && pc != sig_ign) {
+ if (sigact)
+ ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
+ else
+ ((__sanitizer_sighandler_ptr)pc)(sig);
+ }
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = ignore_reads_and_writes;
+ if (ignore_reads_and_writes)
+ thr->fast_state.SetIgnoreBit();
+ thr->ignore_interceptors = ignore_interceptors;
+ thr->ignore_sync = ignore_sync;
+ }
+ // We do not detect errno spoiling for SIGTERM,
+ // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
+ // tsan reports false positive in such case.
+ // It's difficult to properly detect this situation (reraise),
+ // because in async signal processing case (when handler is called directly
+ // from rtl_generic_sighandler) we have not yet received the reraised
+ // signal; and it looks too fragile to intercept all ways to reraise a signal.
+ if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ VarSizeStackTrace stack;
+ // StackTrace::GetNestInstructionPc(pc) is used because return address is
+ // expected, OutputReport() will undo this.
+ ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+ }
+ errno = saved_errno;
+}
+
+void ProcessPendingSignals(ThreadState *thr) {
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 ||
+ atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+ return;
+ atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ internal_sigfillset(&sctx->emptyset);
+ int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
+ CHECK_EQ(res, 0);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
+ &signal->siginfo, &signal->ctx);
+ }
+ }
+ res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
+ CHECK_EQ(res, 0);
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+}
+
+} // namespace __tsan
+
+static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
+ return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+ // If we are sending signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send);
+}
+
+void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
+ __sanitizer_siginfo *info,
+ void *ctx) {
+ cur_thread_init();
+ ThreadState *thr = cur_thread();
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sig < 0 || sig >= kSigCount) {
+ VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+ return;
+ }
+ // Don't mess with synchronous signals.
+ const bool sync = is_sync_signal(sctx, sig);
+ if (sync ||
+ // If we are in blocking function, we can safely process it now
+ // (but check if we are in a recursive interceptor,
+ // i.e. pthread_join()->munmap()).
+ (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
+ atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
+ atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ } else {
+ // Be very conservative with when we do acquire in this case.
+ // It's unsafe to do acquire in async handlers, because ThreadState
+ // can be in inconsistent state.
+ // SIGSYS looks relatively safe -- it's synchronous and can actually
+ // need some global state.
+ bool acq = (sig == SIGSYS);
+ CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+ }
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+ return;
+ }
+
+ if (sctx == 0)
+ return;
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed == false) {
+ signal->armed = true;
+ signal->sigaction = sigact;
+ if (info)
+ internal_memcpy(&signal->siginfo, info, sizeof(*info));
+ if (ctx)
+ internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+ atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
+ }
+}
+
+static void rtl_sighandler(int sig) {
+ rtl_generic_sighandler(false, sig, 0, 0);
+}
+
+static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
+ rtl_generic_sighandler(true, sig, info, ctx);
+}
+
+TSAN_INTERCEPTOR(int, raise, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(raise, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ sctx->int_signal_send = sig;
+ int res = REAL(raise)(sig);
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (pid == (int)internal_getpid()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(kill)(pid, sig);
+ if (pid == (int)internal_getpid()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (tid == pthread_self()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(pthread_kill)(tid, sig);
+ if (tid == pthread_self()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
+ SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
+ // It's intercepted merely to process pending signals.
+ return REAL(gettimeofday)(tv, tz);
+}
+
+TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
+ void *hints, void *rv) {
+ SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
+ // We miss atomic synchronization in getaddrinfo,
+ // and can report false race between malloc and free
+ // inside of getaddrinfo. So ignore memory accesses.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(getaddrinfo)(node, service, hints, rv);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, fork, int fake) {
+ if (in_symbolizer())
+ return REAL(fork)(fake);
+ SCOPED_INTERCEPTOR_RAW(fork, fake);
+ ForkBefore(thr, pc);
+ int pid;
+ {
+ // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
+ // we'll assert in CheckNoLocks() unless we ignore interceptors.
+ ScopedIgnoreInterceptors ignore;
+ pid = REAL(fork)(fake);
+ }
+ if (pid == 0) {
+ // child
+ ForkChildAfter(thr, pc);
+ FdOnFork(thr, pc);
+ } else if (pid > 0) {
+ // parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
+ }
+ return pid;
+}
+
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+ // Some programs (e.g. openjdk) call close for all file descriptors
+ // in the child process. Under tsan it leads to false positives, because
+ // address space is shared, so the parent process also thinks that
+ // the descriptors are closed (while they are actually not).
+ // This leads to false positives due to missed synchronization.
+ // Strictly saying this is undefined behavior, because vfork child is not
+ // allowed to call any functions other than exec/exit. But this is what
+ // openjdk does, so we want to handle it.
+ // We could disable interceptors in the child process. But it's not possible
+ // to simply intercept and wrap vfork, because vfork child is not allowed
+ // to return from the function that calls vfork, and that's exactly what
+ // we would do. So this would require some assembly trickery as well.
+ // Instead we simply turn vfork into fork.
+ return WRAP(fork)(fake);
+}
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data);
+struct dl_iterate_phdr_data {
+ ThreadState *thr;
+ uptr pc;
+ dl_iterate_phdr_cb_t cb;
+ void *data;
+};
+
+static bool IsAppNotRodata(uptr addr) {
+ return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
+}
+
+static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data) {
+ dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
+ // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
+ // accessible in dl_iterate_phdr callback. But we don't see synchronization
+ // inside of dynamic linker, so we "unpoison" it here in order to not
+ // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
+ // because some libc functions call __libc_dlopen.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ int res = cbdata->cb(info, size, cbdata->data);
+ // Perform the check one more time in case info->dlpi_name was overwritten
+ // by user callback.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
+ SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
+ dl_iterate_phdr_data cbdata;
+ cbdata.thr = thr;
+ cbdata.pc = pc;
+ cbdata.cb = cb;
+ cbdata.data = data;
+ int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
+ return res;
+}
+#endif
+
+static int OnExit(ThreadState *thr) {
+ int status = Finalize(thr);
+ FlushStreams();
+ return status;
+}
+
+struct TsanInterceptorContext {
+ ThreadState *thr;
+ const uptr caller_pc;
+ const uptr pc;
+};
+
+#if !SANITIZER_MAC
+static void HandleRecvmsg(ThreadState *thr, uptr pc,
+ __sanitizer_msghdr *msg) {
+ int fds[64];
+ int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++)
+ FdEventCreate(thr, pc, fds[i]);
+}
+#endif
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+// Causes interceptor recursion (getaddrinfo() and fopen())
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+// We define our own.
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define NEED_TLS_GET_ADDR
+#endif
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ INTERCEPT_FUNCTION_VER(name, ver)
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+ true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
+ ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
+ false)
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ if (path) \
+ Acquire(thr, pc, File2addr(path)); \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdFileCreate(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ libignore()->OnLibraryLoaded(filename)
+
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ libignore()->OnLibraryUnloaded()
+
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
+ Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
+
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
+ FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
+
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
+
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
+
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
+
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
+ OnExit(((TsanInterceptorContext *) ctx)->thr)
+
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
+ MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
+ MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
+ MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
+ MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
+ MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
+ off) \
+ do { \
+ return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
+ off); \
+ } while (false)
+
+#if !SANITIZER_MAC
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
+ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, msg)
+#endif
+
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (TsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
+
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old);
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h);
+
+#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
+ { return sigaction_impl(signo, act, oldact); }
+
+#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
+ { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
+
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old) {
+ // Note: if we call REAL(sigaction) directly for any reason without proxying
+ // the signal handler through rtl_sigaction, very bad things will happen.
+ // The handler will run synchronously and corrupt tsan per-thread state.
+ SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
+ __sanitizer_sigaction old_stored;
+ if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
+ __sanitizer_sigaction newact;
+ if (act) {
+ // Copy act into sigactions[sig].
+ // Can't use struct copy, because compiler can emit call to memcpy.
+ // Can't use internal_memcpy, because it copies byte-by-byte,
+ // and signal handler reads the handler concurrently. It it can read
+ // some bytes from old value and some bytes from new value.
+ // Use volatile to prevent insertion of memcpy.
+ sigactions[sig].handler =
+ *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
+ sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
+ internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
+ sizeof(sigactions[sig].sa_mask));
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+ sigactions[sig].sa_restorer = act->sa_restorer;
+#endif
+ internal_memcpy(&newact, act, sizeof(newact));
+ internal_sigfillset(&newact.sa_mask);
+ if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
+ if (newact.sa_flags & SA_SIGINFO)
+ newact.sigaction = rtl_sigaction;
+ else
+ newact.handler = rtl_sighandler;
+ }
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
+ act = &newact;
+ }
+ int res = REAL(sigaction)(sig, act, old);
+ if (res == 0 && old) {
+ uptr cb = (uptr)old->sigaction;
+ if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
+ internal_memcpy(old, &old_stored, sizeof(*old));
+ }
+ }
+ return res;
+}
+
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h) {
+ __sanitizer_sigaction act;
+ act.handler = h;
+ internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
+ act.sa_flags = 0;
+ __sanitizer_sigaction old;
+ int res = sigaction_symname(sig, &act, &old);
+ if (res) return (__sanitizer_sighandler_ptr)sig_err;
+ return old.handler;
+}
+
+#define TSAN_SYSCALL() \
+ ThreadState *thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return; \
+ ScopedSyscall scoped_syscall(thr) \
+/**/
+
+struct ScopedSyscall {
+ ThreadState *thr;
+
+ explicit ScopedSyscall(ThreadState *thr)
+ : thr(thr) {
+ Initialize(thr);
+ }
+
+ ~ScopedSyscall() {
+ ProcessPendingSignals(thr);
+ }
+};
+
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
+ TSAN_SYSCALL();
+ MemoryAccessRange(thr, pc, p, s, write);
+}
+
+static void syscall_acquire(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ Acquire(thr, pc, addr);
+ DPrintf("syscall_acquire(%p)\n", addr);
+}
+
+static void syscall_release(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_release(%p)\n", addr);
+ Release(thr, pc, addr);
+}
+
+static void syscall_fd_close(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdClose(thr, pc, fd);
+}
+
+static USED void syscall_fd_acquire(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdAcquire(thr, pc, fd);
+ DPrintf("syscall_fd_acquire(%p)\n", fd);
+}
+
+static USED void syscall_fd_release(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_fd_release(%p)\n", fd);
+ FdRelease(thr, pc, fd);
+}
+
+static void syscall_pre_fork(uptr pc) {
+ TSAN_SYSCALL();
+ ForkBefore(thr, pc);
+}
+
+static void syscall_post_fork(uptr pc, int pid) {
+ TSAN_SYSCALL();
+ if (pid == 0) {
+ // child
+ ForkChildAfter(thr, pc);
+ FdOnFork(thr, pc);
+ } else if (pid > 0) {
+ // parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
+ }
+}
+#endif
+
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
+
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
+
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_ACQUIRE(addr) \
+ syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_RELEASE(addr) \
+ syscall_release(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_PRE_FORK() \
+ syscall_pre_fork(GET_CALLER_PC())
+
+#define COMMON_SYSCALL_POST_FORK(res) \
+ syscall_post_fork(GET_CALLER_PC(), res)
+
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+#ifdef NEED_TLS_GET_ADDR
+// Define own interceptor instead of sanitizer_common's for three reasons:
+// 1. It must not process pending signals.
+// Signal handlers may contain MOVDQA instruction (see below).
+// 2. It must be as simple as possible to not contain MOVDQA.
+// 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
+// is empty for tsan (meant only for msan).
+// Note: __tls_get_addr can be called with mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// So the interceptor must work with mis-aligned stack, in particular, does not
+// execute MOVDQA with stack addresses.
+TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *res = REAL(__tls_get_addr)(arg);
+ ThreadState *thr = cur_thread();
+ if (!thr)
+ return res;
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
+ thr->tls_addr + thr->tls_size);
+ if (!dtv)
+ return res;
+ // New DTLS block has been allocated.
+ MemoryResetRange(thr, 0, dtv->beg, dtv->size);
+ return res;
+}
+#endif
+
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _lwp_exit) {
+ SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
+ DestroyThreadState();
+ REAL(_lwp_exit)();
+}
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
+ SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
+ DestroyThreadState();
+ REAL(thr_exit(state));
+}
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
+ void *c)
+
+namespace __tsan {
+
+static void finalize(void *arg) {
+ ThreadState *thr = cur_thread();
+ int status = Finalize(thr);
+ // Make sure the output is not lost.
+ FlushStreams();
+ if (status)
+ Die();
+}
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+static void unreachable() {
+ Report("FATAL: ThreadSanitizer: unreachable called\n");
+ Die();
+}
+#endif
+
+// Define default implementation since interception of libdispatch is optional.
+SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
+
+void InitializeInterceptors() {
+#if !SANITIZER_MAC
+ // We need to setup it early, because functions like dlsym() can call it.
+ REAL(memset) = internal_memset;
+ REAL(memcpy) = internal_memcpy;
+#endif
+
+ // Instruct libc malloc to consume less memory.
+#if SANITIZER_LINUX
+ mallopt(1, 0); // M_MXFAST
+ mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+#endif
+
+ new(interceptor_ctx()) InterceptorContext();
+
+ InitializeCommonInterceptors();
+ InitializeSignalInterceptors();
+ InitializeLibdispatchInterceptors();
+
+#if !SANITIZER_MAC
+ // We can not use TSAN_INTERCEPT to get setjmp addr,
+ // because it does &setjmp and setjmp is not present in some versions of libc.
+ using __interception::InterceptFunction;
+ InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
+ InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
+ InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
+ 0);
+#if !SANITIZER_NETBSD
+ InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+#endif
+#endif
+
+ TSAN_INTERCEPT(longjmp_symname);
+ TSAN_INTERCEPT(siglongjmp_symname);
+#if SANITIZER_NETBSD
+ TSAN_INTERCEPT(_longjmp);
+#endif
+
+ TSAN_INTERCEPT(malloc);
+ TSAN_INTERCEPT(__libc_memalign);
+ TSAN_INTERCEPT(calloc);
+ TSAN_INTERCEPT(realloc);
+ TSAN_INTERCEPT(reallocarray);
+ TSAN_INTERCEPT(free);
+ TSAN_INTERCEPT(cfree);
+ TSAN_INTERCEPT(munmap);
+ TSAN_MAYBE_INTERCEPT_MEMALIGN;
+ TSAN_INTERCEPT(valloc);
+ TSAN_MAYBE_INTERCEPT_PVALLOC;
+ TSAN_INTERCEPT(posix_memalign);
+
+ TSAN_INTERCEPT(strcpy);
+ TSAN_INTERCEPT(strncpy);
+ TSAN_INTERCEPT(strdup);
+
+ TSAN_INTERCEPT(pthread_create);
+ TSAN_INTERCEPT(pthread_join);
+ TSAN_INTERCEPT(pthread_detach);
+ TSAN_INTERCEPT(pthread_exit);
+ #if SANITIZER_LINUX
+ TSAN_INTERCEPT(pthread_tryjoin_np);
+ TSAN_INTERCEPT(pthread_timedjoin_np);
+ #endif
+
+ TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
+
+ TSAN_INTERCEPT(pthread_mutex_init);
+ TSAN_INTERCEPT(pthread_mutex_destroy);
+ TSAN_INTERCEPT(pthread_mutex_trylock);
+ TSAN_INTERCEPT(pthread_mutex_timedlock);
+
+ TSAN_INTERCEPT(pthread_spin_init);
+ TSAN_INTERCEPT(pthread_spin_destroy);
+ TSAN_INTERCEPT(pthread_spin_lock);
+ TSAN_INTERCEPT(pthread_spin_trylock);
+ TSAN_INTERCEPT(pthread_spin_unlock);
+
+ TSAN_INTERCEPT(pthread_rwlock_init);
+ TSAN_INTERCEPT(pthread_rwlock_destroy);
+ TSAN_INTERCEPT(pthread_rwlock_rdlock);
+ TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_wrlock);
+ TSAN_INTERCEPT(pthread_rwlock_trywrlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
+ TSAN_INTERCEPT(pthread_rwlock_unlock);
+
+ TSAN_INTERCEPT(pthread_barrier_init);
+ TSAN_INTERCEPT(pthread_barrier_destroy);
+ TSAN_INTERCEPT(pthread_barrier_wait);
+
+ TSAN_INTERCEPT(pthread_once);
+
+ TSAN_INTERCEPT(fstat);
+ TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT64;
+ TSAN_MAYBE_INTERCEPT___FXSTAT64;
+ TSAN_INTERCEPT(open);
+ TSAN_MAYBE_INTERCEPT_OPEN64;
+ TSAN_INTERCEPT(creat);
+ TSAN_MAYBE_INTERCEPT_CREAT64;
+ TSAN_INTERCEPT(dup);
+ TSAN_INTERCEPT(dup2);
+ TSAN_INTERCEPT(dup3);
+ TSAN_MAYBE_INTERCEPT_EVENTFD;
+ TSAN_MAYBE_INTERCEPT_SIGNALFD;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
+ TSAN_INTERCEPT(socket);
+ TSAN_INTERCEPT(socketpair);
+ TSAN_INTERCEPT(connect);
+ TSAN_INTERCEPT(bind);
+ TSAN_INTERCEPT(listen);
+ TSAN_MAYBE_INTERCEPT_EPOLL;
+ TSAN_INTERCEPT(close);
+ TSAN_MAYBE_INTERCEPT___CLOSE;
+ TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
+ TSAN_INTERCEPT(pipe);
+ TSAN_INTERCEPT(pipe2);
+
+ TSAN_INTERCEPT(unlink);
+ TSAN_INTERCEPT(tmpfile);
+ TSAN_MAYBE_INTERCEPT_TMPFILE64;
+ TSAN_INTERCEPT(abort);
+ TSAN_INTERCEPT(rmdir);
+ TSAN_INTERCEPT(closedir);
+
+ TSAN_INTERCEPT(sigsuspend);
+ TSAN_INTERCEPT(sigblock);
+ TSAN_INTERCEPT(sigsetmask);
+ TSAN_INTERCEPT(pthread_sigmask);
+ TSAN_INTERCEPT(raise);
+ TSAN_INTERCEPT(kill);
+ TSAN_INTERCEPT(pthread_kill);
+ TSAN_INTERCEPT(sleep);
+ TSAN_INTERCEPT(usleep);
+ TSAN_INTERCEPT(nanosleep);
+ TSAN_INTERCEPT(pause);
+ TSAN_INTERCEPT(gettimeofday);
+ TSAN_INTERCEPT(getaddrinfo);
+
+ TSAN_INTERCEPT(fork);
+ TSAN_INTERCEPT(vfork);
+#if !SANITIZER_ANDROID
+ TSAN_INTERCEPT(dl_iterate_phdr);
+#endif
+ TSAN_MAYBE_INTERCEPT_ON_EXIT;
+ TSAN_INTERCEPT(__cxa_atexit);
+ TSAN_INTERCEPT(_exit);
+
+#ifdef NEED_TLS_GET_ADDR
+ TSAN_INTERCEPT(__tls_get_addr);
+#endif
+
+ TSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ TSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+ // Need to setup it, because interceptors check that the function is resolved.
+ // But atexit is emitted directly into the module, so can't be resolved.
+ REAL(atexit) = (int(*)(void(*)()))unreachable;
+#endif
+
+ if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
+ Printf("ThreadSanitizer: failed to setup atexit callback\n");
+ Die();
+ }
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
+ Printf("ThreadSanitizer: failed to create thread key\n");
+ Die();
+ }
+#endif
+
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
+
+ FdInit();
+}
+
+} // namespace __tsan
+
+// Invisible barrier for tests.
+// There were several unsuccessful iterations for this functionality:
+// 1. Initially it was implemented in user code using
+// REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
+// MacOS. Futexes are linux-specific for this matter.
+// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
+// "as-if synchronized via sleep" messages in reports which failed some
+// output tests.
+// 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
+// visible events, which lead to "failed to restore stack trace" failures.
+// Note that no_sanitize_thread attribute does not turn off atomic interception
+// so attaching it to the function defined in user code does not help.
+// That's why we now have what we have.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
+ if (count >= (1 << 8)) {
+ Printf("barrier_init: count is too large (%d)\n", count);
+ Die();
+ }
+ // 8 lsb is thread count, the remaining are count of entered threads.
+ *barrier = count;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_testonly_barrier_wait(u64 *barrier) {
+ unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
+ unsigned old_epoch = (old >> 8) / (old & 0xff);
+ for (;;) {
+ unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
+ unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
+ if (cur_epoch != old_epoch)
+ return;
+ internal_sched_yield();
+ }
+}
diff --git a/lib/tsan/tsan_interface.cpp b/lib/tsan/tsan_interface.cpp
new file mode 100644
index 0000000000..2b3a0889b7
--- /dev/null
+++ b/lib/tsan/tsan_interface.cpp
@@ -0,0 +1,160 @@
+//===-- tsan_interface.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+typedef u16 uint16_t;
+typedef u32 uint32_t;
+typedef u64 uint64_t;
+
+void __tsan_init() {
+ cur_thread_init();
+ Initialize(cur_thread());
+}
+
+void __tsan_flush_memory() {
+ FlushShadowMemory();
+}
+
+void __tsan_read16(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_write16(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_read16_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_write16_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+}
+
+// __tsan_unaligned_read/write calls are emitted by compiler.
+
+void __tsan_unaligned_read2(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
+}
+
+void __tsan_unaligned_read4(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
+}
+
+void __tsan_unaligned_read8(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
+}
+
+void __tsan_unaligned_read16(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
+}
+
+void __tsan_unaligned_write2(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
+}
+
+void __tsan_unaligned_write4(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
+}
+
+void __tsan_unaligned_write8(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+}
+
+void __tsan_unaligned_write16(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+}
+
+// __sanitizer_unaligned_load/store are for user instrumentation.
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+ __tsan_unaligned_read2(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+ __tsan_unaligned_read4(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+ __tsan_unaligned_read8(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+ __tsan_unaligned_write2(addr);
+ *addr = v;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+ __tsan_unaligned_write4(addr);
+ *addr = v;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+ __tsan_unaligned_write8(addr);
+ *addr = v;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_fiber() {
+ return cur_thread();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_create_fiber(unsigned flags) {
+ return FiberCreate(cur_thread(), CALLERPC, flags);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_destroy_fiber(void *fiber) {
+ FiberDestroy(cur_thread(), CALLERPC, static_cast<ThreadState *>(fiber));
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_switch_to_fiber(void *fiber, unsigned flags) {
+ FiberSwitch(cur_thread(), CALLERPC, static_cast<ThreadState *>(fiber), flags);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_set_fiber_name(void *fiber, const char *name) {
+ ThreadSetName(static_cast<ThreadState *>(fiber), name);
+}
+} // extern "C"
+
+void __tsan_acquire(void *addr) {
+ Acquire(cur_thread(), CALLERPC, (uptr)addr);
+}
+
+void __tsan_release(void *addr) {
+ Release(cur_thread(), CALLERPC, (uptr)addr);
+}
diff --git a/lib/tsan/tsan_interface.h b/lib/tsan/tsan_interface.h
new file mode 100644
index 0000000000..6d7286ca5b
--- /dev/null
+++ b/lib/tsan/tsan_interface.h
@@ -0,0 +1,422 @@
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// The functions declared in this header will be inserted by the instrumentation
+// module.
+// This header can be included by the instrumented program or by TSan tests.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_H
+#define TSAN_INTERFACE_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+using __sanitizer::uptr;
+using __sanitizer::tid_t;
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !SANITIZER_GO
+
+// This function should be called at the very beginning of the process,
+// before any instrumented code is executed and before any call to malloc.
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read2(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read4(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read8(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read16(const void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16_pc(void *addr, void *pc);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16_pc(void *addr, void *pc);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_vptr_update(void **vptr_p, void *new_val);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_read_range(void *addr, unsigned long size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_write_range(void *addr, unsigned long size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_read_range_pc(void *addr, unsigned long size, void *pc); // NOLINT
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_write_range_pc(void *addr, unsigned long size, void *pc); // NOLINT
+
+// User may provide function that would be called right when TSan detects
+// an error. The argument 'report' is an opaque pointer that can be used to
+// gather additional information using other TSan report API functions.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_on_report(void *report);
+
+// If TSan is currently reporting a detected issue on the current thread,
+// returns an opaque pointer to the current report. Otherwise returns NULL.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report();
+
+// Returns a report's description (issue type), number of duplicate issues
+// found, counts of array data (stack traces, memory operations, locations,
+// mutexes, threads, unique thread IDs) and a stack trace of a sleep() call (if
+// one was involved in the issue).
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size);
+
+/// Retrieves the "tag" from a report (for external-race report types). External
+/// races can be associated with a tag which give them more meaning. For example
+/// tag value '1' means "Swift access race". Tag value '0' indicated a plain
+/// external race.
+///
+/// \param report opaque pointer to the current report (obtained as argument in
+/// __tsan_on_report, or from __tsan_get_current_report)
+/// \param [out] tag points to storage that will be filled with the tag value
+///
+/// \returns non-zero value on success, zero on failure
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag);
+
+// Returns information about stack traces included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size);
+
+// Returns information about memory operations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size);
+
+// Returns information about locations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type);
+
+// Returns information about mutexes included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size);
+
+// Returns information about threads included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size);
+
+// Returns information about unique thread IDs included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+
+// Returns the type of the pointer (heap, stack, global, ...) and if possible
+// also the starting address (e.g. of a heap allocation) and size.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+// Returns the allocation stack for a heap pointer.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id);
+
+#endif // SANITIZER_GO
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+namespace __tsan {
+
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16;
+typedef unsigned int a32;
+typedef unsigned long long a64;
+#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// https://github.com/llvm/llvm-project/blob/master/libcxx/include/atomic
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
+
+struct ThreadState;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+} // extern "C"
+
+} // namespace __tsan
+
+#endif // TSAN_INTERFACE_H
diff --git a/lib/tsan/tsan_interface_ann.cpp b/lib/tsan/tsan_interface_ann.cpp
new file mode 100644
index 0000000000..99516d94bb
--- /dev/null
+++ b/lib/tsan/tsan_interface_ann.cpp
@@ -0,0 +1,552 @@
+//===-- tsan_interface_ann.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_interface_ann.h"
+#include "tsan_mutex.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_platform.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+namespace __tsan {
+
+class ScopedAnnotation {
+ public:
+ ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
+ }
+
+ ~ScopedAnnotation() {
+ FuncExit(thr_);
+ CheckNoLocks(thr_);
+ }
+ private:
+ ThreadState *const thr_;
+};
+
+#define SCOPED_ANNOTATION_RET(typ, ret) \
+ if (!flags()->enable_annotations) \
+ return ret; \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = (uptr)__builtin_return_address(0); \
+ StatInc(thr, StatAnnotation); \
+ StatInc(thr, Stat##typ); \
+ ScopedAnnotation sa(thr, __func__, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc; \
+/**/
+
+#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
+
+static const int kMaxDescLen = 128;
+
+struct ExpectRace {
+ ExpectRace *next;
+ ExpectRace *prev;
+ atomic_uintptr_t hitcount;
+ atomic_uintptr_t addcount;
+ uptr addr;
+ uptr size;
+ char *file;
+ int line;
+ char desc[kMaxDescLen];
+};
+
+struct DynamicAnnContext {
+ Mutex mtx;
+ ExpectRace expect;
+ ExpectRace benign;
+
+ DynamicAnnContext()
+ : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
+ }
+};
+
+static DynamicAnnContext *dyn_ann_ctx;
+static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
+
+static void AddExpectRace(ExpectRace *list,
+ char *f, int l, uptr addr, uptr size, char *desc) {
+ ExpectRace *race = list->next;
+ for (; race != list; race = race->next) {
+ if (race->addr == addr && race->size == size) {
+ atomic_store_relaxed(&race->addcount,
+ atomic_load_relaxed(&race->addcount) + 1);
+ return;
+ }
+ }
+ race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
+ race->addr = addr;
+ race->size = size;
+ race->file = f;
+ race->line = l;
+ race->desc[0] = 0;
+ atomic_store_relaxed(&race->hitcount, 0);
+ atomic_store_relaxed(&race->addcount, 1);
+ if (desc) {
+ int i = 0;
+ for (; i < kMaxDescLen - 1 && desc[i]; i++)
+ race->desc[i] = desc[i];
+ race->desc[i] = 0;
+ }
+ race->prev = list;
+ race->next = list->next;
+ race->next->prev = race;
+ list->next = race;
+}
+
+static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ uptr maxbegin = max(race->addr, addr);
+ uptr minend = min(race->addr + race->size, addr + size);
+ if (maxbegin < minend)
+ return race;
+ }
+ return 0;
+}
+
+static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
+ ExpectRace *race = FindRace(list, addr, size);
+ if (race == 0)
+ return false;
+ DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
+ race->desc, race->addr, (int)race->size, race->file, race->line);
+ atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
+ return true;
+}
+
+static void InitList(ExpectRace *list) {
+ list->next = list;
+ list->prev = list;
+}
+
+void InitializeDynamicAnnotations() {
+ dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
+ InitList(&dyn_ann_ctx->expect);
+ InitList(&dyn_ann_ctx->benign);
+}
+
+bool IsExpectedReport(uptr addr, uptr size) {
+ ReadLock lock(&dyn_ann_ctx->mtx);
+ if (CheckContains(&dyn_ann_ctx->expect, addr, size))
+ return true;
+ if (CheckContains(&dyn_ann_ctx->benign, addr, size))
+ return true;
+ return false;
+}
+
+static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
+ int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
+ ExpectRace *list = &dyn_ann_ctx->benign;
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ (*unique_count)++;
+ const uptr cnt = atomic_load_relaxed(&(race->*counter));
+ if (cnt == 0)
+ continue;
+ *hit_count += cnt;
+ uptr i = 0;
+ for (; i < matched->Size(); i++) {
+ ExpectRace *race0 = &(*matched)[i];
+ if (race->line == race0->line
+ && internal_strcmp(race->file, race0->file) == 0
+ && internal_strcmp(race->desc, race0->desc) == 0) {
+ atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
+ break;
+ }
+ }
+ if (i == matched->Size())
+ matched->PushBack(*race);
+ }
+}
+
+void PrintMatchedBenignRaces() {
+ Lock lock(&dyn_ann_ctx->mtx);
+ int unique_count = 0;
+ int hit_count = 0;
+ int add_count = 0;
+ Vector<ExpectRace> hit_matched;
+ CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
+ &ExpectRace::hitcount);
+ Vector<ExpectRace> add_matched;
+ CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
+ &ExpectRace::addcount);
+ if (hit_matched.Size()) {
+ Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
+ hit_count, (int)internal_getpid());
+ for (uptr i = 0; i < hit_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ atomic_load_relaxed(&hit_matched[i].hitcount),
+ hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
+ }
+ }
+ if (hit_matched.Size()) {
+ Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
+ " (pid=%d):\n",
+ add_count, unique_count, (int)internal_getpid());
+ for (uptr i = 0; i < add_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ atomic_load_relaxed(&add_matched[i].addcount),
+ add_matched[i].file, add_matched[i].line, add_matched[i].desc);
+ }
+ }
+}
+
+static void ReportMissedExpectedRace(ExpectRace *race) {
+ Printf("==================\n");
+ Printf("WARNING: ThreadSanitizer: missed expected data race\n");
+ Printf(" %s addr=%zx %s:%d\n",
+ race->desc, race->addr, race->file, race->line);
+ Printf("==================\n");
+}
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+ Release(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+ Acquire(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignal);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+ uptr lock) {
+ SCOPED_ANNOTATION(AnnotateCondVarWait);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreate);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockDestroy);
+ MutexDestroy(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockAcquired);
+ if (is_w)
+ MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
+ else
+ MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockReleased);
+ if (is_w)
+ MutexUnlock(thr, pc, m);
+ else
+ MutexReadUnlock(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateTraceMemory);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushState);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
+ uptr size) {
+ SCOPED_ANNOTATION(AnnotateNewMemory);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateNoOp);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
+ Lock lock(&dyn_ann_ctx->mtx);
+ while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
+ ExpectRace *race = dyn_ann_ctx->expect.next;
+ if (atomic_load_relaxed(&race->hitcount) == 0) {
+ ctx->nmissed_expected++;
+ ReportMissedExpectedRace(race);
+ }
+ race->prev->next = race->next;
+ race->next->prev = race->prev;
+ internal_free(race);
+ }
+}
+
+void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
+ char *f, int l, int enable) {
+ SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
+ // FIXME: Reconsider this functionality later. It may be irrelevant.
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
+ char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQGet(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQGet);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQPut(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQPut);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQDestroy);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQCreate);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateExpectRace(
+ char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateExpectRace);
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->expect,
+ f, l, mem, 1, desc);
+ DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+static void BenignRaceImpl(
+ char *f, int l, uptr mem, uptr size, char *desc) {
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->benign,
+ f, l, mem, size, desc);
+ DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr size, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, size, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateBenignRace(
+ char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRace);
+ BenignRaceImpl(f, l, mem, 1, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
+ ThreadIgnoreSyncBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
+ ThreadIgnoreSyncEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateThreadName(
+ char *f, int l, char *name) {
+ SCOPED_ANNOTATION(AnnotateThreadName);
+ ThreadSetName(thr, name);
+}
+
+// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
+// WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate
+// atomic operations, which should be handled by ThreadSanitizer correctly.
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr sz, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, sz, desc);
+}
+
+int INTERFACE_ATTRIBUTE RunningOnValgrind() {
+ return flags()->running_on_valgrind;
+}
+
+double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
+ return 10.0;
+}
+
+const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
+ if (internal_strcmp(query, "pure_happens_before") == 0)
+ return "1";
+ else
+ return "0";
+}
+
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_create(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_create);
+ MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_destroy(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_destroy);
+ MutexDestroy(thr, pc, (uptr)m, flagz);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
+ if (!(flagz & MutexFlagTryLock)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPreReadLock(thr, pc, (uptr)m);
+ else
+ MutexPreLock(thr, pc, (uptr)m);
+ }
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_lock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+ if (!(flagz & MutexFlagTryLockFailed)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPostReadLock(thr, pc, (uptr)m, flagz);
+ else
+ MutexPostLock(thr, pc, (uptr)m, flagz, rec);
+ }
+}
+
+INTERFACE_ATTRIBUTE
+int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
+ int ret = 0;
+ if (flagz & MutexFlagReadLock) {
+ CHECK(!(flagz & MutexFlagRecursiveUnlock));
+ MutexReadUnlock(thr, pc, (uptr)m);
+ } else {
+ ret = MutexUnlock(thr, pc, (uptr)m, flagz);
+ }
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ return ret;
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_signal);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
+ // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_divert);
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+} // extern "C"
diff --git a/lib/tsan/tsan_interface_ann.h b/lib/tsan/tsan_interface_ann.h
new file mode 100644
index 0000000000..458d61f533
--- /dev/null
+++ b/lib/tsan/tsan_interface_ann.h
@@ -0,0 +1,32 @@
+//===-- tsan_interface_ann.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for dynamic annotations.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ANN_H
+#define TSAN_INTERFACE_ANN_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_acquire(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_release(void *addr);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ANN_H
diff --git a/lib/tsan/tsan_interface_atomic.cpp b/lib/tsan/tsan_interface_atomic.cpp
new file mode 100644
index 0000000000..3f459aff53
--- /dev/null
+++ b/lib/tsan/tsan_interface_atomic.cpp
@@ -0,0 +1,955 @@
+//===-- tsan_interface_atomic.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+// ThreadSanitizer atomic operations are based on C++11/C1x standards.
+// For background see C++11 standard. A slightly older, publicly
+// available draft of the standard (not entirely up-to-date, but close enough
+// for casual browsing) is available here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_flags.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan;
+
+#if !SANITIZER_GO && __TSAN_HAS_INT128
+// Protects emulation of 128-bit atomic operations.
+static StaticSpinMutex mutex128;
+#endif
+
+static bool IsLoadOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_consume
+ || mo == mo_acquire || mo == mo_seq_cst;
+}
+
+static bool IsStoreOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
+}
+
+static bool IsReleaseOrder(morder mo) {
+ return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcquireOrder(morder mo) {
+ return mo == mo_consume || mo == mo_acquire
+ || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcqRelOrder(morder mo) {
+ return mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+template<typename T> T func_xchg(volatile T *v, T op) {
+ T res = __sync_lock_test_and_set(v, op);
+ // __sync_lock_test_and_set does not contain full barrier.
+ __sync_synchronize();
+ return res;
+}
+
+template<typename T> T func_add(volatile T *v, T op) {
+ return __sync_fetch_and_add(v, op);
+}
+
+template<typename T> T func_sub(volatile T *v, T op) {
+ return __sync_fetch_and_sub(v, op);
+}
+
+template<typename T> T func_and(volatile T *v, T op) {
+ return __sync_fetch_and_and(v, op);
+}
+
+template<typename T> T func_or(volatile T *v, T op) {
+ return __sync_fetch_and_or(v, op);
+}
+
+template<typename T> T func_xor(volatile T *v, T op) {
+ return __sync_fetch_and_xor(v, op);
+}
+
+template<typename T> T func_nand(volatile T *v, T op) {
+ // clang does not support __sync_fetch_and_nand.
+ T cmp = *v;
+ for (;;) {
+ T newv = ~(cmp & op);
+ T cur = __sync_val_compare_and_swap(v, cmp, newv);
+ if (cmp == cur)
+ return cmp;
+ cmp = cur;
+ }
+}
+
+template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+ return __sync_val_compare_and_swap(v, cmp, xch);
+}
+
+// clang does not support 128-bit atomic ops.
+// Atomic ops are executed under tsan internal mutex,
+// here we assume that the atomic variables are not accessed
+// from non-instrumented code.
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
+ && __TSAN_HAS_INT128
+a128 func_xchg(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = op;
+ return cmp;
+}
+
+a128 func_add(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp + op;
+ return cmp;
+}
+
+a128 func_sub(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp - op;
+ return cmp;
+}
+
+a128 func_and(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp & op;
+ return cmp;
+}
+
+a128 func_or(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp | op;
+ return cmp;
+}
+
+a128 func_xor(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp ^ op;
+ return cmp;
+}
+
+a128 func_nand(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = ~(cmp & op);
+ return cmp;
+}
+
+a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ SpinMutexLock lock(&mutex128);
+ a128 cur = *v;
+ if (cur == cmp)
+ *v = xch;
+ return cur;
+}
+#endif
+
+template<typename T>
+static int SizeLog() {
+ if (sizeof(T) <= 1)
+ return kSizeLog1;
+ else if (sizeof(T) <= 2)
+ return kSizeLog2;
+ else if (sizeof(T) <= 4)
+ return kSizeLog4;
+ else
+ return kSizeLog8;
+ // For 16-byte atomics we also use 8-byte memory access,
+ // this leads to false negatives only in very obscure cases.
+}
+
+#if !SANITIZER_GO
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+ return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+ return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
+}
+#endif
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+ return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+ return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
+}
+
+static memory_order to_mo(morder mo) {
+ switch (mo) {
+ case mo_relaxed: return memory_order_relaxed;
+ case mo_consume: return memory_order_consume;
+ case mo_acquire: return memory_order_acquire;
+ case mo_release: return memory_order_release;
+ case mo_acq_rel: return memory_order_acq_rel;
+ case mo_seq_cst: return memory_order_seq_cst;
+ }
+ CHECK(0);
+ return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+}
+#endif
+
+template<typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
+ CHECK(IsLoadOrder(mo));
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ if (!IsAcquireOrder(mo)) {
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ return NoTsanAtomicLoad(a, mo);
+ }
+ // Don't create sync object if it does not exist yet. For example, an atomic
+ // pointer is initialized to nullptr and then periodically acquire-loaded.
+ T v = NoTsanAtomicLoad(a, mo);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
+ if (s) {
+ AcquireImpl(thr, pc, &s->clock);
+ // Re-read under sync mutex because we need a consistent snapshot
+ // of the value and the clock we acquire.
+ v = NoTsanAtomicLoad(a, mo);
+ s->mtx.ReadUnlock();
+ }
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ return v;
+}
+
+template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+}
+#endif
+
+template<typename T>
+static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ CHECK(IsStoreOrder(mo));
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ // Strictly saying even relaxed store cuts off release sequence,
+ // so must reset the clock.
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomicStore(a, v, mo);
+ return;
+ }
+ __sync_synchronize();
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ NoTsanAtomicStore(a, v, mo);
+ s->mtx.Unlock();
+}
+
+template<typename T, T (*F)(volatile T *v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ if (mo != mo_relaxed) {
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ if (IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ }
+ v = F(a, v);
+ if (s)
+ s->mtx.Unlock();
+ return v;
+}
+
+template<typename T>
+static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
+ return func_xchg(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
+ return func_add(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
+ return func_sub(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
+ return func_and(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
+ return func_or(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
+ return func_xor(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
+ return func_nand(a, v);
+}
+
+template<typename T>
+static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
+ return true;
+ *c = cur;
+ return false;
+}
+#endif
+
+template<typename T>
+static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomicCAS(a, &c, v, mo, fmo);
+ return c;
+}
+
+template<typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T *c, T v, morder mo, morder fmo) {
+ (void)fmo; // Unused because llvm does not pass it yet.
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ bool write_lock = mo != mo_acquire && mo != mo_consume;
+ if (mo != mo_relaxed) {
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ if (IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ }
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ if (s) {
+ if (write_lock)
+ s->mtx.Unlock();
+ else
+ s->mtx.ReadUnlock();
+ }
+ if (pr == cc)
+ return true;
+ *c = pr;
+ return false;
+}
+
+template<typename T>
+static T AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T c, T v, morder mo, morder fmo) {
+ AtomicCAS(thr, pc, a, &c, v, mo, fmo);
+ return c;
+}
+
+#if !SANITIZER_GO
+static void NoTsanAtomicFence(morder mo) {
+ __sync_synchronize();
+}
+
+static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
+ // FIXME(dvyukov): not implemented.
+ __sync_synchronize();
+}
+#endif
+
+// Interface functions follow.
+#if !SANITIZER_GO
+
+// C/C++
+
+static morder convert_morder(morder mo) {
+ if (flags()->force_seq_cst_atomics)
+ return (morder)mo_seq_cst;
+
+ // Filter out additional memory order flags:
+ // MEMMODEL_SYNC = 1 << 15
+ // __ATOMIC_HLE_ACQUIRE = 1 << 16
+ // __ATOMIC_HLE_RELEASE = 1 << 17
+ //
+ // HLE is an optimization, and we pretend that elision always fails.
+ // MEMMODEL_SYNC is used when lowering __sync_ atomics,
+ // since we use __sync_ atomics for actual atomic operations,
+ // we can safely ignore it as well. It also subtly affects semantics,
+ // but we don't model the difference.
+ return (morder)(mo & 0x7fff);
+}
+
+#define SCOPED_ATOMIC(func, ...) \
+ ThreadState *const thr = cur_thread(); \
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
+ ProcessPendingSignals(thr); \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ } \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ mo = convert_morder(mo); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, a, mo, __func__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
+ }
+ ~ScopedAtomic() {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *thr_;
+};
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo) {
+ char* a = 0;
+ SCOPED_ATOMIC(Fence, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo) {
+}
+} // extern "C"
+
+#else // #if !SANITIZER_GO
+
+// Go
+
+#define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+#define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a32 cur = 0;
+ a32 cmp = *(a32*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
+ *(bool*)(a+16) = (cur == cmp);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a64 cur = 0;
+ a64 cmp = *(a64*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
+ *(bool*)(a+24) = (cur == cmp);
+}
+} // extern "C"
+#endif // #if !SANITIZER_GO
diff --git a/lib/tsan/tsan_interface_inl.h b/lib/tsan/tsan_interface_inl.h
new file mode 100644
index 0000000000..f955ddf992
--- /dev/null
+++ b/lib/tsan/tsan_interface_inl.h
@@ -0,0 +1,132 @@
+//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+void __tsan_read1(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_read2(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_read4(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_read8(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_write1(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_write2(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_write4(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_write8(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_read1_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_read2_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_read4_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_read8_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_write1_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_write2_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_write4_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_write8_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ if (*vptr_p != new_val) {
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
+ }
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
+}
+
+void __tsan_func_entry(void *pc) {
+ FuncEntry(cur_thread(), (uptr)pc);
+}
+
+void __tsan_func_exit() {
+ FuncExit(cur_thread());
+}
+
+void __tsan_ignore_thread_begin() {
+ ThreadIgnoreBegin(cur_thread(), CALLERPC);
+}
+
+void __tsan_ignore_thread_end() {
+ ThreadIgnoreEnd(cur_thread(), CALLERPC);
+}
+
+void __tsan_read_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
+
+void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), (uptr)pc, (uptr)addr, size, false);
+}
+
+void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), (uptr)pc, (uptr)addr, size, true);
+}
diff --git a/lib/tsan/tsan_interface_java.cpp b/lib/tsan/tsan_interface_java.cpp
new file mode 100644
index 0000000000..081c6ff102
--- /dev/null
+++ b/lib/tsan/tsan_interface_java.cpp
@@ -0,0 +1,267 @@
+//===-- tsan_interface_java.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface_java.h"
+#include "tsan_rtl.h"
+#include "tsan_mutex.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+using namespace __tsan;
+
+const jptr kHeapAlignment = 8;
+
+namespace __tsan {
+
+struct JavaContext {
+ const uptr heap_begin;
+ const uptr heap_size;
+
+ JavaContext(jptr heap_begin, jptr heap_size)
+ : heap_begin(heap_begin)
+ , heap_size(heap_size) {
+ }
+};
+
+class ScopedJavaFunc {
+ public:
+ ScopedJavaFunc(ThreadState *thr, uptr pc)
+ : thr_(thr) {
+ Initialize(thr_);
+ FuncEntry(thr, pc);
+ }
+
+ ~ScopedJavaFunc() {
+ FuncExit(thr_);
+ // FIXME(dvyukov): process pending signals.
+ }
+
+ private:
+ ThreadState *thr_;
+};
+
+static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
+static JavaContext *jctx;
+
+} // namespace __tsan
+
+#define SCOPED_JAVA_FUNC(func) \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc; \
+ ScopedJavaFunc scoped(thr, caller_pc); \
+/**/
+
+void __tsan_java_init(jptr heap_begin, jptr heap_size) {
+ SCOPED_JAVA_FUNC(__tsan_java_init);
+ DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
+ CHECK_EQ(jctx, 0);
+ CHECK_GT(heap_begin, 0);
+ CHECK_GT(heap_size, 0);
+ CHECK_EQ(heap_begin % kHeapAlignment, 0);
+ CHECK_EQ(heap_size % kHeapAlignment, 0);
+ CHECK_LT(heap_begin, heap_begin + heap_size);
+ jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
+}
+
+int __tsan_java_fini() {
+ SCOPED_JAVA_FUNC(__tsan_java_fini);
+ DPrintf("#%d: java_fini()\n", thr->tid);
+ CHECK_NE(jctx, 0);
+ // FIXME(dvyukov): this does not call atexit() callbacks.
+ int status = Finalize(thr);
+ DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
+ return status;
+}
+
+void __tsan_java_alloc(jptr ptr, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_alloc);
+ DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(ptr % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(ptr, jctx->heap_begin);
+ CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ OnUserAlloc(thr, pc, ptr, size, false);
+}
+
+void __tsan_java_free(jptr ptr, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_free);
+ DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(ptr % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(ptr, jctx->heap_begin);
+ CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ ctx->metamap.FreeRange(thr->proc(), ptr, size);
+}
+
+void __tsan_java_move(jptr src, jptr dst, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_move);
+ DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(src % kHeapAlignment, 0);
+ CHECK_EQ(dst % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(src, jctx->heap_begin);
+ CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+ CHECK_GE(dst, jctx->heap_begin);
+ CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+ CHECK_NE(dst, src);
+ CHECK_NE(size, 0);
+
+ // Assuming it's not running concurrently with threads that do
+ // memory accesses and mutex operations (stop-the-world phase).
+ ctx->metamap.MoveMemory(src, dst, size);
+
+ // Move shadow.
+ u64 *s = (u64*)MemToShadow(src);
+ u64 *d = (u64*)MemToShadow(dst);
+ u64 *send = (u64*)MemToShadow(src + size);
+ uptr inc = 1;
+ if (dst > src) {
+ s = (u64*)MemToShadow(src + size) - 1;
+ d = (u64*)MemToShadow(dst + size) - 1;
+ send = (u64*)MemToShadow(src) - 1;
+ inc = -1;
+ }
+ for (; s != send; s += inc, d += inc) {
+ *d = *s;
+ *s = 0;
+ }
+}
+
+jptr __tsan_java_find(jptr *from_ptr, jptr to) {
+ SCOPED_JAVA_FUNC(__tsan_java_find);
+ DPrintf("#%d: java_find(&%p, %p)\n", *from_ptr, to);
+ CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ CHECK_EQ(to % kHeapAlignment, 0);
+ CHECK_GE(*from_ptr, jctx->heap_begin);
+ CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
+ MBlock *b = ctx->metamap.GetBlock(from);
+ if (b) {
+ *from_ptr = from;
+ return b->siz;
+ }
+ }
+ return 0;
+}
+
+void __tsan_java_finalize() {
+ SCOPED_JAVA_FUNC(__tsan_java_finalize);
+ DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
+ AcquireGlobal(thr, 0);
+}
+
+void __tsan_java_mutex_lock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
+ DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
+}
+
+void __tsan_java_mutex_unlock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
+ DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_lock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
+ DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
+ MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
+}
+
+void __tsan_java_mutex_read_unlock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
+ DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexReadUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
+ DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ CHECK_GT(rec, 0);
+
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
+}
+
+int __tsan_java_mutex_unlock_rec(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
+ DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
+}
+
+void __tsan_java_acquire(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_acquire);
+ DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Acquire(thr, caller_pc, addr);
+}
+
+void __tsan_java_release(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_release);
+ DPrintf("#%d: java_release(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Release(thr, caller_pc, addr);
+}
+
+void __tsan_java_release_store(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_release);
+ DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ ReleaseStore(thr, caller_pc, addr);
+}
diff --git a/lib/tsan/tsan_interface_java.h b/lib/tsan/tsan_interface_java.h
new file mode 100644
index 0000000000..51b445251e
--- /dev/null
+++ b/lib/tsan/tsan_interface_java.h
@@ -0,0 +1,99 @@
+//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for verification of Java or mixed Java/C++ programs.
+// The interface is intended to be used from within a JVM and notify TSan
+// about such events like Java locks and GC memory compaction.
+//
+// For plain memory accesses and function entry/exit a JVM is intended to use
+// C++ interfaces: __tsan_readN/writeN and __tsan_func_enter/exit.
+//
+// For volatile memory accesses and atomic operations JVM is intended to use
+// standard atomics API: __tsan_atomicN_load/store/etc.
+//
+// For usage examples see lit_tests/java_*.cpp
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_JAVA_H
+#define TSAN_INTERFACE_JAVA_H
+
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long jptr;
+
+// Must be called before any other callback from Java.
+void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE;
+// Must be called when the application exits.
+// Not necessary the last callback (concurrently running threads are OK).
+// Returns exit status or 0 if tsan does not want to override it.
+int __tsan_java_fini() INTERFACE_ATTRIBUTE;
+
+// Callback for memory allocations.
+// May be omitted for allocations that are not subject to data races
+// nor contain synchronization objects (e.g. String).
+void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory free.
+// Can be aggregated for several objects (preferably).
+void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory move by GC.
+// Can be aggregated for several objects (preferably).
+// The ranges can overlap.
+void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures necessary synchronization between
+// java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
+// Finds the first allocated memory block in the [*from_ptr, to) range, saves
+// its address in *from_ptr and returns its size. Returns 0 if there are no
+// allocated memory blocks in the range.
+jptr __tsan_java_find(jptr *from_ptr, jptr to) INTERFACE_ATTRIBUTE;
+
+// Mutex lock.
+// Addr is any unique address associated with the mutex.
+// Can be called on recursive reentry.
+void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex unlock.
+void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read lock.
+void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read unlock.
+void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Recursive mutex lock, intended for handling of Object.wait().
+// The 'rec' value must be obtained from the previous
+// __tsan_java_mutex_unlock_rec().
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE;
+// Recursive mutex unlock, intended for handling of Object.wait().
+// The return value says how many times this thread called lock()
+// w/o a pairing unlock() (i.e. how many recursive levels it unlocked).
+// It must be passed back to __tsan_java_mutex_lock_rec() to restore
+// the same recursion level.
+int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
+
+// Raw acquire/release primitives.
+// Can be used to establish happens-before edges on volatile/final fields,
+// in atomic operations, etc. release_store is the same as release, but it
+// breaks release sequence on addr (see C++ standard 1.10/7 for details).
+void __tsan_java_acquire(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release_store(jptr addr) INTERFACE_ATTRIBUTE;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#undef INTERFACE_ATTRIBUTE
+
+#endif // #ifndef TSAN_INTERFACE_JAVA_H
diff --git a/lib/tsan/tsan_malloc_mac.cpp b/lib/tsan/tsan_malloc_mac.cpp
new file mode 100644
index 0000000000..0e861bf1f9
--- /dev/null
+++ b/lib/tsan/tsan_malloc_mac.cpp
@@ -0,0 +1,71 @@
+//===-- tsan_malloc_mac.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_errno.h"
+#include "tsan_interceptors.h"
+#include "tsan_stack_trace.h"
+
+using namespace __tsan;
+#define COMMON_MALLOC_ZONE_NAME "tsan"
+#define COMMON_MALLOC_ENTER()
+#define COMMON_MALLOC_SANITIZER_INITIALIZED (cur_thread()->is_inited)
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ void *p = \
+ user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
+#define COMMON_MALLOC_MALLOC(size) \
+ if (in_symbolizer()) return InternalAlloc(size); \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
+ void *p = user_alloc(thr, pc, size)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ if (in_symbolizer()) return InternalRealloc(ptr, size); \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+ void *p = user_realloc(thr, pc, ptr, size)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ if (in_symbolizer()) return InternalCalloc(count, size); \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+ void *p = user_calloc(thr, pc, size, count)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ if (in_symbolizer()) { \
+ void *p = InternalAlloc(size, nullptr, alignment); \
+ if (!p) return errno_ENOMEM; \
+ *memptr = p; \
+ return 0; \
+ } \
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, alignment, size); \
+ int res = user_posix_memalign(thr, pc, memptr, alignment, size);
+#define COMMON_MALLOC_VALLOC(size) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size, nullptr, GetPageSizeCached()); \
+ SCOPED_INTERCEPTOR_RAW(valloc, size); \
+ void *p = user_valloc(thr, pc, size)
+#define COMMON_MALLOC_FREE(ptr) \
+ if (in_symbolizer()) return InternalFree(ptr); \
+ SCOPED_INTERCEPTOR_RAW(free, ptr); \
+ user_free(thr, pc, ptr)
+#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __tsan
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif
diff --git a/lib/tsan/tsan_md5.cpp b/lib/tsan/tsan_md5.cpp
new file mode 100644
index 0000000000..72857b773f
--- /dev/null
+++ b/lib/tsan/tsan_md5.cpp
@@ -0,0 +1,250 @@
+//===-- tsan_md5.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+
+#define STEP(f, a, b, c, d, x, t, s) \
+ (a) += f((b), (c), (d)) + (x) + (t); \
+ (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
+ (a) += (b);
+
+#define SET(n) \
+ (*(const MD5_u32plus *)&ptr[(n) * 4])
+#define GET(n) \
+ SET(n)
+
+typedef unsigned int MD5_u32plus;
+typedef unsigned long ulong_t;
+
+typedef struct {
+ MD5_u32plus lo, hi;
+ MD5_u32plus a, b, c, d;
+ unsigned char buffer[64];
+ MD5_u32plus block[16];
+} MD5_CTX;
+
+static const void *body(MD5_CTX *ctx, const void *data, ulong_t size) {
+ const unsigned char *ptr = (const unsigned char *)data;
+ MD5_u32plus a, b, c, d;
+ MD5_u32plus saved_a, saved_b, saved_c, saved_d;
+
+ a = ctx->a;
+ b = ctx->b;
+ c = ctx->c;
+ d = ctx->d;
+
+ do {
+ saved_a = a;
+ saved_b = b;
+ saved_c = c;
+ saved_d = d;
+
+ STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
+ STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
+ STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
+ STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
+ STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
+ STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
+ STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
+ STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
+ STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
+ STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
+ STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
+ STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
+ STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
+ STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
+ STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
+ STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
+
+ STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
+ STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
+ STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
+ STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
+ STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
+ STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
+ STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
+ STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
+ STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
+ STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
+ STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
+ STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
+ STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
+ STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
+ STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
+ STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
+
+ STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
+ STEP(H, d, a, b, c, GET(8), 0x8771f681, 11)
+ STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
+ STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23)
+ STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
+ STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11)
+ STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
+ STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23)
+ STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
+ STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11)
+ STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
+ STEP(H, b, c, d, a, GET(6), 0x04881d05, 23)
+ STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
+ STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11)
+ STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
+ STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23)
+
+ STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
+ STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
+ STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
+ STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
+ STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
+ STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
+ STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
+ STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
+ STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
+ STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
+ STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
+ STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
+ STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
+ STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
+ STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
+ STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
+
+ a += saved_a;
+ b += saved_b;
+ c += saved_c;
+ d += saved_d;
+
+ ptr += 64;
+ } while (size -= 64);
+
+ ctx->a = a;
+ ctx->b = b;
+ ctx->c = c;
+ ctx->d = d;
+
+ return ptr;
+}
+
+#undef F
+#undef G
+#undef H
+#undef I
+#undef STEP
+#undef SET
+#undef GET
+
+void MD5_Init(MD5_CTX *ctx) {
+ ctx->a = 0x67452301;
+ ctx->b = 0xefcdab89;
+ ctx->c = 0x98badcfe;
+ ctx->d = 0x10325476;
+
+ ctx->lo = 0;
+ ctx->hi = 0;
+}
+
+void MD5_Update(MD5_CTX *ctx, const void *data, ulong_t size) {
+ MD5_u32plus saved_lo;
+ ulong_t used, free;
+
+ saved_lo = ctx->lo;
+ if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
+ ctx->hi++;
+ ctx->hi += size >> 29;
+
+ used = saved_lo & 0x3f;
+
+ if (used) {
+ free = 64 - used;
+
+ if (size < free) {
+ internal_memcpy(&ctx->buffer[used], data, size);
+ return;
+ }
+
+ internal_memcpy(&ctx->buffer[used], data, free);
+ data = (const unsigned char *)data + free;
+ size -= free;
+ body(ctx, ctx->buffer, 64);
+ }
+
+ if (size >= 64) {
+ data = body(ctx, data, size & ~(ulong_t)0x3f);
+ size &= 0x3f;
+ }
+
+ internal_memcpy(ctx->buffer, data, size);
+}
+
+void MD5_Final(unsigned char *result, MD5_CTX *ctx) {
+ ulong_t used, free;
+
+ used = ctx->lo & 0x3f;
+
+ ctx->buffer[used++] = 0x80;
+
+ free = 64 - used;
+
+ if (free < 8) {
+ internal_memset(&ctx->buffer[used], 0, free);
+ body(ctx, ctx->buffer, 64);
+ used = 0;
+ free = 64;
+ }
+
+ internal_memset(&ctx->buffer[used], 0, free - 8);
+
+ ctx->lo <<= 3;
+ ctx->buffer[56] = ctx->lo;
+ ctx->buffer[57] = ctx->lo >> 8;
+ ctx->buffer[58] = ctx->lo >> 16;
+ ctx->buffer[59] = ctx->lo >> 24;
+ ctx->buffer[60] = ctx->hi;
+ ctx->buffer[61] = ctx->hi >> 8;
+ ctx->buffer[62] = ctx->hi >> 16;
+ ctx->buffer[63] = ctx->hi >> 24;
+
+ body(ctx, ctx->buffer, 64);
+
+ result[0] = ctx->a;
+ result[1] = ctx->a >> 8;
+ result[2] = ctx->a >> 16;
+ result[3] = ctx->a >> 24;
+ result[4] = ctx->b;
+ result[5] = ctx->b >> 8;
+ result[6] = ctx->b >> 16;
+ result[7] = ctx->b >> 24;
+ result[8] = ctx->c;
+ result[9] = ctx->c >> 8;
+ result[10] = ctx->c >> 16;
+ result[11] = ctx->c >> 24;
+ result[12] = ctx->d;
+ result[13] = ctx->d >> 8;
+ result[14] = ctx->d >> 16;
+ result[15] = ctx->d >> 24;
+
+ internal_memset(ctx, 0, sizeof(*ctx));
+}
+
+MD5Hash md5_hash(const void *data, uptr size) {
+ MD5Hash res;
+ MD5_CTX ctx;
+ MD5_Init(&ctx);
+ MD5_Update(&ctx, data, size);
+ MD5_Final((unsigned char*)&res.hash[0], &ctx);
+ return res;
+}
+} // namespace __tsan
diff --git a/lib/tsan/tsan_mman.cpp b/lib/tsan/tsan_mman.cpp
new file mode 100644
index 0000000000..743e67bf2f
--- /dev/null
+++ b/lib/tsan/tsan_mman.cpp
@@ -0,0 +1,405 @@
+//===-- tsan_mman.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_report.h"
+#include "tsan_flags.h"
+
+// May be overriden by front-end.
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+
+namespace __tsan {
+
+struct MapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ DontNeedShadowFor(p, size);
+ // Mark the corresponding meta shadow memory as not needed.
+ // Note the block does not contain any meta info at this point
+ // (this happens after free).
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ // Block came from LargeMmapAllocator, so must be large.
+ // We rely on this in the calculations below.
+ CHECK_GE(size, 2 * kPageSize);
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ p += diff;
+ size -= diff;
+ }
+ diff = p + size - RoundDown(p + size, kPageSize);
+ if (diff != 0)
+ size -= diff;
+ uptr p_meta = (uptr)MemToMeta(p);
+ ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
+ }
+};
+
+static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
+Allocator *allocator() {
+ return reinterpret_cast<Allocator*>(&allocator_placeholder);
+}
+
+struct GlobalProc {
+ Mutex mtx;
+ Processor *proc;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
+ , proc(ProcCreate()) {
+ }
+};
+
+static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
+GlobalProc *global_proc() {
+ return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
+}
+
+ScopedGlobalProcessor::ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc())
+ return;
+ // If we don't have a proc, use the global one.
+ // There are currently only two known case where this path is triggered:
+ // __interceptor_free
+ // __nptl_deallocate_tsd
+ // start_thread
+ // clone
+ // and:
+ // ResetRange
+ // __interceptor_munmap
+ // __deallocate_stack
+ // start_thread
+ // clone
+ // Ideally, we destroy thread state (and unwire proc) when a thread actually
+ // exits (i.e. when we join/wait it). Then we would not need the global proc
+ gp->mtx.Lock();
+ ProcWire(gp->proc, thr);
+}
+
+ScopedGlobalProcessor::~ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc() != gp->proc)
+ return;
+ ProcUnwire(gp->proc, thr);
+ gp->mtx.Unlock();
+}
+
+static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
+static uptr max_user_defined_malloc_size;
+
+void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
+ max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
+ ? common_flags()->max_allocation_size_mb
+ << 20
+ : kMaxAllowedMallocSize;
+}
+
+void InitializeAllocatorLate() {
+ new(global_proc()) GlobalProc();
+}
+
+void AllocatorProcStart(Processor *proc) {
+ allocator()->InitCache(&proc->alloc_cache);
+ internal_allocator()->InitCache(&proc->internal_alloc_cache);
+}
+
+void AllocatorProcFinish(Processor *proc) {
+ allocator()->DestroyCache(&proc->alloc_cache);
+ internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
+}
+
+void AllocatorPrintStats() {
+ allocator()->PrintStats();
+}
+
+static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
+ if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
+ !flags()->report_signal_unsafe)
+ return;
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
+ if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeSignalUnsafe);
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+}
+
+
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+ bool signal) {
+ if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
+ sz > max_user_defined_malloc_size) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ uptr malloc_limit =
+ Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
+ }
+ void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportOutOfMemory(sz, &stack);
+ }
+ if (ctx && ctx->initialized)
+ OnUserAlloc(thr, pc, (uptr)p, sz, true);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+ return p;
+}
+
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ ScopedGlobalProcessor sgp;
+ if (ctx && ctx->initialized)
+ OnUserFree(thr, pc, (uptr)p, true);
+ allocator()->Deallocate(&thr->proc()->alloc_cache, p);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+}
+
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+ if (AllocatorMayReturnNull())
+ return SetErrnoOnNull(nullptr);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportCallocOverflow(n, size, &stack);
+ }
+ void *p = user_alloc_internal(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return SetErrnoOnNull(p);
+}
+
+void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+ if (AllocatorMayReturnNull())
+ return SetErrnoOnNull(nullptr);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportReallocArrayOverflow(size, n, &stack);
+ }
+ return user_realloc(thr, pc, p, size * n);
+}
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ CHECK_NE(p, (void*)0);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
+ DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeFreed(thr, pc, (uptr)p, sz);
+}
+
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
+ // FIXME: Handle "shrinking" more efficiently,
+ // it seems that some software actually does this.
+ if (!p)
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+ if (!sz) {
+ user_free(thr, pc, p);
+ return nullptr;
+ }
+ void *new_p = user_alloc_internal(thr, pc, sz);
+ if (new_p) {
+ uptr old_sz = user_alloc_usable_size(p);
+ internal_memcpy(new_p, p, min(old_sz, sz));
+ user_free(thr, pc, p);
+ }
+ return SetErrnoOnNull(new_p);
+}
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!IsPowerOfTwo(align))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAllocationAlignment(align, &stack);
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidPosixMemalignAlignment(align, &stack);
+ }
+ void *ptr = user_alloc_internal(thr, pc, sz, align);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by user_alloc_internal.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, align));
+ *memptr = ptr;
+ return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAlignedAllocAlignment(sz, align, &stack);
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportPvallocOverflow(sz, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
+}
+
+uptr user_alloc_usable_size(const void *p) {
+ if (p == 0)
+ return 0;
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
+}
+
+void invoke_malloc_hook(void *ptr, uptr size) {
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+ return;
+ __sanitizer_malloc_hook(ptr, size);
+ RunMallocHooks(ptr, size);
+}
+
+void invoke_free_hook(void *ptr) {
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+ return;
+ __sanitizer_free_hook(ptr);
+ RunFreeHooks(ptr);
+}
+
+void *internal_alloc(MBlockType typ, uptr sz) {
+ ThreadState *thr = cur_thread();
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
+}
+
+void internal_free(void *p) {
+ ThreadState *thr = cur_thread();
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ InternalFree(p, &thr->proc()->internal_alloc_cache);
+}
+
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+ return allocator()->GetBlockBegin(p) != 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return user_alloc_usable_size(p);
+}
+
+void __tsan_on_thread_idle() {
+ ThreadState *thr = cur_thread();
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+ allocator()->SwallowCache(&thr->proc()->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
+ ctx->metamap.OnProcIdle(thr->proc());
+}
+} // extern "C"
diff --git a/lib/tsan/tsan_mman.h b/lib/tsan/tsan_mman.h
new file mode 100644
index 0000000000..a5280d4472
--- /dev/null
+++ b/lib/tsan/tsan_mman.h
@@ -0,0 +1,89 @@
+//===-- tsan_mman.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MMAN_H
+#define TSAN_MMAN_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+const uptr kDefaultAlignment = 16;
+
+void InitializeAllocator();
+void InitializeAllocatorLate();
+void ReplaceSystemMalloc();
+void AllocatorProcStart(Processor *proc);
+void AllocatorProcFinish(Processor *proc);
+void AllocatorPrintStats();
+
+// For user allocations.
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
+ uptr align = kDefaultAlignment, bool signal = true);
+// Does not accept NULL.
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+// Interceptor implementations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
+void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr sz, uptr n);
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz);
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
+uptr user_alloc_usable_size(const void *p);
+
+// Invoking malloc/free hooks that may be installed by the user.
+void invoke_malloc_hook(void *ptr, uptr size);
+void invoke_free_hook(void *ptr);
+
+enum MBlockType {
+ MBlockScopedBuf,
+ MBlockString,
+ MBlockStackTrace,
+ MBlockShadowStack,
+ MBlockSync,
+ MBlockClock,
+ MBlockThreadContex,
+ MBlockDeadInfo,
+ MBlockRacyStacks,
+ MBlockRacyAddresses,
+ MBlockAtExit,
+ MBlockFlag,
+ MBlockReport,
+ MBlockReportMop,
+ MBlockReportThread,
+ MBlockReportMutex,
+ MBlockReportLoc,
+ MBlockReportStack,
+ MBlockSuppression,
+ MBlockExpectRace,
+ MBlockSignal,
+ MBlockJmpBuf,
+
+ // This must be the last.
+ MBlockTypeCount
+};
+
+// For internal data structures.
+void *internal_alloc(MBlockType typ, uptr sz);
+void internal_free(void *p);
+
+template <typename T>
+void DestroyAndFree(T *p) {
+ p->~T();
+ internal_free(p);
+}
+
+} // namespace __tsan
+#endif // TSAN_MMAN_H
diff --git a/lib/tsan/tsan_mutex.cpp b/lib/tsan/tsan_mutex.cpp
new file mode 100644
index 0000000000..7a0918f2a2
--- /dev/null
+++ b/lib/tsan/tsan_mutex.cpp
@@ -0,0 +1,289 @@
+//===-- tsan_mutex.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_mutex.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+// Simple reader-writer spin-mutex. Optimized for not-so-contended case.
+// Readers have preference, can possibly starvate writers.
+
+// The table fixes what mutexes can be locked under what mutexes.
+// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
+// then Report mutex can be locked while under Threads mutex.
+// The leaf mutexes can be locked under any other mutexes.
+// Recursive locking is not supported.
+#if SANITIZER_DEBUG && !SANITIZER_GO
+const MutexType MutexTypeLeaf = (MutexType)-1;
+static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
+ /*0 MutexTypeInvalid*/ {},
+ /*1 MutexTypeTrace*/ {MutexTypeLeaf},
+ /*2 MutexTypeThreads*/ {MutexTypeReport},
+ /*3 MutexTypeReport*/ {MutexTypeSyncVar,
+ MutexTypeMBlock, MutexTypeJavaMBlock},
+ /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
+ /*5 MutexTypeSyncTab*/ {}, // unused
+ /*6 MutexTypeSlab*/ {MutexTypeLeaf},
+ /*7 MutexTypeAnnotations*/ {},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
+ /*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
+ /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
+ /*11 MutexTypeDDetector*/ {},
+ /*12 MutexTypeFired*/ {MutexTypeLeaf},
+ /*13 MutexTypeRacy*/ {MutexTypeLeaf},
+ /*14 MutexTypeGlobalProc*/ {},
+};
+
+static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+#endif
+
+void InitializeMutex() {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ // Build the "can lock" adjacency matrix.
+ // If [i][j]==true, then one can lock mutex j while under mutex i.
+ const int N = MutexTypeCount;
+ int cnt[N] = {};
+ bool leaf[N] = {};
+ for (int i = 1; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ MutexType z = CanLockTab[i][j];
+ if (z == MutexTypeInvalid)
+ continue;
+ if (z == MutexTypeLeaf) {
+ CHECK(!leaf[i]);
+ leaf[i] = true;
+ continue;
+ }
+ CHECK(!CanLockAdj[i][(int)z]);
+ CanLockAdj[i][(int)z] = true;
+ cnt[i]++;
+ }
+ }
+ for (int i = 0; i < N; i++) {
+ CHECK(!leaf[i] || cnt[i] == 0);
+ }
+ // Add leaf mutexes.
+ for (int i = 0; i < N; i++) {
+ if (!leaf[i])
+ continue;
+ for (int j = 0; j < N; j++) {
+ if (i == j || leaf[j] || j == MutexTypeInvalid)
+ continue;
+ CHECK(!CanLockAdj[j][i]);
+ CanLockAdj[j][i] = true;
+ }
+ }
+ // Build the transitive closure.
+ bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ CanLockAdj2[i][j] = CanLockAdj[i][j];
+ }
+ }
+ for (int k = 0; k < N; k++) {
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
+ CanLockAdj2[i][j] = true;
+ }
+ }
+ }
+ }
+#if 0
+ Printf("Can lock graph:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ Printf("%d ", CanLockAdj[i][j]);
+ }
+ Printf("\n");
+ }
+ Printf("Can lock graph closure:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ Printf("%d ", CanLockAdj2[i][j]);
+ }
+ Printf("\n");
+ }
+#endif
+ // Verify that the graph is acyclic.
+ for (int i = 0; i < N; i++) {
+ if (CanLockAdj2[i][i]) {
+ Printf("Mutex %d participates in a cycle\n", i);
+ Die();
+ }
+ }
+#endif
+}
+
+InternalDeadlockDetector::InternalDeadlockDetector() {
+ // Rely on zero initialization because some mutexes can be locked before ctor.
+}
+
+#if SANITIZER_DEBUG && !SANITIZER_GO
+void InternalDeadlockDetector::Lock(MutexType t) {
+ // Printf("LOCK %d @%zu\n", t, seq_ + 1);
+ CHECK_GT(t, MutexTypeInvalid);
+ CHECK_LT(t, MutexTypeCount);
+ u64 max_seq = 0;
+ u64 max_idx = MutexTypeInvalid;
+ for (int i = 0; i != MutexTypeCount; i++) {
+ if (locked_[i] == 0)
+ continue;
+ CHECK_NE(locked_[i], max_seq);
+ if (max_seq < locked_[i]) {
+ max_seq = locked_[i];
+ max_idx = i;
+ }
+ }
+ locked_[t] = ++seq_;
+ if (max_idx == MutexTypeInvalid)
+ return;
+ // Printf(" last %d @%zu\n", max_idx, max_seq);
+ if (!CanLockAdj[max_idx][t]) {
+ Printf("ThreadSanitizer: internal deadlock detected\n");
+ Printf("ThreadSanitizer: can't lock %d while under %zu\n",
+ t, (uptr)max_idx);
+ CHECK(0);
+ }
+}
+
+void InternalDeadlockDetector::Unlock(MutexType t) {
+ // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
+ CHECK(locked_[t]);
+ locked_[t] = 0;
+}
+
+void InternalDeadlockDetector::CheckNoLocks() {
+ for (int i = 0; i != MutexTypeCount; i++) {
+ CHECK_EQ(locked_[i], 0);
+ }
+}
+#endif
+
+void CheckNoLocks(ThreadState *thr) {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ thr->internal_deadlock_detector.CheckNoLocks();
+#endif
+}
+
+const uptr kUnlocked = 0;
+const uptr kWriteLock = 1;
+const uptr kReadLock = 2;
+
+class Backoff {
+ public:
+ Backoff()
+ : iter_() {
+ }
+
+ bool Do() {
+ if (iter_++ < kActiveSpinIters)
+ proc_yield(kActiveSpinCnt);
+ else
+ internal_sched_yield();
+ return true;
+ }
+
+ u64 Contention() const {
+ u64 active = iter_ % kActiveSpinIters;
+ u64 passive = iter_ - active;
+ return active + 10 * passive;
+ }
+
+ private:
+ int iter_;
+ static const int kActiveSpinIters = 10;
+ static const int kActiveSpinCnt = 20;
+};
+
+Mutex::Mutex(MutexType type, StatType stat_type) {
+ CHECK_GT(type, MutexTypeInvalid);
+ CHECK_LT(type, MutexTypeCount);
+#if SANITIZER_DEBUG
+ type_ = type;
+#endif
+#if TSAN_COLLECT_STATS
+ stat_type_ = stat_type;
+#endif
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+}
+
+Mutex::~Mutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+}
+
+void Mutex::Lock() {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
+#endif
+ uptr cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
+ cmp = kUnlocked;
+ if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire)) {
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+ }
+}
+
+void Mutex::Unlock() {
+ uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ (void)prev;
+ DCHECK_NE(prev & kWriteLock, 0);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::ReadLock() {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
+#endif
+ uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0) {
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+}
+
+void Mutex::ReadUnlock() {
+ uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ (void)prev;
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_mutex.h b/lib/tsan/tsan_mutex.h
new file mode 100644
index 0000000000..80fdc6ed57
--- /dev/null
+++ b/lib/tsan/tsan_mutex.h
@@ -0,0 +1,90 @@
+//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEX_H
+#define TSAN_MUTEX_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+enum MutexType {
+ MutexTypeInvalid,
+ MutexTypeTrace,
+ MutexTypeThreads,
+ MutexTypeReport,
+ MutexTypeSyncVar,
+ MutexTypeSyncTab,
+ MutexTypeSlab,
+ MutexTypeAnnotations,
+ MutexTypeAtExit,
+ MutexTypeMBlock,
+ MutexTypeJavaMBlock,
+ MutexTypeDDetector,
+ MutexTypeFired,
+ MutexTypeRacy,
+ MutexTypeGlobalProc,
+
+ // This must be the last.
+ MutexTypeCount
+};
+
+class Mutex {
+ public:
+ explicit Mutex(MutexType type, StatType stat_type);
+ ~Mutex();
+
+ void Lock();
+ void Unlock();
+
+ void ReadLock();
+ void ReadUnlock();
+
+ void CheckLocked();
+
+ private:
+ atomic_uintptr_t state_;
+#if SANITIZER_DEBUG
+ MutexType type_;
+#endif
+#if TSAN_COLLECT_STATS
+ StatType stat_type_;
+#endif
+
+ Mutex(const Mutex&);
+ void operator = (const Mutex&);
+};
+
+typedef GenericScopedLock<Mutex> Lock;
+typedef GenericScopedReadLock<Mutex> ReadLock;
+
+class InternalDeadlockDetector {
+ public:
+ InternalDeadlockDetector();
+ void Lock(MutexType t);
+ void Unlock(MutexType t);
+ void CheckNoLocks();
+ private:
+ u64 seq_;
+ u64 locked_[MutexTypeCount];
+};
+
+void InitializeMutex();
+
+// Checks that the current thread does not hold any runtime locks
+// (e.g. when returning from an interceptor).
+void CheckNoLocks(ThreadState *thr);
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEX_H
diff --git a/lib/tsan/tsan_mutexset.cpp b/lib/tsan/tsan_mutexset.cpp
new file mode 100644
index 0000000000..813fa3bca9
--- /dev/null
+++ b/lib/tsan/tsan_mutexset.cpp
@@ -0,0 +1,88 @@
+//===-- tsan_mutexset.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_mutexset.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+const uptr MutexSet::kMaxSize;
+
+MutexSet::MutexSet() {
+ size_ = 0;
+ internal_memset(&descs_, 0, sizeof(descs_));
+}
+
+void MutexSet::Add(u64 id, bool write, u64 epoch) {
+ // Look up existing mutex with the same id.
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ descs_[i].count++;
+ descs_[i].epoch = epoch;
+ return;
+ }
+ }
+ // On overflow, find the oldest mutex and drop it.
+ if (size_ == kMaxSize) {
+ u64 minepoch = (u64)-1;
+ u64 mini = (u64)-1;
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].epoch < minepoch) {
+ minepoch = descs_[i].epoch;
+ mini = i;
+ }
+ }
+ RemovePos(mini);
+ CHECK_EQ(size_, kMaxSize - 1);
+ }
+ // Add new mutex descriptor.
+ descs_[size_].id = id;
+ descs_[size_].write = write;
+ descs_[size_].epoch = epoch;
+ descs_[size_].count = 1;
+ size_++;
+}
+
+void MutexSet::Del(u64 id, bool write) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ if (--descs_[i].count == 0)
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::Remove(u64 id) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::RemovePos(uptr i) {
+ CHECK_LT(i, size_);
+ descs_[i] = descs_[size_ - 1];
+ size_--;
+}
+
+uptr MutexSet::Size() const {
+ return size_;
+}
+
+MutexSet::Desc MutexSet::Get(uptr i) const {
+ CHECK_LT(i, size_);
+ return descs_[i];
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_mutexset.h b/lib/tsan/tsan_mutexset.h
new file mode 100644
index 0000000000..d63881f402
--- /dev/null
+++ b/lib/tsan/tsan_mutexset.h
@@ -0,0 +1,69 @@
+//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// MutexSet holds the set of mutexes currently held by a thread.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEXSET_H
+#define TSAN_MUTEXSET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class MutexSet {
+ public:
+ // Holds limited number of mutexes.
+ // The oldest mutexes are discarded on overflow.
+ static const uptr kMaxSize = 16;
+ struct Desc {
+ u64 id;
+ u64 epoch;
+ int count;
+ bool write;
+ };
+
+ MutexSet();
+ // The 'id' is obtained from SyncVar::GetId().
+ void Add(u64 id, bool write, u64 epoch);
+ void Del(u64 id, bool write);
+ void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
+ uptr Size() const;
+ Desc Get(uptr i) const;
+
+ void operator=(const MutexSet &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ }
+
+ private:
+#if !SANITIZER_GO
+ uptr size_;
+ Desc descs_[kMaxSize];
+#endif
+
+ void RemovePos(uptr i);
+ MutexSet(const MutexSet&);
+};
+
+// Go does not have mutexes, so do not spend memory and time.
+// (Go sync.Mutex is actually a semaphore -- can be unlocked
+// in different goroutine).
+#if SANITIZER_GO
+MutexSet::MutexSet() {}
+void MutexSet::Add(u64 id, bool write, u64 epoch) {}
+void MutexSet::Del(u64 id, bool write) {}
+void MutexSet::Remove(u64 id) {}
+void MutexSet::RemovePos(uptr i) {}
+uptr MutexSet::Size() const { return 0; }
+MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+#endif
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEXSET_H
diff --git a/lib/tsan/tsan_platform.h b/lib/tsan/tsan_platform.h
new file mode 100644
index 0000000000..7256d64e50
--- /dev/null
+++ b/lib/tsan/tsan_platform.h
@@ -0,0 +1,1028 @@
+//===-- tsan_platform.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Platform-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+
+#if defined(__x86_64__)
+/*
+C/C++ on linux/x86_64 and freebsd/x86_64
+0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
+0040 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 2000 0000 0000: shadow
+2000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5680 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
+7b00 0000 0000 - 7c00 0000 0000: heap
+7c00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+
+C/C++ on netbsd/amd64 can reuse the same mapping:
+ * The address space starts from 0x1000 (option with 0x0) and ends with
+ 0x7f7ffffff000.
+ * LoAppMem-kHeapMemEnd can be reused as it is.
+ * No VDSO support.
+ * No MidAppMem region.
+ * No additional HeapMem region.
+ * HiAppMem contains the stack, loader, shared libraries and heap.
+ * Stack on NetBSD/amd64 has prereserved 128MB.
+ * Heap grows downwards (top-down).
+ * ASLR must be disabled per-process or globally.
+
+*/
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kHeapMemBeg = 0x7b0000000000ull;
+ static const uptr kHeapMemEnd = 0x7c0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x550000000000ull;
+ static const uptr kMidAppMemEnd = 0x568000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull;
+ static const uptr kAppMemMsk = 0x780000000000ull;
+ static const uptr kAppMemXor = 0x040000000000ull;
+ static const uptr kVdsoBeg = 0xf000000000000000ull;
+};
+
+#define TSAN_MID_APP_RANGE 1
+#elif defined(__mips64)
+/*
+C/C++ on linux/mips64 (40-bit VMA)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary (4 GB)
+0200 0000 00 - 2000 0000 00: - (120 GB)
+2000 0000 00 - 4000 0000 00: shadow (128 GB)
+4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
+5000 0000 00 - aa00 0000 00: - (360 GB)
+aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
+ab00 0000 00 - b000 0000 00: - (20 GB)
+b000 0000 00 - b200 0000 00: traces (8 GB)
+b200 0000 00 - fe00 0000 00: - (304 GB)
+fe00 0000 00 - ff00 0000 00: heap (4 GB)
+ff00 0000 00 - ff80 0000 00: - (2 GB)
+ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
+*/
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x5000000000ull;
+ static const uptr kTraceMemBeg = 0xb000000000ull;
+ static const uptr kTraceMemEnd = 0xb200000000ull;
+ static const uptr kShadowBeg = 0x2000000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kHeapMemBeg = 0xfe00000000ull;
+ static const uptr kHeapMemEnd = 0xff00000000ull;
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kMidAppMemBeg = 0xaa00000000ull;
+ static const uptr kMidAppMemEnd = 0xab00000000ull;
+ static const uptr kHiAppMemBeg = 0xff80000000ull;
+ static const uptr kHiAppMemEnd = 0xffffffffffull;
+ static const uptr kAppMemMsk = 0xf800000000ull;
+ static const uptr kAppMemXor = 0x0800000000ull;
+ static const uptr kVdsoBeg = 0xfffff00000ull;
+};
+
+#define TSAN_MID_APP_RANGE 1
+#elif defined(__aarch64__) && defined(__APPLE__)
+/*
+C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
+0200 0000 00 - 0300 0000 00: heap (4 GB)
+0300 0000 00 - 0400 0000 00: - (4 GB)
+0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
+0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
+0e00 0000 00 - 0f00 0000 00: - (4 GB)
+0f00 0000 00 - 0fc0 0000 00: traces (3 GB)
+0fc0 0000 00 - 1000 0000 00: -
+*/
+struct Mapping {
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kHeapMemBeg = 0x0200000000ull;
+ static const uptr kHeapMemEnd = 0x0300000000ull;
+ static const uptr kShadowBeg = 0x0400000000ull;
+ static const uptr kShadowEnd = 0x0c00000000ull;
+ static const uptr kMetaShadowBeg = 0x0d00000000ull;
+ static const uptr kMetaShadowEnd = 0x0e00000000ull;
+ static const uptr kTraceMemBeg = 0x0f00000000ull;
+ static const uptr kTraceMemEnd = 0x0fc0000000ull;
+ static const uptr kHiAppMemBeg = 0x0fc0000000ull;
+ static const uptr kHiAppMemEnd = 0x0fc0000000ull;
+ static const uptr kAppMemMsk = 0x0ull;
+ static const uptr kAppMemXor = 0x0ull;
+ static const uptr kVdsoBeg = 0x7000000000000000ull;
+};
+
+#elif defined(__aarch64__)
+// AArch64 supports multiple VMA which leads to multiple address transformation
+// functions. To support these multiple VMAS transformations and mappings TSAN
+// runtime for AArch64 uses an external memory read (vmaSize) to select which
+// mapping to use. Although slower, it make a same instrumented binary run on
+// multiple kernels.
+
+/*
+C/C++ on linux/aarch64 (39-bit VMA)
+0000 0010 00 - 0100 0000 00: main binary
+0100 0000 00 - 0800 0000 00: -
+0800 0000 00 - 2000 0000 00: shadow memory
+2000 0000 00 - 3100 0000 00: -
+3100 0000 00 - 3400 0000 00: metainfo
+3400 0000 00 - 5500 0000 00: -
+5500 0000 00 - 5600 0000 00: main binary (PIE)
+5600 0000 00 - 6000 0000 00: -
+6000 0000 00 - 6200 0000 00: traces
+6200 0000 00 - 7d00 0000 00: -
+7c00 0000 00 - 7d00 0000 00: heap
+7d00 0000 00 - 7fff ffff ff: modules and main thread stack
+*/
+struct Mapping39 {
+ static const uptr kLoAppMemBeg = 0x0000001000ull;
+ static const uptr kLoAppMemEnd = 0x0100000000ull;
+ static const uptr kShadowBeg = 0x0800000000ull;
+ static const uptr kShadowEnd = 0x2000000000ull;
+ static const uptr kMetaShadowBeg = 0x3100000000ull;
+ static const uptr kMetaShadowEnd = 0x3400000000ull;
+ static const uptr kMidAppMemBeg = 0x5500000000ull;
+ static const uptr kMidAppMemEnd = 0x5600000000ull;
+ static const uptr kTraceMemBeg = 0x6000000000ull;
+ static const uptr kTraceMemEnd = 0x6200000000ull;
+ static const uptr kHeapMemBeg = 0x7c00000000ull;
+ static const uptr kHeapMemEnd = 0x7d00000000ull;
+ static const uptr kHiAppMemBeg = 0x7e00000000ull;
+ static const uptr kHiAppMemEnd = 0x7fffffffffull;
+ static const uptr kAppMemMsk = 0x7800000000ull;
+ static const uptr kAppMemXor = 0x0200000000ull;
+ static const uptr kVdsoBeg = 0x7f00000000ull;
+};
+
+/*
+C/C++ on linux/aarch64 (42-bit VMA)
+00000 0010 00 - 01000 0000 00: main binary
+01000 0000 00 - 10000 0000 00: -
+10000 0000 00 - 20000 0000 00: shadow memory
+20000 0000 00 - 26000 0000 00: -
+26000 0000 00 - 28000 0000 00: metainfo
+28000 0000 00 - 2aa00 0000 00: -
+2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
+2ab00 0000 00 - 36200 0000 00: -
+36200 0000 00 - 36240 0000 00: traces
+36240 0000 00 - 3e000 0000 00: -
+3e000 0000 00 - 3f000 0000 00: heap
+3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
+*/
+struct Mapping42 {
+ static const uptr kLoAppMemBeg = 0x00000001000ull;
+ static const uptr kLoAppMemEnd = 0x01000000000ull;
+ static const uptr kShadowBeg = 0x10000000000ull;
+ static const uptr kShadowEnd = 0x20000000000ull;
+ static const uptr kMetaShadowBeg = 0x26000000000ull;
+ static const uptr kMetaShadowEnd = 0x28000000000ull;
+ static const uptr kMidAppMemBeg = 0x2aa00000000ull;
+ static const uptr kMidAppMemEnd = 0x2ab00000000ull;
+ static const uptr kTraceMemBeg = 0x36200000000ull;
+ static const uptr kTraceMemEnd = 0x36400000000ull;
+ static const uptr kHeapMemBeg = 0x3e000000000ull;
+ static const uptr kHeapMemEnd = 0x3f000000000ull;
+ static const uptr kHiAppMemBeg = 0x3f000000000ull;
+ static const uptr kHiAppMemEnd = 0x3ffffffffffull;
+ static const uptr kAppMemMsk = 0x3c000000000ull;
+ static const uptr kAppMemXor = 0x04000000000ull;
+ static const uptr kVdsoBeg = 0x37f00000000ull;
+};
+
+struct Mapping48 {
+ static const uptr kLoAppMemBeg = 0x0000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0000200000000ull;
+ static const uptr kShadowBeg = 0x0002000000000ull;
+ static const uptr kShadowEnd = 0x0004000000000ull;
+ static const uptr kMetaShadowBeg = 0x0005000000000ull;
+ static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
+ static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
+ static const uptr kTraceMemBeg = 0x0f06000000000ull;
+ static const uptr kTraceMemEnd = 0x0f06200000000ull;
+ static const uptr kHeapMemBeg = 0x0ffff00000000ull;
+ static const uptr kHeapMemEnd = 0x0ffff00000000ull;
+ static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kHiAppMemEnd = 0x1000000000000ull;
+ static const uptr kAppMemMsk = 0x0fff800000000ull;
+ static const uptr kAppMemXor = 0x0000800000000ull;
+ static const uptr kVdsoBeg = 0xffff000000000ull;
+};
+
+// Indicates the runtime will define the memory regions at runtime.
+#define TSAN_RUNTIME_VMA 1
+// Indicates that mapping defines a mid range memory segment.
+#define TSAN_MID_APP_RANGE 1
+#elif defined(__powerpc64__)
+// PPC64 supports multiple VMA which leads to multiple address transformation
+// functions. To support these multiple VMAS transformations and mappings TSAN
+// runtime for PPC64 uses an external memory read (vmaSize) to select which
+// mapping to use. Although slower, it make a same instrumented binary run on
+// multiple kernels.
+
+/*
+C/C++ on linux/powerpc64 (44-bit VMA)
+0000 0000 0100 - 0001 0000 0000: main binary
+0001 0000 0000 - 0001 0000 0000: -
+0001 0000 0000 - 0b00 0000 0000: shadow
+0b00 0000 0000 - 0b00 0000 0000: -
+0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
+0d00 0000 0000 - 0d00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: traces
+0f00 0000 0000 - 0f00 0000 0000: -
+0f00 0000 0000 - 0f50 0000 0000: heap
+0f50 0000 0000 - 0f60 0000 0000: -
+0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
+*/
+struct Mapping44 {
+ static const uptr kMetaShadowBeg = 0x0b0000000000ull;
+ static const uptr kMetaShadowEnd = 0x0d0000000000ull;
+ static const uptr kTraceMemBeg = 0x0d0000000000ull;
+ static const uptr kTraceMemEnd = 0x0f0000000000ull;
+ static const uptr kShadowBeg = 0x000100000000ull;
+ static const uptr kShadowEnd = 0x0b0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000000100ull;
+ static const uptr kLoAppMemEnd = 0x000100000000ull;
+ static const uptr kHeapMemBeg = 0x0f0000000000ull;
+ static const uptr kHeapMemEnd = 0x0f5000000000ull;
+ static const uptr kHiAppMemBeg = 0x0f6000000000ull;
+ static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits
+ static const uptr kAppMemMsk = 0x0f0000000000ull;
+ static const uptr kAppMemXor = 0x002100000000ull;
+ static const uptr kVdsoBeg = 0x3c0000000000000ull;
+};
+
+/*
+C/C++ on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 0200 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 3d00 0000 0000: -
+3d00 0000 0000 - 3e00 0000 0000: heap
+3e00 0000 0000 - 3e80 0000 0000: -
+3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
+*/
+struct Mapping46 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x3d0000000000ull;
+ static const uptr kHeapMemEnd = 0x3e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x3e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits
+ static const uptr kAppMemMsk = 0x3c0000000000ull;
+ static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+};
+
+/*
+C/C++ on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 0200 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
+struct Mapping47 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x7d0000000000ull;
+ static const uptr kHeapMemEnd = 0x7e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull; // 47 bits
+ static const uptr kAppMemMsk = 0x7c0000000000ull;
+ static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+};
+
+// Indicates the runtime will define the memory regions at runtime.
+#define TSAN_RUNTIME_VMA 1
+#endif
+
+#elif SANITIZER_GO && !SANITIZER_WINDOWS && defined(__x86_64__)
+
+/* Go on linux, darwin and freebsd on x86_64
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#elif SANITIZER_GO && SANITIZER_WINDOWS
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0500 0000 0000: shadow
+0500 0000 0000 - 0560 0000 0000: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x076000000000ull;
+ static const uptr kMetaShadowEnd = 0x07d000000000ull;
+ static const uptr kTraceMemBeg = 0x056000000000ull;
+ static const uptr kTraceMemEnd = 0x076000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x050000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#elif SANITIZER_GO && defined(__powerpc64__)
+
+/* Only Mapping46 and Mapping47 are currently supported for powercp64 on Go. */
+
+/* Go on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 2400 0000 0000: -
+2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
+3400 0000 0000 - 3600 0000 0000: -
+3600 0000 0000 - 3800 0000 0000: traces
+3800 0000 0000 - 4000 0000 0000: -
+*/
+
+struct Mapping46 {
+ static const uptr kMetaShadowBeg = 0x240000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x360000000000ull;
+ static const uptr kTraceMemEnd = 0x380000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+/* Go on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#define TSAN_RUNTIME_VMA 1
+
+#elif SANITIZER_GO && defined(__aarch64__)
+
+/* Go on linux/aarch64 (48-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+// Indicates the runtime will define the memory regions at runtime.
+#define TSAN_RUNTIME_VMA 1
+
+#else
+# error "Unknown platform"
+#endif
+
+
+#ifdef TSAN_RUNTIME_VMA
+extern uptr vmaSize;
+#endif
+
+
+enum MappingType {
+ MAPPING_LO_APP_BEG,
+ MAPPING_LO_APP_END,
+ MAPPING_HI_APP_BEG,
+ MAPPING_HI_APP_END,
+#ifdef TSAN_MID_APP_RANGE
+ MAPPING_MID_APP_BEG,
+ MAPPING_MID_APP_END,
+#endif
+ MAPPING_HEAP_BEG,
+ MAPPING_HEAP_END,
+ MAPPING_APP_BEG,
+ MAPPING_APP_END,
+ MAPPING_SHADOW_BEG,
+ MAPPING_SHADOW_END,
+ MAPPING_META_SHADOW_BEG,
+ MAPPING_META_SHADOW_END,
+ MAPPING_TRACE_BEG,
+ MAPPING_TRACE_END,
+ MAPPING_VDSO_BEG,
+};
+
+template<typename Mapping, int Type>
+uptr MappingImpl(void) {
+ switch (Type) {
+#if !SANITIZER_GO
+ case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
+ case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
+# ifdef TSAN_MID_APP_RANGE
+ case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg;
+ case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd;
+# endif
+ case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg;
+ case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd;
+ case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg;
+ case MAPPING_HEAP_END: return Mapping::kHeapMemEnd;
+ case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg;
+#else
+ case MAPPING_APP_BEG: return Mapping::kAppMemBeg;
+ case MAPPING_APP_END: return Mapping::kAppMemEnd;
+#endif
+ case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg;
+ case MAPPING_SHADOW_END: return Mapping::kShadowEnd;
+ case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg;
+ case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd;
+ case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg;
+ case MAPPING_TRACE_END: return Mapping::kTraceMemEnd;
+ }
+}
+
+template<int Type>
+uptr MappingArchImpl(void) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return MappingImpl<Mapping44, Type>();
+#endif
+ case 46: return MappingImpl<Mapping46, Type>();
+ case 47: return MappingImpl<Mapping47, Type>();
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return MappingImpl<Mapping, Type>();
+#endif
+}
+
+#if !SANITIZER_GO
+ALWAYS_INLINE
+uptr LoAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_LO_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr LoAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_LO_APP_END>();
+}
+
+#ifdef TSAN_MID_APP_RANGE
+ALWAYS_INLINE
+uptr MidAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_MID_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr MidAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_MID_APP_END>();
+}
+#endif
+
+ALWAYS_INLINE
+uptr HeapMemBeg(void) {
+ return MappingArchImpl<MAPPING_HEAP_BEG>();
+}
+ALWAYS_INLINE
+uptr HeapMemEnd(void) {
+ return MappingArchImpl<MAPPING_HEAP_END>();
+}
+
+ALWAYS_INLINE
+uptr HiAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_HI_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr HiAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_HI_APP_END>();
+}
+
+ALWAYS_INLINE
+uptr VdsoBeg(void) {
+ return MappingArchImpl<MAPPING_VDSO_BEG>();
+}
+
+#else
+
+ALWAYS_INLINE
+uptr AppMemBeg(void) {
+ return MappingArchImpl<MAPPING_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr AppMemEnd(void) {
+ return MappingArchImpl<MAPPING_APP_END>();
+}
+
+#endif
+
+static inline
+bool GetUserRegion(int i, uptr *start, uptr *end) {
+ switch (i) {
+ default:
+ return false;
+#if !SANITIZER_GO
+ case 0:
+ *start = LoAppMemBeg();
+ *end = LoAppMemEnd();
+ return true;
+ case 1:
+ *start = HiAppMemBeg();
+ *end = HiAppMemEnd();
+ return true;
+ case 2:
+ *start = HeapMemBeg();
+ *end = HeapMemEnd();
+ return true;
+# ifdef TSAN_MID_APP_RANGE
+ case 3:
+ *start = MidAppMemBeg();
+ *end = MidAppMemEnd();
+ return true;
+# endif
+#else
+ case 0:
+ *start = AppMemBeg();
+ *end = AppMemEnd();
+ return true;
+#endif
+ }
+}
+
+ALWAYS_INLINE
+uptr ShadowBeg(void) {
+ return MappingArchImpl<MAPPING_SHADOW_BEG>();
+}
+ALWAYS_INLINE
+uptr ShadowEnd(void) {
+ return MappingArchImpl<MAPPING_SHADOW_END>();
+}
+
+ALWAYS_INLINE
+uptr MetaShadowBeg(void) {
+ return MappingArchImpl<MAPPING_META_SHADOW_BEG>();
+}
+ALWAYS_INLINE
+uptr MetaShadowEnd(void) {
+ return MappingArchImpl<MAPPING_META_SHADOW_END>();
+}
+
+ALWAYS_INLINE
+uptr TraceMemBeg(void) {
+ return MappingArchImpl<MAPPING_TRACE_BEG>();
+}
+ALWAYS_INLINE
+uptr TraceMemEnd(void) {
+ return MappingArchImpl<MAPPING_TRACE_END>();
+}
+
+
+template<typename Mapping>
+bool IsAppMemImpl(uptr mem) {
+#if !SANITIZER_GO
+ return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
+# ifdef TSAN_MID_APP_RANGE
+ (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
+# endif
+ (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
+ (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
+#else
+ return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd;
+#endif
+}
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return IsAppMemImpl<Mapping39>(mem);
+ case 42: return IsAppMemImpl<Mapping42>(mem);
+ case 48: return IsAppMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return IsAppMemImpl<Mapping44>(mem);
+#endif
+ case 46: return IsAppMemImpl<Mapping46>(mem);
+ case 47: return IsAppMemImpl<Mapping47>(mem);
+ }
+ DCHECK(0);
+ return false;
+#else
+ return IsAppMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+bool IsShadowMemImpl(uptr mem) {
+ return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return IsShadowMemImpl<Mapping39>(mem);
+ case 42: return IsShadowMemImpl<Mapping42>(mem);
+ case 48: return IsShadowMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return IsShadowMemImpl<Mapping44>(mem);
+#endif
+ case 46: return IsShadowMemImpl<Mapping46>(mem);
+ case 47: return IsShadowMemImpl<Mapping47>(mem);
+ }
+ DCHECK(0);
+ return false;
+#else
+ return IsShadowMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+bool IsMetaMemImpl(uptr mem) {
+ return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return IsMetaMemImpl<Mapping39>(mem);
+ case 42: return IsMetaMemImpl<Mapping42>(mem);
+ case 48: return IsMetaMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return IsMetaMemImpl<Mapping44>(mem);
+#endif
+ case 46: return IsMetaMemImpl<Mapping46>(mem);
+ case 47: return IsMetaMemImpl<Mapping47>(mem);
+ }
+ DCHECK(0);
+ return false;
+#else
+ return IsMetaMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+uptr MemToShadowImpl(uptr x) {
+ DCHECK(IsAppMem(x));
+#if !SANITIZER_GO
+ return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
+ ^ Mapping::kAppMemXor) * kShadowCnt;
+#else
+# ifndef SANITIZER_WINDOWS
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
+# else
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg;
+# endif
+#endif
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return MemToShadowImpl<Mapping39>(x);
+ case 42: return MemToShadowImpl<Mapping42>(x);
+ case 48: return MemToShadowImpl<Mapping48>(x);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return MemToShadowImpl<Mapping44>(x);
+#endif
+ case 46: return MemToShadowImpl<Mapping46>(x);
+ case 47: return MemToShadowImpl<Mapping47>(x);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return MemToShadowImpl<Mapping>(x);
+#endif
+}
+
+
+template<typename Mapping>
+u32 *MemToMetaImpl(uptr x) {
+ DCHECK(IsAppMem(x));
+#if !SANITIZER_GO
+ return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+#else
+# ifndef SANITIZER_WINDOWS
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+# else
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
+# endif
+#endif
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return MemToMetaImpl<Mapping39>(x);
+ case 42: return MemToMetaImpl<Mapping42>(x);
+ case 48: return MemToMetaImpl<Mapping48>(x);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return MemToMetaImpl<Mapping44>(x);
+#endif
+ case 46: return MemToMetaImpl<Mapping46>(x);
+ case 47: return MemToMetaImpl<Mapping47>(x);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return MemToMetaImpl<Mapping>(x);
+#endif
+}
+
+
+template<typename Mapping>
+uptr ShadowToMemImpl(uptr s) {
+ DCHECK(IsShadowMem(s));
+#if !SANITIZER_GO
+ // The shadow mapping is non-linear and we've lost some bits, so we don't have
+ // an easy way to restore the original app address. But the mapping is a
+ // bijection, so we try to restore the address as belonging to low/mid/high
+ // range consecutively and see if shadow->app->shadow mapping gives us the
+ // same address.
+ uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
+# ifdef TSAN_MID_APP_RANGE
+ p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
+ (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
+ if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
+# endif
+ return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
+#else // #if !SANITIZER_GO
+# ifndef SANITIZER_WINDOWS
+ return (s & ~Mapping::kShadowBeg) / kShadowCnt;
+# else
+ return (s - Mapping::kShadowBeg) / kShadowCnt;
+# endif // SANITIZER_WINDOWS
+#endif
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return ShadowToMemImpl<Mapping39>(s);
+ case 42: return ShadowToMemImpl<Mapping42>(s);
+ case 48: return ShadowToMemImpl<Mapping48>(s);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return ShadowToMemImpl<Mapping44>(s);
+#endif
+ case 46: return ShadowToMemImpl<Mapping46>(s);
+ case 47: return ShadowToMemImpl<Mapping47>(s);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return ShadowToMemImpl<Mapping>(s);
+#endif
+}
+
+
+
+// The additional page is to catch shadow stack overflow as paging fault.
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
+
+template<typename Mapping>
+uptr GetThreadTraceImpl(int tid) {
+ uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
+ return p;
+}
+
+ALWAYS_INLINE
+uptr GetThreadTrace(int tid) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return GetThreadTraceImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceImpl<Mapping48>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return GetThreadTraceImpl<Mapping44>(tid);
+#endif
+ case 46: return GetThreadTraceImpl<Mapping46>(tid);
+ case 47: return GetThreadTraceImpl<Mapping47>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return GetThreadTraceImpl<Mapping>(tid);
+#endif
+}
+
+
+template<typename Mapping>
+uptr GetThreadTraceHeaderImpl(int tid) {
+ uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ + kTraceSize * sizeof(Event);
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
+ return p;
+}
+
+ALWAYS_INLINE
+uptr GetThreadTraceHeader(int tid) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid);
+#endif
+ case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid);
+ case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return GetThreadTraceHeaderImpl<Mapping>(tid);
+#endif
+}
+
+void InitializePlatform();
+void InitializePlatformEarly();
+void CheckAndProtect();
+void InitializeShadowMemoryPlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+int ExtractResolvFDs(void *state, int *fds, int nfd);
+int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
+uptr ExtractLongJmpSp(uptr *env);
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size);
+
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg);
+
+void DestroyThreadState();
+void PlatformCleanUpThreadState(ThreadState *thr);
+
+} // namespace __tsan
+
+#endif // TSAN_PLATFORM_H
diff --git a/lib/tsan/tsan_ppc_regs.h b/lib/tsan/tsan_ppc_regs.h
new file mode 100644
index 0000000000..5b43f3ddad
--- /dev/null
+++ b/lib/tsan/tsan_ppc_regs.h
@@ -0,0 +1,96 @@
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+#define f0 0
+#define f1 1
+#define f2 2
+#define f3 3
+#define f4 4
+#define f5 5
+#define f6 6
+#define f7 7
+#define f8 8
+#define f9 9
+#define f10 10
+#define f11 11
+#define f12 12
+#define f13 13
+#define f14 14
+#define f15 15
+#define f16 16
+#define f17 17
+#define f18 18
+#define f19 19
+#define f20 20
+#define f21 21
+#define f22 22
+#define f23 23
+#define f24 24
+#define f25 25
+#define f26 26
+#define f27 27
+#define f28 28
+#define f29 29
+#define f30 30
+#define f31 31
+#define v0 0
+#define v1 1
+#define v2 2
+#define v3 3
+#define v4 4
+#define v5 5
+#define v6 6
+#define v7 7
+#define v8 8
+#define v9 9
+#define v10 10
+#define v11 11
+#define v12 12
+#define v13 13
+#define v14 14
+#define v15 15
+#define v16 16
+#define v17 17
+#define v18 18
+#define v19 19
+#define v20 20
+#define v21 21
+#define v22 22
+#define v23 23
+#define v24 24
+#define v25 25
+#define v26 26
+#define v27 27
+#define v28 28
+#define v29 29
+#define v30 30
+#define v31 31
diff --git a/lib/tsan/tsan_preinit.cpp b/lib/tsan/tsan_preinit.cpp
new file mode 100644
index 0000000000..205bdbf93b
--- /dev/null
+++ b/lib/tsan/tsan_preinit.cpp
@@ -0,0 +1,26 @@
+//===-- tsan_preinit.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer.
+//
+// Call __tsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interface.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+
+// The symbol is called __local_tsan_preinit, because it's not intended to be
+// exported.
+// This code linked into the main executable when -fsanitize=thread is in
+// the link flags. It can only use exported interface functions.
+__attribute__((section(".preinit_array"), used))
+void (*__local_tsan_preinit)(void) = __tsan_init;
+
+#endif
diff --git a/lib/tsan/tsan_report.cpp b/lib/tsan/tsan_report.cpp
new file mode 100644
index 0000000000..368f1ca8ad
--- /dev/null
+++ b/lib/tsan/tsan_report.cpp
@@ -0,0 +1,486 @@
+//===-- tsan_report.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+
+namespace __tsan {
+
+ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
+
+ReportStack *ReportStack::New() {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
+ return new(mem) ReportStack();
+}
+
+ReportLocation::ReportLocation(ReportLocationType type)
+ : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
+ fd(0), suppressable(false), stack(nullptr) {}
+
+ReportLocation *ReportLocation::New(ReportLocationType type) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
+ return new(mem) ReportLocation(type);
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() { }
+ const char *Access() { return Blue(); }
+ const char *ThreadDescription() { return Cyan(); }
+ const char *Location() { return Green(); }
+ const char *Sleep() { return Yellow(); }
+ const char *Mutex() { return Magenta(); }
+};
+
+ReportDesc::ReportDesc()
+ : tag(kExternalTagNone)
+ , stacks()
+ , mops()
+ , locs()
+ , mutexes()
+ , threads()
+ , unique_tids()
+ , sleep()
+ , count() {
+}
+
+ReportMop::ReportMop()
+ : mset() {
+}
+
+ReportDesc::~ReportDesc() {
+ // FIXME(dvyukov): it must be leaking a lot of memory.
+}
+
+#if !SANITIZER_GO
+
+const int kThreadBufSize = 32;
+const char *thread_name(char *buf, int tid) {
+ if (tid == 0)
+ return "main thread";
+ internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
+ return buf;
+}
+
+static const char *ReportTypeString(ReportType typ, uptr tag) {
+ switch (typ) {
+ case ReportTypeRace:
+ return "data race";
+ case ReportTypeVptrRace:
+ return "data race on vptr (ctor/dtor vs virtual call)";
+ case ReportTypeUseAfterFree:
+ return "heap-use-after-free";
+ case ReportTypeVptrUseAfterFree:
+ return "heap-use-after-free (virtual call vs free)";
+ case ReportTypeExternalRace: {
+ const char *str = GetReportHeaderFromTag(tag);
+ return str ? str : "race on external object";
+ }
+ case ReportTypeThreadLeak:
+ return "thread leak";
+ case ReportTypeMutexDestroyLocked:
+ return "destroy of a locked mutex";
+ case ReportTypeMutexDoubleLock:
+ return "double lock of a mutex";
+ case ReportTypeMutexInvalidAccess:
+ return "use of an invalid mutex (e.g. uninitialized or destroyed)";
+ case ReportTypeMutexBadUnlock:
+ return "unlock of an unlocked mutex (or by a wrong thread)";
+ case ReportTypeMutexBadReadLock:
+ return "read lock of a write locked mutex";
+ case ReportTypeMutexBadReadUnlock:
+ return "read unlock of a write locked mutex";
+ case ReportTypeSignalUnsafe:
+ return "signal-unsafe call inside of a signal";
+ case ReportTypeErrnoInSignal:
+ return "signal handler spoils errno";
+ case ReportTypeDeadlock:
+ return "lock-order-inversion (potential deadlock)";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+#if SANITIZER_MAC
+static const char *const kInterposedFunctionPrefix = "wrap_";
+#else
+static const char *const kInterposedFunctionPrefix = "__interceptor_";
+#endif
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0 || ent->frames == 0) {
+ Printf(" [failed to restore the stack]\n\n");
+ return;
+ }
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
+ InternalScopedString res(2 * GetPageSizeCached());
+ RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
+ Printf("%s\n", res.data());
+ }
+ Printf("\n");
+}
+
+static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
+ for (uptr i = 0; i < mset.Size(); i++) {
+ if (i == 0)
+ Printf(" (mutexes:");
+ const ReportMopMutex m = mset[i];
+ Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+ Printf(i == mset.Size() - 1 ? ")" : ",");
+ }
+}
+
+static const char *MopDesc(bool first, bool write, bool atomic) {
+ return atomic ? (first ? (write ? "Atomic write" : "Atomic read")
+ : (write ? "Previous atomic write" : "Previous atomic read"))
+ : (first ? (write ? "Write" : "Read")
+ : (write ? "Previous write" : "Previous read"));
+}
+
+static const char *ExternalMopDesc(bool first, bool write) {
+ return first ? (write ? "Modifying" : "Read-only")
+ : (write ? "Previous modifying" : "Previous read-only");
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ Printf("%s", d.Access());
+ if (mop->external_tag == kExternalTagNone) {
+ Printf(" %s of size %d at %p by %s",
+ MopDesc(first, mop->write, mop->atomic), mop->size,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ } else {
+ const char *object_type = GetObjectTypeFromTag(mop->external_tag);
+ if (object_type == nullptr)
+ object_type = "external object";
+ Printf(" %s access of %s at %p by %s",
+ ExternalMopDesc(first, mop->write), object_type,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ }
+ PrintMutexSet(mop->mset);
+ Printf(":\n");
+ Printf("%s", d.Default());
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ bool print_stack = false;
+ Printf("%s", d.Location());
+ if (loc->type == ReportLocationGlobal) {
+ const DataInfo &global = loc->global;
+ if (global.size != 0)
+ Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
+ global.name, global.size, global.start,
+ StripModuleName(global.module), global.module_offset);
+ else
+ Printf(" Location is global '%s' at %p (%s+%p)\n\n", global.name,
+ global.start, StripModuleName(global.module),
+ global.module_offset);
+ } else if (loc->type == ReportLocationHeap) {
+ char thrbuf[kThreadBufSize];
+ const char *object_type = GetObjectTypeFromTag(loc->external_tag);
+ if (!object_type) {
+ Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ } else {
+ Printf(" Location is %s of size %zu at %p allocated by %s:\n",
+ object_type, loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ }
+ print_stack = true;
+ } else if (loc->type == ReportLocationStack) {
+ Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationTLS) {
+ Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationFD) {
+ Printf(" Location is file descriptor %d created by %s at:\n",
+ loc->fd, thread_name(thrbuf, loc->tid));
+ print_stack = true;
+ }
+ Printf("%s", d.Default());
+ if (print_stack)
+ PrintStack(loc->stack);
+}
+
+static void PrintMutexShort(const ReportMutex *rm, const char *after) {
+ Decorator d;
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.Default(), after);
+}
+
+static void PrintMutexShortWithAddress(const ReportMutex *rm,
+ const char *after) {
+ Decorator d;
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.Default(), after);
+}
+
+static void PrintMutex(const ReportMutex *rm) {
+ Decorator d;
+ if (rm->destroyed) {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
+ Printf("%s", d.Default());
+ } else {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
+ Printf("%s", d.Default());
+ PrintStack(rm->stack);
+ }
+}
+
+static void PrintThread(const ReportThread *rt) {
+ Decorator d;
+ if (rt->id == 0) // Little sense in describing the main thread.
+ return;
+ Printf("%s", d.ThreadDescription());
+ Printf(" Thread T%d", rt->id);
+ if (rt->name && rt->name[0] != '\0')
+ Printf(" '%s'", rt->name);
+ char thrbuf[kThreadBufSize];
+ const char *thread_status = rt->running ? "running" : "finished";
+ if (rt->thread_type == ThreadType::Worker) {
+ Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
+ Printf("\n");
+ Printf("%s", d.Default());
+ return;
+ }
+ Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
+ thread_name(thrbuf, rt->parent_tid));
+ if (rt->stack)
+ Printf(" at:");
+ Printf("\n");
+ Printf("%s", d.Default());
+ PrintStack(rt->stack);
+}
+
+static void PrintSleep(const ReportStack *s) {
+ Decorator d;
+ Printf("%s", d.Sleep());
+ Printf(" As if synchronized via sleep:\n");
+ Printf("%s", d.Default());
+ PrintStack(s);
+}
+
+static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
+ if (rep->mops.Size())
+ return rep->mops[0]->stack;
+ if (rep->stacks.Size())
+ return rep->stacks[0];
+ if (rep->mutexes.Size())
+ return rep->mutexes[0]->stack;
+ if (rep->threads.Size())
+ return rep->threads[0]->stack;
+ return 0;
+}
+
+static bool FrameIsInternal(const SymbolizedStack *frame) {
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ const char *module = frame->info.module;
+ if (file != 0 &&
+ (internal_strstr(file, "tsan_interceptors_posix.cpp") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_")))
+ return true;
+ if (module != 0 && (internal_strstr(module, "libclang_rt.tsan_")))
+ return true;
+ return false;
+}
+
+static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
+ while (FrameIsInternal(frames) && frames->next)
+ frames = frames->next;
+ return frames;
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Decorator d;
+ Printf("==================\n");
+ const char *rep_typ_str = ReportTypeString(rep->typ, rep->tag);
+ Printf("%s", d.Warning());
+ Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str,
+ (int)internal_getpid());
+ Printf("%s", d.Default());
+
+ if (rep->typ == ReportTypeDeadlock) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Cycle in lock order graph: ");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutexShortWithAddress(rep->mutexes[i], " => ");
+ PrintMutexShort(rep->mutexes[0], "\n\n");
+ CHECK_GT(rep->mutexes.Size(), 0U);
+ CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 2 : 1),
+ rep->stacks.Size());
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()],
+ " acquired here while holding mutex ");
+ PrintMutexShort(rep->mutexes[i], " in ");
+ Printf("%s", d.ThreadDescription());
+ Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
+ Printf("%s", d.Default());
+ if (flags()->second_deadlock_stack) {
+ PrintStack(rep->stacks[2*i]);
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[i],
+ " previously acquired by the same thread here:\n");
+ PrintStack(rep->stacks[2*i+1]);
+ } else {
+ PrintStack(rep->stacks[i]);
+ if (i == 0)
+ Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 "
+ "to get more informative warning message\n\n");
+ }
+ }
+ } else {
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
+ }
+
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+
+ if (rep->sleep)
+ PrintSleep(rep->sleep);
+
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+
+ if (rep->typ != ReportTypeDeadlock) {
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+ }
+
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+
+ if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
+ Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
+
+ if (ReportStack *stack = ChooseSummaryStack(rep)) {
+ if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames))
+ ReportErrorSummary(rep_typ_str, frame->info);
+ }
+
+ if (common_flags()->print_module_map == 2) PrintModuleMap();
+
+ Printf("==================\n");
+}
+
+#else // #if !SANITIZER_GO
+
+const int kMainThreadId = 1;
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0 || ent->frames == 0) {
+ Printf(" [failed to restore the stack]\n");
+ return;
+ }
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame; frame = frame->next, i++) {
+ const AddressInfo &info = frame->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function,
+ StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+ info.line, (void *)info.module_offset);
+ }
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Printf("\n");
+ Printf("%s at %p by ",
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")), mop->addr);
+ if (mop->tid == kMainThreadId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", mop->tid);
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ switch (loc->type) {
+ case ReportLocationHeap: {
+ Printf("\n");
+ Printf("Heap block of size %zu at %p allocated by ",
+ loc->heap_chunk_size, loc->heap_chunk_start);
+ if (loc->tid == kMainThreadId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", loc->tid);
+ PrintStack(loc->stack);
+ break;
+ }
+ case ReportLocationGlobal: {
+ Printf("\n");
+ Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
+ loc->global.name, loc->global.size, loc->global.start,
+ loc->global.file, loc->global.line);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static void PrintThread(const ReportThread *rt) {
+ if (rt->id == kMainThreadId)
+ return;
+ Printf("\n");
+ Printf("Goroutine %d (%s) created at:\n",
+ rt->id, rt->running ? "running" : "finished");
+ PrintStack(rt->stack);
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Printf("==================\n");
+ if (rep->typ == ReportTypeRace) {
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ } else if (rep->typ == ReportTypeDeadlock) {
+ Printf("WARNING: DEADLOCK\n");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
+ 999, rep->mutexes[i]->id,
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i]);
+ Printf("\n");
+ Printf("Mutex %d was previously locked here:\n",
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i + 1]);
+ Printf("\n");
+ }
+ }
+ Printf("==================\n");
+}
+
+#endif
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_report.h b/lib/tsan/tsan_report.h
new file mode 100644
index 0000000000..b4e4d89893
--- /dev/null
+++ b/lib/tsan/tsan_report.h
@@ -0,0 +1,135 @@
+//===-- tsan_report.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_REPORT_H
+#define TSAN_REPORT_H
+
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+enum ReportType {
+ ReportTypeRace,
+ ReportTypeVptrRace,
+ ReportTypeUseAfterFree,
+ ReportTypeVptrUseAfterFree,
+ ReportTypeExternalRace,
+ ReportTypeThreadLeak,
+ ReportTypeMutexDestroyLocked,
+ ReportTypeMutexDoubleLock,
+ ReportTypeMutexInvalidAccess,
+ ReportTypeMutexBadUnlock,
+ ReportTypeMutexBadReadLock,
+ ReportTypeMutexBadReadUnlock,
+ ReportTypeSignalUnsafe,
+ ReportTypeErrnoInSignal,
+ ReportTypeDeadlock
+};
+
+struct ReportStack {
+ SymbolizedStack *frames;
+ bool suppressable;
+ static ReportStack *New();
+
+ private:
+ ReportStack();
+};
+
+struct ReportMopMutex {
+ u64 id;
+ bool write;
+};
+
+struct ReportMop {
+ int tid;
+ uptr addr;
+ int size;
+ bool write;
+ bool atomic;
+ uptr external_tag;
+ Vector<ReportMopMutex> mset;
+ ReportStack *stack;
+
+ ReportMop();
+};
+
+enum ReportLocationType {
+ ReportLocationGlobal,
+ ReportLocationHeap,
+ ReportLocationStack,
+ ReportLocationTLS,
+ ReportLocationFD
+};
+
+struct ReportLocation {
+ ReportLocationType type;
+ DataInfo global;
+ uptr heap_chunk_start;
+ uptr heap_chunk_size;
+ uptr external_tag;
+ int tid;
+ int fd;
+ bool suppressable;
+ ReportStack *stack;
+
+ static ReportLocation *New(ReportLocationType type);
+ private:
+ explicit ReportLocation(ReportLocationType type);
+};
+
+struct ReportThread {
+ int id;
+ tid_t os_id;
+ bool running;
+ ThreadType thread_type;
+ char *name;
+ u32 parent_tid;
+ ReportStack *stack;
+};
+
+struct ReportMutex {
+ u64 id;
+ uptr addr;
+ bool destroyed;
+ ReportStack *stack;
+};
+
+class ReportDesc {
+ public:
+ ReportType typ;
+ uptr tag;
+ Vector<ReportStack*> stacks;
+ Vector<ReportMop*> mops;
+ Vector<ReportLocation*> locs;
+ Vector<ReportMutex*> mutexes;
+ Vector<ReportThread*> threads;
+ Vector<int> unique_tids;
+ ReportStack *sleep;
+ int count;
+
+ ReportDesc();
+ ~ReportDesc();
+
+ private:
+ ReportDesc(const ReportDesc&);
+ void operator = (const ReportDesc&);
+};
+
+// Format and output the report to the console/log. No additional logic.
+void PrintReport(const ReportDesc *rep);
+void PrintStack(const ReportStack *stack);
+
+} // namespace __tsan
+
+#endif // TSAN_REPORT_H
diff --git a/lib/tsan/tsan_rtl.cpp b/lib/tsan/tsan_rtl.cpp
new file mode 100644
index 0000000000..13c9b770f5
--- /dev/null
+++ b/lib/tsan/tsan_rtl.cpp
@@ -0,0 +1,1127 @@
+//===-- tsan_rtl.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main file (entry points) for the TSan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_defs.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "ubsan/ubsan_init.h"
+
+#ifdef __SSE3__
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+#define _MM_MALLOC_H_INCLUDED
+#define __MM_MALLOC_H
+#include <emmintrin.h>
+typedef __m128i m128;
+#endif
+
+volatile int __tsan_resumed = 0;
+
+extern "C" void __tsan_resume() {
+ __tsan_resumed = 1;
+}
+
+namespace __tsan {
+
+#if !SANITIZER_GO && !SANITIZER_MAC
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
+#endif
+static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+Context *ctx;
+
+// Can be overriden by a front-end.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnFinalize(bool failed);
+void OnInitialize();
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnFinalize(bool failed) {
+ return failed;
+}
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+void OnInitialize() {}
+#endif
+
+static char thread_registry_placeholder[sizeof(ThreadRegistry)];
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+ // Map thread trace when context is created.
+ char name[50];
+ internal_snprintf(name, sizeof(name), "trace %u", tid);
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
+ const uptr hdr = GetThreadTraceHeader(tid);
+ internal_snprintf(name, sizeof(name), "trace header %u", tid);
+ MapThreadTrace(hdr, sizeof(Trace), name);
+ new((void*)hdr) Trace();
+ // We are going to use only a small part of the trace with the default
+ // value of history_size. However, the constructor writes to the whole trace.
+ // Unmap the unused part.
+ uptr hdr_end = hdr + sizeof(Trace);
+ hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
+ hdr_end = RoundUp(hdr_end, GetPageSizeCached());
+ if (hdr_end < hdr + sizeof(Trace))
+ UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
+ void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
+ return new(mem) ThreadContext(tid);
+}
+
+#if !SANITIZER_GO
+static const u32 kThreadQuarantineSize = 16;
+#else
+static const u32 kThreadQuarantineSize = 64;
+#endif
+
+Context::Context()
+ : initialized()
+ , report_mtx(MutexTypeReport, StatMtxReport)
+ , nreported()
+ , nmissed_expected()
+ , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
+ , racy_mtx(MutexTypeRacy, StatMtxRacy)
+ , racy_stacks()
+ , racy_addresses()
+ , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
+ , clock_alloc("clock allocator") {
+ fired_suppressions.reserve(8);
+}
+
+// The objects are allocated in TLS, so one may rely on zero-initialization.
+ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size)
+ : fast_state(tid, epoch)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // , ignore_reads_and_writes()
+ // , ignore_interceptors()
+ , clock(tid, reuse_count)
+#if !SANITIZER_GO
+ , jmp_bufs()
+#endif
+ , tid(tid)
+ , unique_id(unique_id)
+ , stk_addr(stk_addr)
+ , stk_size(stk_size)
+ , tls_addr(tls_addr)
+ , tls_size(tls_size)
+#if !SANITIZER_GO
+ , last_sleep_clock(tid)
+#endif
+{
+}
+
+#if !SANITIZER_GO
+static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
+ uptr n_threads;
+ uptr n_running_threads;
+ ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+ InternalMmapVector<char> buf(4096);
+ WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
+ WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
+}
+
+static void *BackgroundThread(void *arg) {
+ // This is a non-initialized non-user thread, nothing to see here.
+ // We don't use ScopedIgnoreInterceptors, because we want ignores to be
+ // enabled even when the thread function exits (e.g. during pthread thread
+ // shutdown code).
+ cur_thread_init();
+ cur_thread()->ignore_interceptors++;
+ const u64 kMs2Ns = 1000 * 1000;
+
+ fd_t mprof_fd = kInvalidFd;
+ if (flags()->profile_memory && flags()->profile_memory[0]) {
+ if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
+ mprof_fd = 1;
+ } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
+ mprof_fd = 2;
+ } else {
+ InternalScopedString filename(kMaxPathLength);
+ filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
+ fd_t fd = OpenFile(filename.data(), WrOnly);
+ if (fd == kInvalidFd) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = fd;
+ }
+ }
+ }
+
+ u64 last_flush = NanoTime();
+ uptr last_rss = 0;
+ for (int i = 0;
+ atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
+ i++) {
+ SleepForMillis(100);
+ u64 now = NanoTime();
+
+ // Flush memory if requested.
+ if (flags()->flush_memory_ms > 0) {
+ if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
+ VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
+ FlushShadowMemory();
+ last_flush = NanoTime();
+ }
+ }
+ // GetRSS can be expensive on huge programs, so don't do it every 100ms.
+ if (flags()->memory_limit_mb > 0) {
+ uptr rss = GetRSS();
+ uptr limit = uptr(flags()->memory_limit_mb) << 20;
+ VPrintf(1, "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
+ if (2 * rss > limit + last_rss) {
+ VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
+ FlushShadowMemory();
+ rss = GetRSS();
+ VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ }
+ last_rss = rss;
+ }
+
+ // Write memory profile if requested.
+ if (mprof_fd != kInvalidFd)
+ MemoryProfiler(ctx, mprof_fd, i);
+
+ // Flush symbolizer cache if requested.
+ if (flags()->flush_symbolizer_ms > 0) {
+ u64 last = atomic_load(&ctx->last_symbolize_time_ns,
+ memory_order_relaxed);
+ if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
+ Lock l(&ctx->report_mtx);
+ ScopedErrorReportLock l2;
+ SymbolizeFlush();
+ atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
+ }
+ }
+ }
+ return nullptr;
+}
+
+static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+}
+
+#ifndef __mips__
+static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+ ctx->background_thread = 0;
+}
+#endif
+#endif
+
+void DontNeedShadowFor(uptr addr, uptr size) {
+ ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
+}
+
+#if !SANITIZER_GO
+void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
+ if (size == 0) return;
+ DontNeedShadowFor(addr, size);
+ ScopedGlobalProcessor sgp;
+ ctx->metamap.ResetRange(thr->proc(), addr, size);
+}
+#endif
+
+void MapShadow(uptr addr, uptr size) {
+ // Global data is not 64K aligned, but there are no adjacent mappings,
+ // so we can get away with unaligned mapping.
+ // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ const uptr kPageSize = GetPageSizeCached();
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
+
+ // Meta shadow is 2:1, so tread carefully.
+ static bool data_mapped = false;
+ static uptr mapped_meta_end = 0;
+ uptr meta_begin = (uptr)MemToMeta(addr);
+ uptr meta_end = (uptr)MemToMeta(addr + size);
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (!data_mapped) {
+ // First call maps data+bss.
+ data_mapped = true;
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
+ } else {
+ // Mapping continous heap.
+ // Windows wants 64K alignment.
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (meta_end <= mapped_meta_end)
+ return;
+ if (meta_begin < mapped_meta_end)
+ meta_begin = mapped_meta_end;
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
+ mapped_meta_end = meta_end;
+ }
+ VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
+ addr, addr+size, meta_begin, meta_end);
+}
+
+void MapThreadTrace(uptr addr, uptr size, const char *name) {
+ DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+ CHECK_GE(addr, TraceMemBeg());
+ CHECK_LE(addr + size, TraceMemEnd());
+ CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ if (!MmapFixedNoReserve(addr, size, name)) {
+ Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
+ addr, size);
+ Die();
+ }
+}
+
+static void CheckShadowMapping() {
+ uptr beg, end;
+ for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
+ // Skip cases for empty regions (heap definition for architectures that
+ // do not use 64-bit allocator).
+ if (beg == end)
+ continue;
+ VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ uptr prev = 0;
+ for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
+ for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
+ const uptr p = RoundDown(p0 + x, kShadowCell);
+ if (p < beg || p >= end)
+ continue;
+ const uptr s = MemToShadow(p);
+ const uptr m = (uptr)MemToMeta(p);
+ VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
+ CHECK(IsAppMem(p));
+ CHECK(IsShadowMem(s));
+ CHECK_EQ(p, ShadowToMem(s));
+ CHECK(IsMetaMem(m));
+ if (prev) {
+ // Ensure that shadow and meta mappings are linear within a single
+ // user range. Lots of code that processes memory ranges assumes it.
+ const uptr prev_s = MemToShadow(prev);
+ const uptr prev_m = (uptr)MemToMeta(prev);
+ CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
+ CHECK_EQ((m - prev_m) / kMetaShadowSize,
+ (p - prev) / kMetaShadowCell);
+ }
+ prev = p;
+ }
+ }
+ }
+}
+
+#if !SANITIZER_GO
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
+}
+#endif
+
+void Initialize(ThreadState *thr) {
+ // Thread safe because done before all threads exist.
+ static bool is_initialized = false;
+ if (is_initialized)
+ return;
+ is_initialized = true;
+ // We are not ready to handle interceptors yet.
+ ScopedIgnoreInterceptors ignore;
+ SanitizerToolName = "ThreadSanitizer";
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckFailedCallback(TsanCheckFailed);
+
+ ctx = new(ctx_placeholder) Context;
+ const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
+ const char *options = GetEnv(env_name);
+ CacheBinaryName();
+ CheckASLR();
+ InitializeFlags(&ctx->flags, options, env_name);
+ AvoidCVE_2016_2143();
+ __sanitizer::InitializePlatformEarly();
+ __tsan::InitializePlatformEarly();
+
+#if !SANITIZER_GO
+ // Re-exec ourselves if we need to set additional env or command line args.
+ MaybeReexec();
+
+ InitializeAllocator();
+ ReplaceSystemMalloc();
+#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ InitializeInterceptors();
+ CheckShadowMapping();
+ InitializePlatform();
+ InitializeMutex();
+ InitializeDynamicAnnotations();
+#if !SANITIZER_GO
+ InitializeShadowMemory();
+ InitializeAllocatorLate();
+ InstallDeadlySignalHandlers(TsanOnDeadlySignal);
+#endif
+ // Setup correct file descriptor for error reports.
+ __sanitizer_set_report_path(common_flags()->log_path);
+ InitializeSuppressions();
+#if !SANITIZER_GO
+ InitializeLibIgnore();
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+#endif
+
+ VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
+
+ // Initialize thread 0.
+ int tid = ThreadCreate(thr, 0, 0, true);
+ CHECK_EQ(tid, 0);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::InitAsPlugin();
+#endif
+ ctx->initialized = true;
+
+#if !SANITIZER_GO
+ Symbolizer::LateInitialize();
+#endif
+
+ if (flags()->stop_on_start) {
+ Printf("ThreadSanitizer is suspended at startup (pid %d)."
+ " Call __tsan_resume().\n",
+ (int)internal_getpid());
+ while (__tsan_resumed == 0) {}
+ }
+
+ OnInitialize();
+}
+
+void MaybeSpawnBackgroundThread() {
+ // On MIPS, TSan initialization is run before
+ // __pthread_initialize_minimal_internal() is finished, so we can not spawn
+ // new threads.
+#if !SANITIZER_GO && !defined(__mips__)
+ static atomic_uint32_t bg_thread = {};
+ if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
+ atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
+ StartBackgroundThread();
+ SetSandboxingCallback(StopBackgroundThread);
+ }
+#endif
+}
+
+
+int Finalize(ThreadState *thr) {
+ bool failed = false;
+
+ if (common_flags()->print_module_map == 1) PrintModuleMap();
+
+ if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
+ SleepForMillis(flags()->atexit_sleep_ms);
+
+ // Wait for pending reports.
+ ctx->report_mtx.Lock();
+ { ScopedErrorReportLock l; }
+ ctx->report_mtx.Unlock();
+
+#if !SANITIZER_GO
+ if (Verbosity()) AllocatorPrintStats();
+#endif
+
+ ThreadFinalize(thr);
+
+ if (ctx->nreported) {
+ failed = true;
+#if !SANITIZER_GO
+ Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
+#else
+ Printf("Found %d data race(s)\n", ctx->nreported);
+#endif
+ }
+
+ if (ctx->nmissed_expected) {
+ failed = true;
+ Printf("ThreadSanitizer: missed %d expected races\n",
+ ctx->nmissed_expected);
+ }
+
+ if (common_flags()->print_suppressions)
+ PrintMatchedSuppressions();
+#if !SANITIZER_GO
+ if (flags()->print_benign)
+ PrintMatchedBenignRaces();
+#endif
+
+ failed = OnFinalize(failed);
+
+#if TSAN_COLLECT_STATS
+ StatAggregate(ctx->stat, thr->stat);
+ StatOutput(ctx->stat);
+#endif
+
+ return failed ? common_flags()->exitcode : 0;
+}
+
+#if !SANITIZER_GO
+void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
+ // Ignore memory accesses in the pthread_atfork callbacks.
+ // If any of them triggers a data race we will deadlock
+ // on the report_mtx.
+ // We could ignore interceptors and sync operations as well,
+ // but so far it's unclear if it will do more good or harm.
+ // Unnecessarily ignoring things can lead to false positives later.
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void ForkParentAfter(ThreadState *thr, uptr pc) {
+ ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+}
+
+void ForkChildAfter(ThreadState *thr, uptr pc) {
+ ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+ uptr nthread = 0;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+ VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+ if (nthread == 1) {
+ StartBackgroundThread();
+ } else {
+ // We've just forked a multi-threaded process. We cannot reasonably function
+ // after that (some mutexes may be locked before fork). So just enable
+ // ignores for everything in the hope that we will exec soon.
+ ctx->after_multithreaded_fork = true;
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, pc);
+ ThreadIgnoreSyncBegin(thr, pc);
+ }
+}
+#endif
+
+#if SANITIZER_GO
+NOINLINE
+void GrowShadowStack(ThreadState *thr) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+}
+#endif
+
+u32 CurrentStackId(ThreadState *thr, uptr pc) {
+ if (!thr->is_inited) // May happen during bootstrap.
+ return 0;
+ if (pc != 0) {
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+ }
+ u32 id = StackDepotPut(
+ StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+ if (pc != 0)
+ thr->shadow_stack_pos--;
+ return id;
+}
+
+void TraceSwitch(ThreadState *thr) {
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork)
+ return;
+#endif
+ thr->nomalloc++;
+ Trace *thr_trace = ThreadTrace(thr->tid);
+ Lock l(&thr_trace->mtx);
+ unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
+ TraceHeader *hdr = &thr_trace->headers[trace];
+ hdr->epoch0 = thr->fast_state.epoch();
+ ObtainCurrentStack(thr, 0, &hdr->stack0);
+ hdr->mset0 = thr->mset;
+ thr->nomalloc--;
+}
+
+Trace *ThreadTrace(int tid) {
+ return (Trace*)GetThreadTraceHeader(tid);
+}
+
+uptr TraceTopPC(ThreadState *thr) {
+ Event *events = (Event*)GetThreadTrace(thr->tid);
+ uptr pc = events[thr->fast_state.GetTracePos()];
+ return pc;
+}
+
+uptr TraceSize() {
+ return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
+}
+
+uptr TraceParts() {
+ return TraceSize() / kTracePartSize;
+}
+
+#if !SANITIZER_GO
+extern "C" void __tsan_trace_switch() {
+ TraceSwitch(cur_thread());
+}
+
+extern "C" void __tsan_report_race() {
+ ReportRace(cur_thread());
+}
+#endif
+
+ALWAYS_INLINE
+Shadow LoadShadow(u64 *p) {
+ u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
+ return Shadow(raw);
+}
+
+ALWAYS_INLINE
+void StoreShadow(u64 *sp, u64 s) {
+ atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
+}
+
+ALWAYS_INLINE
+void StoreIfNotYetStored(u64 *sp, u64 *s) {
+ StoreShadow(sp, *s);
+ *s = 0;
+}
+
+ALWAYS_INLINE
+void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ Shadow cur, Shadow old) {
+ thr->racy_state[0] = cur.raw();
+ thr->racy_state[1] = old.raw();
+ thr->racy_shadow_addr = shadow_mem;
+#if !SANITIZER_GO
+ HACKY_CALL(__tsan_report_race);
+#else
+ ReportRace(thr);
+#endif
+}
+
+static inline bool HappensBefore(Shadow old, ThreadState *thr) {
+ return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
+}
+
+ALWAYS_INLINE
+void MemoryAccessImpl1(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+
+ // This potentially can live in an MMX/SSE scratch register.
+ // The required intrinsics are:
+ // __m128i _mm_move_epi64(__m128i*);
+ // _mm_storel_epi64(u64*, __m128i);
+ u64 store_word = cur.raw();
+ bool stored = false;
+
+ // scan all the shadow values and dispatch to 4 categories:
+ // same, replace, candidate and race (see comments below).
+ // we consider only 3 cases regarding access sizes:
+ // equal, intersect and not intersect. initially I considered
+ // larger and smaller as well, it allowed to replace some
+ // 'candidates' with 'same' or 'replace', but I think
+ // it's just not worth it (performance- and complexity-wise).
+
+ Shadow old(0);
+
+ // It release mode we manually unroll the loop,
+ // because empirically gcc generates better code this way.
+ // However, we can't afford unrolling in debug mode, because the function
+ // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
+ // threads, which is not enough for the unrolled loop.
+#if SANITIZER_DEBUG
+ for (int idx = 0; idx < 4; idx++) {
+#include "tsan_update_shadow_word_inl.h"
+ }
+#else
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+ if (stored) {
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+#include "tsan_update_shadow_word_inl.h"
+ }
+ idx = 2;
+ if (stored) {
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+#include "tsan_update_shadow_word_inl.h"
+ }
+ idx = 3;
+ if (stored) {
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+#include "tsan_update_shadow_word_inl.h"
+ }
+#endif
+
+ // we did not find any races and had already stored
+ // the current access info, so we are done
+ if (LIKELY(stored))
+ return;
+ // choose a random candidate slot and replace it
+ StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
+ StatInc(thr, StatShadowReplace);
+ return;
+ RACE:
+ HandleRace(thr, shadow_mem, cur, old);
+ return;
+}
+
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic) {
+ while (size) {
+ int size1 = 1;
+ int kAccessSizeLog = kSizeLog1;
+ if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
+ size1 = 8;
+ kAccessSizeLog = kSizeLog8;
+ } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
+ size1 = 4;
+ kAccessSizeLog = kSizeLog4;
+ } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
+ size1 = 2;
+ kAccessSizeLog = kSizeLog2;
+ }
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
+ addr += size1;
+ size -= size1;
+ }
+}
+
+ALWAYS_INLINE
+bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ Shadow cur(a);
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ Shadow old(LoadShadow(&s[i]));
+ if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
+ old.TidWithIgnore() == cur.TidWithIgnore() &&
+ old.epoch() > sync_epoch &&
+ old.IsAtomic() == cur.IsAtomic() &&
+ old.IsRead() <= cur.IsRead())
+ return true;
+ }
+ return false;
+}
+
+#if defined(__SSE3__)
+#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
+ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
+ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+ALWAYS_INLINE
+bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ // This is an optimized version of ContainsSameAccessSlow.
+ // load current access into access[0:63]
+ const m128 access = _mm_cvtsi64_si128(a);
+ // duplicate high part of access in addr0:
+ // addr0[0:31] = access[32:63]
+ // addr0[32:63] = access[32:63]
+ // addr0[64:95] = access[32:63]
+ // addr0[96:127] = access[32:63]
+ const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
+ // load 4 shadow slots
+ const m128 shadow0 = _mm_load_si128((__m128i*)s);
+ const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
+ // load high parts of 4 shadow slots into addr_vect:
+ // addr_vect[0:31] = shadow0[32:63]
+ // addr_vect[32:63] = shadow0[96:127]
+ // addr_vect[64:95] = shadow1[32:63]
+ // addr_vect[96:127] = shadow1[96:127]
+ m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
+ if (!is_write) {
+ // set IsRead bit in addr_vect
+ const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
+ const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
+ addr_vect = _mm_or_si128(addr_vect, rw_mask);
+ }
+ // addr0 == addr_vect?
+ const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
+ // epoch1[0:63] = sync_epoch
+ const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
+ // epoch[0:31] = sync_epoch[0:31]
+ // epoch[32:63] = sync_epoch[0:31]
+ // epoch[64:95] = sync_epoch[0:31]
+ // epoch[96:127] = sync_epoch[0:31]
+ const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
+ // load low parts of shadow cell epochs into epoch_vect:
+ // epoch_vect[0:31] = shadow0[0:31]
+ // epoch_vect[32:63] = shadow0[64:95]
+ // epoch_vect[64:95] = shadow1[0:31]
+ // epoch_vect[96:127] = shadow1[64:95]
+ const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
+ // epoch_vect >= sync_epoch?
+ const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
+ // addr_res & epoch_res
+ const m128 res = _mm_and_si128(addr_res, epoch_res);
+ // mask[0] = res[7]
+ // mask[1] = res[15]
+ // ...
+ // mask[15] = res[127]
+ const int mask = _mm_movemask_epi8(res);
+ return mask != 0;
+}
+#endif
+
+ALWAYS_INLINE
+bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+#if defined(__SSE3__)
+ bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads. But it still can be useful if you modify
+ // ContainsSameAccessFast and want to ensure that it's not completely broken.
+ // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ return res;
+#else
+ return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
+#endif
+}
+
+ALWAYS_INLINE USED
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
+ " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
+ (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
+ (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
+ (uptr)shadow_mem[0], (uptr)shadow_mem[1],
+ (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+#endif
+
+ if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopRodata);
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit())) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopIgnored);
+ return;
+ }
+
+ Shadow cur(fast_state);
+ cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
+ cur.SetWrite(kAccessIsWrite);
+ cur.SetAtomic(kIsAtomic);
+
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ cur.IncrementEpoch();
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+// Called by MemoryAccessRange in tsan_rtl_thread.cpp
+ALWAYS_INLINE USED
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ u64 val) {
+ (void)thr;
+ (void)pc;
+ if (size == 0)
+ return;
+ // FIXME: fix me.
+ uptr offset = addr % kShadowCell;
+ if (offset) {
+ offset = kShadowCell - offset;
+ if (size <= offset)
+ return;
+ addr += offset;
+ size -= offset;
+ }
+ DCHECK_EQ(addr % 8, 0);
+ // If a user passes some insane arguments (memset(0)),
+ // let it just crash as usual.
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return;
+ // Don't want to touch lots of shadow memory.
+ // If a program maps 10MB stack, there is no need reset the whole range.
+ size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
+ if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
+ u64 *p = (u64*)MemToShadow(addr);
+ CHECK(IsShadowMem((uptr)p));
+ CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
+ // FIXME: may overwrite a part outside the region
+ for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
+ p[i++] = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ p[i++] = 0;
+ }
+ } else {
+ // The region is big, reset only beginning and end.
+ const uptr kPageSize = GetPageSizeCached();
+ u64 *begin = (u64*)MemToShadow(addr);
+ u64 *end = begin + size / kShadowCell * kShadowCnt;
+ u64 *p = begin;
+ // Set at least first kPageSize/2 to page boundary.
+ while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
+ // Reset middle part.
+ u64 *p1 = p;
+ p = RoundDown(end, kPageSize);
+ UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
+ if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ Die();
+ // Set the ending.
+ while (p < end) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
+ }
+}
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ MemoryRangeSet(thr, pc, addr, size, 0);
+}
+
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ // Processing more than 1k (4k of shadow) is expensive,
+ // can cause excessive memory consumption (user does not necessary touch
+ // the whole range) and most likely unnecessary.
+ if (size > 1024)
+ size = 1024;
+ CHECK_EQ(thr->is_freeing, false);
+ thr->is_freeing = true;
+ MemoryAccessRange(thr, pc, addr, size, true);
+ thr->is_freeing = false;
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.MarkAsFreed();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size) {
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, addr, size);
+ else
+ MemoryResetRange(thr, pc, addr, size);
+}
+
+ALWAYS_INLINE USED
+void FuncEntry(ThreadState *thr, uptr pc) {
+ StatInc(thr, StatFuncEnter);
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ }
+
+ // Shadow stack maintenance can be replaced with
+ // stack unwinding during trace switch (which presumably must be faster).
+ DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+}
+
+ALWAYS_INLINE USED
+void FuncExit(ThreadState *thr) {
+ StatInc(thr, StatFuncExit);
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ }
+
+ DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#endif
+ thr->shadow_stack_pos--;
+}
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
+ DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
+ thr->ignore_reads_and_writes++;
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
+ thr->fast_state.SetIgnoreBit();
+#if !SANITIZER_GO
+ if (save_stack && !ctx->after_multithreaded_fork)
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
+ thr->ignore_reads_and_writes--;
+ if (thr->ignore_reads_and_writes == 0) {
+ thr->fast_state.ClearIgnoreBit();
+#if !SANITIZER_GO
+ thr->mop_ignore_set.Reset();
+#endif
+ }
+}
+
+#if !SANITIZER_GO
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __tsan_testonly_shadow_stack_current_size() {
+ ThreadState *thr = cur_thread();
+ return thr->shadow_stack_pos - thr->shadow_stack;
+}
+#endif
+
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
+ DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
+ thr->ignore_sync++;
+ CHECK_GT(thr->ignore_sync, 0);
+#if !SANITIZER_GO
+ if (save_stack && !ctx->after_multithreaded_fork)
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_sync, 0);
+ thr->ignore_sync--;
+#if !SANITIZER_GO
+ if (thr->ignore_sync == 0)
+ thr->sync_ignore_set.Reset();
+#endif
+}
+
+bool MD5Hash::operator==(const MD5Hash &other) const {
+ return hash[0] == other.hash[0] && hash[1] == other.hash[1];
+}
+
+#if SANITIZER_DEBUG
+void build_consistency_debug() {}
+#else
+void build_consistency_release() {}
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats() {}
+#else
+void build_consistency_nostats() {}
+#endif
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+// Must be included in this file to make sure everything is inlined.
+#include "tsan_interface_inl.h"
+#endif
diff --git a/lib/tsan/tsan_rtl.h b/lib/tsan/tsan_rtl.h
new file mode 100644
index 0000000000..d3bb61ff87
--- /dev/null
+++ b/lib/tsan/tsan_rtl.h
@@ -0,0 +1,893 @@
+//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main internal TSan header file.
+//
+// Ground rules:
+// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
+// function-scope locals)
+// - All functions/classes/etc reside in namespace __tsan, except for those
+// declared in tsan_interface.h.
+// - Platform-specific files should be used instead of ifdefs (*).
+// - No system headers included in header files (*).
+// - Platform specific headres included only into platform-specific files (*).
+//
+// (*) Except when inlining is critical for performance.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_RTL_H
+#define TSAN_RTL_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_asm.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common/sanitizer_libignore.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_clock.h"
+#include "tsan_defs.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_mutexset.h"
+#include "tsan_ignoreset.h"
+#include "tsan_stack_trace.h"
+
+#if SANITIZER_WORDSIZE != 64
+# error "ThreadSanitizer is supported only on 64-bit platforms"
+#endif
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+struct MapUnmapCallback;
+#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#else
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+ static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
+ static const uptr kMetadataSize = 0;
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#endif
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+Allocator *allocator();
+#endif
+
+void TsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2);
+
+const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
+
+// FastState (from most significant bit):
+// ignore : 1
+// tid : kTidBits
+// unused : -
+// history_size : 3
+// epoch : kClkBits
+class FastState {
+ public:
+ FastState(u64 tid, u64 epoch) {
+ x_ = tid << kTidShift;
+ x_ |= epoch;
+ DCHECK_EQ(tid, this->tid());
+ DCHECK_EQ(epoch, this->epoch());
+ DCHECK_EQ(GetIgnoreBit(), false);
+ }
+
+ explicit FastState(u64 x)
+ : x_(x) {
+ }
+
+ u64 raw() const {
+ return x_;
+ }
+
+ u64 tid() const {
+ u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
+ return res;
+ }
+
+ u64 TidWithIgnore() const {
+ u64 res = x_ >> kTidShift;
+ return res;
+ }
+
+ u64 epoch() const {
+ u64 res = x_ & ((1ull << kClkBits) - 1);
+ return res;
+ }
+
+ void IncrementEpoch() {
+ u64 old_epoch = epoch();
+ x_ += 1;
+ DCHECK_EQ(old_epoch + 1, epoch());
+ (void)old_epoch;
+ }
+
+ void SetIgnoreBit() { x_ |= kIgnoreBit; }
+ void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
+ bool GetIgnoreBit() const { return (s64)x_ < 0; }
+
+ void SetHistorySize(int hs) {
+ CHECK_GE(hs, 0);
+ CHECK_LE(hs, 7);
+ x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
+ }
+
+ ALWAYS_INLINE
+ int GetHistorySize() const {
+ return (int)((x_ >> kHistoryShift) & kHistoryMask);
+ }
+
+ void ClearHistorySize() {
+ SetHistorySize(0);
+ }
+
+ ALWAYS_INLINE
+ u64 GetTracePos() const {
+ const int hs = GetHistorySize();
+ // When hs == 0, the trace consists of 2 parts.
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+ return epoch() & mask;
+ }
+
+ private:
+ friend class Shadow;
+ static const int kTidShift = 64 - kTidBits - 1;
+ static const u64 kIgnoreBit = 1ull << 63;
+ static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
+ u64 x_;
+};
+
+// Shadow (from most significant bit):
+// freed : 1
+// tid : kTidBits
+// is_atomic : 1
+// is_read : 1
+// size_log : 2
+// addr0 : 3
+// epoch : kClkBits
+class Shadow : public FastState {
+ public:
+ explicit Shadow(u64 x)
+ : FastState(x) {
+ }
+
+ explicit Shadow(const FastState &s)
+ : FastState(s.x_) {
+ ClearHistorySize();
+ }
+
+ void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
+ DCHECK_LE(addr0, 7);
+ DCHECK_LE(kAccessSizeLog, 3);
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
+ DCHECK_EQ(kAccessSizeLog, size_log());
+ DCHECK_EQ(addr0, this->addr0());
+ }
+
+ void SetWrite(unsigned kAccessIsWrite) {
+ DCHECK_EQ(x_ & kReadBit, 0);
+ if (!kAccessIsWrite)
+ x_ |= kReadBit;
+ DCHECK_EQ(kAccessIsWrite, IsWrite());
+ }
+
+ void SetAtomic(bool kIsAtomic) {
+ DCHECK(!IsAtomic());
+ if (kIsAtomic)
+ x_ |= kAtomicBit;
+ DCHECK_EQ(IsAtomic(), kIsAtomic);
+ }
+
+ bool IsAtomic() const {
+ return x_ & kAtomicBit;
+ }
+
+ bool IsZero() const {
+ return x_ == 0;
+ }
+
+ static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
+ u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
+ DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
+ return shifted_xor == 0;
+ }
+
+ static ALWAYS_INLINE
+ bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
+ return masked_xor == 0;
+ }
+
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ unsigned kS2AccessSize) {
+ bool res = false;
+ u64 diff = s1.addr0() - s2.addr0();
+ if ((s64)diff < 0) { // s1.addr0 < s2.addr0
+ // if (s1.addr0() + size1) > s2.addr0()) return true;
+ if (s1.size() > -diff)
+ res = true;
+ } else {
+ // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
+ if (kS2AccessSize > diff)
+ res = true;
+ }
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
+ return res;
+ }
+
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
+
+ // The idea behind the freed bit is as follows.
+ // When the memory is freed (or otherwise unaccessible) we write to the shadow
+ // values with tid/epoch related to the free and the freed bit set.
+ // During memory accesses processing the freed bit is considered
+ // as msb of tid. So any access races with shadow with freed bit set
+ // (it is as if write from a thread with which we never synchronized before).
+ // This allows us to detect accesses to freed memory w/o additional
+ // overheads in memory access processing and at the same time restore
+ // tid/epoch of free.
+ void MarkAsFreed() {
+ x_ |= kFreedBit;
+ }
+
+ bool IsFreed() const {
+ return x_ & kFreedBit;
+ }
+
+ bool GetFreedAndReset() {
+ bool res = x_ & kFreedBit;
+ x_ &= ~kFreedBit;
+ return res;
+ }
+
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
+ | (u64(kIsAtomic) << kAtomicShift));
+ DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+ return v;
+ }
+
+ private:
+ static const u64 kReadShift = 5 + kClkBits;
+ static const u64 kReadBit = 1ull << kReadShift;
+ static const u64 kAtomicShift = 6 + kClkBits;
+ static const u64 kAtomicBit = 1ull << kAtomicShift;
+
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
+
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
+ if (s1.addr0() == s2.addr0()) return true;
+ if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
+ return true;
+ if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
+ return true;
+ return false;
+ }
+};
+
+struct ThreadSignalContext;
+
+struct JmpBuf {
+ uptr sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
+ uptr *shadow_stack_pos;
+};
+
+// A Processor represents a physical thread, or a P for Go.
+// It is used to store internal resources like allocate cache, and does not
+// participate in race-detection logic (invisible to end user).
+// In C++ it is tied to an OS thread just like ThreadState, however ideally
+// it should be tied to a CPU (this way we will have fewer allocator caches).
+// In Go it is tied to a P, so there are significantly fewer Processor's than
+// ThreadState's (which are tied to Gs).
+// A ThreadState must be wired with a Processor to handle events.
+struct Processor {
+ ThreadState *thr; // currently wired thread, or nullptr
+#if !SANITIZER_GO
+ AllocatorCache alloc_cache;
+ InternalAllocatorCache internal_alloc_cache;
+#endif
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+ DDPhysicalThread *dd_pt;
+};
+
+#if !SANITIZER_GO
+// ScopedGlobalProcessor temporary setups a global processor for the current
+// thread, if it does not have one. Intended for interceptors that can run
+// at the very thread end, when we already destroyed the thread processor.
+struct ScopedGlobalProcessor {
+ ScopedGlobalProcessor();
+ ~ScopedGlobalProcessor();
+};
+#endif
+
+// This struct is stored in TLS.
+struct ThreadState {
+ FastState fast_state;
+ // Synch epoch represents the threads's epoch before the last synchronization
+ // action. It allows to reduce number of shadow state updates.
+ // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
+ // if we are processing write to X from the same thread at epoch=200,
+ // we do nothing, because both writes happen in the same 'synch epoch'.
+ // That is, if another memory access does not race with the former write,
+ // it does not race with the latter as well.
+ // QUESTION: can we can squeeze this into ThreadState::Fast?
+ // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
+ // taken by epoch between synchs.
+ // This way we can save one load from tls.
+ u64 fast_synch_epoch;
+ // Technically `current` should be a separate THREADLOCAL variable;
+ // but it is placed here in order to share cache line with previous fields.
+ ThreadState* current;
+ // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
+ // We do not distinguish beteween ignoring reads and writes
+ // for better performance.
+ int ignore_reads_and_writes;
+ int ignore_sync;
+ int suppress_reports;
+ // Go does not support ignores.
+#if !SANITIZER_GO
+ IgnoreSet mop_ignore_set;
+ IgnoreSet sync_ignore_set;
+#endif
+ // C/C++ uses fixed size shadow stack embed into Trace.
+ // Go uses malloc-allocated shadow stack with dynamic size.
+ uptr *shadow_stack;
+ uptr *shadow_stack_end;
+ uptr *shadow_stack_pos;
+ u64 *racy_shadow_addr;
+ u64 racy_state[2];
+ MutexSet mset;
+ ThreadClock clock;
+#if !SANITIZER_GO
+ Vector<JmpBuf> jmp_bufs;
+ int ignore_interceptors;
+#endif
+#if TSAN_COLLECT_STATS
+ u64 stat[StatCnt];
+#endif
+ const int tid;
+ const int unique_id;
+ bool in_symbolizer;
+ bool in_ignored_lib;
+ bool is_inited;
+ bool is_dead;
+ bool is_freeing;
+ bool is_vptr_access;
+ const uptr stk_addr;
+ const uptr stk_size;
+ const uptr tls_addr;
+ const uptr tls_size;
+ ThreadContext *tctx;
+
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ InternalDeadlockDetector internal_deadlock_detector;
+#endif
+ DDLogicalThread *dd_lt;
+
+ // Current wired Processor, or nullptr. Required to handle any events.
+ Processor *proc1;
+#if !SANITIZER_GO
+ Processor *proc() { return proc1; }
+#else
+ Processor *proc();
+#endif
+
+ atomic_uintptr_t in_signal_handler;
+ ThreadSignalContext *signal_ctx;
+
+#if !SANITIZER_GO
+ u32 last_sleep_stack_id;
+ ThreadClock last_sleep_clock;
+#endif
+
+ // Set in regions of runtime that must be signal-safe and fork-safe.
+ // If set, malloc must not be called.
+ int nomalloc;
+
+ const ReportDesc *current_report;
+
+ explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size);
+};
+
+#if !SANITIZER_GO
+#if SANITIZER_MAC || SANITIZER_ANDROID
+ThreadState *cur_thread();
+void set_cur_thread(ThreadState *thr);
+void cur_thread_finalize();
+INLINE void cur_thread_init() { }
+#else
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL char cur_thread_placeholder[];
+INLINE ThreadState *cur_thread() {
+ return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
+}
+INLINE void cur_thread_init() {
+ ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
+ if (UNLIKELY(!thr->current))
+ thr->current = thr;
+}
+INLINE void set_cur_thread(ThreadState *thr) {
+ reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
+}
+INLINE void cur_thread_finalize() { }
+#endif // SANITIZER_MAC || SANITIZER_ANDROID
+#endif // SANITIZER_GO
+
+class ThreadContext : public ThreadContextBase {
+ public:
+ explicit ThreadContext(int tid);
+ ~ThreadContext();
+ ThreadState *thr;
+ u32 creation_stack_id;
+ SyncClock sync;
+ // Epoch at which the thread had started.
+ // If we see an event from the thread stamped by an older epoch,
+ // the event is from a dead thread that shared tid with this thread.
+ u64 epoch0;
+ u64 epoch1;
+
+ // Override superclass callbacks.
+ void OnDead() override;
+ void OnJoined(void *arg) override;
+ void OnFinished() override;
+ void OnStarted(void *arg) override;
+ void OnCreated(void *arg) override;
+ void OnReset() override;
+ void OnDetached(void *arg) override;
+};
+
+struct RacyStacks {
+ MD5Hash hash[2];
+ bool operator==(const RacyStacks &other) const {
+ if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+ return true;
+ if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+ return true;
+ return false;
+ }
+};
+
+struct RacyAddress {
+ uptr addr_min;
+ uptr addr_max;
+};
+
+struct FiredSuppression {
+ ReportType type;
+ uptr pc_or_addr;
+ Suppression *supp;
+};
+
+struct Context {
+ Context();
+
+ bool initialized;
+#if !SANITIZER_GO
+ bool after_multithreaded_fork;
+#endif
+
+ MetaMap metamap;
+
+ Mutex report_mtx;
+ int nreported;
+ int nmissed_expected;
+ atomic_uint64_t last_symbolize_time_ns;
+
+ void *background_thread;
+ atomic_uint32_t stop_background_thread;
+
+ ThreadRegistry *thread_registry;
+
+ Mutex racy_mtx;
+ Vector<RacyStacks> racy_stacks;
+ Vector<RacyAddress> racy_addresses;
+ // Number of fired suppressions may be large enough.
+ Mutex fired_suppressions_mtx;
+ InternalMmapVector<FiredSuppression> fired_suppressions;
+ DDetector *dd;
+
+ ClockAlloc clock_alloc;
+
+ Flags flags;
+
+ u64 stat[StatCnt];
+ u64 int_alloc_cnt[MBlockTypeCount];
+ u64 int_alloc_siz[MBlockTypeCount];
+};
+
+extern Context *ctx; // The one and the only global runtime context.
+
+ALWAYS_INLINE Flags *flags() {
+ return &ctx->flags;
+}
+
+struct ScopedIgnoreInterceptors {
+ ScopedIgnoreInterceptors() {
+#if !SANITIZER_GO
+ cur_thread()->ignore_interceptors++;
+#endif
+ }
+
+ ~ScopedIgnoreInterceptors() {
+#if !SANITIZER_GO
+ cur_thread()->ignore_interceptors--;
+#endif
+ }
+};
+
+const char *GetObjectTypeFromTag(uptr tag);
+const char *GetReportHeaderFromTag(uptr tag);
+uptr TagFromShadowStackFrame(uptr pc);
+
+class ScopedReportBase {
+ public:
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
+ const MutexSet *mset);
+ void AddStack(StackTrace stack, bool suppressable = false);
+ void AddThread(const ThreadContext *tctx, bool suppressable = false);
+ void AddThread(int unique_tid, bool suppressable = false);
+ void AddUniqueTid(int unique_tid);
+ void AddMutex(const SyncVar *s);
+ u64 AddMutex(u64 id);
+ void AddLocation(uptr addr, uptr size);
+ void AddSleep(u32 stack_id);
+ void SetCount(int count);
+
+ const ReportDesc *GetReport() const;
+
+ protected:
+ ScopedReportBase(ReportType typ, uptr tag);
+ ~ScopedReportBase();
+
+ private:
+ ReportDesc *rep_;
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore_interceptors_;
+
+ void AddDeadMutex(u64 id);
+
+ ScopedReportBase(const ScopedReportBase &) = delete;
+ void operator=(const ScopedReportBase &) = delete;
+};
+
+class ScopedReport : public ScopedReportBase {
+ public:
+ explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
+ ~ScopedReport();
+
+ private:
+ ScopedErrorReportLock lock_;
+};
+
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset, uptr *tag = nullptr);
+
+// The stack could look like:
+// <start> | <main> | <foo> | tag | <bar>
+// This will extract the tag and keep:
+// <start> | <main> | <foo> | <bar>
+template<typename StackTraceTy>
+void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
+ if (stack->size < 2) return;
+ uptr possible_tag_pc = stack->trace[stack->size - 2];
+ uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
+ if (possible_tag == kExternalTagNone) return;
+ stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
+ stack->size -= 1;
+ if (tag) *tag = possible_tag;
+}
+
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
+ uptr *tag = nullptr) {
+ uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+ uptr start = 0;
+ if (size + !!toppc > kStackTraceMax) {
+ start = size + !!toppc - kStackTraceMax;
+ size = kStackTraceMax - !!toppc;
+ }
+ stack->Init(&thr->shadow_stack[start], size, toppc);
+ ExtractTagFromStack(stack, tag);
+}
+
+#define GET_STACK_TRACE_FATAL(thr, pc) \
+ VarSizeStackTrace stack; \
+ ObtainCurrentStack(thr, pc, &stack); \
+ stack.ReverseOrder();
+
+#if TSAN_COLLECT_STATS
+void StatAggregate(u64 *dst, u64 *src);
+void StatOutput(u64 *stat);
+#endif
+
+void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] += n;
+#endif
+}
+void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] = n;
+#endif
+}
+
+void MapShadow(uptr addr, uptr size);
+void MapThreadTrace(uptr addr, uptr size, const char *name);
+void DontNeedShadowFor(uptr addr, uptr size);
+void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
+void InitializeShadowMemory();
+void InitializeInterceptors();
+void InitializeLibIgnore();
+void InitializeDynamicAnnotations();
+
+void ForkBefore(ThreadState *thr, uptr pc);
+void ForkParentAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc);
+
+void ReportRace(ThreadState *thr);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
+bool IsExpectedReport(uptr addr, uptr size);
+void PrintMatchedBenignRaces();
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
+# define DPrintf Printf
+#else
+# define DPrintf(...)
+#endif
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
+# define DPrintf2 Printf
+#else
+# define DPrintf2(...)
+#endif
+
+u32 CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(u32 stack_id);
+void PrintCurrentStack(ThreadState *thr, uptr pc);
+void PrintCurrentStackSlow(uptr pc); // uses libunwind
+
+void Initialize(ThreadState *thr);
+void MaybeSpawnBackgroundThread();
+int Finalize(ThreadState *thr);
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
+
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur);
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write);
+void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, uptr step, bool is_write);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic);
+
+const int kSizeLog1 = 0;
+const int kSizeLog2 = 1;
+const int kSizeLog4 = 2;
+const int kSizeLog8 = 3;
+
+void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
+}
+
+void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
+}
+
+void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
+}
+
+void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+}
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size);
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
+
+void FuncEntry(ThreadState *thr, uptr pc);
+void FuncExit(ThreadState *thr);
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+ ThreadType thread_type);
+void ThreadFinish(ThreadState *thr);
+int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, int tid);
+void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+void ThreadFinalize(ThreadState *thr);
+void ThreadSetName(ThreadState *thr, const char *name);
+int ThreadCount(ThreadState *thr);
+void ProcessPendingSignals(ThreadState *thr);
+void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);
+
+Processor *ProcCreate();
+void ProcDestroy(Processor *proc);
+void ProcWire(Processor *proc, ThreadState *thr);
+void ProcUnwire(Processor *proc, ThreadState *thr);
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
+ int rec = 1);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr);
+// AcquireGlobal synchronizes the current thread with all other threads.
+// In terms of happens-before relation, it draws a HB edge from all threads
+// (where they happen to execute right now) to the current thread. We use it to
+// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
+// right before executing finalizers. This provides a coarse, but simple
+// approximation of the actual required synchronization.
+void AcquireGlobal(ThreadState *thr, uptr pc);
+void Release(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
+void AfterSleep(ThreadState *thr, uptr pc);
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+
+// The hacky call uses custom calling convention and an assembly thunk.
+// It is considerably faster that a normal call for the caller
+// if it is not executed (it is intended for slow paths from hot functions).
+// The trick is that the call preserves all registers and the compiler
+// does not treat it as a call.
+// If it does not work for you, use normal call.
+#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
+// The caller may not create the stack frame for itself at all,
+// so we create a reserve stack frame for it (1024b must be enough).
+#define HACKY_CALL(f) \
+ __asm__ __volatile__("sub $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(1024) \
+ ".hidden " #f "_thunk;" \
+ "call " #f "_thunk;" \
+ "add $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(-1024) \
+ ::: "memory", "cc");
+#else
+#define HACKY_CALL(f) f()
+#endif
+
+void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
+uptr TraceParts();
+Trace *ThreadTrace(int tid);
+
+extern "C" void __tsan_trace_switch();
+void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
+ EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
+ DCHECK_GE((int)typ, 0);
+ DCHECK_LE((int)typ, 7);
+ DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
+ StatInc(thr, StatEvents);
+ u64 pos = fs.GetTracePos();
+ if (UNLIKELY((pos % kTracePartSize) == 0)) {
+#if !SANITIZER_GO
+ HACKY_CALL(__tsan_trace_switch);
+#else
+ TraceSwitch(thr);
+#endif
+ }
+ Event *trace = (Event*)GetThreadTrace(fs.tid());
+ Event *evp = &trace[pos];
+ Event ev = (u64)addr | ((u64)typ << kEventPCBits);
+ *evp = ev;
+}
+
+#if !SANITIZER_GO
+uptr ALWAYS_INLINE HeapEnd() {
+ return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
+}
+#endif
+
+ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
+void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
+void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
+
+// These need to match __tsan_switch_to_fiber_* flags defined in
+// tsan_interface.h. See documentation there as well.
+enum FiberSwitchFlags {
+ FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
+};
+
+} // namespace __tsan
+
+#endif // TSAN_RTL_H
diff --git a/lib/tsan/tsan_rtl_mutex.cpp b/lib/tsan/tsan_rtl_mutex.cpp
new file mode 100644
index 0000000000..ebd0d72218
--- /dev/null
+++ b/lib/tsan/tsan_rtl_mutex.cpp
@@ -0,0 +1,562 @@
+//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_sync.h"
+#include "tsan_report.h"
+#include "tsan_symbolize.h"
+#include "tsan_platform.h"
+
+namespace __tsan {
+
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback : DDCallback {
+ ThreadState *thr;
+ uptr pc;
+
+ Callback(ThreadState *thr, uptr pc)
+ : thr(thr)
+ , pc(pc) {
+ DDCallback::pt = thr->proc()->dd_pt;
+ DDCallback::lt = thr->dd_lt;
+ }
+
+ u32 Unwind() override { return CurrentStackId(thr, pc); }
+ int UniqueTid() override { return thr->unique_id; }
+};
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+ uptr addr, u64 mid) {
+ // In Go, these misuses are either impossible, or detected by std lib,
+ // or false positives (e.g. unlock in a different thread).
+ if (SANITIZER_GO)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(typ);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ StatInc(thr, StatMutexCreate);
+ if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->SetFlags(flagz & MutexCreationFlagMask);
+ if (!SANITIZER_GO && s->creation_stack_id == 0)
+ s->creation_stack_id = CurrentStackId(thr, pc);
+ s->mtx.Unlock();
+}
+
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexDestroy);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+ if (s == 0)
+ return;
+ if ((flagz & MutexFlagLinkerInit)
+ || s->IsFlagSet(MutexFlagLinkerInit)
+ || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+ // Destroy is no-op for linker-initialized mutexes.
+ s->mtx.Unlock();
+ return;
+ }
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ bool unlock_locked = false;
+ if (flags()->report_destroy_locked
+ && s->owner_tid != SyncVar::kInvalidTid
+ && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ unlock_locked = true;
+ }
+ u64 mid = s->GetId();
+ u64 last_lock = s->last_lock;
+ if (!unlock_locked)
+ s->Reset(thr->proc()); // must not reset it before the report is printed
+ s->mtx.Unlock();
+ if (unlock_locked) {
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ FastState last(last_lock);
+ RestoreStack(last.tid(), last.epoch(), &trace, 0);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+ if (s != 0) {
+ s->Reset(thr->proc());
+ s->mtx.Unlock();
+ }
+ }
+ thr->mset.Remove(mid);
+ // Imitate a memory write to catch unlock-destroy races.
+ // Do this outside of sync mutex, because it can report a race which locks
+ // sync mutexes.
+ if (IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
+ // s will be destroyed and freed in MetaMap::FreeBlock.
+}
+
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != thr->tid) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ } else {
+ s->mtx.ReadUnlock();
+ }
+ }
+}
+
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+ DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
+ thr->tid, addr, flagz, rec);
+ if (flagz & MutexFlagRecursiveLock)
+ CHECK_GT(rec, 0);
+ else
+ rec = 1;
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->UpdateFlags(flagz);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ bool report_double_lock = false;
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ s->last_lock = thr->fast_state.raw();
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_double_lock = true;
+ }
+ const bool first = s->recursion == 0;
+ s->recursion += rec;
+ if (first) {
+ StatInc(thr, StatMutexLock);
+ AcquireImpl(thr, pc, &s->clock);
+ AcquireImpl(thr, pc, &s->read_clock);
+ } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
+ StatInc(thr, StatMutexRecLock);
+ }
+ thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ bool pre_lock = false;
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ s = 0;
+ if (report_double_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ if (first && pre_lock && common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ int rec = 0;
+ bool report_bad_unlock = false;
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ } else {
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+ s->recursion -= rec;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ }
+ thr->mset.Del(s->GetId(), true);
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks && !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+ return rec;
+}
+
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ StatInc(thr, StatMutexReadLock);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ bool report_bad_lock = false;
+ if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_lock = true;
+ }
+ }
+ AcquireImpl(thr, pc, &s->clock);
+ s->last_lock = thr->fast_state.raw();
+ thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ bool pre_lock = false;
+ if (common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+ }
+ u64 mid = s->GetId();
+ s->mtx.ReadUnlock();
+ // Can't touch s after this point.
+ s = 0;
+ if (report_bad_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ if (pre_lock && common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexReadUnlock);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ bool report_bad_unlock = false;
+ if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ }
+ ReleaseImpl(thr, pc, &s->read_clock);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ thr->mset.Del(mid, false);
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ bool write = true;
+ bool report_bad_unlock = false;
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ // Seems to be read unlock.
+ write = false;
+ StatInc(thr, StatMutexReadUnlock);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ ReleaseImpl(thr, pc, &s->read_clock);
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ thr->mset.Del(s->GetId(), write);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->owner_tid = SyncVar::kInvalidTid;
+ s->recursion = 0;
+ s->mtx.Unlock();
+}
+
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+}
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+ if (!s)
+ return;
+ AcquireImpl(thr, pc, &s->clock);
+ s->mtx.ReadUnlock();
+}
+
+static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
+ if (tctx->status == ThreadStatusRunning) {
+ epoch = tctx->thr->fast_state.epoch();
+ tctx->thr->clock.NoteGlobalAcquire(epoch);
+ }
+ thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+}
+
+void AcquireGlobal(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AcquireGlobal\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateClockCallback, thr);
+}
+
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreAcquireImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+void Release(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Release %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+#if !SANITIZER_GO
+static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
+ if (tctx->status == ThreadStatusRunning)
+ epoch = tctx->thr->fast_state.epoch();
+ thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+}
+
+void AfterSleep(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AfterSleep %zx\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ thr->last_sleep_stack_id = CurrentStackId(thr, pc);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateSleepClockCallback, thr);
+}
+#endif
+
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->clock.acquire(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncAcquire);
+}
+
+void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncReleaseStoreAcquire);
+}
+
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.acq_rel(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncAcquire);
+ StatInc(thr, StatSyncRelease);
+}
+
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk && stk != 0xffffffff) {
+ rep.AddStack(StackDepotGet(stk), true);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ }
+ }
+ }
+ OutputReport(thr, rep);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_rtl_proc.cpp b/lib/tsan/tsan_rtl_proc.cpp
new file mode 100644
index 0000000000..def61cca14
--- /dev/null
+++ b/lib/tsan/tsan_rtl_proc.cpp
@@ -0,0 +1,60 @@
+//===-- tsan_rtl_proc.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+
+namespace __tsan {
+
+Processor *ProcCreate() {
+ void *mem = InternalAlloc(sizeof(Processor));
+ internal_memset(mem, 0, sizeof(Processor));
+ Processor *proc = new(mem) Processor;
+ proc->thr = nullptr;
+#if !SANITIZER_GO
+ AllocatorProcStart(proc);
+#endif
+ if (common_flags()->detect_deadlocks)
+ proc->dd_pt = ctx->dd->CreatePhysicalThread();
+ return proc;
+}
+
+void ProcDestroy(Processor *proc) {
+ CHECK_EQ(proc->thr, nullptr);
+#if !SANITIZER_GO
+ AllocatorProcFinish(proc);
+#endif
+ ctx->clock_alloc.FlushCache(&proc->clock_cache);
+ ctx->metamap.OnProcIdle(proc);
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyPhysicalThread(proc->dd_pt);
+ proc->~Processor();
+ InternalFree(proc);
+}
+
+void ProcWire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, nullptr);
+ CHECK_EQ(proc->thr, nullptr);
+ thr->proc1 = proc;
+ proc->thr = thr;
+}
+
+void ProcUnwire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, proc);
+ CHECK_EQ(proc->thr, thr);
+ thr->proc1 = nullptr;
+ proc->thr = nullptr;
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_rtl_report.cpp b/lib/tsan/tsan_rtl_report.cpp
new file mode 100644
index 0000000000..3354546c2a
--- /dev/null
+++ b/lib/tsan/tsan_rtl_report.cpp
@@ -0,0 +1,754 @@
+//===-- tsan_rtl_report.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_fd.h"
+
+namespace __tsan {
+
+using namespace __sanitizer;
+
+static ReportStack *SymbolizeStack(StackTrace trace);
+
+void TsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ // There is high probability that interceptors will check-fail as well,
+ // on the other hand there is no sense in processing interceptors
+ // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
+ Printf("FATAL: ThreadSanitizer CHECK failed: "
+ "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
+ file, line, cond, (uptr)v1, (uptr)v2);
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+ Die();
+}
+
+// Can be overriden by an application/test to intercept reports.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnReport(const ReportDesc *rep, bool suppressed);
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnReport(const ReportDesc *rep, bool suppressed) {
+ (void)rep;
+ return suppressed;
+}
+#endif
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_on_report(const ReportDesc *rep) {
+ (void)rep;
+}
+
+static void StackStripMain(SymbolizedStack *frames) {
+ SymbolizedStack *last_frame = nullptr;
+ SymbolizedStack *last_frame2 = nullptr;
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ last_frame2 = last_frame;
+ last_frame = cur;
+ }
+
+ if (last_frame2 == 0)
+ return;
+#if !SANITIZER_GO
+ const char *last = last_frame->info.function;
+ const char *last2 = last_frame2->info.function;
+ // Strip frame above 'main'
+ if (last2 && 0 == internal_strcmp(last2, "main")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip our internal thread start routine.
+ } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip global ctors init.
+ } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // If both are 0, then we probably just failed to symbolize.
+ } else if (last || last2) {
+ // Ensure that we recovered stack completely. Trimmed stack
+ // can actually happen if we do not instrument some code,
+ // so it's only a debug print. However we must try hard to not miss it
+ // due to our fault.
+ DPrintf("Bottom stack frame is missed\n");
+ }
+#else
+ // The last frame always point into runtime (gosched0, goexit0, runtime.main).
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+#endif
+}
+
+ReportStack *SymbolizeStackId(u32 stack_id) {
+ if (stack_id == 0)
+ return 0;
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
+}
+
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
+ return 0;
+ SymbolizedStack *top = nullptr;
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
+ uptr pc1 = pc;
+ // We obtain the return address, but we're interested in the previous
+ // instruction.
+ if ((pc & kExternalPCBit) == 0)
+ pc1 = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *ent = SymbolizeCode(pc1);
+ CHECK_NE(ent, 0);
+ SymbolizedStack *last = ent;
+ while (last->next) {
+ last->info.address = pc; // restore original pc for report
+ last = last->next;
+ }
+ last->info.address = pc; // restore original pc for report
+ last->next = top;
+ top = ent;
+ }
+ StackStripMain(top);
+
+ ReportStack *stack = ReportStack::New();
+ stack->frames = top;
+ return stack;
+}
+
+ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
+ ctx->thread_registry->CheckLocked();
+ void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
+ rep_ = new(mem) ReportDesc;
+ rep_->typ = typ;
+ rep_->tag = tag;
+ ctx->report_mtx.Lock();
+}
+
+ScopedReportBase::~ScopedReportBase() {
+ ctx->report_mtx.Unlock();
+ DestroyAndFree(rep_);
+ rep_ = nullptr;
+}
+
+void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
+ ReportStack **rs = rep_->stacks.PushBack();
+ *rs = SymbolizeStack(stack);
+ (*rs)->suppressable = suppressable;
+}
+
+void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
+ StackTrace stack, const MutexSet *mset) {
+ void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
+ ReportMop *mop = new(mem) ReportMop;
+ rep_->mops.PushBack(mop);
+ mop->tid = s.tid();
+ mop->addr = addr + s.addr0();
+ mop->size = s.size();
+ mop->write = s.IsWrite();
+ mop->atomic = s.IsAtomic();
+ mop->stack = SymbolizeStack(stack);
+ mop->external_tag = external_tag;
+ if (mop->stack)
+ mop->stack->suppressable = true;
+ for (uptr i = 0; i < mset->Size(); i++) {
+ MutexSet::Desc d = mset->Get(i);
+ u64 mid = this->AddMutex(d.id);
+ ReportMopMutex mtx = {mid, d.write};
+ mop->mset.PushBack(mtx);
+ }
+}
+
+void ScopedReportBase::AddUniqueTid(int unique_tid) {
+ rep_->unique_tids.PushBack(unique_tid);
+}
+
+void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
+ for (uptr i = 0; i < rep_->threads.Size(); i++) {
+ if ((u32)rep_->threads[i]->id == tctx->tid)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
+ ReportThread *rt = new(mem) ReportThread;
+ rep_->threads.PushBack(rt);
+ rt->id = tctx->tid;
+ rt->os_id = tctx->os_id;
+ rt->running = (tctx->status == ThreadStatusRunning);
+ rt->name = internal_strdup(tctx->name);
+ rt->parent_tid = tctx->parent_tid;
+ rt->thread_type = tctx->thread_type;
+ rt->stack = 0;
+ rt->stack = SymbolizeStackId(tctx->creation_stack_id);
+ if (rt->stack)
+ rt->stack->suppressable = suppressable;
+}
+
+#if !SANITIZER_GO
+static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
+ int unique_id = *(int *)arg;
+ return tctx->unique_id == (u32)unique_id;
+}
+
+static ThreadContext *FindThreadByUidLocked(int unique_id) {
+ ctx->thread_registry->CheckLocked();
+ return static_cast<ThreadContext *>(
+ ctx->thread_registry->FindThreadContextLocked(
+ FindThreadByUidLockedCallback, &unique_id));
+}
+
+static ThreadContext *FindThreadByTidLocked(int tid) {
+ ctx->thread_registry->CheckLocked();
+ return static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(tid));
+}
+
+static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
+ uptr addr = (uptr)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status != ThreadStatusRunning)
+ return false;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
+ (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
+}
+
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
+ ctx->thread_registry->CheckLocked();
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
+ (void*)addr));
+ if (!tctx)
+ return 0;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
+ return tctx;
+}
+#endif
+
+void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
+#if !SANITIZER_GO
+ if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ AddThread(tctx, suppressable);
+#endif
+}
+
+void ScopedReportBase::AddMutex(const SyncVar *s) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == s->uid)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+ ReportMutex *rm = new(mem) ReportMutex;
+ rep_->mutexes.PushBack(rm);
+ rm->id = s->uid;
+ rm->addr = s->addr;
+ rm->destroyed = false;
+ rm->stack = SymbolizeStackId(s->creation_stack_id);
+}
+
+u64 ScopedReportBase::AddMutex(u64 id) {
+ u64 uid = 0;
+ u64 mid = id;
+ uptr addr = SyncVar::SplitId(id, &uid);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ mid = s->uid;
+ AddMutex(s);
+ } else {
+ AddDeadMutex(id);
+ }
+ if (s)
+ s->mtx.Unlock();
+ return mid;
+}
+
+void ScopedReportBase::AddDeadMutex(u64 id) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == id)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+ ReportMutex *rm = new(mem) ReportMutex;
+ rep_->mutexes.PushBack(rm);
+ rm->id = id;
+ rm->addr = 0;
+ rm->destroyed = true;
+ rm->stack = 0;
+}
+
+void ScopedReportBase::AddLocation(uptr addr, uptr size) {
+ if (addr == 0)
+ return;
+#if !SANITIZER_GO
+ int fd = -1;
+ int creat_tid = kInvalidTid;
+ u32 creat_stack = 0;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
+ ReportLocation *loc = ReportLocation::New(ReportLocationFD);
+ loc->fd = fd;
+ loc->tid = creat_tid;
+ loc->stack = SymbolizeStackId(creat_stack);
+ rep_->locs.PushBack(loc);
+ ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void*)addr)) {
+ void *block_begin = a->GetBlockBegin((void*)addr);
+ if (block_begin)
+ b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b != 0) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->tid);
+ ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ loc->heap_chunk_size = b->siz;
+ loc->external_tag = b->tag;
+ loc->tid = tctx ? tctx->tid : b->tid;
+ loc->stack = SymbolizeStackId(b->stk);
+ rep_->locs.PushBack(loc);
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ bool is_stack = false;
+ if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
+ ReportLocation *loc =
+ ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
+ loc->tid = tctx->tid;
+ rep_->locs.PushBack(loc);
+ AddThread(tctx);
+ }
+#endif
+ if (ReportLocation *loc = SymbolizeData(addr)) {
+ loc->suppressable = true;
+ rep_->locs.PushBack(loc);
+ return;
+ }
+}
+
+#if !SANITIZER_GO
+void ScopedReportBase::AddSleep(u32 stack_id) {
+ rep_->sleep = SymbolizeStackId(stack_id);
+}
+#endif
+
+void ScopedReportBase::SetCount(int count) { rep_->count = count; }
+
+const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
+
+ScopedReport::ScopedReport(ReportType typ, uptr tag)
+ : ScopedReportBase(typ, tag) {}
+
+ScopedReport::~ScopedReport() {}
+
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset, uptr *tag) {
+ // This function restores stack trace and mutex set for the thread/epoch.
+ // It does so by getting stack trace and mutex set at the beginning of
+ // trace part, and then replaying the trace till the given epoch.
+ Trace* trace = ThreadTrace(tid);
+ ReadLock l(&trace->mtx);
+ const int partidx = (epoch / kTracePartSize) % TraceParts();
+ TraceHeader* hdr = &trace->headers[partidx];
+ if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
+ return;
+ CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
+ const u64 epoch0 = RoundDown(epoch, TraceSize());
+ const u64 eend = epoch % TraceSize();
+ const u64 ebegin = RoundDown(eend, kTracePartSize);
+ DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
+ tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
+ Vector<uptr> stack;
+ stack.Resize(hdr->stack0.size + 64);
+ for (uptr i = 0; i < hdr->stack0.size; i++) {
+ stack[i] = hdr->stack0.trace[i];
+ DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
+ }
+ if (mset)
+ *mset = hdr->mset0;
+ uptr pos = hdr->stack0.size;
+ Event *events = (Event*)GetThreadTrace(tid);
+ for (uptr i = ebegin; i <= eend; i++) {
+ Event ev = events[i];
+ EventType typ = (EventType)(ev >> kEventPCBits);
+ uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
+ DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
+ if (typ == EventTypeMop) {
+ stack[pos] = pc;
+ } else if (typ == EventTypeFuncEnter) {
+ if (stack.Size() < pos + 2)
+ stack.Resize(pos + 2);
+ stack[pos++] = pc;
+ } else if (typ == EventTypeFuncExit) {
+ if (pos > 0)
+ pos--;
+ }
+ if (mset) {
+ if (typ == EventTypeLock) {
+ mset->Add(pc, true, epoch0 + i);
+ } else if (typ == EventTypeUnlock) {
+ mset->Del(pc, true);
+ } else if (typ == EventTypeRLock) {
+ mset->Add(pc, false, epoch0 + i);
+ } else if (typ == EventTypeRUnlock) {
+ mset->Del(pc, false);
+ }
+ }
+ for (uptr j = 0; j <= pos; j++)
+ DPrintf2(" #%zu: %zx\n", j, stack[j]);
+ }
+ if (pos == 0 && stack[0] == 0)
+ return;
+ pos++;
+ stk->Init(&stack[0], pos);
+ ExtractTagFromStack(stk, tag);
+}
+
+static bool FindRacyStacks(const RacyStacks &hash) {
+ for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+ if (hash == ctx->racy_stacks[i]) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
+ if (!flags()->suppress_equal_stacks)
+ return false;
+ RacyStacks hash;
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (FindRacyStacks(hash))
+ return true;
+ }
+ Lock lock(&ctx->racy_mtx);
+ if (FindRacyStacks(hash))
+ return true;
+ ctx->racy_stacks.PushBack(hash);
+ return false;
+}
+
+static bool FindRacyAddress(const RacyAddress &ra0) {
+ for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+ RacyAddress ra2 = ctx->racy_addresses[i];
+ uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+ uptr minend = min(ra0.addr_max, ra2.addr_max);
+ if (maxbeg < minend) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
+ if (!flags()->suppress_equal_addresses)
+ return false;
+ RacyAddress ra0 = {addr_min, addr_max};
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (FindRacyAddress(ra0))
+ return true;
+ }
+ Lock lock(&ctx->racy_mtx);
+ if (FindRacyAddress(ra0))
+ return true;
+ ctx->racy_addresses.PushBack(ra0);
+ return false;
+}
+
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
+ if (!flags()->report_bugs || thr->suppress_reports)
+ return false;
+ atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
+ const ReportDesc *rep = srep.GetReport();
+ CHECK_EQ(thr->current_report, nullptr);
+ thr->current_report = rep;
+ Suppression *supp = 0;
+ uptr pc_or_addr = 0;
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
+ if (pc_or_addr != 0) {
+ Lock lock(&ctx->fired_suppressions_mtx);
+ FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
+ ctx->fired_suppressions.push_back(s);
+ }
+ {
+ bool old_is_freeing = thr->is_freeing;
+ thr->is_freeing = false;
+ bool suppressed = OnReport(rep, pc_or_addr != 0);
+ thr->is_freeing = old_is_freeing;
+ if (suppressed) {
+ thr->current_report = nullptr;
+ return false;
+ }
+ }
+ PrintReport(rep);
+ __tsan_on_report(rep);
+ ctx->nreported++;
+ if (flags()->halt_on_error)
+ Die();
+ thr->current_report = nullptr;
+ return true;
+}
+
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != type)
+ continue;
+ for (uptr j = 0; j < trace.size; j++) {
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (trace.trace[j] == s->pc_or_addr) {
+ if (s->supp)
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != type)
+ continue;
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (addr == s->pc_or_addr) {
+ if (s->supp)
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
+ Shadow s0(thr->racy_state[0]);
+ Shadow s1(thr->racy_state[1]);
+ CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
+ if (!s0.IsAtomic() && !s1.IsAtomic())
+ return true;
+ if (s0.IsAtomic() && s1.IsFreed())
+ return true;
+ if (s1.IsAtomic() && thr->is_freeing)
+ return true;
+ return false;
+}
+
+void ReportRace(ThreadState *thr) {
+ CheckNoLocks(thr);
+
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore;
+
+ if (!flags()->report_bugs)
+ return;
+ if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ return;
+
+ bool freed = false;
+ {
+ Shadow s(thr->racy_state[1]);
+ freed = s.GetFreedAndReset();
+ thr->racy_state[1] = s.raw();
+ }
+
+ uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
+ uptr addr_min = 0;
+ uptr addr_max = 0;
+ {
+ uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
+ uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
+ uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
+ uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
+ addr_min = min(a0, a1);
+ addr_max = max(e0, e1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
+ return;
+ }
+ if (HandleRacyAddress(thr, addr_min, addr_max))
+ return;
+
+ ReportType typ = ReportTypeRace;
+ if (thr->is_vptr_access && freed)
+ typ = ReportTypeVptrUseAfterFree;
+ else if (thr->is_vptr_access)
+ typ = ReportTypeVptrRace;
+ else if (freed)
+ typ = ReportTypeUseAfterFree;
+
+ if (IsFiredSuppression(ctx, typ, addr))
+ return;
+
+ const uptr kMop = 2;
+ VarSizeStackTrace traces[kMop];
+ uptr tags[kMop] = {kExternalTagNone};
+ uptr toppc = TraceTopPC(thr);
+ if (toppc >> kEventPCBits) {
+ // This is a work-around for a known issue.
+ // The scenario where this happens is rather elaborate and requires
+ // an instrumented __sanitizer_report_error_summary callback and
+ // a __tsan_symbolize_external callback and a race during a range memory
+ // access larger than 8 bytes. MemoryAccessRange adds the current PC to
+ // the trace and starts processing memory accesses. A first memory access
+ // triggers a race, we report it and call the instrumented
+ // __sanitizer_report_error_summary, which adds more stuff to the trace
+ // since it is intrumented. Then a second memory access in MemoryAccessRange
+ // also triggers a race and we get here and call TraceTopPC to get the
+ // current PC, however now it contains some unrelated events from the
+ // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
+ // event. Later we subtract -1 from it (in GetPreviousInstructionPc)
+ // and the resulting PC has kExternalPCBit set, so we pass it to
+ // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
+ // rights to crash since the PC is completely bogus.
+ // test/tsan/double_race.cpp contains a test case for this.
+ toppc = 0;
+ }
+ ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
+ if (IsFiredSuppression(ctx, typ, traces[0]))
+ return;
+
+ // MutexSet is too large to live on stack.
+ Vector<u64> mset_buffer;
+ mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
+ MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
+
+ Shadow s2(thr->racy_state[1]);
+ RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
+ if (IsFiredSuppression(ctx, typ, traces[1]))
+ return;
+
+ if (HandleRacyStacks(thr, traces))
+ return;
+
+ // If any of the accesses has a tag, treat this as an "external" race.
+ uptr tag = kExternalTagNone;
+ for (uptr i = 0; i < kMop; i++) {
+ if (tags[i] != kExternalTagNone) {
+ typ = ReportTypeExternalRace;
+ tag = tags[i];
+ break;
+ }
+ }
+
+ ThreadRegistryLock l0(ctx->thread_registry);
+ ScopedReport rep(typ, tag);
+ for (uptr i = 0; i < kMop; i++) {
+ Shadow s(thr->racy_state[i]);
+ rep.AddMemoryAccess(addr, tags[i], s, traces[i],
+ i == 0 ? &thr->mset : mset2);
+ }
+
+ for (uptr i = 0; i < kMop; i++) {
+ FastState s(thr->racy_state[i]);
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(s.tid()));
+ if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
+ continue;
+ rep.AddThread(tctx);
+ }
+
+ rep.AddLocation(addr_min, addr_max - addr_min);
+
+#if !SANITIZER_GO
+ {
+ Shadow s(thr->racy_state[1]);
+ if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
+ }
+#endif
+
+ if (!OutputReport(thr, rep))
+ return;
+
+}
+
+void PrintCurrentStack(ThreadState *thr, uptr pc) {
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ PrintStack(SymbolizeStack(trace));
+}
+
+// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
+// __sanitizer_print_stack_trace exists in the actual unwinded stack, but
+// tail-call to PrintCurrentStackSlow breaks this assumption because
+// __sanitizer_print_stack_trace disappears after tail-call.
+// However, this solution is not reliable enough, please see dvyukov's comment
+// http://reviews.llvm.org/D19148#406208
+// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
+ALWAYS_INLINE
+void PrintCurrentStackSlow(uptr pc) {
+#if !SANITIZER_GO
+ uptr bp = GET_CURRENT_FRAME();
+ BufferedStackTrace *ptrace =
+ new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+ BufferedStackTrace();
+ ptrace->Unwind(pc, bp, nullptr, false);
+
+ for (uptr i = 0; i < ptrace->size / 2; i++) {
+ uptr tmp = ptrace->trace_buffer[i];
+ ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
+ ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
+ }
+ PrintStack(SymbolizeStack(*ptrace));
+#endif
+}
+
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+} // extern "C"
diff --git a/lib/tsan/tsan_rtl_thread.cpp b/lib/tsan/tsan_rtl_thread.cpp
new file mode 100644
index 0000000000..d80146735e
--- /dev/null
+++ b/lib/tsan/tsan_rtl_thread.cpp
@@ -0,0 +1,462 @@
+//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+
+namespace __tsan {
+
+// ThreadContext implementation.
+
+ThreadContext::ThreadContext(int tid)
+ : ThreadContextBase(tid)
+ , thr()
+ , sync()
+ , epoch0()
+ , epoch1() {
+}
+
+#if !SANITIZER_GO
+ThreadContext::~ThreadContext() {
+}
+#endif
+
+void ThreadContext::OnDead() {
+ CHECK_EQ(sync.size(), 0);
+}
+
+void ThreadContext::OnJoined(void *arg) {
+ ThreadState *caller_thr = static_cast<ThreadState *>(arg);
+ AcquireImpl(caller_thr, 0, &sync);
+ sync.Reset(&caller_thr->proc()->clock_cache);
+}
+
+struct OnCreatedArgs {
+ ThreadState *thr;
+ uptr pc;
+};
+
+void ThreadContext::OnCreated(void *arg) {
+ thr = 0;
+ if (tid == 0)
+ return;
+ OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ if (!args->thr) // GCD workers don't have a parent thread.
+ return;
+ args->thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(args->thr, 0, &sync);
+ creation_stack_id = CurrentStackId(args->thr, args->pc);
+ if (reuse_count == 0)
+ StatInc(args->thr, StatThreadMaxTid);
+}
+
+void ThreadContext::OnReset() {
+ CHECK_EQ(sync.size(), 0);
+ uptr trace_p = GetThreadTrace(tid);
+ ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
+ //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
+}
+
+void ThreadContext::OnDetached(void *arg) {
+ ThreadState *thr1 = static_cast<ThreadState*>(arg);
+ sync.Reset(&thr1->proc()->clock_cache);
+}
+
+struct OnStartedArgs {
+ ThreadState *thr;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
+ thr = args->thr;
+ // RoundUp so that one trace part does not contain events
+ // from different threads.
+ epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
+ epoch1 = (u64)-1;
+ new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
+ args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+#if !SANITIZER_GO
+ thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
+ thr->shadow_stack_pos = thr->shadow_stack;
+ thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
+#else
+ // Setup dynamic shadow stack.
+ const int kInitStackSize = 8;
+ thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
+ kInitStackSize * sizeof(uptr));
+ thr->shadow_stack_pos = thr->shadow_stack;
+ thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
+#endif
+ if (common_flags()->detect_deadlocks)
+ thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ // Commit switch to the new part of the trace.
+ // TraceAddEvent will reset stack0/mset0 in the new part for us.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+
+ thr->fast_synch_epoch = epoch0;
+ AcquireImpl(thr, 0, &sync);
+ StatInc(thr, StatSyncAcquire);
+ sync.Reset(&thr->proc()->clock_cache);
+ thr->is_inited = true;
+ DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+ "tls_addr=%zx tls_size=%zx\n",
+ tid, (uptr)epoch0, args->stk_addr, args->stk_size,
+ args->tls_addr, args->tls_size);
+}
+
+void ThreadContext::OnFinished() {
+#if SANITIZER_GO
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+#endif
+ if (!detached) {
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, 0, &sync);
+ }
+ epoch1 = thr->fast_state.epoch();
+
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+#if !SANITIZER_GO
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+#endif
+#if !SANITIZER_GO
+ PlatformCleanUpThreadState(thr);
+#endif
+ thr->~ThreadState();
+#if TSAN_COLLECT_STATS
+ StatAggregate(ctx->stat, thr->stat);
+#endif
+ thr = 0;
+}
+
+#if !SANITIZER_GO
+struct ThreadLeak {
+ ThreadContext *tctx;
+ int count;
+};
+
+static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
+ Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->detached || tctx->status != ThreadStatusFinished)
+ return;
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
+ leaks[i].count++;
+ return;
+ }
+ }
+ ThreadLeak leak = {tctx, 1};
+ leaks.PushBack(leak);
+}
+#endif
+
+#if !SANITIZER_GO
+static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
+ if (tctx->tid == 0) {
+ Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
+ } else {
+ Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
+ " created at:\n", tctx->tid, tctx->name);
+ PrintStack(SymbolizeStackId(tctx->creation_stack_id));
+ }
+ Printf(" One of the following ignores was not ended"
+ " (in order of probability)\n");
+ for (uptr i = 0; i < set->Size(); i++) {
+ Printf(" Ignore was enabled at:\n");
+ PrintStack(SymbolizeStackId(set->At(i)));
+ }
+ Die();
+}
+
+static void ThreadCheckIgnore(ThreadState *thr) {
+ if (ctx->after_multithreaded_fork)
+ return;
+ if (thr->ignore_reads_and_writes)
+ ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
+ if (thr->ignore_sync)
+ ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
+}
+#else
+static void ThreadCheckIgnore(ThreadState *thr) {}
+#endif
+
+void ThreadFinalize(ThreadState *thr) {
+ ThreadCheckIgnore(thr);
+#if !SANITIZER_GO
+ if (!flags()->report_thread_leaks)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ Vector<ThreadLeak> leaks;
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
+ MaybeReportThreadLeak, &leaks);
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ ScopedReport rep(ReportTypeThreadLeak);
+ rep.AddThread(leaks[i].tctx, true);
+ rep.SetCount(leaks[i].count);
+ OutputReport(thr, rep);
+ }
+#endif
+}
+
+int ThreadCount(ThreadState *thr) {
+ uptr result;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+ return (int)result;
+}
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+ StatInc(thr, StatThreadCreate);
+ OnCreatedArgs args = { thr, pc };
+ u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
+ int tid =
+ ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
+ StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
+ return tid;
+}
+
+void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+ ThreadType thread_type) {
+ uptr stk_addr = 0;
+ uptr stk_size = 0;
+ uptr tls_addr = 0;
+ uptr tls_size = 0;
+#if !SANITIZER_GO
+ if (thread_type != ThreadType::Fiber)
+ GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
+
+ if (tid) {
+ if (stk_addr && stk_size)
+ MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
+
+ if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
+ }
+#endif
+
+ ThreadRegistry *tr = ctx->thread_registry;
+ OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
+ tr->StartThread(tid, os_id, thread_type, &args);
+
+ tr->Lock();
+ thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
+ tr->Unlock();
+
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ }
+#endif
+}
+
+void ThreadFinish(ThreadState *thr) {
+ ThreadCheckIgnore(thr);
+ StatInc(thr, StatThreadFinish);
+ if (thr->stk_addr && thr->stk_size)
+ DontNeedShadowFor(thr->stk_addr, thr->stk_size);
+ if (thr->tls_addr && thr->tls_size)
+ DontNeedShadowFor(thr->tls_addr, thr->tls_size);
+ thr->is_dead = true;
+ ctx->thread_registry->FinishThread(thr->tid);
+}
+
+struct ConsumeThreadContext {
+ uptr uid;
+ ThreadContextBase *tctx;
+};
+
+static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
+ ConsumeThreadContext *findCtx = (ConsumeThreadContext *)arg;
+ if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) {
+ if (findCtx->tctx) {
+ // Ensure that user_id is unique. If it's not the case we are screwed.
+ // Something went wrong before, but now there is no way to recover.
+ // Returning a wrong thread is not an option, it may lead to very hard
+ // to debug false positives (e.g. if we join a wrong thread).
+ Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid);
+ Die();
+ }
+ findCtx->tctx = tctx;
+ tctx->user_id = 0;
+ }
+ return false;
+}
+
+int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
+ ConsumeThreadContext findCtx = {uid, nullptr};
+ ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
+ int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid;
+ DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
+ return tid;
+}
+
+void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
+ ctx->thread_registry->JoinThread(tid, thr);
+}
+
+void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ ctx->thread_registry->DetachThread(tid, thr);
+}
+
+void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ ctx->thread_registry->SetThreadUserId(tid, uid);
+}
+
+void ThreadSetName(ThreadState *thr, const char *name) {
+ ctx->thread_registry->SetThreadName(thr->tid, name);
+}
+
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write) {
+ if (size == 0)
+ return;
+
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
+ thr->tid, (void*)pc, (void*)addr,
+ (int)size, is_write);
+
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsAppMem(addr + size - 1)) {
+ Printf("Access to non app mem %zx\n", addr + size - 1);
+ DCHECK(IsAppMem(addr + size - 1));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+ if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
+ Printf("Bad shadow addr %p (%zx)\n",
+ shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
+ DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
+ }
+#endif
+
+ StatInc(thr, StatMopRange);
+
+ if (*shadow_mem == kShadowRodata) {
+ DCHECK(!is_write);
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMopRangeRodata);
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+
+ bool unaligned = (addr % kShadowCell) != 0;
+
+ // Handle unaligned beginning, if any.
+ for (; addr % kShadowCell && size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ }
+ if (unaligned)
+ shadow_mem += kShadowCnt;
+ // Handle middle part, if any.
+ for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
+ int const kAccessSizeLog = 3;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ shadow_mem += kShadowCnt;
+ }
+ // Handle ending, if any.
+ for (; size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ }
+}
+
+#if !SANITIZER_GO
+void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
+ Processor *proc = from->proc();
+ ProcUnwire(proc, from);
+ ProcWire(proc, to);
+ set_cur_thread(to);
+}
+
+ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
+ void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadState));
+ ThreadState *fiber = static_cast<ThreadState *>(mem);
+ internal_memset(fiber, 0, sizeof(*fiber));
+ int tid = ThreadCreate(thr, pc, 0, true);
+ FiberSwitchImpl(thr, fiber);
+ ThreadStart(fiber, tid, 0, ThreadType::Fiber);
+ FiberSwitchImpl(fiber, thr);
+ return fiber;
+}
+
+void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
+ FiberSwitchImpl(thr, fiber);
+ ThreadFinish(fiber);
+ FiberSwitchImpl(fiber, thr);
+ internal_free(fiber);
+}
+
+void FiberSwitch(ThreadState *thr, uptr pc,
+ ThreadState *fiber, unsigned flags) {
+ if (!(flags & FiberSwitchFlagNoSync))
+ Release(thr, pc, (uptr)fiber);
+ FiberSwitchImpl(thr, fiber);
+ if (!(flags & FiberSwitchFlagNoSync))
+ Acquire(fiber, pc, (uptr)fiber);
+}
+#endif
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_stack_trace.cpp b/lib/tsan/tsan_stack_trace.cpp
new file mode 100644
index 0000000000..403a21ae4a
--- /dev/null
+++ b/lib/tsan/tsan_stack_trace.cpp
@@ -0,0 +1,63 @@
+//===-- tsan_stack_trace.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+VarSizeStackTrace::VarSizeStackTrace()
+ : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
+
+VarSizeStackTrace::~VarSizeStackTrace() {
+ ResizeBuffer(0);
+}
+
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+ if (trace_buffer) {
+ internal_free(trace_buffer);
+ }
+ trace_buffer =
+ (new_size > 0)
+ ? (uptr *)internal_alloc(MBlockStackTrace,
+ new_size * sizeof(trace_buffer[0]))
+ : nullptr;
+ trace = trace_buffer;
+ size = new_size;
+}
+
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ ResizeBuffer(cnt + !!extra_top_pc);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+}
+
+void VarSizeStackTrace::ReverseOrder() {
+ for (u32 i = 0; i < (size >> 1); i++)
+ Swap(trace_buffer[i], trace_buffer[size - 1 - i]);
+}
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ uptr top = 0;
+ uptr bottom = 0;
+ if (StackTrace::WillUseFastUnwind(request_fast)) {
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ Unwind(max_depth, pc, bp, nullptr, top, bottom, true);
+ } else
+ Unwind(max_depth, pc, 0, context, 0, 0, false);
+}
+#endif // SANITIZER_GO
diff --git a/lib/tsan/tsan_stack_trace.h b/lib/tsan/tsan_stack_trace.h
new file mode 100644
index 0000000000..3eb8ce156e
--- /dev/null
+++ b/lib/tsan/tsan_stack_trace.h
@@ -0,0 +1,42 @@
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+ uptr *trace_buffer; // Owned.
+
+ VarSizeStackTrace();
+ ~VarSizeStackTrace();
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+ // Reverses the current stack trace order, the top frame goes to the bottom,
+ // the last frame goes to the top.
+ void ReverseOrder();
+
+ private:
+ void ResizeBuffer(uptr new_size);
+
+ VarSizeStackTrace(const VarSizeStackTrace &);
+ void operator=(const VarSizeStackTrace &);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STACK_TRACE_H
diff --git a/lib/tsan/tsan_stat.cpp b/lib/tsan/tsan_stat.cpp
new file mode 100644
index 0000000000..78f3cce913
--- /dev/null
+++ b/lib/tsan/tsan_stat.cpp
@@ -0,0 +1,186 @@
+//===-- tsan_stat.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stat.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+#if TSAN_COLLECT_STATS
+
+void StatAggregate(u64 *dst, u64 *src) {
+ for (int i = 0; i < StatCnt; i++)
+ dst[i] += src[i];
+}
+
+void StatOutput(u64 *stat) {
+ stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
+
+ static const char *name[StatCnt] = {};
+ name[StatMop] = "Memory accesses ";
+ name[StatMopRead] = " Including reads ";
+ name[StatMopWrite] = " writes ";
+ name[StatMop1] = " Including size 1 ";
+ name[StatMop2] = " size 2 ";
+ name[StatMop4] = " size 4 ";
+ name[StatMop8] = " size 8 ";
+ name[StatMopSame] = " Including same ";
+ name[StatMopIgnored] = " Including ignored ";
+ name[StatMopRange] = " Including range ";
+ name[StatMopRodata] = " Including .rodata ";
+ name[StatMopRangeRodata] = " Including .rodata range ";
+ name[StatShadowProcessed] = "Shadow processed ";
+ name[StatShadowZero] = " Including empty ";
+ name[StatShadowNonZero] = " Including non empty ";
+ name[StatShadowSameSize] = " Including same size ";
+ name[StatShadowIntersect] = " intersect ";
+ name[StatShadowNotIntersect] = " not intersect ";
+ name[StatShadowSameThread] = " Including same thread ";
+ name[StatShadowAnotherThread] = " another thread ";
+ name[StatShadowReplace] = " Including evicted ";
+
+ name[StatFuncEnter] = "Function entries ";
+ name[StatFuncExit] = "Function exits ";
+ name[StatEvents] = "Events collected ";
+
+ name[StatThreadCreate] = "Total threads created ";
+ name[StatThreadFinish] = " threads finished ";
+ name[StatThreadReuse] = " threads reused ";
+ name[StatThreadMaxTid] = " max tid ";
+ name[StatThreadMaxAlive] = " max alive threads ";
+
+ name[StatMutexCreate] = "Mutexes created ";
+ name[StatMutexDestroy] = " destroyed ";
+ name[StatMutexLock] = " lock ";
+ name[StatMutexUnlock] = " unlock ";
+ name[StatMutexRecLock] = " recursive lock ";
+ name[StatMutexRecUnlock] = " recursive unlock ";
+ name[StatMutexReadLock] = " read lock ";
+ name[StatMutexReadUnlock] = " read unlock ";
+
+ name[StatSyncCreated] = "Sync objects created ";
+ name[StatSyncDestroyed] = " destroyed ";
+ name[StatSyncAcquire] = " acquired ";
+ name[StatSyncRelease] = " released ";
+
+ name[StatClockAcquire] = "Clock acquire ";
+ name[StatClockAcquireEmpty] = " empty clock ";
+ name[StatClockAcquireFastRelease] = " fast from release-store ";
+ name[StatClockAcquireFull] = " full (slow) ";
+ name[StatClockAcquiredSomething] = " acquired something ";
+ name[StatClockRelease] = "Clock release ";
+ name[StatClockReleaseResize] = " resize ";
+ name[StatClockReleaseFast] = " fast ";
+ name[StatClockReleaseSlow] = " dirty overflow (slow) ";
+ name[StatClockReleaseFull] = " full (slow) ";
+ name[StatClockReleaseAcquired] = " was acquired ";
+ name[StatClockReleaseClearTail] = " clear tail ";
+ name[StatClockStore] = "Clock release store ";
+ name[StatClockStoreResize] = " resize ";
+ name[StatClockStoreFast] = " fast ";
+ name[StatClockStoreFull] = " slow ";
+ name[StatClockStoreTail] = " clear tail ";
+ name[StatClockAcquireRelease] = "Clock acquire-release ";
+
+ name[StatAtomic] = "Atomic operations ";
+ name[StatAtomicLoad] = " Including load ";
+ name[StatAtomicStore] = " store ";
+ name[StatAtomicExchange] = " exchange ";
+ name[StatAtomicFetchAdd] = " fetch_add ";
+ name[StatAtomicFetchSub] = " fetch_sub ";
+ name[StatAtomicFetchAnd] = " fetch_and ";
+ name[StatAtomicFetchOr] = " fetch_or ";
+ name[StatAtomicFetchXor] = " fetch_xor ";
+ name[StatAtomicFetchNand] = " fetch_nand ";
+ name[StatAtomicCAS] = " compare_exchange ";
+ name[StatAtomicFence] = " fence ";
+ name[StatAtomicRelaxed] = " Including relaxed ";
+ name[StatAtomicConsume] = " consume ";
+ name[StatAtomicAcquire] = " acquire ";
+ name[StatAtomicRelease] = " release ";
+ name[StatAtomicAcq_Rel] = " acq_rel ";
+ name[StatAtomicSeq_Cst] = " seq_cst ";
+ name[StatAtomic1] = " Including size 1 ";
+ name[StatAtomic2] = " size 2 ";
+ name[StatAtomic4] = " size 4 ";
+ name[StatAtomic8] = " size 8 ";
+ name[StatAtomic16] = " size 16 ";
+
+ name[StatAnnotation] = "Dynamic annotations ";
+ name[StatAnnotateHappensBefore] = " HappensBefore ";
+ name[StatAnnotateHappensAfter] = " HappensAfter ";
+ name[StatAnnotateCondVarSignal] = " CondVarSignal ";
+ name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll ";
+ name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB ";
+ name[StatAnnotateCondVarWait] = " CondVarWait ";
+ name[StatAnnotateRWLockCreate] = " RWLockCreate ";
+ name[StatAnnotateRWLockCreateStatic] = " StatAnnotateRWLockCreateStatic ";
+ name[StatAnnotateRWLockDestroy] = " RWLockDestroy ";
+ name[StatAnnotateRWLockAcquired] = " RWLockAcquired ";
+ name[StatAnnotateRWLockReleased] = " RWLockReleased ";
+ name[StatAnnotateTraceMemory] = " TraceMemory ";
+ name[StatAnnotateFlushState] = " FlushState ";
+ name[StatAnnotateNewMemory] = " NewMemory ";
+ name[StatAnnotateNoOp] = " NoOp ";
+ name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces ";
+ name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection ";
+ name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar ";
+ name[StatAnnotatePCQGet] = " PCQGet ";
+ name[StatAnnotatePCQPut] = " PCQPut ";
+ name[StatAnnotatePCQDestroy] = " PCQDestroy ";
+ name[StatAnnotatePCQCreate] = " PCQCreate ";
+ name[StatAnnotateExpectRace] = " ExpectRace ";
+ name[StatAnnotateBenignRaceSized] = " BenignRaceSized ";
+ name[StatAnnotateBenignRace] = " BenignRace ";
+ name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin ";
+ name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd ";
+ name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin ";
+ name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd ";
+ name[StatAnnotateIgnoreSyncBegin] = " IgnoreSyncBegin ";
+ name[StatAnnotateIgnoreSyncEnd] = " IgnoreSyncEnd ";
+ name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
+ name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
+ name[StatAnnotateThreadName] = " ThreadName ";
+ name[Stat__tsan_mutex_create] = " __tsan_mutex_create ";
+ name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy ";
+ name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock ";
+ name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock ";
+ name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock ";
+ name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock ";
+ name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal ";
+ name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal ";
+ name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert ";
+ name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert ";
+
+ name[StatMtxTotal] = "Contentionz ";
+ name[StatMtxTrace] = " Trace ";
+ name[StatMtxThreads] = " Threads ";
+ name[StatMtxReport] = " Report ";
+ name[StatMtxSyncVar] = " SyncVar ";
+ name[StatMtxSyncTab] = " SyncTab ";
+ name[StatMtxSlab] = " Slab ";
+ name[StatMtxAtExit] = " Atexit ";
+ name[StatMtxAnnotations] = " Annotations ";
+ name[StatMtxMBlock] = " MBlock ";
+ name[StatMtxDeadlockDetector] = " DeadlockDetector ";
+ name[StatMtxFired] = " FiredSuppressions ";
+ name[StatMtxRacy] = " RacyStacks ";
+ name[StatMtxFD] = " FD ";
+ name[StatMtxGlobalProc] = " GlobalProc ";
+
+ Printf("Statistics:\n");
+ for (int i = 0; i < StatCnt; i++)
+ Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
+}
+
+#endif
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_stat.h b/lib/tsan/tsan_stat.h
new file mode 100644
index 0000000000..8b26a59bb2
--- /dev/null
+++ b/lib/tsan/tsan_stat.h
@@ -0,0 +1,191 @@
+//===-- tsan_stat.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_STAT_H
+#define TSAN_STAT_H
+
+namespace __tsan {
+
+enum StatType {
+ // Memory access processing related stuff.
+ StatMop,
+ StatMopRead,
+ StatMopWrite,
+ StatMop1, // These must be consequtive.
+ StatMop2,
+ StatMop4,
+ StatMop8,
+ StatMopSame,
+ StatMopIgnored,
+ StatMopRange,
+ StatMopRodata,
+ StatMopRangeRodata,
+ StatShadowProcessed,
+ StatShadowZero,
+ StatShadowNonZero, // Derived.
+ StatShadowSameSize,
+ StatShadowIntersect,
+ StatShadowNotIntersect,
+ StatShadowSameThread,
+ StatShadowAnotherThread,
+ StatShadowReplace,
+
+ // Func processing.
+ StatFuncEnter,
+ StatFuncExit,
+
+ // Trace processing.
+ StatEvents,
+
+ // Threads.
+ StatThreadCreate,
+ StatThreadFinish,
+ StatThreadReuse,
+ StatThreadMaxTid,
+ StatThreadMaxAlive,
+
+ // Mutexes.
+ StatMutexCreate,
+ StatMutexDestroy,
+ StatMutexLock,
+ StatMutexUnlock,
+ StatMutexRecLock,
+ StatMutexRecUnlock,
+ StatMutexReadLock,
+ StatMutexReadUnlock,
+
+ // Synchronization.
+ StatSyncCreated,
+ StatSyncDestroyed,
+ StatSyncAcquire,
+ StatSyncRelease,
+ StatSyncReleaseStoreAcquire,
+
+ // Clocks - acquire.
+ StatClockAcquire,
+ StatClockAcquireEmpty,
+ StatClockAcquireFastRelease,
+ StatClockAcquireFull,
+ StatClockAcquiredSomething,
+ // Clocks - release.
+ StatClockRelease,
+ StatClockReleaseResize,
+ StatClockReleaseFast,
+ StatClockReleaseSlow,
+ StatClockReleaseFull,
+ StatClockReleaseAcquired,
+ StatClockReleaseClearTail,
+ // Clocks - release store.
+ StatClockStore,
+ StatClockStoreResize,
+ StatClockStoreFast,
+ StatClockStoreFull,
+ StatClockStoreTail,
+ // Clocks - acquire-release.
+ StatClockAcquireRelease,
+
+ // Atomics.
+ StatAtomic,
+ StatAtomicLoad,
+ StatAtomicStore,
+ StatAtomicExchange,
+ StatAtomicFetchAdd,
+ StatAtomicFetchSub,
+ StatAtomicFetchAnd,
+ StatAtomicFetchOr,
+ StatAtomicFetchXor,
+ StatAtomicFetchNand,
+ StatAtomicCAS,
+ StatAtomicFence,
+ StatAtomicRelaxed,
+ StatAtomicConsume,
+ StatAtomicAcquire,
+ StatAtomicRelease,
+ StatAtomicAcq_Rel,
+ StatAtomicSeq_Cst,
+ StatAtomic1,
+ StatAtomic2,
+ StatAtomic4,
+ StatAtomic8,
+ StatAtomic16,
+
+ // Dynamic annotations.
+ StatAnnotation,
+ StatAnnotateHappensBefore,
+ StatAnnotateHappensAfter,
+ StatAnnotateCondVarSignal,
+ StatAnnotateCondVarSignalAll,
+ StatAnnotateMutexIsNotPHB,
+ StatAnnotateCondVarWait,
+ StatAnnotateRWLockCreate,
+ StatAnnotateRWLockCreateStatic,
+ StatAnnotateRWLockDestroy,
+ StatAnnotateRWLockAcquired,
+ StatAnnotateRWLockReleased,
+ StatAnnotateTraceMemory,
+ StatAnnotateFlushState,
+ StatAnnotateNewMemory,
+ StatAnnotateNoOp,
+ StatAnnotateFlushExpectedRaces,
+ StatAnnotateEnableRaceDetection,
+ StatAnnotateMutexIsUsedAsCondVar,
+ StatAnnotatePCQGet,
+ StatAnnotatePCQPut,
+ StatAnnotatePCQDestroy,
+ StatAnnotatePCQCreate,
+ StatAnnotateExpectRace,
+ StatAnnotateBenignRaceSized,
+ StatAnnotateBenignRace,
+ StatAnnotateIgnoreReadsBegin,
+ StatAnnotateIgnoreReadsEnd,
+ StatAnnotateIgnoreWritesBegin,
+ StatAnnotateIgnoreWritesEnd,
+ StatAnnotateIgnoreSyncBegin,
+ StatAnnotateIgnoreSyncEnd,
+ StatAnnotatePublishMemoryRange,
+ StatAnnotateUnpublishMemoryRange,
+ StatAnnotateThreadName,
+ Stat__tsan_mutex_create,
+ Stat__tsan_mutex_destroy,
+ Stat__tsan_mutex_pre_lock,
+ Stat__tsan_mutex_post_lock,
+ Stat__tsan_mutex_pre_unlock,
+ Stat__tsan_mutex_post_unlock,
+ Stat__tsan_mutex_pre_signal,
+ Stat__tsan_mutex_post_signal,
+ Stat__tsan_mutex_pre_divert,
+ Stat__tsan_mutex_post_divert,
+
+ // Internal mutex contentionz.
+ StatMtxTotal,
+ StatMtxTrace,
+ StatMtxThreads,
+ StatMtxReport,
+ StatMtxSyncVar,
+ StatMtxSyncTab,
+ StatMtxSlab,
+ StatMtxAnnotations,
+ StatMtxAtExit,
+ StatMtxMBlock,
+ StatMtxDeadlockDetector,
+ StatMtxFired,
+ StatMtxRacy,
+ StatMtxFD,
+ StatMtxGlobalProc,
+
+ // This must be the last.
+ StatCnt
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STAT_H
diff --git a/lib/tsan/tsan_suppressions.cpp b/lib/tsan/tsan_suppressions.cpp
new file mode 100644
index 0000000000..a1c1bf81bf
--- /dev/null
+++ b/lib/tsan/tsan_suppressions.cpp
@@ -0,0 +1,161 @@
+//===-- tsan_suppressions.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+
+#if !SANITIZER_GO
+// Suppressions for true/false positives in standard libraries.
+static const char *const std_suppressions =
+// Libstdc++ 4.4 has data races in std::string.
+// See http://crbug.com/181502 for an example.
+"race:^_M_rep$\n"
+"race:^_M_is_leaked$\n"
+// False positive when using std <thread>.
+// Happens because we miss atomic synchronization in libstdc++.
+// See http://llvm.org/bugs/show_bug.cgi?id=17066 for details.
+"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
+
+// Can be overriden in frontend.
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_suppressions() {
+ return 0;
+}
+#endif
+
+namespace __tsan {
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char *kSuppressionTypes[] = {
+ kSuppressionRace, kSuppressionRaceTop, kSuppressionMutex,
+ kSuppressionThread, kSuppressionSignal, kSuppressionLib,
+ kSuppressionDeadlock};
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder)
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+#if !SANITIZER_GO
+ suppression_ctx->Parse(__tsan_default_suppressions());
+ suppression_ctx->Parse(std_suppressions);
+#endif
+}
+
+SuppressionContext *Suppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+static const char *conv(ReportType typ) {
+ switch (typ) {
+ case ReportTypeRace:
+ case ReportTypeVptrRace:
+ case ReportTypeUseAfterFree:
+ case ReportTypeVptrUseAfterFree:
+ case ReportTypeExternalRace:
+ return kSuppressionRace;
+ case ReportTypeThreadLeak:
+ return kSuppressionThread;
+ case ReportTypeMutexDestroyLocked:
+ case ReportTypeMutexDoubleLock:
+ case ReportTypeMutexInvalidAccess:
+ case ReportTypeMutexBadUnlock:
+ case ReportTypeMutexBadReadLock:
+ case ReportTypeMutexBadReadUnlock:
+ return kSuppressionMutex;
+ case ReportTypeSignalUnsafe:
+ case ReportTypeErrnoInSignal:
+ return kSuppressionSignal;
+ case ReportTypeDeadlock:
+ return kSuppressionDeadlock;
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static uptr IsSuppressed(const char *stype, const AddressInfo &info,
+ Suppression **sp) {
+ if (suppression_ctx->Match(info.function, stype, sp) ||
+ suppression_ctx->Match(info.file, stype, sp) ||
+ suppression_ctx->Match(info.module, stype, sp)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
+ atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed);
+ return info.address;
+ }
+ return 0;
+}
+
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || stack == 0 ||
+ !stack->suppressable)
+ return 0;
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
+ return 0;
+ for (const SymbolizedStack *frame = stack->frames; frame;
+ frame = frame->next) {
+ uptr pc = IsSuppressed(stype, frame->info, sp);
+ if (pc != 0)
+ return pc;
+ }
+ if (0 == internal_strcmp(stype, kSuppressionRace) && stack->frames != nullptr)
+ return IsSuppressed(kSuppressionRaceTop, stack->frames->info, sp);
+ return 0;
+}
+
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal || !loc->suppressable)
+ return 0;
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
+ return 0;
+ Suppression *s;
+ const DataInfo &global = loc->global;
+ if (suppression_ctx->Match(global.name, stype, &s) ||
+ suppression_ctx->Match(global.module, stype, &s)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ);
+ atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
+ *sp = s;
+ return global.start;
+ }
+ return 0;
+}
+
+void PrintMatchedSuppressions() {
+ InternalMmapVector<Suppression *> matched;
+ CHECK(suppression_ctx);
+ suppression_ctx->GetMatched(&matched);
+ if (!matched.size())
+ return;
+ int hit_count = 0;
+ for (uptr i = 0; i < matched.size(); i++)
+ hit_count += atomic_load_relaxed(&matched[i]->hit_count);
+ Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
+ (int)internal_getpid());
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%d %s:%s\n", atomic_load_relaxed(&matched[i]->hit_count),
+ matched[i]->type, matched[i]->templ);
+ }
+}
+} // namespace __tsan
diff --git a/lib/tsan/tsan_suppressions.h b/lib/tsan/tsan_suppressions.h
new file mode 100644
index 0000000000..f430aeb6c4
--- /dev/null
+++ b/lib/tsan/tsan_suppressions.h
@@ -0,0 +1,37 @@
+//===-- tsan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SUPPRESSIONS_H
+#define TSAN_SUPPRESSIONS_H
+
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+const char kSuppressionNone[] = "none";
+const char kSuppressionRace[] = "race";
+const char kSuppressionRaceTop[] = "race_top";
+const char kSuppressionMutex[] = "mutex";
+const char kSuppressionThread[] = "thread";
+const char kSuppressionSignal[] = "signal";
+const char kSuppressionLib[] = "called_from_lib";
+const char kSuppressionDeadlock[] = "deadlock";
+
+void InitializeSuppressions();
+SuppressionContext *Suppressions();
+void PrintMatchedSuppressions();
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
+
+} // namespace __tsan
+
+#endif // TSAN_SUPPRESSIONS_H
diff --git a/lib/tsan/tsan_symbolize.cpp b/lib/tsan/tsan_symbolize.cpp
new file mode 100644
index 0000000000..6478f3a754
--- /dev/null
+++ b/lib/tsan/tsan_symbolize.cpp
@@ -0,0 +1,122 @@
+//===-- tsan_symbolize.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_symbolize.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_flags.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void EnterSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(!thr->in_symbolizer);
+ thr->in_symbolizer = true;
+ thr->ignore_interceptors++;
+}
+
+void ExitSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(thr->in_symbolizer);
+ thr->in_symbolizer = false;
+ thr->ignore_interceptors--;
+}
+
+// Legacy API.
+// May be overriden by JIT/JAVA/etc,
+// whatever produces PCs marked with kExternalPCBit.
+SANITIZER_WEAK_DEFAULT_IMPL
+bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz,
+ char *file_buf, uptr file_siz, int *line,
+ int *col) {
+ return false;
+}
+
+// New API: call __tsan_symbolize_external_ex only when it exists.
+// Once old clients are gone, provide dummy implementation.
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_symbolize_external_ex(uptr pc,
+ void (*add_frame)(void *, const char *,
+ const char *, int, int),
+ void *ctx) {}
+
+struct SymbolizedStackBuilder {
+ SymbolizedStack *head;
+ SymbolizedStack *tail;
+ uptr addr;
+};
+
+static void AddFrame(void *ctx, const char *function_name, const char *file,
+ int line, int column) {
+ SymbolizedStackBuilder *ssb = (struct SymbolizedStackBuilder *)ctx;
+ if (ssb->tail) {
+ ssb->tail->next = SymbolizedStack::New(ssb->addr);
+ ssb->tail = ssb->tail->next;
+ } else {
+ ssb->head = ssb->tail = SymbolizedStack::New(ssb->addr);
+ }
+ AddressInfo *info = &ssb->tail->info;
+ if (function_name) {
+ info->function = internal_strdup(function_name);
+ }
+ if (file) {
+ info->file = internal_strdup(file);
+ }
+ info->line = line;
+ info->column = column;
+}
+
+SymbolizedStack *SymbolizeCode(uptr addr) {
+ // Check if PC comes from non-native land.
+ if (addr & kExternalPCBit) {
+ SymbolizedStackBuilder ssb = {nullptr, nullptr, addr};
+ __tsan_symbolize_external_ex(addr, AddFrame, &ssb);
+ if (ssb.head)
+ return ssb.head;
+ // Legacy code: remove along with the declaration above
+ // once all clients using this API are gone.
+ // Declare static to not consume too much stack space.
+ // We symbolize reports in a single thread, so this is fine.
+ static char func_buf[1024];
+ static char file_buf[1024];
+ int line, col;
+ SymbolizedStack *frame = SymbolizedStack::New(addr);
+ if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf,
+ sizeof(file_buf), &line, &col)) {
+ frame->info.function = internal_strdup(func_buf);
+ frame->info.file = internal_strdup(file_buf);
+ frame->info.line = line;
+ frame->info.column = col;
+ }
+ return frame;
+ }
+ return Symbolizer::GetOrInit()->SymbolizePC(addr);
+}
+
+ReportLocation *SymbolizeData(uptr addr) {
+ DataInfo info;
+ if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
+ return 0;
+ ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ internal_memcpy(&ent->global, &info, sizeof(info));
+ return ent;
+}
+
+void SymbolizeFlush() {
+ Symbolizer::GetOrInit()->Flush();
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_symbolize.h b/lib/tsan/tsan_symbolize.h
new file mode 100644
index 0000000000..7adaa04dc2
--- /dev/null
+++ b/lib/tsan/tsan_symbolize.h
@@ -0,0 +1,30 @@
+//===-- tsan_symbolize.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYMBOLIZE_H
+#define TSAN_SYMBOLIZE_H
+
+#include "tsan_defs.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+void EnterSymbolizer();
+void ExitSymbolizer();
+SymbolizedStack *SymbolizeCode(uptr addr);
+ReportLocation *SymbolizeData(uptr addr);
+void SymbolizeFlush();
+
+ReportStack *NewReportStackEntry(uptr addr);
+
+} // namespace __tsan
+
+#endif // TSAN_SYMBOLIZE_H
diff --git a/lib/tsan/tsan_sync.cpp b/lib/tsan/tsan_sync.cpp
new file mode 100644
index 0000000000..7f686dc5fc
--- /dev/null
+++ b/lib/tsan/tsan_sync.cpp
@@ -0,0 +1,296 @@
+//===-- tsan_sync.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_sync.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
+
+SyncVar::SyncVar()
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
+ Reset(0);
+}
+
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+ this->addr = addr;
+ this->uid = uid;
+ this->next = 0;
+
+ creation_stack_id = 0;
+ if (!SANITIZER_GO) // Go does not use them
+ creation_stack_id = CurrentStackId(thr, pc);
+ if (common_flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, this);
+}
+
+void SyncVar::Reset(Processor *proc) {
+ uid = 0;
+ creation_stack_id = 0;
+ owner_tid = kInvalidTid;
+ last_lock = 0;
+ recursion = 0;
+ atomic_store_relaxed(&flags, 0);
+
+ if (proc == 0) {
+ CHECK_EQ(clock.size(), 0);
+ CHECK_EQ(read_clock.size(), 0);
+ } else {
+ clock.Reset(&proc->clock_cache);
+ read_clock.Reset(&proc->clock_cache);
+ }
+}
+
+MetaMap::MetaMap()
+ : block_alloc_("heap block allocator")
+ , sync_alloc_("sync allocator") {
+ atomic_store(&uid_gen_, 0, memory_order_relaxed);
+}
+
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
+ MBlock *b = block_alloc_.Map(idx);
+ b->siz = sz;
+ b->tag = 0;
+ b->tid = thr->tid;
+ b->stk = CurrentStackId(thr, pc);
+ u32 *meta = MemToMeta(p);
+ DCHECK_EQ(*meta, 0);
+ *meta = idx | kFlagBlock;
+}
+
+uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+ MBlock* b = GetBlock(p);
+ if (b == 0)
+ return 0;
+ uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+ FreeRange(proc, p, sz);
+ return sz;
+}
+
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+ bool has_something = false;
+ u32 *meta = MemToMeta(p);
+ u32 *end = MemToMeta(p + sz);
+ if (end == meta)
+ end++;
+ for (; meta < end; meta++) {
+ u32 idx = *meta;
+ if (idx == 0) {
+ // Note: don't write to meta in this case -- the block can be huge.
+ continue;
+ }
+ *meta = 0;
+ has_something = true;
+ while (idx != 0) {
+ if (idx & kFlagBlock) {
+ block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
+ break;
+ } else if (idx & kFlagSync) {
+ DCHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ u32 next = s->next;
+ s->Reset(proc);
+ sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
+ idx = next;
+ } else {
+ CHECK(0);
+ }
+ }
+ }
+ return has_something;
+}
+
+// ResetRange removes all meta objects from the range.
+// It is called for large mmap-ed regions. The function is best-effort wrt
+// freeing of meta objects, because we don't want to page in the whole range
+// which can be huge. The function probes pages one-by-one until it finds a page
+// without meta objects, at this point it stops freeing meta objects. Because
+// thread stacks grow top-down, we do the same starting from end as well.
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+ if (SANITIZER_GO) {
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
+ // so we do the optimization only for C/C++.
+ FreeRange(proc, p, sz);
+ return;
+ }
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ if (sz <= 4 * kPageSize) {
+ // If the range is small, just do the normal free procedure.
+ FreeRange(proc, p, sz);
+ return;
+ }
+ // First, round both ends of the range to page size.
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ FreeRange(proc, p, diff);
+ p += diff;
+ sz -= diff;
+ }
+ diff = p + sz - RoundDown(p + sz, kPageSize);
+ if (diff != 0) {
+ FreeRange(proc, p + sz - diff, diff);
+ sz -= diff;
+ }
+ // Now we must have a non-empty page-aligned range.
+ CHECK_GT(sz, 0);
+ CHECK_EQ(p, RoundUp(p, kPageSize));
+ CHECK_EQ(sz, RoundUp(sz, kPageSize));
+ const uptr p0 = p;
+ const uptr sz0 = sz;
+ // Probe start of the range.
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p, kPageSize);
+ p += kPageSize;
+ sz -= kPageSize;
+ if (!has_something && checked > (128 << 10))
+ break;
+ }
+ // Probe end of the range.
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
+ sz -= kPageSize;
+ // Stacks grow down, so sync object are most likely at the end of the region
+ // (if it is a stack). The very end of the stack is TLS and tsan increases
+ // TLS by at least 256K, so check at least 512K.
+ if (!has_something && checked > (512 << 10))
+ break;
+ }
+ // Finally, page out the whole range (including the parts that we've just
+ // freed). Note: we can't simply madvise, because we need to leave a zeroed
+ // range (otherwise __tsan_java_move can crash if it encounters a left-over
+ // meta objects in java heap).
+ uptr metap = (uptr)MemToMeta(p0);
+ uptr metasz = sz0 / kMetaRatio;
+ UnmapOrDie((void*)metap, metasz);
+ if (!MmapFixedNoReserve(metap, metasz))
+ Die();
+}
+
+MBlock* MetaMap::GetBlock(uptr p) {
+ u32 *meta = MemToMeta(p);
+ u32 idx = *meta;
+ for (;;) {
+ if (idx == 0)
+ return 0;
+ if (idx & kFlagBlock)
+ return block_alloc_.Map(idx & ~kFlagMask);
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ idx = s->next;
+ }
+}
+
+SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ return GetAndLock(thr, pc, addr, write_lock, true);
+}
+
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
+ return GetAndLock(0, 0, addr, write_lock, false);
+}
+
+SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock, bool create) {
+ u32 *meta = MemToMeta(addr);
+ u32 idx0 = *meta;
+ u32 myidx = 0;
+ SyncVar *mys = 0;
+ for (;;) {
+ u32 idx = idx0;
+ for (;;) {
+ if (idx == 0)
+ break;
+ if (idx & kFlagBlock)
+ break;
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ if (s->addr == addr) {
+ if (myidx != 0) {
+ mys->Reset(thr->proc());
+ sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
+ }
+ if (write_lock)
+ s->mtx.Lock();
+ else
+ s->mtx.ReadLock();
+ return s;
+ }
+ idx = s->next;
+ }
+ if (!create)
+ return 0;
+ if (*meta != idx0) {
+ idx0 = *meta;
+ continue;
+ }
+
+ if (myidx == 0) {
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
+ mys = sync_alloc_.Map(myidx);
+ mys->Init(thr, pc, addr, uid);
+ }
+ mys->next = idx0;
+ if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+ myidx | kFlagSync, memory_order_release)) {
+ if (write_lock)
+ mys->mtx.Lock();
+ else
+ mys->mtx.ReadLock();
+ return mys;
+ }
+ }
+}
+
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+ // src and dst can overlap,
+ // there are no concurrent accesses to the regions (e.g. stop-the-world).
+ CHECK_NE(src, dst);
+ CHECK_NE(sz, 0);
+ uptr diff = dst - src;
+ u32 *src_meta = MemToMeta(src);
+ u32 *dst_meta = MemToMeta(dst);
+ u32 *src_meta_end = MemToMeta(src + sz);
+ uptr inc = 1;
+ if (dst > src) {
+ src_meta = MemToMeta(src + sz) - 1;
+ dst_meta = MemToMeta(dst + sz) - 1;
+ src_meta_end = MemToMeta(src) - 1;
+ inc = -1;
+ }
+ for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
+ CHECK_EQ(*dst_meta, 0);
+ u32 idx = *src_meta;
+ *src_meta = 0;
+ *dst_meta = idx;
+ // Patch the addresses in sync objects.
+ while (idx != 0) {
+ if (idx & kFlagBlock)
+ break;
+ CHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ s->addr += diff;
+ idx = s->next;
+ }
+ }
+}
+
+void MetaMap::OnProcIdle(Processor *proc) {
+ block_alloc_.FlushCache(&proc->block_cache);
+ sync_alloc_.FlushCache(&proc->sync_cache);
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/tsan_sync.h b/lib/tsan/tsan_sync.h
new file mode 100644
index 0000000000..47f2739d8d
--- /dev/null
+++ b/lib/tsan/tsan_sync.h
@@ -0,0 +1,145 @@
+//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYNC_H
+#define TSAN_SYNC_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "tsan_defs.h"
+#include "tsan_clock.h"
+#include "tsan_mutex.h"
+#include "tsan_dense_alloc.h"
+
+namespace __tsan {
+
+// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
+// See documentation there as well.
+enum MutexFlags {
+ MutexFlagLinkerInit = 1 << 0, // __tsan_mutex_linker_init
+ MutexFlagWriteReentrant = 1 << 1, // __tsan_mutex_write_reentrant
+ MutexFlagReadReentrant = 1 << 2, // __tsan_mutex_read_reentrant
+ MutexFlagReadLock = 1 << 3, // __tsan_mutex_read_lock
+ MutexFlagTryLock = 1 << 4, // __tsan_mutex_try_lock
+ MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed
+ MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock
+ MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock
+ MutexFlagNotStatic = 1 << 8, // __tsan_mutex_not_static
+
+ // The following flags are runtime private.
+ // Mutex API misuse was detected, so don't report any more.
+ MutexFlagBroken = 1 << 30,
+ // We did not intercept pre lock event, so handle it on post lock.
+ MutexFlagDoPreLockOnPostLock = 1 << 29,
+ // Must list all mutex creation flags.
+ MutexCreationFlagMask = MutexFlagLinkerInit |
+ MutexFlagWriteReentrant |
+ MutexFlagReadReentrant |
+ MutexFlagNotStatic,
+};
+
+struct SyncVar {
+ SyncVar();
+
+ static const int kInvalidTid = -1;
+
+ uptr addr; // overwritten by DenseSlabAlloc freelist
+ Mutex mtx;
+ u64 uid; // Globally unique id.
+ u32 creation_stack_id;
+ int owner_tid; // Set only by exclusive owners.
+ u64 last_lock;
+ int recursion;
+ atomic_uint32_t flags;
+ u32 next; // in MetaMap
+ DDMutex dd;
+ SyncClock read_clock; // Used for rw mutexes only.
+ // The clock is placed last, so that it is situated on a different cache line
+ // with the mtx. This reduces contention for hot sync objects.
+ SyncClock clock;
+
+ void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+ void Reset(Processor *proc);
+
+ u64 GetId() const {
+ // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
+ return GetLsb((u64)addr | (uid << 48), 60);
+ }
+ bool CheckId(u64 uid) const {
+ CHECK_EQ(uid, GetLsb(uid, 14));
+ return GetLsb(this->uid, 14) == uid;
+ }
+ static uptr SplitId(u64 id, u64 *uid) {
+ *uid = id >> 48;
+ return (uptr)GetLsb(id, 48);
+ }
+
+ bool IsFlagSet(u32 f) const {
+ return atomic_load_relaxed(&flags) & f;
+ }
+
+ void SetFlags(u32 f) {
+ atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
+ }
+
+ void UpdateFlags(u32 flagz) {
+ // Filter out operation flags.
+ if (!(flagz & MutexCreationFlagMask))
+ return;
+ u32 current = atomic_load_relaxed(&flags);
+ if (current & MutexCreationFlagMask)
+ return;
+ // Note: this can be called from MutexPostReadLock which holds only read
+ // lock on the SyncVar.
+ atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
+ }
+};
+
+/* MetaMap allows to map arbitrary user pointers onto various descriptors.
+ Currently it maps pointers to heap block descriptors and sync var descs.
+ It uses 1/2 direct shadow, see tsan_platform.h.
+*/
+class MetaMap {
+ public:
+ MetaMap();
+
+ void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(Processor *proc, uptr p);
+ bool FreeRange(Processor *proc, uptr p, uptr sz);
+ void ResetRange(Processor *proc, uptr p, uptr sz);
+ MBlock* GetBlock(uptr p);
+
+ SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock);
+ SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+
+ void MoveMemory(uptr src, uptr dst, uptr sz);
+
+ void OnProcIdle(Processor *proc);
+
+ private:
+ static const u32 kFlagMask = 3u << 30;
+ static const u32 kFlagBlock = 1u << 30;
+ static const u32 kFlagSync = 2u << 30;
+ typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ BlockAlloc block_alloc_;
+ SyncAlloc sync_alloc_;
+ atomic_uint64_t uid_gen_;
+
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+ bool create);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_SYNC_H
diff --git a/lib/tsan/tsan_trace.h b/lib/tsan/tsan_trace.h
new file mode 100644
index 0000000000..fbd0f72db6
--- /dev/null
+++ b/lib/tsan/tsan_trace.h
@@ -0,0 +1,75 @@
+//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_TRACE_H
+#define TSAN_TRACE_H
+
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+#include "tsan_stack_trace.h"
+#include "tsan_mutexset.h"
+
+namespace __tsan {
+
+const int kTracePartSizeBits = 13;
+const int kTracePartSize = 1 << kTracePartSizeBits;
+const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
+const int kTraceSize = kTracePartSize * kTraceParts;
+
+// Must fit into 3 bits.
+enum EventType {
+ EventTypeMop,
+ EventTypeFuncEnter,
+ EventTypeFuncExit,
+ EventTypeLock,
+ EventTypeUnlock,
+ EventTypeRLock,
+ EventTypeRUnlock
+};
+
+// Represents a thread event (from most significant bit):
+// u64 typ : 3; // EventType.
+// u64 addr : 61; // Associated pc.
+typedef u64 Event;
+
+const uptr kEventPCBits = 61;
+
+struct TraceHeader {
+#if !SANITIZER_GO
+ BufferedStackTrace stack0; // Start stack for the trace.
+#else
+ VarSizeStackTrace stack0;
+#endif
+ u64 epoch0; // Start epoch for the trace.
+ MutexSet mset0;
+
+ TraceHeader() : stack0(), epoch0() {}
+};
+
+struct Trace {
+ Mutex mtx;
+#if !SANITIZER_GO
+ // Must be last to catch overflow as paging fault.
+ // Go shadow stack is dynamically allocated.
+ uptr shadow_stack[kShadowStackSize];
+#endif
+ // Must be the last field, because we unmap the unused part in
+ // CreateThreadContext.
+ TraceHeader headers[kTraceParts];
+
+ Trace()
+ : mtx(MutexTypeTrace, StatMtxTrace) {
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_TRACE_H
diff --git a/lib/tsan/tsan_update_shadow_word_inl.h b/lib/tsan/tsan_update_shadow_word_inl.h
new file mode 100644
index 0000000000..056c3aa203
--- /dev/null
+++ b/lib/tsan/tsan_update_shadow_word_inl.h
@@ -0,0 +1,69 @@
+//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Body of the hottest inner loop.
+// If we wrap this body into a function, compilers (both gcc and clang)
+// produce sligtly less efficient code.
+//===----------------------------------------------------------------------===//
+do {
+ StatInc(thr, StatShadowProcessed);
+ const unsigned kAccessSize = 1 << kAccessSizeLog;
+ u64 *sp = &shadow_mem[idx];
+ old = LoadShadow(sp);
+ if (LIKELY(old.IsZero())) {
+ StatInc(thr, StatShadowZero);
+ if (!stored) {
+ StoreIfNotYetStored(sp, &store_word);
+ stored = true;
+ }
+ break;
+ }
+ // is the memory access equal to the previous?
+ if (LIKELY(Shadow::Addr0AndSizeAreEqual(cur, old))) {
+ StatInc(thr, StatShadowSameSize);
+ // same thread?
+ if (LIKELY(Shadow::TidsAreEqual(old, cur))) {
+ StatInc(thr, StatShadowSameThread);
+ if (LIKELY(old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))) {
+ StoreIfNotYetStored(sp, &store_word);
+ stored = true;
+ }
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (HappensBefore(old, thr)) {
+ if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) {
+ StoreIfNotYetStored(sp, &store_word);
+ stored = true;
+ }
+ break;
+ }
+ if (LIKELY(old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)))
+ break;
+ goto RACE;
+ }
+ // Do the memory access intersect?
+ if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
+ StatInc(thr, StatShadowIntersect);
+ if (Shadow::TidsAreEqual(old, cur)) {
+ StatInc(thr, StatShadowSameThread);
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
+ break;
+ if (LIKELY(HappensBefore(old, thr)))
+ break;
+ goto RACE;
+ }
+ // The accesses do not intersect.
+ StatInc(thr, StatShadowNotIntersect);
+ break;
+} while (0);
diff --git a/lib/tsan/ubsan/ubsan_flags.h b/lib/tsan/ubsan/ubsan_flags.h
new file mode 100644
index 0000000000..daa0d7c701
--- /dev/null
+++ b/lib/tsan/ubsan/ubsan_flags.h
@@ -0,0 +1,48 @@
+//===-- ubsan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAGS_H
+#define UBSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+class FlagParser;
+}
+
+namespace __ubsan {
+
+struct Flags {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+
+ void SetDefaults();
+};
+
+extern Flags ubsan_flags;
+inline Flags *flags() { return &ubsan_flags; }
+
+void InitializeFlags();
+void RegisterUbsanFlags(FlagParser *parser, Flags *f);
+
+const char *MaybeCallUbsanDefaultOptions();
+
+} // namespace __ubsan
+
+extern "C" {
+// Users may provide their own implementation of __ubsan_default_options to
+// override the default flag values.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options();
+} // extern "C"
+
+#endif // UBSAN_FLAGS_H
diff --git a/lib/tsan/ubsan/ubsan_flags.inc b/lib/tsan/ubsan/ubsan_flags.inc
new file mode 100644
index 0000000000..a4d0e6109e
--- /dev/null
+++ b/lib/tsan/ubsan/ubsan_flags.inc
@@ -0,0 +1,28 @@
+//===-- ubsan_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// UBSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAG
+# error "Define UBSAN_FLAG prior to including this file!"
+#endif
+
+// UBSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+UBSAN_FLAG(bool, halt_on_error, false,
+ "Crash the program after printing the first error report")
+UBSAN_FLAG(bool, print_stacktrace, false,
+ "Include full stacktrace into an error report")
+UBSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+UBSAN_FLAG(bool, report_error_type, false,
+ "Print specific error type instead of 'undefined-behavior' in summary.")
+UBSAN_FLAG(bool, silence_unsigned_overflow, false,
+ "Do not print non-fatal error reports for unsigned integer overflow. "
+ "Used to provide fuzzing signal without blowing up logs.")
diff --git a/lib/tsan/ubsan/ubsan_init.h b/lib/tsan/ubsan/ubsan_init.h
new file mode 100644
index 0000000000..0510385b13
--- /dev/null
+++ b/lib/tsan/ubsan/ubsan_init.h
@@ -0,0 +1,33 @@
+//===-- ubsan_init.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization function for UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_INIT_H
+#define UBSAN_INIT_H
+
+namespace __ubsan {
+
+// Get the full tool name for UBSan.
+const char *GetSanititizerToolName();
+
+// Initialize UBSan as a standalone tool. Typically should be called early
+// during initialization.
+void InitAsStandalone();
+
+// Initialize UBSan as a standalone tool, if it hasn't been initialized before.
+void InitAsStandaloneIfNecessary();
+
+// Initializes UBSan as a plugin tool. This function should be called once
+// from "parent tool" (e.g. ASan) initialization.
+void InitAsPlugin();
+
+} // namespace __ubsan
+
+#endif // UBSAN_INIT_H
diff --git a/lib/tsan/ubsan/ubsan_platform.h b/lib/tsan/ubsan/ubsan_platform.h
new file mode 100644
index 0000000000..71d7fb18c9
--- /dev/null
+++ b/lib/tsan/ubsan/ubsan_platform.h
@@ -0,0 +1,25 @@
+//===-- ubsan_platform.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the platforms which UBSan is supported at.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_PLATFORM_H
+#define UBSAN_PLATFORM_H
+
+// Other platforms should be easy to add, and probably work as-is.
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__) || \
+ (defined(__sun__) && defined(__svr4__)) || \
+ defined(_WIN32) || defined(__Fuchsia__) || defined(__rtems__)
+# define CAN_SANITIZE_UB 1
+#else
+# define CAN_SANITIZE_UB 0
+#endif
+
+#endif