Diffstat (limited to 'libsanitizer/tsan')
-rw-r--r--  libsanitizer/tsan/Makefile.am                  1
-rw-r--r--  libsanitizer/tsan/Makefile.in                  3
-rw-r--r--  libsanitizer/tsan/tsan_defs.h                  2
-rw-r--r--  libsanitizer/tsan/tsan_fd.cc                   9
-rw-r--r--  libsanitizer/tsan/tsan_flags.cc                1
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.cc       370
-rw-r--r--  libsanitizer/tsan/tsan_interface_ann.cc        4
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.cc     2
-rw-r--r--  libsanitizer/tsan/tsan_interface_java.cc       2
-rw-r--r--  libsanitizer/tsan/tsan_mman.cc                16
-rw-r--r--  libsanitizer/tsan/tsan_mman.h                  4
-rw-r--r--  libsanitizer/tsan/tsan_platform.h            294
-rw-r--r--  libsanitizer/tsan/tsan_platform_linux.cc     149
-rw-r--r--  libsanitizer/tsan/tsan_platform_mac.cc        22
-rw-r--r--  libsanitizer/tsan/tsan_platform_windows.cc     4
-rw-r--r--  libsanitizer/tsan/tsan_report.cc              53
-rw-r--r--  libsanitizer/tsan/tsan_report.h               28
-rw-r--r--  libsanitizer/tsan/tsan_rtl.cc                 35
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h                  31
-rw-r--r--  libsanitizer/tsan/tsan_rtl_amd64.S            22
-rw-r--r--  libsanitizer/tsan/tsan_rtl_mutex.cc           22
-rw-r--r--  libsanitizer/tsan/tsan_rtl_report.cc         174
-rw-r--r--  libsanitizer/tsan/tsan_stack_trace.cc        108
-rw-r--r--  libsanitizer/tsan/tsan_stack_trace.h          39
-rw-r--r--  libsanitizer/tsan/tsan_suppressions.cc        19
-rw-r--r--  libsanitizer/tsan/tsan_symbolize.cc           65
-rw-r--r--  libsanitizer/tsan/tsan_trace.h                18
27 files changed, 828 insertions, 669 deletions
diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am
index ab4748e0878..c5325905154 100644
--- a/libsanitizer/tsan/Makefile.am
+++ b/libsanitizer/tsan/Makefile.am
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in
index c6f1ecffa75..93ffc9e82dc 100644
--- a/libsanitizer/tsan/Makefile.in
+++ b/libsanitizer/tsan/Makefile.in
@@ -276,7 +276,8 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \
diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h
index a8528cbb44a..076de73c9be 100644
--- a/libsanitizer/tsan/tsan_defs.h
+++ b/libsanitizer/tsan/tsan_defs.h
@@ -41,7 +41,6 @@ const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 42;
const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
-const uptr kTraceStackSize = 256;
#ifdef TSAN_SHADOW_COUNT
# if TSAN_SHADOW_COUNT == 2 \
@@ -172,7 +171,6 @@ struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
-class StackTrace;
// Descriptor of user's memory block.
struct MBlock {
diff --git a/libsanitizer/tsan/tsan_fd.cc b/libsanitizer/tsan/tsan_fd.cc
index a74a668c13b..10582035f24 100644
--- a/libsanitizer/tsan/tsan_fd.cc
+++ b/libsanitizer/tsan/tsan_fd.cc
@@ -46,7 +46,8 @@ static bool bogusfd(int fd) {
}
static FdSync *allocsync(ThreadState *thr, uptr pc) {
- FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync));
+ FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
+ false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
@@ -63,7 +64,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
- user_free(thr, pc, s);
+ user_free(thr, pc, s, false);
}
}
}
@@ -76,13 +77,13 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
- void *p = user_alloc(thr, pc, size);
+ void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p;
else
- user_free(thr, pc, p);
+ user_free(thr, pc, p, false);
}
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
}
diff --git a/libsanitizer/tsan/tsan_flags.cc b/libsanitizer/tsan/tsan_flags.cc
index 02bf6527e6a..6059f2771ba 100644
--- a/libsanitizer/tsan/tsan_flags.cc
+++ b/libsanitizer/tsan/tsan_flags.cc
@@ -97,6 +97,7 @@ void InitializeFlags(Flags *f, const char *env) {
cf->allow_addr2line = true;
cf->detect_deadlocks = true;
cf->print_suppressions = false;
+ cf->stack_trace_format = " #%n %f %S %M";
// Let a frontend override.
ParseFlags(f, __tsan_default_options());
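
Note: the new stack_trace_format default " #%n %f %S %M" uses the common sanitizer placeholder scheme (frame number, function, source location, module+offset) and is later consumed by RenderFrame() in the tsan_report.cc hunk below. The stand-alone sketch that follows only illustrates how such a format string expands; Frame and FormatFrame are made-up names, not the sanitizer API, and the exact rendering of each placeholder is an assumption.

    #include <cstdio>
    #include <string>

    // Hypothetical frame record -- the real runtime fills __sanitizer::AddressInfo.
    struct Frame {
      int num;
      const char *function, *file;
      int line;
      const char *module;
      unsigned long offset;
    };

    // Expands the placeholders used by the default format:
    // %n = frame number, %f = function, %S = file:line, %M = module+offset.
    std::string FormatFrame(const char *fmt, const Frame &f) {
      std::string out;
      char buf[256];
      for (const char *p = fmt; *p; p++) {
        if (*p != '%' || p[1] == 0) { out += *p; continue; }
        switch (*++p) {
          case 'n': snprintf(buf, sizeof(buf), "%d", f.num); break;
          case 'f': snprintf(buf, sizeof(buf), "%s", f.function); break;
          case 'S': snprintf(buf, sizeof(buf), "%s:%d", f.file, f.line); break;
          case 'M': snprintf(buf, sizeof(buf), "(%s+0x%lx)", f.module, f.offset); break;
          default:  buf[0] = '%'; buf[1] = *p; buf[2] = 0; break;
        }
        out += buf;
      }
      return out;
    }

    int main() {
      Frame f = {0, "main", "race.cc", 12, "a.out", 0x4a2};
      // Prints: " #0 main race.cc:12 (a.out+0x4a2)"
      printf("%s\n", FormatFrame(" #%n %f %S %M", f).c_str());
      return 0;
    }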
diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
index b49622b3ad4..7f1b6e45a8d 100644
--- a/libsanitizer/tsan/tsan_interceptors.cc
+++ b/libsanitizer/tsan/tsan_interceptors.cc
@@ -27,6 +27,16 @@
using namespace __tsan; // NOLINT
+#if SANITIZER_FREEBSD
+#define __errno_location __error
+#define __libc_malloc __malloc
+#define __libc_realloc __realloc
+#define __libc_calloc __calloc
+#define __libc_free __free
+#define stdout __stdoutp
+#define stderr __stderrp
+#endif
+
const int kSigCount = 65;
struct my_siginfo_t {
@@ -60,7 +70,9 @@ extern "C" void *__libc_malloc(uptr size);
extern "C" void *__libc_calloc(uptr size, uptr n);
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
+#if !SANITIZER_FREEBSD
extern "C" int mallopt(int param, int value);
+#endif
extern __sanitizer_FILE *stdout, *stderr;
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
@@ -91,19 +103,19 @@ typedef void (*sighandler_t)(int sig);
#define errno (*__errno_location())
-// 16K loaded modules should be enough for everyone.
-static const uptr kMaxModules = 1 << 14;
-static LoadedModule *modules;
-static uptr nmodules;
-
struct sigaction_t {
union {
sighandler_t sa_handler;
void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx);
};
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
__sanitizer_sigset_t sa_mask;
int sa_flags;
void (*sa_restorer)();
+#endif
};
const sighandler_t SIG_DFL = (sighandler_t)0;
@@ -202,7 +214,7 @@ ScopedInterceptor::~ScopedInterceptor() {
ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \
ScopedInterceptor si(thr, #func, caller_pc); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
@@ -218,7 +230,11 @@ ScopedInterceptor::~ScopedInterceptor() {
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
-#define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+#endif
#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
@@ -271,63 +287,24 @@ TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
return res;
}
-class AtExitContext {
- public:
- AtExitContext()
- : mtx_(MutexTypeAtExit, StatMtxAtExit)
- , stack_(MBlockAtExit) {
- }
-
- typedef void(*atexit_cb_t)();
-
- int atexit(ThreadState *thr, uptr pc, bool is_on_exit,
- atexit_cb_t f, void *arg, void *dso) {
- Lock l(&mtx_);
- Release(thr, pc, (uptr)this);
- atexit_t *a = stack_.PushBack();
- a->cb = f;
- a->arg = arg;
- a->dso = dso;
- a->is_on_exit = is_on_exit;
- return 0;
- }
-
- void exit(ThreadState *thr, uptr pc) {
- for (;;) {
- atexit_t a = {};
- {
- Lock l(&mtx_);
- if (stack_.Size() != 0) {
- a = stack_[stack_.Size() - 1];
- stack_.PopBack();
- Acquire(thr, pc, (uptr)this);
- }
- }
- if (a.cb == 0)
- break;
- VPrintf(2, "#%d: executing atexit func %p(%p) dso=%p\n",
- thr->tid, a.cb, a.arg, a.dso);
- if (a.is_on_exit)
- ((void(*)(int status, void *arg))a.cb)(0, a.arg);
- else
- ((void(*)(void *arg, void *dso))a.cb)(a.arg, a.dso);
- }
- }
-
- private:
- struct atexit_t {
- atexit_cb_t cb;
- void *arg;
- void *dso;
- bool is_on_exit;
- };
-
- static const int kMaxAtExit = 1024;
- Mutex mtx_;
- Vector<atexit_t> stack_;
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
};
-static AtExitContext *atexit_ctx;
+static void at_exit_wrapper(void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(void *arg))ctx->f)(ctx->arg);
+ __libc_free(ctx);
+}
+
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso);
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
if (cur_thread()->in_symbolizer)
@@ -335,40 +312,51 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
// We want to setup the atexit callback even if we are in ignored lib
// or after fork.
SCOPED_INTERCEPTOR_RAW(atexit, f);
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0, 0);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
-TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg, 0);
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
}
-bool IsSaticModule(void *dso) {
- if (modules == 0)
- return false;
- for (uptr i = 0; i < nmodules; i++) {
- if (modules[i].containsAddress((uptr)dso))
- return true;
- }
- return false;
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso) {
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
}
-TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+static void on_exit_wrapper(int status, void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ __libc_free(ctx);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- // If it's the main executable or a statically loaded library,
- // we will call the callback.
- if (dso == 0 || IsSaticModule(dso))
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg, dso);
-
- // Dynamically load module, don't know when to call the callback for it.
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = (void(*)())f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
ThreadIgnoreBegin(thr, pc);
- int res = REAL(__cxa_atexit)(f, arg, dso);
+ int res = REAL(on_exit)(on_exit_wrapper, ctx);
ThreadIgnoreEnd(thr, pc);
return res;
}
@@ -406,7 +394,11 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
}
static void LongJmp(ThreadState *thr, uptr *env) {
+#if SANITIZER_FREEBSD
+ uptr mangled_sp = env[2];
+#else
uptr mangled_sp = env[6];
+#endif // SANITIZER_FREEBSD
// Find the saved buf by mangled_sp.
for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
JmpBuf *buf = &thr->jmp_bufs[i];
@@ -751,6 +743,7 @@ TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
int flags, int fd, u64 off) {
SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
@@ -764,6 +757,10 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
}
return res;
}
+#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
+#else
+#define TSAN_MAYBE_INTERCEPT_MMAP64
+#endif
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
@@ -772,10 +769,15 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
return user_alloc(thr, pc, sz, align);
}
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
+#else
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
@@ -787,11 +789,16 @@ TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
sz = RoundUp(sz, GetPageSizeCached());
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
+#else
+#define TSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
@@ -863,11 +870,13 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadState *thr = cur_thread();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(g_thread_finalize_key,
(void *)kPthreadDestructorIterations)) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
}
+ ThreadIgnoreEnd(thr, 0);
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
pthread_yield();
atomic_store(&p->tid, 0, memory_order_release);
@@ -1336,73 +1345,135 @@ TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
return REAL(__xstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT
+#endif
TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(stat, path, buf);
+ return REAL(stat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
return REAL(__xstat)(0, path, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
return REAL(__xstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
return REAL(__xstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_STAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
return REAL(__lxstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT
+#endif
TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(lstat, path, buf);
+ return REAL(lstat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
return REAL(__lxstat)(0, path, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
return REAL(__lxstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
return REAL(__lxstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_LSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(fstat)(fd, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(0, fd, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(0, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
@@ -1412,6 +1483,7 @@ TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
return fd;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
int fd = REAL(open64)(name, flags, mode);
@@ -1419,6 +1491,10 @@ TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
+#else
+#define TSAN_MAYBE_INTERCEPT_OPEN64
+#endif
TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
@@ -1428,6 +1504,7 @@ TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
return fd;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
int fd = REAL(creat64)(name, mode);
@@ -1435,6 +1512,10 @@ TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_CREAT64
+#endif
TSAN_INTERCEPTOR(int, dup, int oldfd) {
SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
@@ -1460,6 +1541,7 @@ TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
return newfd2;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
int fd = REAL(eventfd)(initval, flags);
@@ -1467,7 +1549,12 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
FdEventCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_EVENTFD
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
if (fd >= 0)
@@ -1477,7 +1564,12 @@ TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
FdSignalCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
int fd = REAL(inotify_init)(fake);
@@ -1485,7 +1577,12 @@ TSAN_INTERCEPTOR(int, inotify_init, int fake) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
int fd = REAL(inotify_init1)(flags);
@@ -1493,6 +1590,10 @@ TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
+#endif
TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
@@ -1535,6 +1636,7 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_create, int size) {
SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
int fd = REAL(epoll_create)(size);
@@ -1542,7 +1644,12 @@ TSAN_INTERCEPTOR(int, epoll_create, int size) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
int fd = REAL(epoll_create1)(flags);
@@ -1550,6 +1657,10 @@ TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1
+#endif
TSAN_INTERCEPTOR(int, close, int fd) {
SCOPED_TSAN_INTERCEPTOR(close, fd);
@@ -1558,14 +1669,20 @@ TSAN_INTERCEPTOR(int, close, int fd) {
return REAL(close)(fd);
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __close, int fd) {
SCOPED_TSAN_INTERCEPTOR(__close, fd);
if (fd >= 0)
FdClose(thr, pc, fd);
return REAL(__close)(fd);
}
+#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
+#else
+#define TSAN_MAYBE_INTERCEPT___CLOSE
+#endif
// glibc guts
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
int fds[64];
@@ -1576,6 +1693,10 @@ TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
}
REAL(__res_iclose)(state, free_addr);
}
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
+#else
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
+#endif
TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
@@ -1642,6 +1763,7 @@ TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
void *res = REAL(tmpfile64)(fake);
@@ -1652,6 +1774,10 @@ TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
}
return res;
}
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
+#else
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64
+#endif
TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
// libc file streams can call user-supplied functions, see fopencookie.
@@ -1698,6 +1824,7 @@ TSAN_INTERCEPTOR(void*, opendir, char *path) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
if (epfd >= 0)
@@ -1709,7 +1836,12 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
if (epfd >= 0)
@@ -1719,6 +1851,10 @@ TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
FdAcquire(thr, pc, epfd);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
namespace __tsan {
@@ -1746,12 +1882,12 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
// from rtl_generic_sighandler) we have not yet received the reraised
// signal; and it looks too fragile to intercept all ways to reraise a signal.
if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
- __tsan::StackTrace stack;
- stack.ObtainCurrent(thr, pc);
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeErrnoInSignal);
if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack, true);
+ rep.AddStack(stack, true);
OutputReport(thr, rep);
}
}
@@ -2226,8 +2362,6 @@ namespace __tsan {
static void finalize(void *arg) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
- atexit_ctx->exit(thr, pc);
int status = Finalize(thr);
// Make sure the output is not lost.
// Flushing all the streams here may freeze the process if a child thread is
@@ -2250,8 +2384,10 @@ void InitializeInterceptors() {
REAL(memcmp) = internal_memcmp;
// Instruct libc malloc to consume less memory.
+#if !SANITIZER_FREEBSD
mallopt(1, 0); // M_MXFAST
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+#endif
InitializeCommonInterceptors();
@@ -2273,11 +2409,11 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(free);
TSAN_INTERCEPT(cfree);
TSAN_INTERCEPT(mmap);
- TSAN_INTERCEPT(mmap64);
+ TSAN_MAYBE_INTERCEPT_MMAP64;
TSAN_INTERCEPT(munmap);
- TSAN_INTERCEPT(memalign);
+ TSAN_MAYBE_INTERCEPT_MEMALIGN;
TSAN_INTERCEPT(valloc);
- TSAN_INTERCEPT(pvalloc);
+ TSAN_MAYBE_INTERCEPT_PVALLOC;
TSAN_INTERCEPT(posix_memalign);
TSAN_INTERCEPT(strlen);
@@ -2340,38 +2476,38 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(sem_getvalue);
TSAN_INTERCEPT(stat);
- TSAN_INTERCEPT(__xstat);
- TSAN_INTERCEPT(stat64);
- TSAN_INTERCEPT(__xstat64);
+ TSAN_MAYBE_INTERCEPT___XSTAT;
+ TSAN_MAYBE_INTERCEPT_STAT64;
+ TSAN_MAYBE_INTERCEPT___XSTAT64;
TSAN_INTERCEPT(lstat);
- TSAN_INTERCEPT(__lxstat);
- TSAN_INTERCEPT(lstat64);
- TSAN_INTERCEPT(__lxstat64);
+ TSAN_MAYBE_INTERCEPT___LXSTAT;
+ TSAN_MAYBE_INTERCEPT_LSTAT64;
+ TSAN_MAYBE_INTERCEPT___LXSTAT64;
TSAN_INTERCEPT(fstat);
- TSAN_INTERCEPT(__fxstat);
- TSAN_INTERCEPT(fstat64);
- TSAN_INTERCEPT(__fxstat64);
+ TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT64;
+ TSAN_MAYBE_INTERCEPT___FXSTAT64;
TSAN_INTERCEPT(open);
- TSAN_INTERCEPT(open64);
+ TSAN_MAYBE_INTERCEPT_OPEN64;
TSAN_INTERCEPT(creat);
- TSAN_INTERCEPT(creat64);
+ TSAN_MAYBE_INTERCEPT_CREAT64;
TSAN_INTERCEPT(dup);
TSAN_INTERCEPT(dup2);
TSAN_INTERCEPT(dup3);
- TSAN_INTERCEPT(eventfd);
- TSAN_INTERCEPT(signalfd);
- TSAN_INTERCEPT(inotify_init);
- TSAN_INTERCEPT(inotify_init1);
+ TSAN_MAYBE_INTERCEPT_EVENTFD;
+ TSAN_MAYBE_INTERCEPT_SIGNALFD;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
TSAN_INTERCEPT(socket);
TSAN_INTERCEPT(socketpair);
TSAN_INTERCEPT(connect);
TSAN_INTERCEPT(bind);
TSAN_INTERCEPT(listen);
- TSAN_INTERCEPT(epoll_create);
- TSAN_INTERCEPT(epoll_create1);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE;
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1;
TSAN_INTERCEPT(close);
- TSAN_INTERCEPT(__close);
- TSAN_INTERCEPT(__res_iclose);
+ TSAN_MAYBE_INTERCEPT___CLOSE;
+ TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
@@ -2381,7 +2517,7 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(unlink);
TSAN_INTERCEPT(tmpfile);
- TSAN_INTERCEPT(tmpfile64);
+ TSAN_MAYBE_INTERCEPT_TMPFILE64;
TSAN_INTERCEPT(fread);
TSAN_INTERCEPT(fwrite);
TSAN_INTERCEPT(abort);
@@ -2389,8 +2525,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(opendir);
- TSAN_INTERCEPT(epoll_ctl);
- TSAN_INTERCEPT(epoll_wait);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
+ TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
@@ -2413,9 +2549,6 @@ void InitializeInterceptors() {
// Need to setup it, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
- atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
- AtExitContext();
-
if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
Printf("ThreadSanitizer: failed to setup atexit callback\n");
Die();
@@ -2427,11 +2560,6 @@ void InitializeInterceptors() {
}
FdInit();
-
- // Remember list of loaded libraries for atexit interceptors.
- modules = (LoadedModule*)MmapOrDie(sizeof(*modules)*kMaxModules,
- "LoadedModule");
- nmodules = GetListOfModules(modules, kMaxModules, 0);
}
void *internal_start_thread(void(*func)(void *arg), void *arg) {
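
Note: the reworked atexit/__cxa_atexit/on_exit interceptors above hang each user callback off a heap-allocated AtExitCtx, Release() it at registration time, and Acquire() it in the wrapper just before the callback runs -- that pairing is what gives the callback a happens-before edge back to the registering thread. Below is a minimal stand-alone sketch of that shape, using plain atexit() and a single global slot (an obvious simplification: the real code passes ctx through __cxa_atexit, and Release/Acquire are tsan-internal calls represented here only as comments).

    #include <cstdio>
    #include <cstdlib>

    // Same shape as the AtExitCtx introduced in the hunk above.
    struct AtExitCtx {
      void (*f)(void *);
      void *arg;
    };

    // Simplification: plain atexit() cannot carry an argument, so the sketch
    // keeps one global slot; the interceptor instead hands ctx to __cxa_atexit.
    static AtExitCtx *g_ctx;

    static void at_exit_wrapper() {
      // Real interceptor: Acquire(thr, pc, (uptr)ctx) here, pairing with the
      // Release() performed at registration time.
      AtExitCtx *ctx = g_ctx;
      ctx->f(ctx->arg);
      free(ctx);
    }

    static void user_callback(void *arg) {
      printf("atexit callback, arg=%s\n", (const char *)arg);
    }

    int main() {
      AtExitCtx *ctx = (AtExitCtx *)malloc(sizeof(*ctx));
      ctx->f = user_callback;
      ctx->arg = (void *)"hello";
      // Real interceptor: Release(thr, pc, (uptr)ctx) here, so everything done
      // before registration happens-before the callback at process exit.
      g_ctx = ctx;
      atexit(at_exit_wrapper);
      return 0;
    }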
diff --git a/libsanitizer/tsan/tsan_interface_ann.cc b/libsanitizer/tsan/tsan_interface_ann.cc
index 85de2e6221f..b44e56898d6 100644
--- a/libsanitizer/tsan/tsan_interface_ann.cc
+++ b/libsanitizer/tsan/tsan_interface_ann.cc
@@ -52,7 +52,7 @@ class ScopedAnnotation {
StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \
ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
@@ -452,4 +452,6 @@ const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
} // extern "C"
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 316614dd486..15fc21c4627 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -472,7 +472,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
#define SCOPED_ATOMIC(func, ...) \
const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ uptr pc = StackTrace::GetCurrentPc(); \
mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
ThreadState *const thr = cur_thread(); \
if (thr->ignore_interceptors) \
diff --git a/libsanitizer/tsan/tsan_interface_java.cc b/libsanitizer/tsan/tsan_interface_java.cc
index 03a9e280ddc..6e1ec6de359 100644
--- a/libsanitizer/tsan/tsan_interface_java.cc
+++ b/libsanitizer/tsan/tsan_interface_java.cc
@@ -59,7 +59,7 @@ static JavaContext *jctx;
#define SCOPED_JAVA_FUNC(func) \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
ScopedJavaFunc scoped(thr, caller_pc); \
/**/
diff --git a/libsanitizer/tsan/tsan_mman.cc b/libsanitizer/tsan/tsan_mman.cc
index 2eae60de512..d89610a2cc3 100644
--- a/libsanitizer/tsan/tsan_mman.cc
+++ b/libsanitizer/tsan/tsan_mman.cc
@@ -64,17 +64,17 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
!flags()->report_signal_unsafe)
return;
- StackTrace stack;
- stack.ObtainCurrent(thr, pc);
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack, true);
+ rep.AddStack(stack, true);
OutputReport(thr, rep);
}
}
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return AllocatorReturnNull();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
@@ -82,15 +82,17 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
return 0;
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
- SignalUnsafeCall(thr, pc);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
return p;
}
-void user_free(ThreadState *thr, uptr pc, void *p) {
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
allocator()->Deallocate(&thr->alloc_cache, p);
- SignalUnsafeCall(thr, pc);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
}
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
diff --git a/libsanitizer/tsan/tsan_mman.h b/libsanitizer/tsan/tsan_mman.h
index ab8eb83f919..fe020af9d2d 100644
--- a/libsanitizer/tsan/tsan_mman.h
+++ b/libsanitizer/tsan/tsan_mman.h
@@ -24,9 +24,9 @@ void AllocatorPrintStats();
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
- uptr align = kDefaultAlignment);
+ uptr align = kDefaultAlignment, bool signal = true);
// Does not accept NULL.
-void user_free(ThreadState *thr, uptr pc, void *p);
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
uptr user_alloc_usable_size(const void *p);
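
Note: the new trailing parameters keep every existing call site source-compatible while letting internal callers (the fd table and FdSync allocations in the tsan_fd.cc hunk above) opt out of the signal-unsafe-call report. A minimal sketch of that default-argument pattern, assuming nothing about the real tsan allocator (malloc/free stand in for it and alignment handling is omitted):

    #include <cstdio>
    #include <cstdlib>

    static void SignalUnsafeCall() {
      printf("  (would check whether we are inside a signal handler)\n");
    }

    // Trailing defaulted parameters: existing callers compile unchanged,
    // internal callers pass signal=false to suppress the check.
    void *user_alloc(size_t sz, size_t align = 8, bool signal = true) {
      (void)align;
      void *p = malloc(sz);
      if (signal)
        SignalUnsafeCall();
      return p;
    }

    void user_free(void *p, bool signal = true) {
      if (signal)
        SignalUnsafeCall();
      free(p);
    }

    int main() {
      void *a = user_alloc(64);            // user-facing path: check runs
      void *b = user_alloc(64, 8, false);  // internal path (fd table): no check
      user_free(a);
      user_free(b, false);
      return 0;
    }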
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index b7a6376e8ed..35837d2132a 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -10,124 +10,223 @@
// Platform-specific code.
//===----------------------------------------------------------------------===//
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+#if !defined(TSAN_GO)
+
/*
-C++ linux memory layout:
-0000 0000 0000 - 03c0 0000 0000: protected
-03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 3000 0000 0000: protected
+C/C++ on linux and freebsd
+0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
+0100 0000 0000 - 0200 0000 0000: -
+0200 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: protected
+4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
-Go linux and darwin memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x020000000000ull;
+const uptr kShadowEnd = 0x100000000000ull;
+const uptr kHeapMemBeg = 0x7d0000000000ull;
+const uptr kHeapMemEnd = 0x7e0000000000ull;
+const uptr kLoAppMemBeg = 0x000000001000ull;
+const uptr kLoAppMemEnd = 0x010000000000ull;
+const uptr kHiAppMemBeg = 0x7e8000000000ull;
+const uptr kHiAppMemEnd = 0x800000000000ull;
+const uptr kAppMemMsk = 0x7c0000000000ull;
+const uptr kAppMemXor = 0x020000000000ull;
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return (mem >= kHeapMemBeg && mem < kHeapMemEnd) ||
+ (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) ||
+ (mem >= kHiAppMemBeg && mem < kHiAppMemEnd);
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (((x) & ~(kAppMemMsk | (kShadowCell - 1)))
+ ^ kAppMemXor) * kShadowCnt;
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1)))
+ ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1))
+ return (s / kShadowCnt) ^ kAppMemXor;
+ else
+ return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
+}
+
+static USED uptr UserRegions[] = {
+ kLoAppMemBeg, kLoAppMemEnd,
+ kHiAppMemBeg, kHiAppMemEnd,
+ kHeapMemBeg, kHeapMemEnd,
+};
+
+#elif defined(TSAN_GO) && !SANITIZER_WINDOWS
+
+/* Go on linux, darwin and freebsd
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 1380 0000 0000: shadow
-1460 0000 0000 - 2000 0000 0000: -
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7fff ffff ffff: -
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x200000000000ull;
+const uptr kShadowEnd = 0x238000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+}
-Go windows memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ return (s & ~kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
+
+#elif defined(TSAN_GO) && SANITIZER_WINDOWS
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0560 0000 0000: shadow
+0100 0000 0000 - 0380 0000 0000: shadow
+0380 0000 0000 - 0560 0000 0000: -
0560 0000 0000 - 0760 0000 0000: traces
0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
-07d0 0000 0000 - 07ff ffff ffff: -
+07d0 0000 0000 - 8000 0000 0000: -
*/
-#ifndef TSAN_PLATFORM_H
-#define TSAN_PLATFORM_H
-
-#include "tsan_defs.h"
-#include "tsan_trace.h"
-
-#if defined(__LP64__) || defined(_WIN64)
-namespace __tsan {
-
-#if defined(TSAN_GO)
-static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
-# if SANITIZER_WINDOWS
-static const uptr kLinuxShadowMsk = 0x010000000000ULL;
-static const uptr kMetaShadow = 0x076000000000ULL;
-static const uptr kMetaSize = 0x007000000000ULL;
-# else // if SANITIZER_WINDOWS
-static const uptr kLinuxShadowMsk = 0x200000000000ULL;
-static const uptr kMetaShadow = 0x300000000000ULL;
-static const uptr kMetaSize = 0x100000000000ULL;
-# endif // if SANITIZER_WINDOWS
-#else // defined(TSAN_GO)
-static const uptr kMetaShadow = 0x300000000000ULL;
-static const uptr kMetaSize = 0x100000000000ULL;
-static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#endif
+const uptr kMetaShadowBeg = 0x076000000000ull;
+const uptr kMetaShadowEnd = 0x07d000000000ull;
+const uptr kTraceMemBeg = 0x056000000000ull;
+const uptr kTraceMemEnd = 0x076000000000ull;
+const uptr kShadowBeg = 0x010000000000ull;
+const uptr kShadowEnd = 0x038000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
-static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
-#if SANITIZER_WINDOWS
-const uptr kTraceMemBegin = 0x056000000000ULL;
-#else
-const uptr kTraceMemBegin = 0x600000000000ULL;
-#endif
-const uptr kTraceMemSize = 0x020000000000ULL;
-
-// This has to be a macro to allow constant initialization of constants below.
-#ifndef TSAN_GO
-#define MemToShadow(addr) \
- ((((uptr)addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
-#define MemToMeta(addr) \
- (u32*)(((((uptr)addr) & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1))) \
- / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
-#else
-#define MemToShadow(addr) \
- (((((uptr)addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
-#define MemToMeta(addr) \
- (u32*)(((((uptr)addr) & ~(kMetaShadowCell - 1)) \
- / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
-#endif
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
-static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
-static const uptr kLinuxShadowEnd =
- MemToShadow(kLinuxAppMemEnd) | 0xff;
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
-static inline bool IsAppMem(uptr mem) {
-#if defined(TSAN_GO)
- return mem <= kLinuxAppMemEnd;
-#else
- return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
-#endif
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
}
-static inline bool IsShadowMem(uptr mem) {
- return mem >= kLinuxShadowBeg && mem <= kLinuxShadowEnd;
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowEnd);
}
-static inline uptr ShadowToMem(uptr shadow) {
- CHECK(IsShadowMem(shadow));
-#ifdef TSAN_GO
- return (shadow & ~kLinuxShadowMsk) / kShadowCnt;
-#else
- return (shadow / kShadowCnt) | kLinuxAppMemMsk;
-#endif
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
+ return (x & ~kShadowBeg) / kShadowCnt;
}
-void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
-uptr GetRSS();
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
-void InitializePlatform();
-void FinalizePlatform();
+#else
+# error "Unknown platform"
+#endif
// The additional page is to catch shadow stack overflow as paging fault.
// Windows wants 64K alignment for mmaps.
@@ -135,18 +234,23 @@ const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
uptr ALWAYS_INLINE GetThreadTrace(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ kTraceSize * sizeof(Event);
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
+void InitializePlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+uptr GetRSS();
+
void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
@@ -162,8 +266,4 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
} // namespace __tsan
-#else // defined(__LP64__) || defined(_WIN64)
-# error "Only 64-bit is supported"
-#endif
-
#endif // TSAN_PLATFORM_H
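
Note: the address constants below are copied from the C/C++ linux/freebsd section of the hunk above; kShadowCell = 8 and kShadowCnt = 4 are assumed to be the default tsan values. This stand-alone sketch just evaluates the new MemToShadow() mapping for two sample application addresses and checks that the result lands inside [kShadowBeg, kShadowEnd).

    #include <cstdio>

    typedef unsigned long long uptr;

    // Constants from the hunk above (C/C++ on linux/freebsd).
    const uptr kShadowBeg  = 0x020000000000ull;
    const uptr kShadowEnd  = 0x100000000000ull;
    const uptr kAppMemMsk  = 0x7c0000000000ull;
    const uptr kAppMemXor  = 0x020000000000ull;
    // Assumed tsan defaults: 8-byte shadow cells, 4 shadow slots per cell.
    const uptr kShadowCell = 8;
    const uptr kShadowCnt  = 4;

    uptr MemToShadow(uptr x) {
      return ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) * kShadowCnt;
    }

    int main() {
      const uptr samples[] = {
        0x000000401000ull,  // typical low-range (main binary) address
        0x7d0000001000ull,  // address inside the heap region
      };
      for (unsigned i = 0; i < 2; i++) {
        uptr s = MemToShadow(samples[i]);
        printf("app %012llx -> shadow %012llx  in-range=%d\n",
               samples[i], s, (int)(s >= kShadowBeg && s < kShadowEnd));
      }
      return 0;
    }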
diff --git a/libsanitizer/tsan/tsan_platform_linux.cc b/libsanitizer/tsan/tsan_platform_linux.cc
index ba81fd242e3..32591316ea2 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cc
+++ b/libsanitizer/tsan/tsan_platform_linux.cc
@@ -7,7 +7,7 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Linux-specific code.
+// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
@@ -60,6 +61,9 @@ void *__libc_stack_end = 0;
namespace __tsan {
+static uptr g_data_start;
+static uptr g_data_end;
+
const uptr kPageSize = 4096;
enum {
@@ -74,22 +78,26 @@ enum {
MemCount = 8,
};
-void FillProfileCallback(uptr start, uptr rss, bool file,
+void FillProfileCallback(uptr p, uptr rss, bool file,
uptr *mem, uptr stats_size) {
mem[MemTotal] += rss;
- start >>= 40;
- if (start < 0x10)
+ if (p >= kShadowBeg && p < kShadowEnd)
mem[MemShadow] += rss;
- else if (start >= 0x20 && start < 0x30)
- mem[file ? MemFile : MemMmap] += rss;
- else if (start >= 0x30 && start < 0x40)
+ else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
mem[MemMeta] += rss;
- else if (start >= 0x7e)
+#ifndef TSAN_GO
+ else if (p >= kHeapMemBeg && p < kHeapMemEnd)
+ mem[MemHeap] += rss;
+ else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
mem[file ? MemFile : MemMmap] += rss;
- else if (start >= 0x60 && start < 0x62)
+ else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
+#else
+ else if (p >= kAppMemBeg && p < kAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
+#endif
+ else if (p >= kTraceMemBeg && p < kTraceMemEnd)
mem[MemTrace] += rss;
- else if (start >= 0x7d && start < 0x7e)
- mem[MemHeap] += rss;
else
mem[MemOther] += rss;
}
@@ -97,12 +105,14 @@ void FillProfileCallback(uptr start, uptr rss, bool file,
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
uptr mem[MemCount] = {};
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+ StackDepotStats *stacks = StackDepotGetStats();
internal_snprintf(buf, buf_size,
"RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
- " trace:%zd heap:%zd other:%zd nthr=%zd/%zd\n",
+ " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
mem[MemHeap] >> 20, mem[MemOther] >> 20,
+ stacks->allocated >> 20, stacks->n_uniq_ids,
nlive, nthread);
}
@@ -137,7 +147,7 @@ uptr GetRSS() {
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
+ FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
}
#endif
@@ -218,12 +228,12 @@ static void MapRodata() {
void InitializeShadowMemory() {
// Map memory shadow.
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
+ "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
Die();
}
// This memory range is used for thread stacks and large user mmaps.
@@ -235,78 +245,42 @@ void InitializeShadowMemory() {
0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
#endif
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
// Map meta shadow.
- if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) {
- Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n",
- kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow);
- Die();
- }
- if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) {
- Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n",
- kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize);
- Die();
- }
- uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize);
- if (meta != kMetaShadow) {
+ uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
+ uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size);
+ if (meta != kMetaShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", meta, kMetaShadow);
+ "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
Die();
}
DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
- kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30);
-
- // Protect gaps.
- const uptr kClosedLowBeg = 0x200000;
- const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
- const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
- const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin),
- kMetaShadow);
-
- ProtectRange(kClosedLowBeg, kClosedLowEnd);
- ProtectRange(kClosedMidBeg, kClosedMidEnd);
- VPrintf(2, "kClosedLow %zx-%zx (%zuGB)\n",
- kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
- VPrintf(2, "kClosedMid %zx-%zx (%zuGB)\n",
- kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
- VPrintf(2, "app mem: %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
- VPrintf(2, "stack: %zx\n", (uptr)&shadow);
+ meta, meta + meta_size, meta_size >> 30);
MapRodata();
}
-#endif
-
-static uptr g_data_start;
-static uptr g_data_end;
-
-#ifndef TSAN_GO
-static void CheckPIE() {
- // Ensure that the binary is indeed compiled with -pie.
- MemoryMappingLayout proc_maps(true);
- uptr start, end;
- if (proc_maps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0,
- /*protection*/0)) {
- if ((u64)start < kLinuxAppMemBeg) {
- Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
- "something is mapped at 0x%zx < 0x%zx)\n",
- start, kLinuxAppMemBeg);
- Printf("FATAL: Make sure to compile with -fPIE"
- " and to link with -pie.\n");
- Die();
- }
- }
-}
static void InitDataSeg() {
MemoryMappingLayout proc_maps(true);
uptr start, end, offset;
char name[128];
+#if SANITIZER_FREEBSD
+ // On FreeBSD BSS is usually the last block allocated within the
+ // low range and heap is the last block allocated within the range
+ // 0x800000000-0x8ffffffff.
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+ /*protection*/ 0)) {
+ DPrintf("%p-%p %p %s\n", start, end, offset, name);
+ if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
+ name[0] == '\0') {
+ g_data_start = start;
+ g_data_end = end;
+ }
+ }
+#else
bool prev_is_data = false;
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
/*protection*/ 0)) {
@@ -322,12 +296,35 @@ static void InitDataSeg() {
g_data_end = end;
prev_is_data = is_data;
}
+#endif
DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
CHECK_LT(g_data_start, g_data_end);
CHECK_GE((uptr)&g_data_start, g_data_start);
CHECK_LT((uptr)&g_data_start, g_data_end);
}
+static void CheckAndProtect() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ uptr p, end;
+ while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
+ if (IsAppMem(p))
+ continue;
+ if (p >= kHeapMemEnd &&
+ p < kHeapMemEnd + PrimaryAllocator::AdditionalSize())
+ continue;
+ if (p >= 0xf000000000000000ull) // vdso
+ break;
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+ Die();
+ }
+
+ ProtectRange(kLoAppMemEnd, kShadowBeg);
+ ProtectRange(kShadowEnd, kMetaShadowBeg);
+ ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+ ProtectRange(kTraceMemEnd, kHeapMemBeg);
+ ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg);
+}
#endif // #ifndef TSAN_GO
void InitializePlatform() {
@@ -363,7 +360,7 @@ void InitializePlatform() {
}
#ifndef TSAN_GO
- CheckPIE();
+ CheckAndProtect();
InitTlsSize();
InitDataSeg();
#endif
@@ -426,4 +423,4 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
} // namespace __tsan
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
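
Note: CheckAndProtect() above walks the process maps, rejects unexpected mappings, and then reserves every gap between the tsan regions with ProtectRange(). The linux-oriented sketch below shows what such a reservation amounts to (a PROT_NONE fixed mapping, so any stray access into the gap faults); the address is illustrative, not part of the tsan layout, and error handling is reduced to a return value.

    #include <sys/mman.h>
    #include <cstdio>

    // Reserve [beg, end) as inaccessible address space, like ProtectRange().
    // MAP_FIXED deliberately replaces anything already mapped there -- the
    // runtime owns its fixed layout; a demo should pick an unused address.
    static bool ProtectRange(unsigned long long beg, unsigned long long end) {
      if (beg == end)
        return true;
      void *res = mmap((void *)beg, end - beg, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
                       -1, 0);
      return res == (void *)beg;
    }

    int main() {
      unsigned long long beg = 0x610000000000ull;  // illustrative gap address
      unsigned long long end = beg + (1ull << 20);
      if (ProtectRange(beg, end))
        printf("gap %llx-%llx is now inaccessible\n", beg, end);
      else
        printf("mmap failed\n");
      return 0;
    }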
diff --git a/libsanitizer/tsan/tsan_platform_mac.cc b/libsanitizer/tsan/tsan_platform_mac.cc
index 95527c79431..2091318a674 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cc
+++ b/libsanitizer/tsan/tsan_platform_mac.cc
@@ -54,20 +54,20 @@ uptr GetRSS() {
#ifndef TSAN_GO
void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie.\n");
Die();
}
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+ DPrintf("kShadow %zx-%zx (%zuGB)\n",
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
+ DPrintf("kAppMem %zx-%zx (%zuGB)\n",
+ kAppMemBeg, kAppMemEnd,
+ (kAppMemEnd - kAppMemBeg) >> 30);
}
#endif
@@ -75,10 +75,6 @@ void InitializePlatform() {
DisableCoreDumperIfNecessary();
}
-void FinalizePlatform() {
- fflush(0);
-}
-
#ifndef TSAN_GO
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void *abstime), void *c, void *m, void *abstime,
diff --git a/libsanitizer/tsan/tsan_platform_windows.cc b/libsanitizer/tsan/tsan_platform_windows.cc
index d6e9e6d33f4..3f81c54e3eb 100644
--- a/libsanitizer/tsan/tsan_platform_windows.cc
+++ b/libsanitizer/tsan/tsan_platform_windows.cc
@@ -36,10 +36,6 @@ uptr GetRSS() {
void InitializePlatform() {
}
-void FinalizePlatform() {
- fflush(0);
-}
-
} // namespace __tsan
#endif // SANITIZER_WINDOWS
diff --git a/libsanitizer/tsan/tsan_report.cc b/libsanitizer/tsan/tsan_report.cc
index b14856cd153..6feab81383f 100644
--- a/libsanitizer/tsan/tsan_report.cc
+++ b/libsanitizer/tsan/tsan_report.cc
@@ -11,10 +11,30 @@
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
namespace __tsan {
+ReportStack::ReportStack() : next(nullptr), info(), suppressable(false) {}
+
+ReportStack *ReportStack::New(uptr addr) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
+ ReportStack *res = new(mem) ReportStack();
+ res->info.address = addr;
+ return res;
+}
+
+ReportLocation::ReportLocation(ReportLocationType type)
+ : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
+ fd(0), suppressable(false), stack(nullptr) {}
+
+ReportLocation *ReportLocation::New(ReportLocationType type) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
+ return new(mem) ReportLocation(type);
+}
+
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
@@ -68,6 +88,8 @@ static const char *ReportTypeString(ReportType typ) {
return "data race on vptr (ctor/dtor vs virtual call)";
if (typ == ReportTypeUseAfterFree)
return "heap-use-after-free";
+ if (typ == ReportTypeVptrUseAfterFree)
+ return "heap-use-after-free (virtual call vs free)";
if (typ == ReportTypeThreadLeak)
return "thread leak";
if (typ == ReportTypeMutexDestroyLocked)
@@ -94,14 +116,11 @@ void PrintStack(const ReportStack *ent) {
Printf(" [failed to restore the stack]\n\n");
return;
}
- for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line);
- if (ent->col)
- Printf(":%d", ent->col);
- if (ent->module && ent->offset)
- Printf(" (%s+%p)\n", ent->module, (void*)ent->offset);
- else
- Printf(" (%p)\n", (void*)ent->pc);
+ for (int i = 0; ent && ent->info.address; ent = ent->next, i++) {
+ InternalScopedString res(2 * GetPageSizeCached());
+ RenderFrame(&res, common_flags()->stack_trace_format, i, ent->info,
+ common_flags()->strip_path_prefix, "__interceptor_");
+ Printf("%s\n", res.data());
}
Printf("\n");
}
@@ -143,12 +162,15 @@ static void PrintLocation(const ReportLocation *loc) {
bool print_stack = false;
Printf("%s", d.Location());
if (loc->type == ReportLocationGlobal) {
+ const DataInfo &global = loc->global;
Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
- loc->name, loc->size, loc->addr, loc->module, loc->offset);
+ global.name, global.size, global.start,
+ StripModuleName(global.module), global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->size, loc->addr, thread_name(thrbuf, loc->tid));
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
print_stack = true;
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
@@ -301,8 +323,10 @@ void PrintReport(const ReportDesc *rep) {
if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
- if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep)))
- ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func);
+ if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep))) {
+ const AddressInfo &info = ent->info;
+ ReportErrorSummary(rep_typ_str, info.file, info.line, info.function);
+ }
Printf("==================\n");
}
@@ -317,8 +341,9 @@ void PrintStack(const ReportStack *ent) {
return;
}
for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" %s()\n %s:%d +0x%zx\n",
- ent->func, ent->file, ent->line, (void*)ent->offset);
+ const AddressInfo &info = ent->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function, info.file, info.line,
+ (void *)info.module_offset);
}
}
diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h
index 0bde59b1675..3d99c7a7c24 100644
--- a/libsanitizer/tsan/tsan_report.h
+++ b/libsanitizer/tsan/tsan_report.h
@@ -11,6 +11,7 @@
#ifndef TSAN_REPORT_H
#define TSAN_REPORT_H
+#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_vector.h"
@@ -20,6 +21,7 @@ enum ReportType {
ReportTypeRace,
ReportTypeVptrRace,
ReportTypeUseAfterFree,
+ ReportTypeVptrUseAfterFree,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
ReportTypeMutexDoubleLock,
@@ -33,14 +35,12 @@ enum ReportType {
struct ReportStack {
ReportStack *next;
- char *module;
- uptr offset;
- uptr pc;
- char *func;
- char *file;
- int line;
- int col;
+ AddressInfo info;
bool suppressable;
+ static ReportStack *New(uptr addr);
+
+ private:
+ ReportStack();
};
struct ReportMopMutex {
@@ -70,17 +70,17 @@ enum ReportLocationType {
struct ReportLocation {
ReportLocationType type;
- uptr addr;
- uptr size;
- char *module;
- uptr offset;
+ DataInfo global;
+ uptr heap_chunk_start;
+ uptr heap_chunk_size;
int tid;
int fd;
- char *name;
- char *file;
- int line;
bool suppressable;
ReportStack *stack;
+
+ static ReportLocation *New(ReportLocationType type);
+ private:
+ explicit ReportLocation(ReportLocationType type);
};
struct ReportThread {
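
The tsan_report.h hunks above replace the per-frame char*/int fields with a single __sanitizer::AddressInfo and give both ReportStack and ReportLocation private constructors plus static New() factories that placement-new the object into memory obtained from internal_alloc(). A minimal standalone sketch of that construction pattern (plain malloc stands in for internal_alloc; FrameInfo/Frame are hypothetical stand-ins, not the real types):

    #include <cstdint>
    #include <cstdlib>
    #include <new>  // placement new

    // Simplified stand-in for __sanitizer::AddressInfo.
    struct FrameInfo {
      uintptr_t address = 0;
      const char *function = nullptr;
      const char *file = nullptr;
      int line = 0;
    };

    // Simplified stand-in for ReportStack: private ctor + static factory.
    struct Frame {
      Frame *next;
      FrameInfo info;
      bool suppressable;

      static Frame *New(uintptr_t addr) {
        void *mem = std::malloc(sizeof(Frame));  // internal_alloc() in tsan
        Frame *f = new (mem) Frame();            // placement new, as in the patch
        f->info.address = addr;
        return f;
      }

     private:
      Frame() : next(nullptr), info(), suppressable(false) {}
    };
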
diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
index f5942bcaa3e..bf971b62da4 100644
--- a/libsanitizer/tsan/tsan_rtl.cc
+++ b/libsanitizer/tsan/tsan_rtl.cc
@@ -259,8 +259,8 @@ void MapShadow(uptr addr, uptr size) {
void MapThreadTrace(uptr addr, uptr size) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, kTraceMemBegin);
- CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+ CHECK_GE(addr, kTraceMemBeg);
+ CHECK_LE(addr + size, kTraceMemEnd);
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
if (addr1 != addr) {
@@ -270,6 +270,28 @@ void MapThreadTrace(uptr addr, uptr size) {
}
}
+static void CheckShadowMapping() {
+ for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
+ const uptr beg = UserRegions[i];
+ const uptr end = UserRegions[i + 1];
+ VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
+ for (int x = -1; x <= 1; x++) {
+ const uptr p = p0 + x;
+ if (p < beg || p >= end)
+ continue;
+ const uptr s = MemToShadow(p);
+ VPrintf(3, " checking pointer %p -> %p\n", p, s);
+ CHECK(IsAppMem(p));
+ CHECK(IsShadowMem(s));
+ CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ const uptr m = (uptr)MemToMeta(p);
+ CHECK(IsMetaMem(m));
+ }
+ }
+ }
+}
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
@@ -289,6 +311,7 @@ void Initialize(ThreadState *thr) {
InitializeAllocator();
#endif
InitializeInterceptors();
+ CheckShadowMapping();
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
@@ -437,8 +460,8 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
- u32 id = StackDepotPut(thr->shadow_stack,
- thr->shadow_stack_pos - thr->shadow_stack);
+ u32 id = StackDepotPut(
+ StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
if (pc != 0)
thr->shadow_stack_pos--;
return id;
@@ -451,7 +474,7 @@ void TraceSwitch(ThreadState *thr) {
unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
TraceHeader *hdr = &thr_trace->headers[trace];
hdr->epoch0 = thr->fast_state.epoch();
- hdr->stack0.ObtainCurrent(thr, 0);
+ ObtainCurrentStack(thr, 0, &hdr->stack0);
hdr->mset0 = thr->mset;
thr->nomalloc--;
}
@@ -690,6 +713,8 @@ ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads.
DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
return res;
#else
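
CheckShadowMapping(), added above, samples a handful of addresses in every user region and asserts that the application-to-shadow-to-application mapping round-trips to the shadow-cell-aligned address. The idea in isolation, with a made-up linear mapping and layout constants rather than tsan's real ones:

    #include <cassert>
    #include <cstdint>

    // Hypothetical layout, for illustration only: kCell bytes of application
    // memory map to kCell*kCnt bytes of shadow.
    constexpr uintptr_t kAppBeg = 0x7e0000000000ull, kAppEnd = 0x7f0000000000ull;
    constexpr uintptr_t kShadowBeg = 0x100000000000ull;
    constexpr uintptr_t kCell = 8, kCnt = 4;

    uintptr_t MemToShadow(uintptr_t p) {
      return kShadowBeg + ((p - kAppBeg) / kCell) * kCell * kCnt;
    }
    uintptr_t ShadowToMem(uintptr_t s) {
      return kAppBeg + ((s - kShadowBeg) / (kCell * kCnt)) * kCell;
    }

    int main() {
      // Same spirit as CheckShadowMapping: probe points across the region and
      // check that the round trip yields the cell-aligned application address.
      for (uintptr_t p0 = kAppBeg; p0 < kAppEnd; p0 += (kAppEnd - kAppBeg) / 4) {
        for (int x = -1; x <= 1; x++) {
          uintptr_t p = p0 + x;
          if (p < kAppBeg || p >= kAppEnd)
            continue;
          assert(ShadowToMem(MemToShadow(p)) == (p & ~(kCell - 1)));
        }
      }
      return 0;
    }
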
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index c7ea94dfbde..3f7873165cd 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -51,11 +51,8 @@
namespace __tsan {
#ifndef TSAN_GO
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-
struct MapUnmapCallback;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
@@ -499,9 +496,9 @@ class ScopedReport {
explicit ScopedReport(ReportType typ);
~ScopedReport();
- void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+ void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
const MutexSet *mset);
- void AddStack(const StackTrace *stack, bool suppressable = false);
+ void AddStack(StackTrace stack, bool suppressable = false);
void AddThread(const ThreadContext *tctx, bool suppressable = false);
void AddThread(int unique_tid, bool suppressable = false);
void AddUniqueTid(int unique_tid);
@@ -525,7 +522,20 @@ class ScopedReport {
void operator = (const ScopedReport&);
};
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset);
+
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
+ uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+ uptr start = 0;
+ if (size + !!toppc > kStackTraceMax) {
+ start = size + !!toppc - kStackTraceMax;
+ size = kStackTraceMax - !!toppc;
+ }
+ stack->Init(&thr->shadow_stack[start], size, toppc);
+}
+
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
@@ -552,9 +562,8 @@ void ForkChildAfter(ThreadState *thr, uptr pc);
void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace);
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+ StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
@@ -575,7 +584,7 @@ ReportStack *SkipTsanInternalFrames(ReportStack *ent);
u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
-void PrintCurrentStackSlow(); // uses libunwind
+void PrintCurrentStackSlow(uptr pc); // uses libunwind
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
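
The ObtainCurrentStack() template above copies the thread's shadow stack into any StackTrace-like object; when the stack is deeper than kStackTraceMax (256 in sanitizer_common at the time) it keeps only the newest frames, reserving one slot for the optional top pc. A standalone check of that truncation arithmetic:

    #include <cassert>
    #include <cstddef>

    // Mirrors the index math in ObtainCurrentStack (illustrative only).
    void Truncate(size_t depth, bool has_toppc, size_t max,
                  size_t *start, size_t *count) {
      const size_t top = has_toppc ? 1 : 0;  // the !!toppc term
      *start = 0;
      *count = depth;
      if (depth + top > max) {
        *start = depth + top - max;
        *count = max - top;
      }
    }

    int main() {
      size_t start, count;
      // 300 shadow-stack frames plus an explicit top pc, capped at 256:
      Truncate(300, true, 256, &start, &count);
      assert(start == 45 && count == 255);  // 255 copied frames + top pc == 256
      // A shallow stack is copied as-is.
      Truncate(10, false, 256, &start, &count);
      assert(start == 0 && count == 10);
      return 0;
    }
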
diff --git a/libsanitizer/tsan/tsan_rtl_amd64.S b/libsanitizer/tsan/tsan_rtl_amd64.S
index 71a2ecda9dd..6df36a500a7 100644
--- a/libsanitizer/tsan/tsan_rtl_amd64.S
+++ b/libsanitizer/tsan/tsan_rtl_amd64.S
@@ -170,10 +170,15 @@ setjmp:
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
@@ -197,10 +202,15 @@ _setjmp:
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
@@ -231,10 +241,15 @@ sigsetjmp:
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
@@ -272,10 +287,15 @@ __sigsetjmp:
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
@@ -296,7 +316,7 @@ __sigsetjmp:
CFI_ENDPROC
.size __sigsetjmp, .-__sigsetjmp
-#ifdef __linux__
+#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif
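
The setjmp interceptors above recover the caller's %rsp and, on glibc, apply the same PTR_MANGLE transform libc uses when storing it in the jmp_buf: xor with the per-process guard at %fs:0x30, then rotate left by 0x11 bits. FreeBSD's libc stores the value unmangled and lays out the frame differently, hence the separate, smaller offsets. The transform, expressed in C++ with a hypothetical guard value:

    #include <cstdint>

    // glibc-style pointer mangling as in the assembly above: xor with the
    // per-process guard, then rotate left by 0x11; demangling is the inverse.
    uint64_t Rotl(uint64_t v, unsigned n) { return (v << n) | (v >> (64 - n)); }
    uint64_t Rotr(uint64_t v, unsigned n) { return (v >> n) | (v << (64 - n)); }

    uint64_t Mangle(uint64_t sp, uint64_t guard)  { return Rotl(sp ^ guard, 0x11); }
    uint64_t Demangle(uint64_t v, uint64_t guard) { return Rotr(v, 0x11) ^ guard; }
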
diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cc b/libsanitizer/tsan/tsan_rtl_mutex.cc
index cc183138aba..d731b4bfb59 100644
--- a/libsanitizer/tsan/tsan_rtl_mutex.cc
+++ b/libsanitizer/tsan/tsan_rtl_mutex.cc
@@ -57,9 +57,9 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
rep.AddMutex(mid);
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
- rep.AddStack(&trace, true);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
rep.AddLocation(addr, 1);
OutputReport(thr, rep);
}
@@ -122,12 +122,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(mid);
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
- rep.AddStack(&trace);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace);
FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(&trace, true);
+ rep.AddStack(trace, true);
rep.AddLocation(addr, 1);
OutputReport(thr, rep);
}
@@ -470,21 +470,17 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
rep.AddUniqueTid((int)r->loop[i].thr_ctx);
rep.AddThread((int)r->loop[i].thr_ctx);
}
- InternalScopedBuffer<StackTrace> stacks(2 * DDReport::kMaxLoopSize);
uptr dummy_pc = 0x42;
for (int i = 0; i < r->n; i++) {
- uptr size;
for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
u32 stk = r->loop[i].stk[j];
if (stk) {
- const uptr *trace = StackDepotGet(stk, &size);
- stacks[i].Init(const_cast<uptr *>(trace), size);
+ rep.AddStack(StackDepotGet(stk), true);
} else {
// Sometimes we fail to extract the stack trace (FIXME: investigate),
// but we should still produce some stack trace in the report.
- stacks[i].Init(&dummy_pc, 1);
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
}
- rep.AddStack(&stacks[i], true);
}
}
OutputReport(thr, rep);
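
In the ReportDeadlock hunk above, the per-loop array of heap-backed StackTrace objects is gone: StackDepotGet() in the new sanitizer_common API returns a lightweight StackTrace view (pointer plus size) that can be passed to AddStack() by value. The depot round trip as used here, as an in-tree sketch (not buildable outside the sanitizer runtime):

    #include "sanitizer_common/sanitizer_stackdepot.h"

    using namespace __sanitizer;

    // Store a pc array by value and get a compact id back.
    u32 Remember(const uptr *pcs, uptr n) {
      return StackDepotPut(StackTrace(pcs, n));
    }

    // Recover a non-owning {trace, size} view for reporting.
    StackTrace Recall(u32 id) {
      return StackDepotGet(id);
    }
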
diff --git a/libsanitizer/tsan/tsan_rtl_report.cc b/libsanitizer/tsan/tsan_rtl_report.cc
index eafd1f4dfcd..11ec8381de6 100644
--- a/libsanitizer/tsan/tsan_rtl_report.cc
+++ b/libsanitizer/tsan/tsan_rtl_report.cc
@@ -28,7 +28,7 @@ namespace __tsan {
using namespace __sanitizer; // NOLINT
-static ReportStack *SymbolizeStack(const StackTrace& trace);
+static ReportStack *SymbolizeStack(StackTrace trace);
void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
@@ -39,7 +39,7 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
- PrintCurrentStackSlow();
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
Die();
}
@@ -57,27 +57,16 @@ bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
static void StackStripMain(ReportStack *stack) {
ReportStack *last_frame = 0;
ReportStack *last_frame2 = 0;
- const char *prefix = "__interceptor_";
- uptr prefix_len = internal_strlen(prefix);
- const char *path_prefix = common_flags()->strip_path_prefix;
- uptr path_prefix_len = internal_strlen(path_prefix);
- char *pos;
for (ReportStack *ent = stack; ent; ent = ent->next) {
- if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
- ent->func += prefix_len;
- if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
- ent->file = pos + path_prefix_len;
- if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
- ent->file += 2;
last_frame2 = last_frame;
last_frame = ent;
}
if (last_frame2 == 0)
return;
- const char *last = last_frame->func;
+ const char *last = last_frame->info.function;
#ifndef TSAN_GO
- const char *last2 = last_frame2->func;
+ const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
if (last2 && 0 == internal_strcmp(last2, "main")) {
last_frame2->next = 0;
@@ -105,39 +94,36 @@ static void StackStripMain(ReportStack *stack) {
ReportStack *SymbolizeStackId(u32 stack_id) {
if (stack_id == 0)
return 0;
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(stack_id, &ssz);
- if (stack == 0)
- return 0;
- StackTrace trace;
- trace.Init(stack, ssz);
- return SymbolizeStack(trace);
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
}
-static ReportStack *SymbolizeStack(const StackTrace& trace) {
- if (trace.IsEmpty())
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
return 0;
ReportStack *stack = 0;
- for (uptr si = 0; si < trace.Size(); si++) {
- const uptr pc = trace.Get(si);
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
#ifndef TSAN_GO
// We obtain the return address, that is, address of the next instruction,
// so offset it by 1 byte.
- const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
+ const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
// FIXME(dvyukov): Go sometimes uses address of a function as top pc.
uptr pc1 = pc;
- if (si != trace.Size() - 1)
+ if (si != trace.size - 1)
pc1 -= 1;
#endif
ReportStack *ent = SymbolizeCode(pc1);
CHECK_NE(ent, 0);
ReportStack *last = ent;
while (last->next) {
- last->pc = pc; // restore original pc for report
+ last->info.address = pc; // restore original pc for report
last = last->next;
}
- last->pc = pc; // restore original pc for report
+ last->info.address = pc; // restore original pc for report
last->next = stack;
stack = ent;
}
@@ -160,14 +146,14 @@ ScopedReport::~ScopedReport() {
DestroyAndFree(rep_);
}
-void ScopedReport::AddStack(const StackTrace *stack, bool suppressable) {
+void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
ReportStack **rs = rep_->stacks.PushBack();
- *rs = SymbolizeStack(*stack);
+ *rs = SymbolizeStack(stack);
(*rs)->suppressable = suppressable;
}
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
- const StackTrace *stack, const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+ const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop);
@@ -176,7 +162,7 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->size = s.size();
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
- mop->stack = SymbolizeStack(*stack);
+ mop->stack = SymbolizeStack(stack);
if (mop->stack)
mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
@@ -315,13 +301,11 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
int creat_tid = -1;
u32 creat_stack = 0;
if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = ReportLocationFD;
+ ReportLocation *loc = ReportLocation::New(ReportLocationFD);
loc->fd = fd;
loc->tid = creat_tid;
loc->stack = SymbolizeStackId(creat_stack);
+ rep_->locs.PushBack(loc);
ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
if (tctx)
AddThread(tctx);
@@ -336,33 +320,25 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
}
if (b != 0) {
ThreadContext *tctx = FindThreadByTidLocked(b->tid);
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = ReportLocationHeap;
- loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
- loc->size = b->siz;
+ ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ loc->heap_chunk_size = b->siz;
loc->tid = tctx ? tctx->tid : b->tid;
- loc->name = 0;
- loc->file = 0;
- loc->line = 0;
- loc->stack = 0;
loc->stack = SymbolizeStackId(b->stk);
+ rep_->locs.PushBack(loc);
if (tctx)
AddThread(tctx);
return;
}
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
+ ReportLocation *loc =
+ ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
loc->tid = tctx->tid;
+ rep_->locs.PushBack(loc);
AddThread(tctx);
}
- ReportLocation *loc = SymbolizeData(addr);
- if (loc) {
+ if (ReportLocation *loc = SymbolizeData(addr)) {
loc->suppressable = true;
rep_->locs.PushBack(loc);
return;
@@ -384,7 +360,8 @@ const ReportDesc *ScopedReport::GetReport() const {
return rep_;
}
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
@@ -409,13 +386,13 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
InternalScopedBuffer<uptr> stack(kShadowStackSize);
- for (uptr i = 0; i < hdr->stack0.Size(); i++) {
- stack[i] = hdr->stack0.Get(i);
+ for (uptr i = 0; i < hdr->stack0.size; i++) {
+ stack[i] = hdr->stack0.trace[i];
DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
}
if (mset)
*mset = hdr->mset0;
- uptr pos = hdr->stack0.Size();
+ uptr pos = hdr->stack0.size;
Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) {
Event ev = events[i];
@@ -450,13 +427,13 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
stk->Init(stack.data(), pos);
}
-static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
bool equal_stack = false;
RacyStacks hash;
if (flags()->suppress_equal_stacks) {
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
if (hash == ctx->racy_stacks[i]) {
DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
@@ -489,12 +466,12 @@ static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
return false;
}
-static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
+static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
if (flags()->suppress_equal_stacks) {
RacyStacks hash;
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
ctx->racy_stacks.PushBack(hash);
}
if (flags()->suppress_equal_addresses) {
@@ -535,15 +512,14 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
return true;
}
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace) {
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+ StackTrace trace) {
for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
continue;
- for (uptr j = 0; j < trace.Size(); j++) {
+ for (uptr j = 0; j < trace.size; j++) {
FiredSuppression *s = &ctx->fired_suppressions[k];
- if (trace.Get(j) == s->pc) {
+ if (trace.trace[j] == s->pc) {
if (s->supp)
s->supp->hit_count++;
return true;
@@ -570,10 +546,13 @@ static bool IsFiredSuppression(Context *ctx,
}
bool FrameIsInternal(const ReportStack *frame) {
- return frame != 0 && frame->file != 0
- && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
- internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
- internal_strstr(frame->file, "tsan_interface_"));
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ return file != 0 &&
+ (internal_strstr(file, "tsan_interceptors.cc") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_"));
}
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
@@ -625,7 +604,9 @@ void ReportRace(ThreadState *thr) {
ThreadRegistryLock l0(ctx->thread_registry);
ReportType typ = ReportTypeRace;
- if (thr->is_vptr_access)
+ if (thr->is_vptr_access && freed)
+ typ = ReportTypeVptrUseAfterFree;
+ else if (thr->is_vptr_access)
typ = ReportTypeVptrRace;
else if (freed)
typ = ReportTypeUseAfterFree;
@@ -633,9 +614,9 @@ void ReportRace(ThreadState *thr) {
if (IsFiredSuppression(ctx, rep, addr))
return;
const uptr kMop = 2;
- StackTrace traces[kMop];
+ VarSizeStackTrace traces[kMop];
const uptr toppc = TraceTopPC(thr);
- traces[0].ObtainCurrent(thr, toppc);
+ ObtainCurrentStack(thr, toppc, &traces[0]);
if (IsFiredSuppression(ctx, rep, traces[0]))
return;
InternalScopedBuffer<MutexSet> mset2(1);
@@ -650,7 +631,7 @@ void ReportRace(ThreadState *thr) {
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, s, &traces[i],
+ rep.AddMemoryAccess(addr, s, traces[i],
i == 0 ? &thr->mset : mset2.data());
}
@@ -680,26 +661,33 @@ void ReportRace(ThreadState *thr) {
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
PrintStack(SymbolizeStack(trace));
}
-void PrintCurrentStackSlow() {
+void PrintCurrentStackSlow(uptr pc) {
#ifndef TSAN_GO
- __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
- sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
- ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
- 0, 0, false);
+ BufferedStackTrace *ptrace =
+ new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+ BufferedStackTrace();
+ ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
- uptr tmp = ptrace->trace[i];
- ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
- ptrace->trace[ptrace->size - i - 1] = tmp;
+ uptr tmp = ptrace->trace_buffer[i];
+ ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
+ ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
}
- StackTrace trace;
- trace.Init(ptrace->trace, ptrace->size);
- PrintStack(SymbolizeStack(trace));
+ PrintStack(SymbolizeStack(*ptrace));
#endif
}
} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+} // extern "C"
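
The newly exported __sanitizer_print_stack_trace() gives instrumented programs a way to dump the current, slowly unwound and symbolized stack on demand; it is declared in the public sanitizer headers. A small usage example (built with -fsanitize=thread):

    #include <sanitizer/common_interface_defs.h>

    static void ReportOddState() {
      // Prints the current call stack through the sanitizer runtime, using the
      // same unwinder and symbolizer as TSan's own reports.
      __sanitizer_print_stack_trace();
    }

    int main() {
      ReportOddState();
      return 0;
    }
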
diff --git a/libsanitizer/tsan/tsan_stack_trace.cc b/libsanitizer/tsan/tsan_stack_trace.cc
index 45bd2517837..3734e0e4975 100644
--- a/libsanitizer/tsan/tsan_stack_trace.cc
+++ b/libsanitizer/tsan/tsan_stack_trace.cc
@@ -8,103 +8,37 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
-//#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_stack_trace.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
namespace __tsan {
-StackTrace::StackTrace()
- : n_()
- , s_()
- , c_() {
-}
-
-StackTrace::StackTrace(uptr *buf, uptr cnt)
- : n_()
- , s_(buf)
- , c_(cnt) {
- CHECK_NE(buf, 0);
- CHECK_NE(cnt, 0);
-}
-
-StackTrace::~StackTrace() {
- Reset();
-}
+VarSizeStackTrace::VarSizeStackTrace()
+ : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
-void StackTrace::Reset() {
- if (s_ && !c_) {
- CHECK_NE(n_, 0);
- internal_free(s_);
- s_ = 0;
- }
- n_ = 0;
+VarSizeStackTrace::~VarSizeStackTrace() {
+ ResizeBuffer(0);
}
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
- Reset();
- if (cnt == 0)
- return;
- if (c_) {
- CHECK_NE(s_, 0);
- CHECK_LE(cnt, c_);
- } else {
- s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
- }
- n_ = cnt;
- internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
-}
-
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
- Reset();
- n_ = thr->shadow_stack_pos - thr->shadow_stack;
- if (n_ + !!toppc == 0)
- return;
- uptr start = 0;
- if (c_) {
- CHECK_NE(s_, 0);
- if (n_ + !!toppc > c_) {
- start = n_ - c_ + !!toppc;
- n_ = c_ - !!toppc;
- }
- } else {
- // Cap potentially huge stacks.
- if (n_ + !!toppc > kTraceStackSize) {
- start = n_ - kTraceStackSize + !!toppc;
- n_ = kTraceStackSize - !!toppc;
- }
- s_ = (uptr*)internal_alloc(MBlockStackTrace,
- (n_ + !!toppc) * sizeof(s_[0]));
- }
- for (uptr i = 0; i < n_; i++)
- s_[i] = thr->shadow_stack[start + i];
- if (toppc) {
- s_[n_] = toppc;
- n_++;
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+ if (trace_buffer) {
+ internal_free(trace_buffer);
}
-}
-
-void StackTrace::CopyFrom(const StackTrace& other) {
- Reset();
- Init(other.Begin(), other.Size());
-}
-
-bool StackTrace::IsEmpty() const {
- return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
- return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
- CHECK_LT(i, n_);
- return s_[i];
-}
-
-const uptr *StackTrace::Begin() const {
- return s_;
+ trace_buffer =
+ (new_size > 0)
+ ? (uptr *)internal_alloc(MBlockStackTrace,
+ new_size * sizeof(trace_buffer[0]))
+ : nullptr;
+ trace = trace_buffer;
+ size = new_size;
+}
+
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ ResizeBuffer(cnt + !!extra_top_pc);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_stack_trace.h b/libsanitizer/tsan/tsan_stack_trace.h
index ce0cb8859d2..b097a9b2696 100644
--- a/libsanitizer/tsan/tsan_stack_trace.h
+++ b/libsanitizer/tsan/tsan_stack_trace.h
@@ -11,40 +11,25 @@
#ifndef TSAN_STACK_TRACE_H
#define TSAN_STACK_TRACE_H
-//#include "sanitizer_common/sanitizer_atomic.h"
-//#include "sanitizer_common/sanitizer_common.h"
-//#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_defs.h"
-//#include "tsan_clock.h"
-//#include "tsan_mutex.h"
-//#include "tsan_dense_alloc.h"
namespace __tsan {
-class StackTrace {
- public:
- StackTrace();
- // Initialized the object in "static mode",
- // in this mode it never calls malloc/free but uses the provided buffer.
- StackTrace(uptr *buf, uptr cnt);
- ~StackTrace();
- void Reset();
-
- void Init(const uptr *pcs, uptr cnt);
- void ObtainCurrent(ThreadState *thr, uptr toppc);
- bool IsEmpty() const;
- uptr Size() const;
- uptr Get(uptr i) const;
- const uptr *Begin() const;
- void CopyFrom(const StackTrace& other);
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+ uptr *trace_buffer; // Owned.
+
+ VarSizeStackTrace();
+ ~VarSizeStackTrace();
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
private:
- uptr n_;
- uptr *s_;
- const uptr c_;
+ void ResizeBuffer(uptr new_size);
- StackTrace(const StackTrace&);
- void operator = (const StackTrace&);
+ VarSizeStackTrace(const VarSizeStackTrace &);
+ void operator=(const VarSizeStackTrace &);
};
} // namespace __tsan
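
VarSizeStackTrace keeps an owned, heap-allocated buffer and publishes it through the base StackTrace's trace/size view; Init() copies the given pcs and can append one extra top pc. The ownership pattern in isolation, as a simplified standalone model rather than the real classes:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct TraceView {                // stands in for __sanitizer::StackTrace
      const uintptr_t *trace = nullptr;
      size_t size = 0;
    };

    struct OwnedTrace : TraceView {   // stands in for VarSizeStackTrace
      uintptr_t *buffer = nullptr;    // owned

      void Init(const uintptr_t *pcs, size_t cnt, uintptr_t extra_top_pc = 0) {
        Resize(cnt + (extra_top_pc ? 1 : 0));
        if (cnt)
          std::memcpy(buffer, pcs, cnt * sizeof(buffer[0]));
        if (extra_top_pc)
          buffer[cnt] = extra_top_pc;
      }
      ~OwnedTrace() { Resize(0); }

     private:
      void Resize(size_t n) {
        std::free(buffer);            // freeing nullptr is a no-op
        buffer = n ? static_cast<uintptr_t *>(std::malloc(n * sizeof(buffer[0])))
                   : nullptr;
        trace = buffer;
        size = n;
      }
    };
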
diff --git a/libsanitizer/tsan/tsan_suppressions.cc b/libsanitizer/tsan/tsan_suppressions.cc
index 6b42d3a67b8..76460d90d92 100644
--- a/libsanitizer/tsan/tsan_suppressions.cc
+++ b/libsanitizer/tsan/tsan_suppressions.cc
@@ -58,6 +58,8 @@ SuppressionType conv(ReportType typ) {
return SuppressionRace;
else if (typ == ReportTypeUseAfterFree)
return SuppressionRace;
+ else if (typ == ReportTypeVptrUseAfterFree)
+ return SuppressionRace;
else if (typ == ReportTypeThreadLeak)
return SuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
@@ -89,13 +91,14 @@ uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
return 0;
Suppression *s;
for (const ReportStack *frame = stack; frame; frame = frame->next) {
- if (SuppressionContext::Get()->Match(frame->func, stype, &s) ||
- SuppressionContext::Get()->Match(frame->file, stype, &s) ||
- SuppressionContext::Get()->Match(frame->module, stype, &s)) {
+ const AddressInfo &info = frame->info;
+ if (SuppressionContext::Get()->Match(info.function, stype, &s) ||
+ SuppressionContext::Get()->Match(info.file, stype, &s) ||
+ SuppressionContext::Get()->Match(info.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
- return frame->pc;
+ return info.address;
}
}
return 0;
@@ -109,13 +112,13 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
if (stype == SuppressionNone)
return 0;
Suppression *s;
- if (SuppressionContext::Get()->Match(loc->name, stype, &s) ||
- SuppressionContext::Get()->Match(loc->file, stype, &s) ||
- SuppressionContext::Get()->Match(loc->module, stype, &s)) {
+ const DataInfo &global = loc->global;
+ if (SuppressionContext::Get()->Match(global.name, stype, &s) ||
+ SuppressionContext::Get()->Match(global.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
- return loc->addr;
+ return global.start;
}
return 0;
}
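
With this change, race suppressions are matched against the symbolized frame's function, file and module names (and, for globals, against the variable name and module from DataInfo). For reference, a suppressions file passed via TSAN_OPTIONS looks roughly like this; the identifiers below are made-up examples:

    # tsan.supp  (used as: TSAN_OPTIONS=suppressions=tsan.supp ./app)
    # Suppress races whose stack mentions this function...
    race:LockFreeQueue::Push
    # ...or this source file, or this shared object:
    race:third_party/legacy.cc
    race:libvendor_blob.so
    # Thread-leak reports use the "thread" suppression type:
    thread:ThirdPartyWorker
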
diff --git a/libsanitizer/tsan/tsan_symbolize.cc b/libsanitizer/tsan/tsan_symbolize.cc
index 49ae3dffa47..795f838991b 100644
--- a/libsanitizer/tsan/tsan_symbolize.cc
+++ b/libsanitizer/tsan/tsan_symbolize.cc
@@ -34,38 +34,6 @@ void ExitSymbolizer() {
thr->ignore_interceptors--;
}
-ReportStack *NewReportStackEntry(uptr addr) {
- ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
- sizeof(ReportStack));
- internal_memset(ent, 0, sizeof(*ent));
- ent->pc = addr;
- return ent;
-}
-
-static ReportStack *NewReportStackEntry(const AddressInfo &info) {
- ReportStack *ent = NewReportStackEntry(info.address);
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.function)
- ent->func = internal_strdup(info.function);
- if (info.file)
- ent->file = internal_strdup(info.file);
- ent->line = info.line;
- ent->col = info.column;
- return ent;
-}
-
-
- ReportStack *next;
- char *module;
- uptr offset;
- uptr pc;
- char *func;
- char *file;
- int line;
- int col;
-
-
// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const uptr kExternalPCBit = 1ULL << 60;
@@ -93,16 +61,14 @@ ReportStack *SymbolizeCode(uptr addr) {
static char func_buf[1024];
static char file_buf[1024];
int line, col;
+ ReportStack *ent = ReportStack::New(addr);
if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf),
file_buf, sizeof(file_buf), &line, &col))
- return NewReportStackEntry(addr);
- ReportStack *ent = NewReportStackEntry(addr);
- ent->module = 0;
- ent->offset = 0;
- ent->func = internal_strdup(func_buf);
- ent->file = internal_strdup(file_buf);
- ent->line = line;
- ent->col = col;
+ return ent;
+ ent->info.function = internal_strdup(func_buf);
+ ent->info.file = internal_strdup(file_buf);
+ ent->info.line = line;
+ ent->info.column = col;
return ent;
}
static const uptr kMaxAddrFrames = 16;
@@ -112,13 +78,12 @@ ReportStack *SymbolizeCode(uptr addr) {
uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
if (addr_frames_num == 0)
- return NewReportStackEntry(addr);
+ return ReportStack::New(addr);
ReportStack *top = 0;
ReportStack *bottom = 0;
for (uptr i = 0; i < addr_frames_num; i++) {
- ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
- CHECK(cur_entry);
- addr_frames[i].Clear();
+ ReportStack *cur_entry = ReportStack::New(addr);
+ cur_entry->info = addr_frames[i];
if (i == 0)
top = cur_entry;
else
@@ -132,16 +97,8 @@ ReportLocation *SymbolizeData(uptr addr) {
DataInfo info;
if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
- ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
- sizeof(ReportLocation));
- internal_memset(ent, 0, sizeof(*ent));
- ent->type = ReportLocationGlobal;
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.name)
- ent->name = internal_strdup(info.name);
- ent->addr = info.start;
- ent->size = info.size;
+ ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ ent->global = info;
return ent;
}
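
For JIT/Java "external" PCs (those with kExternalPCBit set) symbolization is still delegated to __tsan_symbolize_external, which embedders can provide themselves. The prototype below is inferred from the call in SymbolizeCode() above and should be treated as an assumption, as should the stub body:

    #include <cstdio>

    // Prototype inferred from the call site above (uptr shown as unsigned long);
    // not a documented interface. Returning true indicates the buffers were
    // filled in.
    extern "C" bool __tsan_symbolize_external(unsigned long pc,
                                              char *func_buf, unsigned long func_siz,
                                              char *file_buf, unsigned long file_siz,
                                              int *line, int *col) {
      // A real runtime would look pc up in its JIT symbol table; this stub
      // just formats the raw value.
      std::snprintf(func_buf, func_siz, "jit_fn_%lx", pc);
      std::snprintf(file_buf, file_siz, "<jit>");
      *line = 0;
      *col = 0;
      return true;
    }
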
diff --git a/libsanitizer/tsan/tsan_trace.h b/libsanitizer/tsan/tsan_trace.h
index af140686f5d..8eceb634a53 100644
--- a/libsanitizer/tsan/tsan_trace.h
+++ b/libsanitizer/tsan/tsan_trace.h
@@ -40,21 +40,15 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
- StackTrace stack0; // Start stack for the trace.
- u64 epoch0; // Start epoch for the trace.
- MutexSet mset0;
-#ifndef TSAN_GO
- uptr stack0buf[kTraceStackSize];
-#endif
-
- TraceHeader()
#ifndef TSAN_GO
- : stack0(stack0buf, kTraceStackSize)
+ BufferedStackTrace stack0; // Start stack for the trace.
#else
- : stack0()
+ VarSizeStackTrace stack0;
#endif
- , epoch0() {
- }
+ u64 epoch0; // Start epoch for the trace.
+ MutexSet mset0;
+
+ TraceHeader() : stack0(), epoch0() {}
};
struct Trace {