Diffstat (limited to 'libsanitizer/asan/asan_allocator2.cc')
-rw-r--r--  libsanitizer/asan/asan_allocator2.cc | 48
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc
index bbc1ff723a4..78c1ec113a6 100644
--- a/libsanitizer/asan/asan_allocator2.cc
+++ b/libsanitizer/asan/asan_allocator2.cc
@@ -19,6 +19,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
@@ -165,23 +166,6 @@ struct AsanChunk: ChunkBase {
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
- // If we don't use stack depot, we store the alloc/free stack traces
- // in the chunk itself.
- u32 *AllocStackBeg() {
- return (u32*)(Beg() - RZLog2Size(rz_log));
- }
- uptr AllocStackSize() {
- CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
- return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
- }
- u32 *FreeStackBeg() {
- return (u32*)(Beg() + kChunkHeader2Size);
- }
- uptr FreeStackSize() {
- if (user_requested_size < kChunkHeader2Size) return 0;
- uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
- return (available - kChunkHeader2Size) / sizeof(u32);
- }
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
@@ -461,12 +445,17 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
}
}
-static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
+static void Deallocate(void *ptr, uptr delete_size, StackTrace *stack,
+ AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ if (delete_size && flags()->new_delete_type_mismatch &&
+ delete_size != m->UsedSize()) {
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
+ }
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
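
The extra delete_size parameter lets Deallocate() compare the size handed to a sized operator delete (forwarded through the new asan_sized_free() below) against the chunk's recorded UsedSize(). A minimal sketch of the kind of mismatch this reports, assuming the compiler emits sized-deallocation calls and the new_delete_type_mismatch flag is enabled (not part of this patch):

    // Hypothetical reproducer: without a virtual destructor, sized delete
    // passes sizeof(Base), but the chunk was allocated for a Derived,
    // so delete_size != m->UsedSize() and a mismatch is reported.
    struct Base { int x; };
    struct Derived : Base { int more[16]; };  // no virtual destructor

    int main() {
      Base *b = new Derived;  // chunk's UsedSize() == sizeof(Derived)
      delete b;               // sized delete passes sizeof(Base)
      return 0;
    }

Plain frees and realloc still pass delete_size == 0, which skips the check.
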
@@ -493,7 +482,7 @@ static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
// If realloc() races with free(), we may start copying freed memory.
// However, we will report racy double-free later anyway.
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, stack, FROM_MALLOC);
+ Deallocate(old_ptr, 0, stack, FROM_MALLOC);
}
return new_ptr;
}
@@ -592,7 +581,12 @@ void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
}
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
- Deallocate(ptr, stack, alloc_type);
+ Deallocate(ptr, 0, stack, alloc_type);
+}
+
+void asan_sized_free(void *ptr, uptr size, StackTrace *stack,
+ AllocType alloc_type) {
+ Deallocate(ptr, size, stack, alloc_type);
}
void *asan_malloc(uptr size, StackTrace *stack) {
@@ -614,7 +608,7 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) {
if (p == 0)
return Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
- Deallocate(p, stack, FROM_MALLOC);
+ Deallocate(p, 0, stack, FROM_MALLOC);
return 0;
}
return Reallocate(p, size, stack);
@@ -758,23 +752,23 @@ using namespace __asan; // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
-uptr __asan_get_estimated_allocated_size(uptr size) {
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-int __asan_get_ownership(const void *p) {
+int __sanitizer_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}
-uptr __asan_get_allocated_size(const void *p) {
+uptr __sanitizer_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
GET_STACK_TRACE_FATAL_HERE;
- ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
+ ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
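
With these renames, allocation introspection is exposed under the tool-independent __sanitizer_ prefix. A small usage sketch, with the declarations written out by hand for illustration (in real code they would come from the public sanitizer interface header) and uptr treated as size_t:

    #include <stdio.h>
    #include <stdlib.h>

    // Hand-written declarations, for illustration only.
    extern "C" int __sanitizer_get_ownership(const void *p);
    extern "C" size_t __sanitizer_get_allocated_size(const void *p);

    int main() {
      void *p = malloc(100);
      if (__sanitizer_get_ownership(p))
        // Reports the requested size (100), not the redzone-padded
        // internal size, matching the comment above.
        printf("allocated size: %zu\n", __sanitizer_get_allocated_size(p));
      free(p);
      return 0;
    }
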
@@ -783,12 +777,12 @@ uptr __asan_get_allocated_size(const void *p) {
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_malloc_hook(void *ptr, uptr size) {
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_free_hook(void *ptr) {
+void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
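
Because the defaults are weak, a program can supply strong definitions to observe every allocation and free. A minimal sketch of such an override, again writing the declarations by hand and treating uptr as size_t:

    #include <stdio.h>

    // Strong definitions take precedence over the weak no-op defaults.
    extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
      fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
    }
    extern "C" void __sanitizer_free_hook(void *ptr) {
      fprintf(stderr, "free  %p\n", ptr);
    }

    int main() {
      int *p = new int[4];  // routed through __sanitizer_malloc_hook
      delete[] p;           // routed through __sanitizer_free_hook
      return 0;
    }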