commit 48ac5b4833b60f00f0923db11ea31e7316bc78c6
Author: Vitaly Buka <vitalybuka@google.com>
Date: Fri Sep 4 01:17:18 2020 -0700
[NFC][Asan] Reformat some allocator code
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index cb9f49f73a3..52033821ffd 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -73,13 +73,13 @@ static const uptr kAllocBegMagic = 0xCC6E96B9;
struct ChunkHeader {
// 1-st 8 bytes.
atomic_uint8_t chunk_state;
- u32 alloc_tid : 24;
+ u32 alloc_tid : 24;
- u32 free_tid : 24;
- u32 from_memalign : 1;
- u32 alloc_type : 2;
- u32 rz_log : 3;
- u32 lsan_tag : 2;
+ u32 free_tid : 24;
+ u32 from_memalign : 1;
+ u32 alloc_type : 2;
+ u32 rz_log : 3;
+ u32 lsan_tag : 2;
// 2-nd 8 bytes
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
@@ -109,7 +109,7 @@ enum {
// The chunk is allocated and not yet freed.
CHUNK_ALLOCATED = 2,
// The chunk was freed and put into quarantine zone.
- CHUNK_QUARANTINE = 3
+ CHUNK_QUARANTINE = 3,
};
struct AsanChunk: ChunkBase {
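For orientation between hunks: the ChunkHeader hunk above packs an 8-bit atomic chunk_state plus 24 + 24 + 1 + 2 + 3 + 2 = 56 bits of bitfields into the "1-st 8 bytes" mentioned in the comment. The sketch below is only an illustration of that first quadword, not the runtime's actual header: u32 and atomic_uint8_t are replaced with standard-library stand-ins, and the 8-byte size assumes the Itanium C++ ABI used by the clang-built runtime.

    // Standalone sketch of the first-quadword packing from the ChunkHeader hunk above.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    using u32 = uint32_t;

    struct ChunkHeaderSketch {
      std::atomic<uint8_t> chunk_state;  // stand-in for atomic_uint8_t
      u32 alloc_tid : 24;
      u32 free_tid : 24;
      u32 from_memalign : 1;
      u32 alloc_type : 2;
      u32 rz_log : 3;
      u32 lsan_tag : 2;
    };

    int main() {
      // 8 + 24 + 24 + 1 + 2 + 3 + 2 = 64 bits, i.e. one 8-byte quadword.
      printf("sizeof(ChunkHeaderSketch) = %zu\n", sizeof(ChunkHeaderSketch));
    }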
@@ -118,7 +118,7 @@ struct AsanChunk: ChunkBase {
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
return *reinterpret_cast<uptr *>(
- get_allocator().GetMetaData(AllocBeg(locked_version)));
+ get_allocator().GetMetaData(AllocBeg(locked_version)));
}
void *AllocBeg(bool locked_version = false) {
if (from_memalign) {
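The UsedSize() hunk above encodes sizes with a sentinel: small requests are stored inline in user_requested_size, while large ones set it to SizeClassMap::kMaxSize and keep the real size in the allocator's per-chunk metadata. A minimal sketch of that pattern follows; the sentinel value is made up, and a plain field stands in for get_allocator().GetMetaData().

    // Sketch of the inline-size-or-sentinel pattern from UsedSize() above.
    // kMaxSizeSentinel and metadata_size are illustrative stand-ins only.
    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;

    constexpr uptr kMaxSizeSentinel = 1 << 17;  // stand-in for SizeClassMap::kMaxSize

    struct ChunkSketch {
      uptr user_requested_size;  // inline size, or the sentinel for large chunks
      uptr metadata_size;        // stands in for the allocator metadata slot

      uptr UsedSize() const {
        if (user_requested_size != kMaxSizeSentinel)
          return user_requested_size;
        return metadata_size;  // real size stored out of band
      }
    };

    int main() {
      ChunkSketch small{4096, 0};
      ChunkSketch large{kMaxSizeSentinel, 1 << 20};
      printf("small %zu, large %zu\n", (size_t)small.UsedSize(), (size_t)large.UsedSize());
    }

Keeping the common (small) case inline means the header stays compact while still accommodating secondary-allocator sizes beyond what the size-class map covers.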
@@ -519,7 +519,7 @@ struct Allocator {
m->free_tid = kInvalidTid;
m->from_memalign = user_beg != beg_plus_redzone;
if (alloc_beg != chunk_beg) {
- CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
+ CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
}
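The hunk above is the write side of the kAllocBegMagic scheme: when the chunk header does not start at the beginning of the raw allocation, two words at alloc_beg record a magic value and the chunk address, so a later lookup from alloc_beg can recover the chunk (the read side appears in GetAsanChunk further down). The round-trip sketch below uses a local array and arbitrary offsets purely for illustration; only kAllocBegMagic's value is taken from the diff context.

    // Round-trip sketch of the kAllocBegMagic stash-and-recover scheme.
    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;
    static const uptr kAllocBegMagic = 0xCC6E96B9;  // value from the hunk context above

    int main() {
      uptr block[32];                                // stands in for the raw allocation
      uptr alloc_beg = reinterpret_cast<uptr>(block);
      uptr chunk_beg = alloc_beg + 64;               // pretend the chunk starts 64 bytes in

      // Write side (as in Allocate()): stash magic + chunk pointer at alloc_beg.
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;

      // Read side (as in GetAsanChunk() below): trust the stored pointer only if
      // the magic matches; otherwise the chunk starts at alloc_beg itself.
      uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
      uptr recovered = (alloc_magic[0] == kAllocBegMagic) ? alloc_magic[1] : alloc_beg;
      printf("chunk_beg %p recovered %p\n", (void *)chunk_beg, (void *)recovered);
    }

For chunks served by the secondary allocator, GetAsanChunk below takes a different path and reads the chunk pointer out of band, from slot [1] of the allocator's metadata, instead of from in-band magic words.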
@@ -735,7 +735,8 @@ struct Allocator {
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
AsanChunk *GetAsanChunk(void *alloc_beg) {
- if (!alloc_beg) return nullptr;
+ if (!alloc_beg)
+ return nullptr;
if (!allocator.FromPrimary(alloc_beg)) {
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
@@ -751,11 +752,13 @@ struct Allocator {
}
AsanChunk *GetAsanChunkDebug(void *alloc_beg) {
- if (!alloc_beg) return nullptr;
+ if (!alloc_beg)
+ return nullptr;
if (!allocator.FromPrimary(alloc_beg)) {
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
- Printf("GetAsanChunkDebug1 alloc_beg %p meta %p m %p\n", alloc_beg, meta, m);
+ Printf("GetAsanChunkDebug1 alloc_beg %p meta %p m %p\n", alloc_beg, meta,
+ m);
return m;
}
uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
@@ -768,7 +771,6 @@ struct Allocator {
return reinterpret_cast<AsanChunk *>(alloc_beg);
}
-
AsanChunk *GetAsanChunkByAddr(uptr p) {
void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
return GetAsanChunk(alloc_beg);
@@ -784,7 +786,8 @@ struct Allocator {
AsanChunk *GetAsanChunkByAddrFastLockedDebug(uptr p) {
void *alloc_beg =
allocator.GetBlockBeginFastLockedDebug(reinterpret_cast<void *>(p));
- Printf("GetAsanChunkByAddrFastLockedDebug p %p alloc_beg %p\n", p, alloc_beg);
+ Printf("GetAsanChunkByAddrFastLockedDebug p %p alloc_beg %p\n", p,
+ alloc_beg);
return GetAsanChunkDebug(alloc_beg);
}
@@ -1055,7 +1058,7 @@ void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
instance.SetRssLimitExceeded(limit_exceeded);
}
-} // namespace __asan
+} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
@@ -1092,7 +1095,8 @@ extern "C" SANITIZER_WEAK_ATTRIBUTE const char *__lsan_current_stage;
void GetUserBeginDebug(uptr chunk) {
Printf("GetUserBeginDebug1 chunk %p\n", chunk);
- __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLockedDebug(chunk);
+ __asan::AsanChunk *m =
+ __asan::instance.GetAsanChunkByAddrFastLockedDebug(chunk);
Printf("GetUserBeginDebug2 m %p\n", m);
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index 6d73784d77d..0cf483da1e5 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -148,7 +148,6 @@ class CombinedAllocator {
return secondary_.GetBlockBeginFastLocked(p);
}
-
uptr GetActuallyAllocatedSize(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetActuallyAllocatedSize(p);
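The GetActuallyAllocatedSize hunk above shows the combined allocator's dispatch shape: a query goes to the primary (size-class) allocator when it owns the pointer and falls through to the secondary (large-allocation) allocator otherwise. The toy sketch below only mimics that shape; the Primary/Secondary types, their ownership test, and the returned sizes are invented and bear no relation to the real sanitizer allocators.

    // Toy sketch of the primary/secondary dispatch in CombinedAllocator.
    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;

    struct ToyPrimary {
      bool PointerIsMine(void *p) const { return reinterpret_cast<uptr>(p) % 2 == 0; }
      uptr GetActuallyAllocatedSize(void *) const { return 64; }       // made-up class size
    };
    struct ToySecondary {
      uptr GetActuallyAllocatedSize(void *) const { return 1 << 20; }  // made-up large size
    };

    struct ToyCombinedAllocator {
      ToyPrimary primary_;
      ToySecondary secondary_;
      uptr GetActuallyAllocatedSize(void *p) {
        if (primary_.PointerIsMine(p))
          return primary_.GetActuallyAllocatedSize(p);
        return secondary_.GetActuallyAllocatedSize(p);
      }
    };

    int main() {
      ToyCombinedAllocator a;
      int x;
      printf("%zu\n", (size_t)a.GetActuallyAllocatedSize(&x));
    }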
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 7af469c56fd..a6126fc6265 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -203,7 +203,8 @@ class SizeClassAllocator64 {
uptr class_id = GetSizeClass(p);
uptr size = ClassIdToSize(class_id);
Printf("GetBlockBeginDebug1 p %p class_id %p size %p\n", p, class_id, size);
- if (!size) return nullptr;
+ if (!size)
+ return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = GetRegionBegin(p);
uptr beg = chunk_idx * size;
@@ -212,16 +213,16 @@ class SizeClassAllocator64 {
"GetBlockBeginDebug2 chunk_idx %p reg_beg %p beg %p next_beg %p "
"kNumClasses %p\n",
chunk_idx, reg_beg, beg, next_beg, kNumClasses);
- if (class_id >= kNumClasses) return nullptr;
+ if (class_id >= kNumClasses)
+ return nullptr;
const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
Printf("GetBlockBeginDebug3 region %p region->mapped_user %p\n", region,
region->mapped_user);
if (region->mapped_user >= next_beg)
- return reinterpret_cast<void*>(reg_beg + beg);
+ return reinterpret_cast<void *>(reg_beg + beg);
return nullptr;
}
-
uptr GetActuallyAllocatedSize(void *p) {
CHECK(PointerIsMine(p));
return ClassIdToSize(GetSizeClass(p));
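The GetBlockBeginDebug hunk above recovers a block's beginning from its size class: size = ClassIdToSize(class_id), chunk_idx = GetChunkIdx(p, size), reg_beg = GetRegionBegin(p), and the block begins at reg_beg + chunk_idx * size, provided the region has mapped at least next_beg = beg + size bytes of user memory. The sketch below works through that arithmetic with made-up numbers; the real region base, class size, and mapped_user come from SizeClassAllocator64's region layout, and the division only approximates GetChunkIdx.

    // Worked sketch of the block-begin arithmetic from GetBlockBeginDebug above.
    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;

    int main() {
      const uptr reg_beg = 0x10000000;   // pretend region base for this size class
      const uptr size = 128;             // pretend ClassIdToSize(class_id)
      const uptr mapped_user = 1 << 20;  // pretend region->mapped_user

      uptr p = reg_beg + 3 * size + 17;       // pointer somewhere inside chunk #3
      uptr chunk_idx = (p - reg_beg) / size;  // GetChunkIdx(p, size), in spirit
      uptr beg = chunk_idx * size;            // offset of the chunk's first byte
      uptr next_beg = beg + size;             // block is valid only if this much is mapped
      void *block = (mapped_user >= next_beg)
                        ? reinterpret_cast<void *>(reg_beg + beg)
                        : nullptr;
      printf("p %p -> block begin %p (chunk_idx %zu)\n", (void *)p, block,
             (size_t)chunk_idx);
    }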