Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-05 06:59:32 +03:00)
stronger encoding of free lists using two keys per page

commit e3391d9a53, parent ce02986d56
8 changed files with 83 additions and 50 deletions
@@ -392,12 +392,28 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
 }
 
 
-// -------------------------------------------------------------------
-// Encoding/Decoding the free list next pointers
-// Note: we pass a `null` value to be used as the `NULL` value for the
-// end of a free list. This is to prevent the cookie itself to ever
-// be present among user blocks (as `cookie^0==cookie`).
-// -------------------------------------------------------------------
+/* -------------------------------------------------------------------
+Encoding/Decoding the free list next pointers
+
+This is to protect against buffer overflow exploits where the
+free list is mutated. Many hardened allocators xor the next pointer `p`
+with a secret key `k1`, as `p^k1`, but if the attacker can guess
+the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
+Moreover, if multiple blocks can be read, the attacker can
+xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
+about the pointers (and subsequently `k1`).
+
+Instead mimalloc uses an extra key `k2` and encode as `rotl(p+k2,13)^k1`.
+Since these operations are not associative, the above approaches do not
+work so well any more even if the `p` can be guesstimated. (We include
+the rotation since xor and addition are otherwise linear in the lowest bit)
+Both keys are unique per page.
+
+We also pass a separate `null` value to be used as `NULL` or otherwise
+`rotl(k2,13)^k1` would appear (too) often as a sentinel value.
+------------------------------------------------------------------- */
+
+#define MI_ENCODE_ROTATE_BITS (13)
 
 static inline bool mi_is_in_same_segment(const void* p, const void* q) {
   return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
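The reasoning in the comment above can be checked with a small standalone program. The sketch below is not mimalloc code: the helper names (`rotl`, `rotr`) and the key/pointer constants are made up for illustration. It demonstrates the two properties the comment relies on: plain xor encoding leaks `k1` and `p1^p2`, while the `rotl(p+k2,13)^k1` form still round-trips but no longer cancels under xor.

```c
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define ROTATE 13
#define BITS   (sizeof(uintptr_t)*8)

static uintptr_t rotl(uintptr_t x, unsigned s) { return (x << s) | (x >> (BITS - s)); }
static uintptr_t rotr(uintptr_t x, unsigned s) { return (x >> s) | (x << (BITS - s)); }

int main(void) {
  // Demo keys and "pointers" (arbitrary values, not real allocations).
  uintptr_t k1 = 0x1234567890abcdefUL, k2 = 0x0fedcba098765432UL;
  uintptr_t p1 = 0x00007f0000001000UL, p2 = 0x00007f0000002000UL;

  // Plain xor encoding: a guessed pointer reveals the key, and two
  // encoded values reveal the xor of the plain pointers.
  assert(((p1 ^ k1) ^ p1) == k1);
  assert(((p1 ^ k1) ^ (p2 ^ k1)) == (p1 ^ p2));

  // Two-key encoding from the comment: c = rotl(p+k2,13)^k1,
  // decoded as p = rotr(c^k1,13)-k2.
  uintptr_t c1 = rotl(p1 + k2, ROTATE) ^ k1;
  uintptr_t c2 = rotl(p2 + k2, ROTATE) ^ k1;
  assert(rotr(c1 ^ k1, ROTATE) - k2 == p1);   // it round-trips
  // ...but xor-ing two encodings no longer cancels the keys:
  printf("c1^c2 = 0x%lx, p1^p2 = 0x%lx\n",
         (unsigned long)(c1 ^ c2), (unsigned long)(p1 ^ p2));
  return 0;
}
```

The remark about the rotation is visible here as well: without it, the lowest bit of `(p+k2)^k1` would always equal `(p ^ k1 ^ k2) & 1`, since addition cannot carry into bit 0.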
@@ -412,49 +428,55 @@ static inline bool mi_is_in_same_page(const void* p, const void* q) {
   return (idxp == idxq);
 }
 
-static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, uintptr_t cookie ) {
+static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
+  return ((x << shift) | (x >> (MI_INTPTR_BITS - shift)));
+}
+static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
+  return ((x >> shift) | (x << (MI_INTPTR_BITS - shift)));
+}
+static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, uintptr_t key1, uintptr_t key2 ) {
 #ifdef MI_ENCODE_FREELIST
-  mi_block_t* b = (mi_block_t*)(block->next ^ cookie);
+  mi_block_t* b = (mi_block_t*)(mi_rotr(block->next ^ key1, MI_ENCODE_ROTATE_BITS) - key2);
   if (mi_unlikely((void*)b==null)) { b = NULL; }
   return b;
 #else
-  UNUSED(cookie); UNUSED(null);
+  UNUSED(key1); UNUSED(key2); UNUSED(null);
   return (mi_block_t*)block->next;
 #endif
 }
 
-static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, uintptr_t cookie) {
+static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, uintptr_t key1, uintptr_t key2) {
 #ifdef MI_ENCODE_FREELIST
   if (mi_unlikely(next==NULL)) { next = (mi_block_t*)null; }
-  block->next = (mi_encoded_t)next ^ cookie;
+  block->next = mi_rotl((mi_encoded_t)next + key2, MI_ENCODE_ROTATE_BITS) ^ key1;
 #else
-  UNUSED(cookie); UNUSED(null);
+  UNUSED(key1); UNUSED(key2); UNUSED(null);
   block->next = (mi_encoded_t)next;
 #endif
 }
 
 static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
 #ifdef MI_ENCODE_FREELIST
-  mi_block_t* next = mi_block_nextx(page,block,page->cookie);
-  // check for free list corruption: is `next` at least in our segment range?
+  mi_block_t* next = mi_block_nextx(page,block,page->key[0],page->key[1]);
+  // check for free list corruption: is `next` at least in the same page?
   // TODO: check if `next` is `page->block_size` aligned?
-  if (next!=NULL && !mi_is_in_same_page(block, next)) {
+  if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
     _mi_fatal_error("corrupted free list entry of size %zub at %p: value 0x%zx\n", page->block_size, block, (uintptr_t)next);
     next = NULL;
   }
   return next;
 #else
   UNUSED(page);
-  return mi_block_nextx(page,block,0);
+  return mi_block_nextx(page,block,0,0);
 #endif
 }
 
 static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
 #ifdef MI_ENCODE_FREELIST
-  mi_block_set_nextx(page,block,next, page->cookie);
+  mi_block_set_nextx(page,block,next, page->key[0], page->key[1]);
 #else
   UNUSED(page);
-  mi_block_set_nextx(page,block, next,0);
+  mi_block_set_nextx(page,block, next,0,0);
 #endif
 }
 
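As a cross-check of the pair above, here is a cut-down model (standalone types and demo keys, not the mimalloc headers) showing that the `mi_block_set_nextx`-style encoder and the `mi_block_nextx`-style decoder are exact inverses, and how the per-page `null` sentinel keeps an encoded `NULL` from being a fixed, recognizable bit pattern:

```c
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

#define ROTATE 13
#define BITS   (sizeof(uintptr_t)*8)

typedef struct block_s { uintptr_t next; } block_t;  // stand-in for mi_block_t

static uintptr_t rotl(uintptr_t x, unsigned s) { return (x << s) | (x >> (BITS - s)); }
static uintptr_t rotr(uintptr_t x, unsigned s) { return (x >> s) | (x << (BITS - s)); }

// Models mi_block_set_nextx: NULL is replaced by the per-page `null` sentinel
// before encoding, so the end-of-list marker differs from page to page.
static void set_next(const void* null, block_t* b, const block_t* next,
                     uintptr_t k1, uintptr_t k2) {
  if (next == NULL) next = (const block_t*)null;
  b->next = rotl((uintptr_t)next + k2, ROTATE) ^ k1;
}

// Models mi_block_nextx: decode, then map the sentinel back to NULL.
static block_t* get_next(const void* null, const block_t* b,
                         uintptr_t k1, uintptr_t k2) {
  block_t* p = (block_t*)(rotr(b->next ^ k1, ROTATE) - k2);
  return ((const void*)p == null) ? NULL : p;
}

int main(void) {
  uintptr_t k1 = 0x9e3779b97f4a7c15UL, k2 = 0xc2b2ae3d27d4eb4fUL;  // demo keys
  block_t a = {0}, b = {0};
  const void* page = &a;            // any stable address serves as the sentinel here

  set_next(page, &a, &b, k1, k2);
  assert(get_next(page, &a, k1, k2) == &b);    // a real pointer round-trips

  set_next(page, &a, NULL, k1, k2);
  assert(get_next(page, &a, k1, k2) == NULL);  // NULL round-trips via the sentinel
  return 0;
}
```

Splitting the rotation into separate `mi_rotl`/`mi_rotr` helpers keeps encode and decode as mechanical mirror images, which makes it easy to see that one inverts the other.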
@@ -191,7 +191,7 @@ typedef struct mi_page_s {
 
   mi_block_t* free;        // list of available free blocks (`malloc` allocates from this list)
   #ifdef MI_ENCODE_FREELIST
-  uintptr_t   cookie;      // random cookie to encode the free lists
+  uintptr_t   key[2];      // two random keys to encode the free lists (see `_mi_block_next`)
   #endif
   size_t      used;        // number of blocks in use (including blocks in `local_free` and `thread_free`)
 
@@ -206,9 +206,9 @@ typedef struct mi_page_s {
   struct mi_page_s* prev;  // previous page owned by this thread with the same `block_size`
 
   // improve page index calculation
-  // without padding: 10 words on 64-bit, 11 on 32-bit. Secure adds one word
-#if (MI_INTPTR_SIZE==8 && defined(MI_ENCODE_FREELIST)) || (MI_INTPTR_SIZE==4 && !defined(MI_ENCODE_FREELIST))
-  void* padding[1];        // 12 words on 64-bit with cookie, 12 words on 32-bit plain
+  // without padding: 10 words on 64-bit, 11 on 32-bit. Secure adds two words
+#if (MI_INTPTR_SIZE==4)
+  void* padding[1];        // 12/14 words on 32-bit plain
 #endif
 } mi_page_t;
 
@@ -239,8 +239,8 @@ typedef struct mi_segment_s {
   size_t capacity;          // count of available pages (`#free + used`)
   size_t segment_size;      // for huge pages this may be different from `MI_SEGMENT_SIZE`
   size_t segment_info_size; // space we are using from the first page for segment meta-data and possible guard pages.
-  uintptr_t cookie;         // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
+  uintptr_t cookie;         // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
 
   // layout like this to optimize access in `mi_free`
   size_t page_shift;        // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
   volatile _Atomic(uintptr_t) thread_id; // unique id of the thread owning this segment
@@ -289,8 +289,9 @@ struct mi_heap_s {
   mi_page_queue_t pages[MI_BIN_FULL + 1];   // queue of pages for each size class (or "bin")
   volatile _Atomic(mi_block_t*) thread_delayed_free;
   uintptr_t thread_id;      // thread this heap belongs too
-  uintptr_t cookie;
-  mi_random_ctx_t random;   // random number used for secure allocation
+  uintptr_t cookie;         // random cookie to verify pointers (see `_mi_ptr_cookie`)
+  uintptr_t key[2];         // two random keys used to encode the `thread_delayed_free` list
+  mi_random_ctx_t random;   // random number context used for secure allocation
   size_t page_count;        // total number of pages in the `pages` queues.
   bool no_reclaim;          // `true` if this heap should not reclaim abandoned pages
 };
@@ -157,7 +157,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
 }
 
 static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
-  mi_block_t* n = mi_block_nextx(page, block, page->cookie); // pretend it is freed, and get the decoded first field
+  mi_block_t* n = mi_block_nextx(page, block, page->key[0], page->key[1]); // pretend it is freed, and get the decoded first field
   if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&  // quick check: aligned pointer?
       (n==NULL || mi_is_in_same_segment(block, n))) // quick check: in same segment or NULL?
   {
@@ -242,7 +242,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     mi_block_t* dfree;
     do {
       dfree = (mi_block_t*)heap->thread_delayed_free;
-      mi_block_set_nextx(heap,block,dfree, heap->cookie);
+      mi_block_set_nextx(heap,block,dfree, heap->key[0], heap->key[1]);
     } while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), block, dfree));
   }
 
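The `do { ... } while (!cas)` loop above is a standard lock-free push onto the heap's `thread_delayed_free` list: read the current head, link the block to it (encoded with the heap keys), and retry until the head has not changed underneath. A rough standalone analogue using C11 atomics, with the link stored in the clear instead of through `mi_block_set_nextx`, might look like:

```c
#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s { struct node_s* next; } node_t;

// Illustrative analogue of the loop above: push `n` onto a shared LIFO list.
// mimalloc additionally encodes `next` with heap->key[0]/key[1]; here the
// link is stored unencoded for brevity.
static void delayed_free_push(_Atomic(node_t*)* head, node_t* n) {
  node_t* old = atomic_load_explicit(head, memory_order_relaxed);
  do {
    n->next = old;   // link to the current head (the encoded store in mimalloc)
  } while (!atomic_compare_exchange_weak_explicit(
               head, &old, n, memory_order_release, memory_order_relaxed));
}

int main(void) {
  _Atomic(node_t*) head = NULL;
  node_t a = { NULL }, b = { NULL };
  delayed_free_push(&head, &a);
  delayed_free_push(&head, &b);
  return (atomic_load(&head) == &b) ? 0 : 1;
}
```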
@@ -266,7 +266,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
   // and push it on the free list
   if (mi_likely(local)) {
     // owning thread can free a block directly
-    if (mi_check_is_double_free(page, block)) return;
+    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
@@ -341,7 +341,7 @@ void mi_free(void* p) mi_attr_noexcept
   if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
     mi_block_t* block = (mi_block_t*)p;
-    if (mi_check_is_double_free(page,block)) return;
+    if (mi_unlikely(mi_check_is_double_free(page,block))) return;
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
@@ -193,6 +193,8 @@ mi_heap_t* mi_heap_new(void) {
   heap->thread_id = _mi_thread_id();
   _mi_random_split(&bheap->random, &heap->random);
   heap->cookie = _mi_heap_random_next(heap) | 1;
+  heap->key[0] = _mi_heap_random_next(heap);
+  heap->key[1] = _mi_heap_random_next(heap);
   heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
   return heap;
 }
src/init.c (30 lines changed)
@@ -16,13 +16,13 @@ const mi_page_t _mi_page_empty = {
   { 0 }, false,
   NULL, // free
   #if MI_ENCODE_FREELIST
-  0,
+  { 0, 0 },
   #endif
   0,    // used
   NULL,
   ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(0),
   0, NULL, NULL, NULL
-  #if (MI_INTPTR_SIZE==8 && defined(MI_ENCODE_FREELIST)) || (MI_INTPTR_SIZE==4 && !defined(MI_ENCODE_FREELIST))
+  #if (MI_INTPTR_SIZE==4)
   , { NULL } // padding
   #endif
 };
@@ -83,8 +83,9 @@ const mi_heap_t _mi_heap_empty = {
   MI_SMALL_PAGES_EMPTY,
   MI_PAGE_QUEUES_EMPTY,
   ATOMIC_VAR_INIT(NULL),
-  0,
-  0,
+  0,          // tid
+  0,          // cookie
+  { 0, 0 },   // keys
   { {0}, {0}, 0 },
   0,
   false
@@ -105,18 +106,21 @@ static mi_tld_t tld_main = {
   { MI_STATS_NULL } // stats
 };
 
+#if MI_INTPTR_SIZE==8
+#define MI_INIT_COOKIE  (0xCDCDCDCDCDCDCDCDUL)
+#else
+#define MI_INIT_COOKIE  (0xCDCDCDCDUL)
+#endif
+
 mi_heap_t _mi_heap_main = {
   &tld_main,
   MI_SMALL_PAGES_EMPTY,
   MI_PAGE_QUEUES_EMPTY,
   NULL,
   0, // thread id
-#if MI_INTPTR_SIZE==8  // the cookie of the main heap can be fixed (unlike page cookies that need to be secure!)
-  0xCDCDCDCDCDCDCDCDUL,
-#else
-  0xCDCDCDCDUL,
-#endif
-  { {0}, {0}, 0 }, // random
+  MI_INIT_COOKIE,  // initial cookie
+  { MI_INIT_COOKIE, MI_INIT_COOKIE },  // the key of the main heap can be fixed (unlike page keys that need to be secure!)
+  { {0}, {0}, 0 }, // random
   0, // page count
   false // can reclaim
 };
@@ -156,6 +160,8 @@ static bool _mi_heap_init(void) {
     heap->thread_id = _mi_thread_id();
     _mi_random_init(&heap->random);
     heap->cookie = _mi_heap_random_next(heap) | 1;
+    heap->key[0] = _mi_heap_random_next(heap);
+    heap->key[1] = _mi_heap_random_next(heap);
     heap->tld = tld;
     memset(tld, 0, sizeof(*tld));
     tld->heap_backing = heap;
@@ -399,6 +405,8 @@ void mi_process_init(void) mi_attr_noexcept {
   _mi_random_init(&_mi_heap_main.random);
   #ifndef __APPLE__ // TODO: fix this? cannot update cookie if allocation already happened..
   _mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main);
+  _mi_heap_main.key[0] = _mi_heap_random_next(&_mi_heap_main);
+  _mi_heap_main.key[1] = _mi_heap_random_next(&_mi_heap_main);
   #endif
   mi_process_setup_auto_thread_done();
   _mi_os_init();
src/page.c (14 lines changed)
@@ -103,7 +103,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
 bool _mi_page_is_valid(mi_page_t* page) {
   mi_assert_internal(mi_page_is_valid_init(page));
   #if MI_SECURE
-  mi_assert_internal(page->cookie != 0);
+  mi_assert_internal(page->key != 0);
   #endif
   if (page->heap!=NULL) {
     mi_segment_t* segment = _mi_page_segment(page);
@@ -284,7 +284,7 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
 
   // and free them all
   while(block != NULL) {
-    mi_block_t* next = mi_block_nextx(heap,block, heap->cookie);
+    mi_block_t* next = mi_block_nextx(heap,block, heap->key[0], heap->key[1]);
     // use internal free instead of regular one to keep stats etc correct
     if (!_mi_free_delayed_block(block)) {
       // we might already start delayed freeing while another thread has not yet
@@ -292,9 +292,8 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
       mi_block_t* dfree;
       do {
         dfree = (mi_block_t*)heap->thread_delayed_free;
-        mi_block_set_nextx(heap, block, dfree, heap->cookie);
+        mi_block_set_nextx(heap, block, dfree, heap->key[0], heap->key[1]);
       } while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), block, dfree));
 
     }
     block = next;
   }
@@ -357,7 +356,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
 
   #if MI_DEBUG>1
   // check there are no references left..
-  for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->cookie)) {
+  for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->key[0], pheap->key[1])) {
     mi_assert_internal(_mi_ptr_page(block) != page);
   }
   #endif
@@ -608,7 +607,8 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
   #ifdef MI_ENCODE_FREELIST
-  page->cookie = _mi_heap_random_next(heap) | 1;
+  page->key[0] = _mi_heap_random_next(heap);
+  page->key[1] = _mi_heap_random_next(heap);
   #endif
   page->is_zero = page->is_zero_init;
 
@@ -621,7 +621,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->prev == NULL);
   mi_assert_internal(!mi_page_has_aligned(page));
   #if (MI_ENCODE_FREELIST)
-  mi_assert_internal(page->cookie != 0);
+  mi_assert_internal(page->key != 0);
   #endif
   mi_assert_expensive(mi_page_is_valid_init(page));
 
@@ -231,9 +231,9 @@ void _mi_random_init(mi_random_ctx_t* ctx) {
   if (!os_random_buf(key, sizeof(key))) {
     // if we fail to get random data from the OS, we fall back to a
     // weak random source based on the current time
+    _mi_warning_message("unable to use secure randomness\n");
     uintptr_t x = os_random_weak(0);
     for (size_t i = 0; i < 8; i++) {  // key is eight 32-bit words.
-      _mi_warning_message("unable to use secure randomness\n");
       x = _mi_random_shuffle(x);
       ((uint32_t*)key)[i] = (uint32_t)x;
     }
@@ -520,7 +520,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   segment->segment_size = segment_size;
   segment->segment_info_size = pre_size;
   segment->thread_id = _mi_thread_id();
   segment->cookie = _mi_ptr_cookie(segment);
   // _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
 
   // set protection