stronger encoding of free lists using two keys per page

daan 2019-12-27 23:33:50 -08:00
parent ce02986d56
commit e3391d9a53
8 changed files with 83 additions and 50 deletions


@@ -392,12 +392,28 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
}
// -------------------------------------------------------------------
// Encoding/Decoding the free list next pointers
// Note: we pass a `null` value to be used as the `NULL` value for the
// end of a free list. This is to prevent the cookie itself from ever
// being present among user blocks (as `cookie^0==cookie`).
// -------------------------------------------------------------------
/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers
This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
with a secret key `k1`, as `p^k1`, but if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
Moreover, if multiple blocks can be read, the attacker can
xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
about the pointers (and subsequently `k1`).
Instead mimalloc uses an extra key `k2` and encodes as `rotl(p+k2,13)^k1`.
Since these operations are not associative, the above attacks no longer
work so well, even if `p` can be guessed. (We include the rotation since
xor and addition are otherwise linear in the lowest bit.)
Both keys are unique per page.
We also pass a separate `null` value to be used as `NULL`, since otherwise
the fixed value `rotl(k2,13)^k1` (the encoding of `0`) would appear (too)
often as a sentinel value.
------------------------------------------------------------------- */
#define MI_ENCODE_ROTATE_BITS (13)
static inline bool mi_is_in_same_segment(const void* p, const void* q) {
return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
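As a standalone illustration of the differencing attack the new comment describes (a sketch for this write-up, not code from the commit; the key and pointer values are made up, 64-bit assumed):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void) {
  uintptr_t k1 = 0x9E3779B97F4A7C15;   // secret xor key (made up)
  uintptr_t p1 = 0x00007F0000001000;   // two free blocks in the same page
  uintptr_t p2 = 0x00007F0000001040;
  uintptr_t e1 = p1 ^ k1;              // xor-only encoded next fields
  uintptr_t e2 = p2 ^ k1;
  // xoring two leaked encodings cancels the key completely:
  printf("e1^e2 = 0x%" PRIxPTR ", p1^p2 = 0x%" PRIxPTR "\n", e1 ^ e2, p1 ^ p2);
  // and a single correctly guessed pointer reveals the key outright:
  printf("e1^p1 = 0x%" PRIxPTR " == k1\n", e1 ^ p1);
  return 0;
}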
@@ -412,49 +428,55 @@ static inline bool mi_is_in_same_page(const void* p, const void* q) {
return (idxp == idxq);
}
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, uintptr_t cookie ) {
static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
return ((x << shift) | (x >> (MI_INTPTR_BITS - shift)));
}
static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
return ((x >> shift) | (x << (MI_INTPTR_BITS - shift)));
}
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, uintptr_t key1, uintptr_t key2 ) {
#ifdef MI_ENCODE_FREELIST
mi_block_t* b = (mi_block_t*)(block->next ^ cookie);
mi_block_t* b = (mi_block_t*)(mi_rotr(block->next ^ key1, MI_ENCODE_ROTATE_BITS) - key2);
if (mi_unlikely((void*)b==null)) { b = NULL; }
return b;
#else
UNUSED(cookie); UNUSED(null);
UNUSED(key1); UNUSED(key2); UNUSED(null);
return (mi_block_t*)block->next;
#endif
}
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, uintptr_t cookie) {
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, uintptr_t key1, uintptr_t key2) {
#ifdef MI_ENCODE_FREELIST
if (mi_unlikely(next==NULL)) { next = (mi_block_t*)null; }
block->next = (mi_encoded_t)next ^ cookie;
block->next = mi_rotl((mi_encoded_t)next + key2, MI_ENCODE_ROTATE_BITS) ^ key1;
#else
UNUSED(cookie); UNUSED(null);
UNUSED(key1); UNUSED(key2); UNUSED(null);
block->next = (mi_encoded_t)next;
#endif
}
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
#ifdef MI_ENCODE_FREELIST
mi_block_t* next = mi_block_nextx(page,block,page->cookie);
// check for free list corruption: is `next` at least in our segment range?
mi_block_t* next = mi_block_nextx(page,block,page->key[0],page->key[1]);
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
if (next!=NULL && !mi_is_in_same_page(block, next)) {
if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
_mi_fatal_error("corrupted free list entry of size %zub at %p: value 0x%zx\n", page->block_size, block, (uintptr_t)next);
next = NULL;
}
return next;
#else
UNUSED(page);
return mi_block_nextx(page,block,0);
return mi_block_nextx(page,block,0,0);
#endif
}
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
#ifdef MI_ENCODE_FREELIST
mi_block_set_nextx(page,block,next, page->cookie);
mi_block_set_nextx(page,block,next, page->key[0], page->key[1]);
#else
UNUSED(page);
mi_block_set_nextx(page,block, next,0);
mi_block_set_nextx(page,block, next,0,0);
#endif
}
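For a concrete round-trip, here is a self-contained sketch of the encode/decode pair that `mi_block_set_nextx` and `mi_block_nextx` implement above (again not code from the commit; key and pointer values are made up):

#include <assert.h>
#include <stdint.h>

#define BITS   (sizeof(uintptr_t)*8)
#define ROTATE 13   // mirrors MI_ENCODE_ROTATE_BITS

static uintptr_t rotl(uintptr_t x, unsigned s) { return (x << s) | (x >> (BITS - s)); }
static uintptr_t rotr(uintptr_t x, unsigned s) { return (x >> s) | (x << (BITS - s)); }

static uintptr_t encode(uintptr_t p, uintptr_t k1, uintptr_t k2) {
  return rotl(p + k2, ROTATE) ^ k1;    // as in mi_block_set_nextx
}
static uintptr_t decode(uintptr_t e, uintptr_t k1, uintptr_t k2) {
  return rotr(e ^ k1, ROTATE) - k2;    // exact inverse, as in mi_block_nextx
}

int main(void) {
  uintptr_t k1 = 0xA5A5A5A5, k2 = 0x5EED1234, p = 0x7F2000;
  assert(decode(encode(p, k1, k2), k1, k2) == p);  // round-trips exactly
  // xoring two encodings still cancels k1, but the result
  // rotl(p1+k2,13) ^ rotl(p2+k2,13) depends on k2 through the
  // addition carries, so it no longer reveals p1^p2.
  return 0;
}

Note the rotation amount is a fixed constant; the hardening comes from the two per-page keys combined with the carry propagation of the addition, not from the rotation alone.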


@@ -191,7 +191,7 @@ typedef struct mi_page_s {
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
#ifdef MI_ENCODE_FREELIST
uintptr_t cookie; // random cookie to encode the free lists
uintptr_t key[2]; // two random keys to encode the free lists (see `_mi_block_next`)
#endif
size_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`)
@@ -206,9 +206,9 @@
struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
// improve page index calculation
// without padding: 10 words on 64-bit, 11 on 32-bit. Secure adds one word
#if (MI_INTPTR_SIZE==8 && defined(MI_ENCODE_FREELIST)) || (MI_INTPTR_SIZE==4 && !defined(MI_ENCODE_FREELIST))
void* padding[1]; // 12 words on 64-bit with cookie, 12 words on 32-bit plain
// without padding: 10 words on 64-bit, 11 on 32-bit. Secure adds two words
#if (MI_INTPTR_SIZE==4)
void* padding[1]; // 12/14 words on 32-bit (plain/secure)
#endif
} mi_page_t;
@@ -239,8 +239,8 @@ typedef struct mi_segment_s {
size_t capacity; // count of available pages (`#free + used`)
size_t segment_size;// for huge pages this may be different from `MI_SEGMENT_SIZE`
size_t segment_info_size; // space we are using from the first page for segment meta-data and possible guard pages.
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
uintptr_t cookie; // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
// layout like this to optimize access in `mi_free`
size_t page_shift; // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
volatile _Atomic(uintptr_t) thread_id; // unique id of the thread owning this segment
@@ -289,8 +289,9 @@ struct mi_heap_s {
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
volatile _Atomic(mi_block_t*) thread_delayed_free;
uintptr_t thread_id; // thread this heap belongs to
uintptr_t cookie;
mi_random_ctx_t random; // random number used for secure allocation
uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
uintptr_t key[2]; // two random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation
size_t page_count; // total number of pages in the `pages` queues.
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
};
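The struct changes give every page, and every heap (for its `thread_delayed_free` list), its own pair of keys, so recovering the keys of one page compromises only that page. A self-contained sketch of the seeding pattern (the xorshift stand-in replaces mimalloc's `mi_random_ctx_t`; the actual initialization sites are not part of this excerpt):

#include <stdint.h>

static uintptr_t rng_next(uintptr_t* s) {   // stand-in xorshift PRNG;
  uintptr_t x = *s;                         // mimalloc uses mi_random_ctx_t
  x ^= x << 13; x ^= x >> 7; x ^= x << 17;
  return (*s = x);
}

typedef struct page_s { uintptr_t key[2]; } page_t;

static void page_init_keys(page_t* page, uintptr_t* rng_state) {
  page->key[0] = rng_next(rng_state);  // two fresh keys per page
  page->key[1] = rng_next(rng_state);
}

int main(void) {
  uintptr_t seed = 0x12345678;  // xorshift seed must be nonzero
  page_t page;
  page_init_keys(&page, &seed);
  return 0;
}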