Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 15:29:31 +03:00)

Commit 0c912445c4: Merge branch 'dev' into dev-exp

5 changed files with 84 additions and 30 deletions
include/mimalloc-types.h

@@ -167,8 +167,8 @@ typedef struct mi_page_s {
 #if MI_SECURE
   uintptr_t cookie;                 // random cookie to encode the free lists
 #endif
-  size_t used;                      // number of blocks in use (including blocks in `local_free` and `thread_free`)
   mi_page_flags_t flags;            // threadid:62 | has_aligned:1 | in_full:1
+  size_t used;                      // number of blocks in use (including blocks in `local_free` and `thread_free`)

   mi_block_t* local_free;           // list of deferred free blocks by this thread (migrates to `free`)
   volatile uintptr_t thread_freed;  // at least this number of blocks are in `thread_free`
@@ -384,6 +384,7 @@ typedef struct mi_segments_tld_s {

 // OS thread local data
 typedef struct mi_os_tld_s {
+  size_t region_idx;   // start point for next allocation
   mi_stats_t* stats;   // points to tld stats
 } mi_os_tld_t;

src/alloc.c

@@ -225,7 +225,7 @@ void mi_free(void* p) mi_attr_noexcept
   }
 #endif

-  mi_page_t* page = _mi_segment_page_of(segment, p);
+  mi_page_t* const page = _mi_segment_page_of(segment, p);

 #if (MI_STAT>1)
   mi_heap_t* heap = mi_heap_get_default();
@@ -236,7 +236,7 @@ void mi_free(void* p) mi_attr_noexcept
   // huge page stat is accounted for in `_mi_page_retire`
 #endif

-  uintptr_t tid = _mi_thread_id();
+  const uintptr_t tid = _mi_thread_id();
   if (mi_likely(tid == page->flags)) {  // if equal, the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
     mi_block_t* block = (mi_block_t*)p;
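Note on the fast path above: the single `tid == page->flags` compare works because the thread id occupies the upper 62 bits of the flags word while `in_full` and `has_aligned` occupy the low bits, so equality holds only when the freeing thread owns the page *and* neither special state is set. A minimal standalone sketch of the idea, with made-up bit values and a made-up thread id (not mimalloc's actual encoding helpers):

#include <stdint.h>
#include <stdio.h>

// hypothetical low status bits; the (aligned) thread id fills the upper bits
#define IN_FULL      ((uintptr_t)0x01)
#define HAS_ALIGNED  ((uintptr_t)0x02)

int main(void) {
  uintptr_t tid   = 0x7f0000001000;  // stand-in for _mi_thread_id(); low bits zero
  uintptr_t flags = tid;             // page owned by this thread, no special state
  printf("fast path: %d\n", tid == flags);  // 1: take the local free path

  flags |= IN_FULL;                  // page was moved to the full list
  printf("fast path: %d\n", tid == flags);  // 0: fall through to the generic path

  flags = tid | HAS_ALIGNED;         // page contains aligned blocks
  printf("fast path: %d\n", tid == flags);  // 0: generic path again
  return 0;
}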
src/init.c

@@ -17,7 +17,7 @@ const mi_page_t _mi_page_empty = {
 #if MI_SECURE
   0,
 #endif
-  0, {0},  // used, flags
+  0, 0,    // flags, used
   NULL, 0, 0,
   0, NULL, NULL, NULL
 #if (MI_INTPTR_SIZE==8 && MI_SECURE==0)
@@ -97,7 +97,7 @@ static mi_tld_t tld_main = {
   0,
   &_mi_heap_main,
   { { NULL, NULL }, {NULL ,NULL}, 0, 0, 0, 0, 0, 0, NULL, tld_main_stats }, // segments
-  { tld_main_stats },    // os
+  { 0, tld_main_stats }, // os
   { MI_STATS_NULL }      // stats
 };
src/memory.c (65 changed lines)
@@ -79,7 +79,6 @@ typedef struct mem_region_s {
 static mem_region_t regions[MI_REGION_MAX];

 static volatile size_t regions_count = 0;       // allocated regions
-static volatile uintptr_t region_next_idx = 0;  // good place to start searching


 /* ----------------------------------------------------------------------------
@@ -180,12 +179,45 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   }

   // and return the allocation
-  mi_atomic_write(&region_next_idx,idx);  // next search from here
   *p  = blocks_start;
   *id = (idx*MI_REGION_MAP_BITS) + bitidx;
   return true;
 }

+// Use bit scan forward to quickly find the first zero bit if it is available
+#if defined(_MSC_VER)
+#define MI_HAVE_BITSCAN
+#include <intrin.h>
+static inline size_t mi_bsf(uintptr_t x) {
+  if (x==0) return 8*MI_INTPTR_SIZE;
+  DWORD idx;
+  #if (MI_INTPTR_SIZE==8)
+  _BitScanForward64(&idx, x);
+  #else
+  _BitScanForward(&idx, x);
+  #endif
+  return idx;
+}
+static inline size_t mi_bsr(uintptr_t x) {
+  if (x==0) return 8*MI_INTPTR_SIZE;
+  DWORD idx;
+  #if (MI_INTPTR_SIZE==8)
+  _BitScanReverse64(&idx, x);
+  #else
+  _BitScanReverse(&idx, x);
+  #endif
+  return idx;
+}
+#elif defined(__GNUC__) || defined(__clang__)
+#define MI_HAVE_BITSCAN
+static inline size_t mi_bsf(uintptr_t x) {
+  return (x==0 ? 8*MI_INTPTR_SIZE : __builtin_ctzl(x));
+}
+static inline size_t mi_bsr(uintptr_t x) {
+  return (x==0 ? 8*MI_INTPTR_SIZE : (8*MI_INTPTR_SIZE - 1) - __builtin_clzl(x));
+}
+#endif
+
 // Allocate `blocks` in a `region` at `idx` of a given `size`.
 // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
 // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
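Note: `mi_bsf(~map)` turns "find the first zero bit of map" into "find the first set bit of its complement", which maps directly onto a single hardware instruction. A small self-contained sketch of that trick using the GCC/Clang path (assumes an LP64 build where unsigned long is the pointer width; the `map` values are made up):

#include <stdint.h>
#include <stdio.h>

#define PTR_BITS (8*sizeof(uintptr_t))

// same idea as mi_bsf above: index of the least significant set bit,
// or the full word width when no bit is set
static inline size_t bsf(uintptr_t x) {
  return (x==0 ? PTR_BITS : (size_t)__builtin_ctzl(x));
}

int main(void) {
  uintptr_t map = 0xff;                        // low 8 blocks claimed, rest free
  printf("first free bit: %zu\n", bsf(~map));  // 8
  map = ~(uintptr_t)0;                         // completely full bitmap
  printf("no free bit: %zu\n", bsf(~map));     // PTR_BITS (64 on a 64-bit build)
  return 0;
}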
@@ -197,11 +229,17 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc

   const uintptr_t mask = mi_region_block_mask(blocks, 0);
   const size_t bitidx_max = MI_REGION_MAP_BITS - blocks;
+  uintptr_t map = mi_atomic_read(&region->map);
+
+  #ifdef MI_HAVE_BITSCAN
+  size_t bitidx = mi_bsf(~map);    // quickly find the first zero bit if possible
+  #else
+  size_t bitidx = 0;               // otherwise start at 0
+  #endif
+  uintptr_t m = (mask << bitidx);  // invariant: m == mask shifted by bitidx

   // scan linearly for a free range of zero bits
-  uintptr_t map = mi_atomic_read(&region->map);
-  uintptr_t m = mask;  // the mask shifted by bitidx
-  for(size_t bitidx = 0; bitidx <= bitidx_max; bitidx++, m <<= 1) {
+  while(bitidx <= bitidx_max) {
     if ((map & m) == 0) {  // are the mask bits free at bitidx?
       mi_assert_internal((m >> bitidx) == mask);  // no overflow?
       uintptr_t newmap = map | m;
@@ -209,6 +247,7 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
       if (!mi_atomic_compare_exchange(&region->map, newmap, map)) {
         // no success, another thread claimed concurrently.. keep going
         map = mi_atomic_read(&region->map);
+        continue;
       }
       else {
         // success, we claimed the bits
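Note: the added `continue` makes the failure side of the compare-and-swap re-test the same bit range against the freshly re-read map, instead of advancing past bits that may in fact still be free. A rough standalone rendering of that retry pattern using C11 atomics rather than mimalloc's `mi_atomic_compare_exchange` (the helper name and test values are invented):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

// try to claim the bits in `m` inside `*map`; retry while other threads race
// us, and give up only once one of our bits is actually taken
static int claim_bits(_Atomic uintptr_t* map, uintptr_t m) {
  uintptr_t expected = atomic_load(map);
  while ((expected & m) == 0) {  // our whole range still free?
    if (atomic_compare_exchange_weak(map, &expected, expected | m)) {
      return 1;                  // success: the bits are ours
    }
    // failure re-loads `expected`; the loop re-tests the same range,
    // which is what the `continue` above achieves
  }
  return 0;                      // someone claimed a bit in our range
}

int main(void) {
  _Atomic uintptr_t map = 0;
  printf("claimed: %d\n", claim_bits(&map, 0x0f));  // 1
  printf("claimed: %d\n", claim_bits(&map, 0x18));  // 0: bit 3 already taken
  return 0;
}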
@@ -216,6 +255,17 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
         return mi_region_commit_blocks(region, idx, bitidx, blocks, size, commit, p, id, tld);
       }
     }
+    else {
+      // on to the next bit range
+      #ifdef MI_HAVE_BITSCAN
+      size_t shift = (blocks == 1 ? 1 : mi_bsr(map & m) - bitidx + 1);
+      mi_assert_internal(shift > 0 && shift <= blocks);
+      #else
+      size_t shift = 1;
+      #endif
+      bitidx += shift;
+      m <<= shift;
+    }
   }
   // no error, but also no bits found
   return true;
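Note: on a conflict, `mi_bsr(map & m)` locates the highest claimed bit inside the candidate range, so the scan jumps straight past it instead of sliding the mask one bit at a time (the old `m <<= 1`). A small sketch of that arithmetic with made-up values, reusing a bsr helper like the one added above (LP64 assumed, as before):

#include <stdint.h>
#include <stdio.h>

#define PTR_BITS (8*sizeof(uintptr_t))

// index of the most significant set bit, or the word width for zero
static inline size_t bsr(uintptr_t x) {
  return (x==0 ? PTR_BITS : (PTR_BITS - 1) - (size_t)__builtin_clzl(x));
}

int main(void) {
  const size_t blocks = 4;                        // want 4 contiguous free bits
  const uintptr_t mask = ((uintptr_t)1 << blocks) - 1;
  uintptr_t map = 0x4;                            // only bit 2 is claimed
  size_t bitidx = 0;
  uintptr_t m = mask << bitidx;                   // candidate range: bits 0..3

  if ((map & m) != 0) {                           // blocked: bit 2 conflicts
    size_t shift = bsr(map & m) - bitidx + 1;     // skip past the highest conflict
    bitidx += shift; m <<= shift;
    printf("skipped %zu bits to bitidx %zu\n", shift, bitidx);  // 3 bits -> bitidx 3
  }
  printf("range free now: %d\n", (map & m) == 0); // 1: bits 3..6 are all free
  return 0;
}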
@@ -267,7 +317,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t*
   // find a range of free blocks
   void* p = NULL;
   size_t count = mi_atomic_read(&regions_count);
-  size_t idx = mi_atomic_read(&region_next_idx);
+  size_t idx = tld->region_idx;  // start index is per-thread to reduce contention
   for (size_t visited = 0; visited < count; visited++, idx++) {
     if (idx >= count) idx = 0;  // wrap around
     if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, &p, id, tld)) return NULL; // error
@@ -286,6 +336,9 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t*
     // we could not find a place to allocate, fall back to the os directly
     p = _mi_os_alloc_aligned(size, alignment, commit, tld);
   }
+  else {
+    tld->region_idx = idx;  // next start of search
+  }

   mi_assert_internal( p == NULL || (uintptr_t)p % alignment == 0);
   return p;
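Note: replacing the global `region_next_idx` with the per-thread `tld->region_idx` removes a shared atomic that every allocating thread used to write; each thread now resumes its region scan where it last succeeded. A hypothetical sketch of that cursor pattern (the struct and function names here are illustrative only, not mimalloc's):

#include <stddef.h>
#include <stdio.h>

typedef struct os_tld_s { size_t region_idx; } os_tld_t;  // mirrors the new field

static int try_alloc_in(size_t idx) { return idx == 2; }  // made up: only region 2 has room

static size_t find_region(os_tld_t* tld, size_t count) {
  size_t idx = tld->region_idx;              // per-thread start, no shared counter
  for (size_t visited = 0; visited < count; visited++, idx++) {
    if (idx >= count) idx = 0;               // wrap around
    if (try_alloc_in(idx)) {
      tld->region_idx = idx;                 // remember the next start of search
      return idx;
    }
  }
  return (size_t)-1;                         // nothing found
}

int main(void) {
  os_tld_t tld = { 0 };
  printf("found region %zu\n", find_region(&tld, 4));     // 2
  printf("next search starts at %zu\n", tld.region_idx);  // 2
  return 0;
}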