per thread region search index

daan 2019-08-11 12:15:13 -07:00
parent 2e924150ae
commit 0fd898315c
3 changed files with 6 additions and 4 deletions

@@ -384,6 +384,7 @@ typedef struct mi_segments_tld_s {
 // OS thread local data
 typedef struct mi_os_tld_s {
+  size_t region_idx;   // start point for next allocation
   mi_stats_t* stats;   // points to tld stats
 } mi_os_tld_t;
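For orientation, a minimal sketch of what this hunk changes (illustrative stand-ins, not the actual mimalloc headers; `my_os_tld_t`, `my_stats_t`, and `my_os_tld` are invented names): the region search cursor now lives in the per-thread OS tld rather than in a process-wide global, and the zero in the `tld_main` initializer in the next hunk makes the main thread start its first search at region 0.

#include <stddef.h>

// Sketch only: a per-thread "OS tld" record that carries its own region search cursor.
typedef struct my_stats_s { size_t reserved; } my_stats_t;

typedef struct my_os_tld_s {
  size_t      region_idx;   // start point for the next region search
  my_stats_t* stats;        // points to this thread's stats
} my_os_tld_t;

// One instance per thread; zero-initialization means each thread's first
// search starts at region 0 (C11 _Thread_local used here for illustration).
static _Thread_local my_os_tld_t my_os_tld = { 0, NULL };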

@@ -97,7 +97,7 @@ static mi_tld_t tld_main = {
   0,
   &_mi_heap_main,
   { { NULL, NULL }, {NULL ,NULL}, 0, 0, 0, 0, 0, 0, NULL, tld_main_stats }, // segments
-  { tld_main_stats },    // os
+  { 0, tld_main_stats }, // os
   { MI_STATS_NULL }      // stats
 };

@@ -79,7 +79,6 @@ typedef struct mem_region_s {
 static mem_region_t regions[MI_REGION_MAX];
 static volatile size_t regions_count = 0;        // allocated regions
-static volatile uintptr_t region_next_idx = 0;   // good place to start searching

 /* ----------------------------------------------------------------------------
@@ -180,7 +179,6 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   }
   // and return the allocation
-  mi_atomic_write(&region_next_idx,idx);  // next search from here
   *p = blocks_start;
   *id = (idx*MI_REGION_MAP_BITS) + bitidx;
   return true;
@@ -267,7 +265,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t*
   // find a range of free blocks
   void* p = NULL;
   size_t count = mi_atomic_read(&regions_count);
-  size_t idx = mi_atomic_read(&region_next_idx);
+  size_t idx = tld->region_idx; // start index is per-thread to reduce contention
   for (size_t visited = 0; visited < count; visited++, idx++) {
     if (idx >= count) idx = 0;  // wrap around
     if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, &p, id, tld)) return NULL; // error
@@ -286,6 +284,9 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t*
     // we could not find a place to allocate, fall back to the os directly
     p = _mi_os_alloc_aligned(size, alignment, commit, tld);
   }
+  else {
+    tld->region_idx = idx;  // next start of search
+  }
   mi_assert_internal( p == NULL || (uintptr_t)p % alignment == 0);
   return p;
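
Taken together, the region-allocator hunks above replace the shared atomic cursor `region_next_idx` with the per-thread `tld->region_idx`. Below is a hedged, self-contained sketch of the resulting search pattern; the names `os_tld_t`, `region_try_claim`, and `alloc_from_regions` are invented for illustration and are not the mimalloc API.

#include <stdbool.h>
#include <stddef.h>

typedef struct os_tld_s {
  size_t region_idx;                    // per-thread start point for the next search
} os_tld_t;

// Stub for the sketch: a region either hands out blocks (sets *p) or is full
// (*p stays NULL); returning false would signal a hard error.
static bool region_try_claim(size_t idx, size_t size, void** p) {
  (void)idx; (void)size;
  *p = NULL;
  return true;
}

// Round-robin search that starts where this thread last succeeded.
static void* alloc_from_regions(size_t size, size_t count, os_tld_t* tld) {
  void*  p   = NULL;
  size_t idx = tld->region_idx;                 // per-thread start index; no shared cursor to contend on
  for (size_t visited = 0; visited < count; visited++, idx++) {
    if (idx >= count) idx = 0;                  // wrap around
    if (!region_try_claim(idx, size, &p)) return NULL;  // hard error
    if (p != NULL) break;                       // found space in region `idx`
  }
  if (p != NULL) {
    tld->region_idx = idx;                      // remember where to start next time
  }
  return p;                                     // NULL: caller falls back to the OS allocator
}

The cursor is only written back on success, matching the new else branch above: a failed scan leaves the thread's start index unchanged and the caller falls back to allocating directly from the OS.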