fix thread data cache to use pure os alloc

daanx 2023-04-13 15:27:20 -07:00
parent 0ba79d01f6
commit 48d0d0da9b
3 changed files with 18 additions and 18 deletions
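In short: a mi_thread_data_t is now allocated and freed directly with the OS primitives instead of going through the arena meta allocator, so the thread-data cache no longer needs to carry a mi_memid_t. A condensed before/after sketch of the allocation path, pulled from the init.c hunk below (not a verbatim excerpt):

  // before: allocate via the arena meta allocator and keep the memid around for the free
  mi_memid_t memid;
  mi_thread_data_t* td = (mi_thread_data_t*)_mi_arena_meta_zalloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
  // ... later ...
  _mi_arena_meta_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);

  // after: pure OS allocation; no memid needed, and zeroing is skipped when the OS
  // already returned zeroed pages
  bool is_zero = false;
  mi_thread_data_t* td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &is_zero, &_mi_stats_main);
  if (td != NULL && !is_zero) { _mi_memzero(td, sizeof(*td)); }
  // ... later ...
  _mi_os_free(tdfree, sizeof(mi_thread_data_t), &_mi_stats_main);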

include/mimalloc/internal.h

@@ -121,8 +121,6 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_o
 bool  _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
 void  _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);
 bool  _mi_arena_contains(const void* p);
-void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
-void  _mi_arena_meta_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
 
 // "segment-map.c"
 void  _mi_segment_map_allocated_at(const mi_segment_t* segment);

src/arena.c

@@ -194,7 +194,7 @@ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* m
   return p;
 }
 
-void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
+static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
   *memid = mi_arena_memid_none();
 
   // try static
@@ -216,7 +216,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
   return NULL;
 }
 
-void _mi_arena_meta_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
+static void mi_arena_meta_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
   if (memid.memkind == MI_MEM_OS) {
     _mi_os_free(p, size, stats);
   }
@@ -717,7 +717,7 @@ static void mi_arenas_destroy(void) {
       else {
        _mi_os_free(arena->start, mi_arena_size(arena), &_mi_stats_main);
       }
-      _mi_arena_meta_free(arena, arena->meta_size, arena->meta_memid, &_mi_stats_main);
+      mi_arena_meta_free(arena, arena->meta_size, arena->meta_memid, &_mi_stats_main);
     }
     else {
       new_max_arena = i;
@@ -789,7 +789,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_committed,
   const size_t bitmaps = (allow_decommit ? 4 : 2);
   const size_t asize  = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
   mi_memid_t meta_memid;
-  mi_arena_t* arena = (mi_arena_t*)_mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
+  mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
   if (arena == NULL) return false;
 
   // already zero'd due to os_alloc
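For context on the two functions made static above: the meta allocator first tries a small static buffer and only then falls back to the OS, recording in the memid which path was taken so that mi_arena_meta_free knows whether _mi_os_free is needed. A rough sketch of that structure, using the helpers visible in the hunks above (the alignment argument and the way the memid is tagged are assumptions, not taken from this diff):

  static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
    *memid = mi_arena_memid_none();
    // try static: a small process-global buffer that is never freed
    void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid);  // alignment value is an assumption
    if (p != NULL) return p;
    // or fall back to the OS; tag the memid so the free path calls _mi_os_free
    bool is_zero = false;
    p = _mi_os_alloc(size, &is_zero, stats);
    if (p == NULL) return NULL;
    memid->memkind = MI_MEM_OS;              // assumed; the real code may set this via a helper
    if (!is_zero) { _mi_memzero(p, size); }
    return p;
  }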

src/init.c

@@ -191,32 +191,34 @@ static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];
 
 static mi_thread_data_t* mi_thread_data_zalloc(void) {
   // try to find thread metadata in the cache
-  mi_thread_data_t* td;
+  bool is_zero = false;
+  mi_thread_data_t* td = NULL;
   for (int i = 0; i < TD_CACHE_SIZE; i++) {
     td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
     if (td != NULL) {
       // found cached allocation, try use it
       td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
       if (td != NULL) {
-        _mi_memzero(td, sizeof(*td));
-        return td;
+        break;
       }
     }
   }
 
   // if that fails, allocate as meta data
-  mi_memid_t memid;
-  td = (mi_thread_data_t*)_mi_arena_meta_zalloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
   if (td == NULL) {
-    // if this fails, try once more. (issue #257)
-    td = (mi_thread_data_t*)_mi_arena_meta_zalloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+    td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &is_zero, &_mi_stats_main);
     if (td == NULL) {
-      // really out of memory
-      _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
+      // if this fails, try once more. (issue #257)
+      td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &is_zero, &_mi_stats_main);
+      if (td == NULL) {
+        // really out of memory
+        _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
+      }
     }
   }
-  if (td != NULL) {
-    td->memid = memid;
+
+  if (td != NULL && !is_zero) {
+    _mi_memzero(td, sizeof(*td));
   }
   return td;
 }
@@ -233,7 +235,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
     }
   }
   // if that fails, just free it directly
-  _mi_arena_meta_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
+  _mi_os_free(tdfree, sizeof(mi_thread_data_t), &_mi_stats_main);
 }
 
 static void mi_thread_data_collect(void) {
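The cache itself is unchanged by this commit: mi_thread_data_free still tries to park the block in an empty td_cache slot, and only the final fallback now calls _mi_os_free. A sketch of that free path, assuming the usual compare-and-swap loop (only the last two lines appear in the hunk above):

  static void mi_thread_data_free(mi_thread_data_t* tdfree) {
    // try to stash the block in an empty cache slot for reuse by a later thread
    for (int i = 0; i < TD_CACHE_SIZE; i++) {
      mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
      if (td == NULL) {
        mi_thread_data_t* expected = NULL;
        if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) {
          return;  // parked; mi_thread_data_zalloc re-zeroes it on reuse
        }
      }
    }
    // if that fails, just free it directly (the line changed by this commit)
    _mi_os_free(tdfree, sizeof(mi_thread_data_t), &_mi_stats_main);
  }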