mirror of https://github.com/microsoft/mimalloc.git
synced 2025-05-06 23:39:31 +03:00

commit 4ad7fedd25 (parent 95aeda4cdd)

    track os abandoned pages in a list

5 changed files with 61 additions and 42 deletions
include/mimalloc/atomic.h

@@ -415,6 +415,8 @@ static inline void mi_atomic_yield(void) {
 #pragma warning(disable:26110)  // unlock with holding lock
 #endif
 
+#define mi_lock(lock)  for(bool _go = (mi_lock_acquire(lock),true); _go; (mi_lock_release(lock), _go=false) )
+
 #if defined(_WIN32)
 
 #if 0
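The new mi_lock macro turns each acquire/release pair into a scoped block: the
for-statement acquires the lock in its init clause, runs the body exactly once,
and releases the lock in its increment clause. A minimal standalone sketch of
the same pattern, with illustrative demo_* names standing in for mimalloc's
lock primitives:

  #include <stdbool.h>
  #include <stdio.h>

  static void demo_acquire(int* lock) { *lock = 1; }  // stand-in for mi_lock_acquire
  static void demo_release(int* lock) { *lock = 0; }  // stand-in for mi_lock_release

  // acquire before the first (and only) iteration, release after it
  #define demo_lock(lock) \
    for (bool _go = (demo_acquire(lock), true); _go; (demo_release(lock), _go = false))

  int main(void) {
    int lock = 0;
    demo_lock(&lock) {
      printf("inside, lock=%d\n", lock);   // prints: inside, lock=1
    }
    printf("outside, lock=%d\n", lock);    // prints: outside, lock=0
    return 0;
  }

One caveat of the pattern: a return (or break) inside the block skips the
increment clause and therefore the release. Consistent with that, the
mi_arenas_try_alloc hunk in src/arena.c below moves the early "return p"
out of the locked region instead of returning from inside it.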
@@ -424,9 +426,8 @@ static inline void mi_atomic_yield(void) {
 static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
   return TryEnterCriticalSection(lock);
 }
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
+static inline void mi_lock_acquire(mi_lock_t* lock) {
   EnterCriticalSection(lock);
-  return true;
 }
 static inline void mi_lock_release(mi_lock_t* lock) {
   LeaveCriticalSection(lock);
@@ -445,9 +446,8 @@ static inline void mi_lock_done(mi_lock_t* lock) {
 static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
   return TryAcquireSRWLockExclusive(lock);
 }
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
+static inline void mi_lock_acquire(mi_lock_t* lock) {
   AcquireSRWLockExclusive(lock);
-  return true;
 }
 static inline void mi_lock_release(mi_lock_t* lock) {
   ReleaseSRWLockExclusive(lock);
@@ -468,8 +468,11 @@ static inline void mi_lock_done(mi_lock_t* lock) {
 static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
   return (pthread_mutex_trylock(lock) == 0);
 }
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
-  return (pthread_mutex_lock(lock) == 0);
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+  const int err = pthread_mutex_lock(lock);
+  if (err != 0) {
+    _mi_error_message(EFAULT, "internal error: lock cannot be acquired\n");
+  }
 }
 static inline void mi_lock_release(mi_lock_t* lock) {
   pthread_mutex_unlock(lock);
@@ -489,9 +492,8 @@ static inline void mi_lock_done(mi_lock_t* lock) {
 static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
   return lock->try_lock();
 }
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
+static inline void mi_lock_acquire(mi_lock_t* lock) {
   lock->lock();
-  return true;
 }
 static inline void mi_lock_release(mi_lock_t* lock) {
   lock->unlock();
@@ -514,12 +516,11 @@ static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
   uintptr_t expected = 0;
   return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
 }
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
+static inline void mi_lock_acquire(mi_lock_t* lock) {
   for (int i = 0; i < 1000; i++) {  // for at most 1000 tries?
-    if (mi_lock_try_acquire(lock)) return true;
+    if (mi_lock_try_acquire(lock)) return;
     mi_atomic_yield();
   }
-  return true;
 }
 static inline void mi_lock_release(mi_lock_t* lock) {
   mi_atomic_store_release(lock, (uintptr_t)0);
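All five lock implementations (CriticalSection, SRW lock, pthread_mutex,
std::mutex, and the generic atomic spinlock above) get the same signature
change: mi_lock_acquire now returns void instead of bool, which is what lets
the mi_lock macro treat acquisition as unconditional. A standalone C11 sketch
of the CAS-based spinlock the generic fallback implements, again with
illustrative demo_* names (mi_atomic_cas_strong_acq_rel corresponds to a
strong compare-exchange with acquire-release ordering):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdint.h>

  typedef _Atomic(uintptr_t) demo_lock_t;

  static bool demo_lock_try_acquire(demo_lock_t* lock) {
    uintptr_t expected = 0;
    // CAS 0 -> 1: acquire-release on success, acquire on failure
    return atomic_compare_exchange_strong_explicit(lock, &expected, (uintptr_t)1,
                                                   memory_order_acq_rel,
                                                   memory_order_acquire);
  }

  static void demo_lock_acquire(demo_lock_t* lock) {
    while (!demo_lock_try_acquire(lock)) {
      // spin until the CAS succeeds (mimalloc yields between tries)
    }
  }

  static void demo_lock_release(demo_lock_t* lock) {
    atomic_store_explicit(lock, (uintptr_t)0, memory_order_release);
  }

Unlike this unbounded sketch, the generic mi_lock_acquire above stops after
1000 failed tries and falls through without holding the lock; the "for at
most 1000 tries?" comment suggests that bound is still an open question.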
include/mimalloc/types.h

@@ -556,8 +556,8 @@ typedef struct mi_subproc_s {
   mi_lock_t arena_reserve_lock;                  // lock to ensure arena's get reserved one at a time
 
   _Atomic(size_t) abandoned_count[MI_BIN_COUNT]; // total count of abandoned pages for this sub-process
-  mi_page_queue_t os_pages;                      // list of pages that OS allocated and not in an arena (only used if `mi_option_visit_abandoned` is on)
-  mi_lock_t os_pages_lock;                       // lock for the os pages list (this lock protects list operations)
+  mi_page_t* os_abandoned_pages;                 // list of pages that OS allocated and not in an arena (only used if `mi_option_visit_abandoned` is on)
+  mi_lock_t os_abandoned_pages_lock;             // lock for the os abandoned pages list (this lock protects list operations)
 
   mi_memid_t memid;                              // provenance of this memory block (meta or OS)
   mi_stats_t stats;                              // sub-process statistics (tld stats are merged in on thread termination)
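This is the core of the commit: the mi_page_queue_t os_pages field becomes an
intrusive doubly-linked list of abandoned OS-allocated pages, threaded through
each page's own next/prev pointers, with the list head in the sub-process and
a dedicated lock. A minimal sketch of the push-front and unlink operations the
arena.c hunks below perform (demo_page_t is illustrative; mimalloc keeps these
links directly in mi_page_t):

  #include <stddef.h>

  typedef struct demo_page_s {
    struct demo_page_s* next;
    struct demo_page_s* prev;
  } demo_page_t;

  // push in front, as in _mi_arena_page_abandon
  static void demo_list_push(demo_page_t** head, demo_page_t* page) {
    page->prev = NULL;
    page->next = *head;
    if (page->next != NULL) { page->next->prev = page; }
    *head = page;
  }

  // unlink from any position, as in _mi_arena_page_unabandon
  static void demo_list_remove(demo_page_t** head, demo_page_t* page) {
    if (page->prev != NULL) { page->prev->next = page->next; }
    if (page->next != NULL) { page->next->prev = page->prev; }
    if (*head == page) { *head = page->next; }
    page->next = NULL;
    page->prev = NULL;
  }

Unlinking from an arbitrary position is why the list is doubly linked, and why
the asserts in _mi_arena_page_free and _mi_arena_page_abandon below are
strengthened to check page->prev as well as page->next.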
src/arena-meta.c

@@ -64,12 +64,11 @@ static void* mi_meta_block_start( mi_meta_page_t* mpage, size_t block_idx ) {
 // allocate a fresh meta page and add it to the global list.
 static mi_meta_page_t* mi_meta_page_zalloc(void) {
   // allocate a fresh arena slice
-  // note: we always use subproc_main directly for the meta-data since at thread start the metadata for the
-  // tld and heap need to be (meta) allocated and at that time we cannot read the tld pointer (yet).
+  // note: careful with _mi_subproc as it may recurse into mi_tld and meta_page_zalloc again..
   mi_memid_t memid;
-  mi_meta_page_t* mpage = (mi_meta_page_t*)_mi_arena_alloc_aligned(_mi_subproc_main(), MI_ARENA_SLICE_SIZE, MI_ARENA_SLICE_ALIGN, 0,
+  mi_meta_page_t* mpage = (mi_meta_page_t*)_mi_arena_alloc_aligned(_mi_subproc(), MI_ARENA_SLICE_SIZE, MI_ARENA_SLICE_ALIGN, 0,
                                                                    true /* commit*/, true /* allow large */,
-                                                                   NULL, 0 /* tseq */, &memid );
+                                                                   NULL /* req arena */, 0 /* thread_seq */, &memid);
   if (mpage == NULL) return NULL;
   mi_assert_internal(_mi_is_aligned(mpage,MI_META_PAGE_ALIGN));
   if (!memid.initially_zero) {
src/arena.c (48 changed lines)
@@ -439,24 +439,20 @@ static mi_decl_noinline void* mi_arenas_try_alloc(
 
   // otherwise, try to reserve a new arena -- but one thread at a time.. (todo: allow 2 or 4 to reduce contention?)
   const size_t arena_count = mi_arenas_get_count(subproc);
-  if (mi_lock_acquire(&subproc->arena_reserve_lock)) {
-    bool ok = true;
+  mi_lock(&subproc->arena_reserve_lock) {
     if (arena_count == mi_arenas_get_count(subproc)) {
       // we are the first to enter the lock, reserve a fresh arena
       mi_arena_id_t arena_id = 0;
-      ok = mi_arena_reserve(subproc, mi_size_of_slices(slice_count), allow_large, req_arena, &arena_id);
+      mi_arena_reserve(subproc, mi_size_of_slices(slice_count), allow_large, req_arena, &arena_id);
     }
     else {
       // another thread already reserved a new arena
     }
-    mi_lock_release(&subproc->arena_reserve_lock);
-    if (ok) {
-      // try once more to allocate in the new arena
-      mi_assert_internal(req_arena == NULL);
-      p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, memid);
-      if (p != NULL) return p;
-    }
   }
+  // try once more to allocate in the new arena
+  mi_assert_internal(req_arena == NULL);
+  p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, memid);
+  if (p != NULL) return p;
 
   return NULL;
 }
@@ -685,11 +681,13 @@ static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_
   else {
     page->block_size_shift = 0;
   }
+  // and own it
+  mi_page_try_claim_ownership(page);
 
   // register in the page map
   _mi_page_map_register(page);
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
 
-  mi_page_try_claim_ownership(page);
   mi_assert_internal(mi_page_block_size(page) == block_size);
   mi_assert_internal(mi_page_is_abandoned(page));
   mi_assert_internal(mi_page_is_owned(page));
@@ -771,7 +769,8 @@ void _mi_arena_page_free(mi_page_t* page) {
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(mi_page_is_owned(page));
   mi_assert_internal(mi_page_all_free(page));
-  mi_assert_internal(page->next==NULL);
+  mi_assert_internal(mi_page_is_abandoned(page));
+  mi_assert_internal(page->next==NULL && page->prev==NULL);
 
 #if MI_DEBUG>1
   if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
@@ -790,6 +789,7 @@ void _mi_arena_page_free(mi_page_t* page) {
   }
 #endif
 
+  // unregister page
   _mi_page_map_unregister(page);
   if (page->memid.memkind == MI_MEM_ARENA) {
     mi_bitmap_clear(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index);
@@ -807,7 +807,7 @@ void _mi_arena_page_abandon(mi_page_t* page) {
   mi_assert_internal(mi_page_is_owned(page));
   mi_assert_internal(mi_page_is_abandoned(page));
   mi_assert_internal(!mi_page_all_free(page));
-  mi_assert_internal(page->next==NULL);
+  mi_assert_internal(page->next==NULL && page->prev == NULL);
 
   if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
     // make available for allocations
@@ -827,8 +827,19 @@ void _mi_arena_page_abandon(mi_page_t* page) {
     mi_subproc_stat_increase(arena->subproc, pages_abandoned, 1);
   }
   else {
-    // page is full (or a singleton), page is OS/externally allocated
+    // page is full (or a singleton), or the page is OS/externally allocated
     // leave as is; it will be reclaimed when an object is free'd in the page
+    mi_subproc_t* subproc = _mi_subproc();
+    // but for non-arena pages, add to the subproc list so these can be visited
+    if (page->memid.memkind != MI_MEM_ARENA && mi_option_is_enabled(mi_option_visit_abandoned)) {
+      mi_lock(&subproc->os_abandoned_pages_lock) {
+        // push in front
+        page->prev = NULL;
+        page->next = subproc->os_abandoned_pages;
+        if (page->next != NULL) { page->next->prev = page; }
+        subproc->os_abandoned_pages = page;
+      }
+    }
     mi_subproc_stat_increase(_mi_subproc(), pages_abandoned, 1);
   }
   _mi_page_unown(page);
@@ -881,9 +892,18 @@ void _mi_arena_page_unabandon(mi_page_t* page) {
   }
   else {
     // page is full (or a singleton), page is OS allocated
-    // nothing to do
-    // TODO: maintain count of these as well?
+    mi_subproc_t* subproc = _mi_subproc();
     mi_subproc_stat_decrease(_mi_subproc(), pages_abandoned, 1);
+    // if not an arena page, remove from the subproc os pages list
+    if (page->memid.memkind != MI_MEM_ARENA && mi_option_is_enabled(mi_option_visit_abandoned)) {
+      mi_lock(&subproc->os_abandoned_pages_lock) {
+        if (page->prev != NULL) { page->prev->next = page->next; }
+        if (page->next != NULL) { page->next->prev = page->prev; }
+        if (subproc->os_abandoned_pages == page) { subproc->os_abandoned_pages = page->next; }
+        page->next = NULL;
+        page->prev = NULL;
+      }
+    }
   }
 }
 
src/init.c (11 changed lines)
@@ -223,7 +223,7 @@ void _mi_heap_guarded_init(mi_heap_t* heap) {
 static void mi_subproc_main_init(void) {
   if (subproc_main.memid.memkind != MI_MEM_STATIC) {
     subproc_main.memid = _mi_memid_create(MI_MEM_STATIC);
-    mi_lock_init(&subproc_main.os_pages_lock);
+    mi_lock_init(&subproc_main.os_abandoned_pages_lock);
     mi_lock_init(&subproc_main.arena_reserve_lock);
   }
 }
@@ -361,7 +361,7 @@ mi_subproc_id_t mi_subproc_new(void) {
   mi_subproc_t* subproc = (mi_subproc_t*)_mi_meta_zalloc(sizeof(mi_subproc_t),&memid);
   if (subproc == NULL) return NULL;
   subproc->memid = memid;
-  mi_lock_init(&subproc->os_pages_lock);
+  mi_lock_init(&subproc->os_abandoned_pages_lock);
   mi_lock_init(&subproc->arena_reserve_lock);
   return subproc;
 }
@@ -375,11 +375,10 @@ void mi_subproc_delete(mi_subproc_id_t subproc_id) {
   mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id);
   // check if there are os pages still..
   bool safe_to_delete = false;
-  if (mi_lock_acquire(&subproc->os_pages_lock)) {
-    if (subproc->os_pages.first == NULL) {
+  mi_lock(&subproc->os_abandoned_pages_lock) {
+    if (subproc->os_abandoned_pages == NULL) {
       safe_to_delete = true;
     }
-    mi_lock_release(&subproc->os_pages_lock);
   }
   if (!safe_to_delete) return;
 
@@ -388,7 +387,7 @@ void mi_subproc_delete(mi_subproc_id_t subproc_id) {
 
   // safe to release
   // todo: should we refcount subprocesses?
-  mi_lock_done(&subproc->os_pages_lock);
+  mi_lock_done(&subproc->os_abandoned_pages_lock);
   mi_lock_done(&subproc->arena_reserve_lock);
   _mi_meta_free(subproc, sizeof(mi_subproc_t), subproc->memid);
 }
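Both locks are initialized in mi_subproc_main_init and mi_subproc_new and torn
down in mi_subproc_delete, and deletion only proceeds once the abandoned-page
list has been observed empty under its lock. What the list enables is a safe
walk over abandoned OS pages while the lock is held; a hypothetical walker in
the same style, reusing the demo_page_t sketch above (demo_list_visit is
illustrative and not part of this commit):

  #include <stdbool.h>

  typedef bool (*demo_visit_fun)(demo_page_t* page, void* arg);

  // call the visitor for each page; the caller holds the list lock
  // (os_abandoned_pages_lock in mimalloc) for the whole traversal
  static bool demo_list_visit(demo_page_t* head, demo_visit_fun visit, void* arg) {
    for (demo_page_t* page = head; page != NULL; page = page->next) {
      if (!visit(page, arg)) return false;  // visitor can stop the walk early
    }
    return true;
  }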