mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-01 09:14:38 +03:00)

wip: can run mstress

commit c9abfe8253 (parent d96c134566)
4 changed files with 19 additions and 13 deletions

@@ -321,7 +321,12 @@ typedef struct mi_page_s {
 #define MI_PAGE_ALIGN                MI_ARENA_SLICE_ALIGN  // pages must be aligned on this for the page map.
 #define MI_PAGE_MIN_BLOCK_ALIGN      (32)                  // minimal block alignment in a page
 #define MI_PAGE_MAX_OVERALLOC_ALIGN  MI_ARENA_SLICE_SIZE   // (64 KiB) limit for which we overallocate in arena pages, beyond this use OS allocation

 #if MI_DEBUG && MI_SIZE_SIZE == 8
 #define MI_PAGE_INFO_SIZE            ((MI_INTPTR_SHIFT+2)*MI_PAGE_MIN_BLOCK_ALIGN)  // >= sizeof(mi_page_t)
 #else
 #define MI_PAGE_INFO_SIZE            ((MI_INTPTR_SHIFT+1)*MI_PAGE_MIN_BLOCK_ALIGN)  // >= sizeof(mi_page_t)
 #endif

 // The max object size are checked to not waste more than 12.5% internally over the page sizes.
 // (Except for large pages since huge objects are allocated in 4MiB chunks)

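For orientation, the page-info reservation above works out to a small fixed budget per page. The standalone sketch below evaluates both branches; MI_INTPTR_SHIFT == 3 is an assumed value for a 64-bit build and does not appear in this diff, the other constants mirror the hunk.

  // Standalone sketch of the MI_PAGE_INFO_SIZE arithmetic; MI_INTPTR_SHIFT == 3
  // is an assumed 64-bit value, the other constants mirror the diff above.
  #include <stdio.h>

  #define MI_INTPTR_SHIFT          3     // assumed: log2(sizeof(void*)) on 64-bit
  #define MI_PAGE_MIN_BLOCK_ALIGN  (32)  // from the hunk above

  int main(void) {
    // debug, 64-bit:  (3+2)*32 = 160 bytes reserved per page for the page info
    printf("debug:   %d\n", (MI_INTPTR_SHIFT+2)*MI_PAGE_MIN_BLOCK_ALIGN);
    // otherwise:      (3+1)*32 = 128 bytes reserved per page for the page info
    printf("release: %d\n", (MI_INTPTR_SHIFT+1)*MI_PAGE_MIN_BLOCK_ALIGN);
    return 0;
  }
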
@@ -538,8 +538,10 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t slice_count, size_t block_siz
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);

   mi_page_try_claim_ownership(page);
   mi_assert_internal(mi_page_block_size(page) == block_size);
   mi_assert_internal(mi_page_is_abandoned(page));
   mi_assert_internal(mi_page_is_owned(page));
   return page;
 }

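The fresh-page path above now claims ownership before the page is returned, while the page is still marked abandoned. As a rough illustration of what a try-claim primitive of this kind can look like, here is a minimal sketch that models ownership as a single atomic bit; the type and helper names are hypothetical and are not mimalloc's real structures.

  // Hypothetical try-claim: atomically set an ownership bit and report whether
  // this caller is the one who set it (i.e. whether the claim succeeded).
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdint.h>

  typedef _Atomic(uintptr_t) owner_word_t;

  static bool page_try_claim_ownership(owner_word_t* w) {
    uintptr_t prev = atomic_fetch_or_explicit(w, (uintptr_t)1, memory_order_acquire);
    return (prev & 1) == 0;   // true only for the first claimer
  }

  int main(void) {
    owner_word_t w = 0;                          // starts unowned
    bool first  = page_try_claim_ownership(&w);  // true: we claimed it
    bool second = page_try_claim_ownership(&w);  // false: already owned
    return (first && !second) ? 0 : 1;
  }
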
@@ -627,7 +629,6 @@ void _mi_arena_page_free(mi_page_t* page) {
   size_t slice_count;
   mi_arena_t* arena = mi_page_arena(page, &slice_index, &slice_count);

   mi_assert_internal(!mi_page_is_singleton(page));
   mi_assert_internal(mi_bitmap_is_clearN(&arena->slices_free, slice_index, slice_count));
   mi_assert_internal(mi_bitmap_is_setN(&arena->slices_committed, slice_index, slice_count));
   mi_assert_internal(mi_bitmap_is_clearN(&arena->slices_purge, slice_index, slice_count));

src/free.c (17 changes)

@@ -158,13 +158,15 @@ static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
   mi_assert_internal(mi_page_thread_id(page)==0);

   // we own the page now..
+  // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
+  _mi_arena_page_unabandon(page);  // this must be before collect
+
   // collect the thread atomic free list
   _mi_page_free_collect(page, false); // update `used` count
-  if (mi_page_is_singleton(page)) mi_assert_internal(mi_page_all_free(page));
+  if (mi_page_is_singleton(page)) { mi_assert_internal(mi_page_all_free(page)); }

   if (mi_page_all_free(page)) {
-    // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
-    _mi_arena_page_unabandon(page);
     // we can free the page directly
     _mi_arena_page_free(page);
     return;

@@ -186,17 +188,14 @@ static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
         (_mi_arena_memid_is_suitable(page->memid, tagheap->arena_id))  // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
        )
     {
-      // first remove it from the abandoned pages in the arena -- this waits for any readers to finish
-      _mi_arena_page_unabandon(page);
-      // and make it part of our heap
+      // make it part of our heap
       _mi_heap_page_reclaim(tagheap, page);
       return;
     }
   }

-  // give up ownership as we cannot reclaim this page
-  // note: we don't need to re-abandon as we did not yet unabandon
-  _mi_page_unown(page);
+  // we cannot reclaim this page.. abandon it again
+  _mi_arena_page_abandon(page);
  }
 }

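Taken together, the two src/free.c hunks move the unabandon step to the front of the reclaim path, so the tail of the function has to re-abandon the page instead of only releasing ownership. The sketch below condenses that control flow into a standalone toy; every type and helper is a hypothetical stand-in for the mimalloc internals named in the diff, and the heap/arena suitability checks are elided.

  // Toy model of the reclaim flow after this change: unabandon first, collect
  // the cross-thread frees, then either free, reclaim, or abandon again.
  #include <stdbool.h>
  #include <stdio.h>

  typedef struct page_s { bool abandoned; int used; } page_t;

  static void page_unabandon(page_t* p)         { p->abandoned = false; }  // ~ _mi_arena_page_unabandon
  static void page_abandon(page_t* p)           { p->abandoned = true;  }  // ~ _mi_arena_page_abandon
  static void page_free_collect(page_t* p)      { (void)p; }               // ~ _mi_page_free_collect (may lower `used`)
  static bool page_all_free(const page_t* p)    { return p->used == 0; }
  static bool heap_can_reclaim(const page_t* p) { (void)p; return false; } // tag/arena suitability elided

  static void try_reclaim_mt(page_t* page) {
    page_unabandon(page);                   // must happen before collect (waits for readers)
    page_free_collect(page);
    if (page_all_free(page)) {
      printf("free the page directly\n");   // ~ _mi_arena_page_free
    }
    else if (heap_can_reclaim(page)) {
      printf("reclaim into this heap\n");   // ~ _mi_heap_page_reclaim
    }
    else {
      page_abandon(page);                   // re-abandon: it was unabandoned up front
      printf("abandon the page again\n");
    }
  }

  int main(void) {
    page_t p = { true, 3 };   // abandoned page with 3 blocks still in use
    try_reclaim_mt(&p);
    return 0;
  }
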
@@ -120,7 +120,7 @@ bool _mi_page_is_valid(mi_page_t* page) {
   mi_assert_internal(page->keys[0] != 0);
   #endif
   if (!mi_page_is_abandoned(page)) {
-    mi_assert_internal(!_mi_process_is_initialized);
+    //mi_assert_internal(!_mi_process_is_initialized);
     {
       mi_page_queue_t* pq = mi_page_queue_of(page);
       mi_assert_internal(mi_page_queue_contains(pq, page));

@@ -734,7 +734,8 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_internal(page->capacity == 0);
   mi_assert_internal(page->free == NULL);
   mi_assert_internal(page->used == 0);
-  mi_assert_internal(page->xthread_free == 0);
+  mi_assert_internal(mi_page_is_owned(page));
+  mi_assert_internal(page->xthread_free == 1);
   mi_assert_internal(page->next == NULL);
   mi_assert_internal(page->prev == NULL);
   mi_assert_internal(page->retire_expire == 0);

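The init assertions now expect an owned page whose xthread_free word reads 1 rather than 0, which hints that the ownership flag is packed into the low bit of the atomic cross-thread free list. That encoding is an inference, not something stated in this diff; a minimal sketch of how such a packing can work, with hypothetical names, follows.

  // Sketch of packing an "owned" flag into bit 0 of an atomic free-list word:
  // an owned page with an empty cross-thread free list then holds the value 1.
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  typedef _Atomic(uintptr_t) xfree_word_t;

  static bool page_is_owned(xfree_word_t* xf) {
    return (atomic_load_explicit(xf, memory_order_relaxed) & 1) != 0;
  }

  static void* xfree_head(xfree_word_t* xf) {
    // mask off the flag bit to recover the free-list head pointer
    return (void*)(atomic_load_explicit(xf, memory_order_relaxed) & ~(uintptr_t)1);
  }

  int main(void) {
    xfree_word_t xf = 1;   // freshly initialized: owned, empty list
    printf("owned=%d head=%p\n", (int)page_is_owned(&xf), xfree_head(&xf));
    return 0;
  }
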