use atomic read/write on the page->heap field where concurrent interaction is possible

Daan Leijen 2019-11-20 13:19:17 -08:00
parent 94bfb47725
commit 4d4a2885f5
3 changed files with 18 additions and 11 deletions
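The helpers this commit switches to are thin wrappers in mimalloc's atomic header. As a rough sketch of their shape, assuming C11 <stdatomic.h> and acquire/release orderings (the real definitions in include/mimalloc-atomic.h map to platform intrinsics and may choose different memory orders):

#include <stdatomic.h>
#include <stdint.h>

// Sketch only: mimalloc's actual helpers live in include/mimalloc-atomic.h.
// mi_atomic_cast reinterprets a field as a volatile pointer for the helpers.
#define mi_atomic_cast(tp,x)  (volatile tp*)((uintptr_t)(x))

// One atomic load: the value cannot tear and the compiler cannot re-read it.
static inline void* mi_atomic_read_ptr(volatile void* const* p) {
  return atomic_load_explicit((_Atomic(void*)*)p, memory_order_acquire);
}

// One atomic store: the new value is published safely to concurrent readers.
static inline void mi_atomic_write_ptr(volatile void** p, void* x) {
  atomic_store_explicit((_Atomic(void*)*)p, x, memory_order_release);
}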

src/alloc.c

@@ -235,7 +235,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
   }
   else {
     // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
-    mi_heap_t* heap = page->heap;
+    mi_heap_t* heap = (mi_heap_t*)mi_atomic_read_ptr(mi_atomic_cast(void*, &page->heap));
     mi_assert_internal(heap != NULL);
     if (heap != NULL) {
       // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
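This hunk is the read side of the race: page->heap can be rewritten concurrently by the owning thread, so the freeing thread must load it with a single atomic read, while the MI_DELAYED_FREEING flag (set beforehand) keeps the heap object itself alive. A self-contained sketch of the pattern, using hypothetical stand-in types rather than mimalloc's:

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

typedef struct heap_s { int dummy; } heap_t;
typedef struct page_s {
  _Atomic(heap_t*) heap;  // written by the owner, read by freeing threads
} page_t;

static heap_t g_heap;
static page_t g_page = { &g_heap };

// owner thread detaches the page (counterpart of mi_atomic_write_ptr)
static int owner(void* arg) {
  (void)arg;
  atomic_store_explicit(&g_page.heap, NULL, memory_order_release);
  return 0;
}

int main(void) {
  thrd_t t;
  thrd_create(&t, owner, NULL);
  // freeing thread (counterpart of mi_atomic_read_ptr): observes either the
  // old heap or NULL, never a torn value or a compiler-duplicated re-read
  heap_t* h = atomic_load_explicit(&g_page.heap, memory_order_acquire);
  printf("heap %s\n", h != NULL ? "still attached" : "detached");
  thrd_join(t, NULL);
  return 0;
}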

src/page-queue.c

@@ -260,7 +260,7 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   page->heap->page_count--;
   page->next = NULL;
   page->prev = NULL;
-  page->heap = NULL;
+  mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL);
   mi_page_set_in_full(page,false);
 }
@@ -274,7 +274,7 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
           (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   mi_page_set_in_full(page, mi_page_queue_is_full(queue));
-  page->heap = heap;
+  mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), heap);
   page->next = queue->first;
   page->prev = NULL;
   if (queue->first != NULL) {
@@ -338,7 +338,7 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
   // set append pages to new heap and count
   size_t count = 0;
   for (mi_page_t* page = append->first; page != NULL; page = page->next) {
-    page->heap = heap;
+    mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), heap);
     count++;
   }
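All three hunks in this file are the write side of the same race: every plain assignment to page->heap becomes an atomic store, while next/prev stay plain fields because only the owning thread touches them. A reduced sketch of the push path under those assumptions (hypothetical types; the real queue code also maintains more state, such as the queue tail):

#include <stdatomic.h>
#include <stddef.h>

typedef struct heap_s heap_t;
typedef struct page_s {
  _Atomic(heap_t*) heap;   // cross-thread visible: atomic store required
  struct page_s* next;     // owner-thread only: plain stores are fine
  struct page_s* prev;
} page_t;

typedef struct page_queue_s { page_t* first; } page_queue_t;

static void page_queue_push(heap_t* heap, page_queue_t* queue, page_t* page) {
  atomic_store_explicit(&page->heap, heap, memory_order_release);
  page->next = queue->first;
  page->prev = NULL;
  if (queue->first != NULL) queue->first->prev = page;
  queue->first = page;
}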

src/page.c

@@ -343,18 +343,24 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_internal(pq == mi_page_queue_of(page));
   mi_assert_internal(page->heap != NULL);
 
-  _mi_page_use_delayed_free(page,MI_NEVER_DELAYED_FREE);
+  #if MI_DEBUG > 1
+  mi_heap_t* pheap = (mi_heap_t*)mi_atomic_read_ptr(mi_atomic_cast(void*, &page->heap));
+  #endif
+
+  // remove from our page list
+  mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
+  mi_page_queue_remove(pq, page);
+
+  // page is no longer associated with our heap
+  mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL);
 
   #if MI_DEBUG>1
   // check there are no references left..
-  for (mi_block_t* block = (mi_block_t*)page->heap->thread_delayed_free; block != NULL; block = mi_block_nextx(page->heap->cookie,block)) {
+  for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap->cookie, block)) {
     mi_assert_internal(_mi_ptr_page(block) != page);
   }
   #endif
 
-  // and then remove from our page list
-  mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
-  mi_page_queue_remove(pq, page);
 
   // and abandon it
   mi_assert_internal(page->heap == NULL);
   _mi_segment_page_abandon(page,segments_tld);
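The point of this hunk is ordering, not just atomicity: the heap pointer is snapshotted into pheap (debug builds only) while the page is still attached, the page is unlinked from the owner's list, and only then is page->heap atomically cleared, so the debug scan afterwards has to use the snapshot rather than re-read a field that is now NULL. A compilable sketch of that sequence with hypothetical stand-ins:

#include <stdatomic.h>
#include <assert.h>
#include <stddef.h>

typedef struct heap_s { int dummy; } heap_t;
typedef struct page_s {
  _Atomic(heap_t*) heap;
  struct page_s* next;
  struct page_s* prev;
} page_t;
typedef struct page_queue_s { page_t* first; } page_queue_t;

static void page_queue_remove(page_queue_t* pq, page_t* page) {
  if (page->prev != NULL) page->prev->next = page->next;
  if (page->next != NULL) page->next->prev = page->prev;
  if (pq->first == page) pq->first = page->next;
  page->next = NULL; page->prev = NULL;
}

static void page_abandon(page_t* page, page_queue_t* pq) {
  // 1. snapshot the heap while the page is still attached
  heap_t* pheap = atomic_load_explicit(&page->heap, memory_order_acquire);
  // 2. unlink from the owner's queue
  page_queue_remove(pq, page);
  // 3. publish the detachment: other threads now see a heap-less page
  atomic_store_explicit(&page->heap, NULL, memory_order_release);
  // 4. any later checks must use the snapshot, not page->heap
  assert(pheap != NULL);
  (void)pheap;  // a debug build would scan pheap's delayed-free list here
}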
@@ -755,7 +761,8 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
   mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
   mi_assert_internal(_mi_page_segment(page)->used==1);
   mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
-  page->heap = NULL;
+  mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL);
   if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
     _mi_stat_increase(&heap->tld->stats.giant, block_size);
     _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1);