Mirror of https://github.com/microsoft/mimalloc.git, synced 2025-05-07 15:59:32 +03:00
Handle large allocations while dropping excess segments
This commit is contained in:
parent d19f103ea3
commit 5e7780cfbc

5 changed files with 93 additions and 25 deletions
@@ -658,7 +658,7 @@ typedef struct mi_segment_stats_s {
   _Atomic(size_t) reclaim_failed_count;
   _Atomic(size_t) allocated_count;
   _Atomic(size_t) freed_count;
-  mi_segment_alloc_counter_t alloc_stats[MI_BIN_HUGE+1];
+  mi_segment_alloc_counter_t alloc_stats[MI_BIN_FULL+1];
 } mi_segment_stats_t;
 
 void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
@@ -705,6 +705,7 @@ typedef struct mi_os_tld_s {
 typedef struct mi_segments_tld_s {
   mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1];       // free slice spans inside segments
   mi_span_queue_t large_spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside large segments
+  mi_segment_t* large_segment;
   size_t count;        // current number of segments
   size_t peak_count;   // peak number of segments
   size_t current_size; // current size of all segments
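The new large_segment field acts as a one-entry, per-thread cache. The mi_segment_alloc / mi_segment_free / mi_segment_abandon / mi_segment_reclaim hunks further down keep it in sync: it is set when a large-page segment is allocated or reclaimed, and cleared when that segment is freed or abandoned. A minimal sketch of that discipline, using simplified stand-in types rather than the real mimalloc ones:

#include <stdbool.h>
#include <stddef.h>

// Stand-ins for mi_segment_t / mi_segments_tld_t, for illustration only.
typedef struct segment_s {
  bool is_for_large_pages;   // set when the segment serves large pages
} segment_t;

typedef struct segments_tld_s {
  segment_t* large_segment;  // one-entry cache; NULL when no large segment is live
} segments_tld_t;

// On allocation or reclaim of a large-page segment: remember it.
static void cache_large_segment(segments_tld_t* tld, segment_t* s) {
  if (s->is_for_large_pages) tld->large_segment = s;
}

// On free or abandon: drop the cache if it points at the dying segment.
static void uncache_segment(segments_tld_t* tld, segment_t* s) {
  if (tld->large_segment == s) tld->large_segment = NULL;
}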
src/heap.c (29 changes)
@@ -657,20 +657,31 @@ mi_segment_t* mi_segments_get_segment_to_drop_by_slice(mi_segments_tld_t* tld, s
 const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment);
 
 static mi_segment_t* mi_heap_get_segment_to_drop(mi_heap_t* heap, size_t alloc_block_size) {
-  mi_page_queue_t* fullPageQueue = &heap->pages[MI_BIN_FULL];
   mi_segment_t* segment = NULL;
 
-  if (fullPageQueue->first != NULL) {
-    segment = _mi_ptr_segment(fullPageQueue->first);
-    int i = 0;
-    for (mi_page_t* page = fullPageQueue->first->next; page != NULL && i < 3; page = page->next, i++) {
-      mi_segment_t* temp_segment = _mi_ptr_segment(page);
-      if (temp_segment->used > segment->used) {
-        segment = temp_segment;
-      }
-    }
-  }
-  else {
+  if ((alloc_block_size > MI_MEDIUM_OBJ_SIZE_MAX) && (heap->tld->segments.large_segment != NULL)) {
+    return heap->tld->segments.large_segment;
+  }
+
+  int i = 0;
+  mi_page_queue_t* fullPageQueue = &heap->pages[MI_BIN_FULL];
+  for (mi_page_t* page = fullPageQueue->first; page != NULL; page = page->next) {
+    mi_segment_t* temp_segment = _mi_ptr_segment(page);
+    if (!temp_segment->is_for_large_pages) {
+      if (segment == NULL) {
+        segment = temp_segment;
+      }
+      else if (temp_segment->used > segment->used) {
+        segment = temp_segment;
+      }
+      if (i > 3) {
+        break;
+      }
+      i++;
+    }
+  }
+
+  if (segment == NULL) {
     segment = mi_segments_get_segment_to_drop_by_slice(&heap->tld->segments, alloc_block_size);
   }
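With this change, a large allocation (block size above MI_MEDIUM_OBJ_SIZE_MAX) is served straight from the cached large_segment when one exists; otherwise the heap scans the full-page queue for the most heavily used non-large segment, giving up after a handful of entries before falling back to the slice-based search. A standalone sketch of that bounded scan, again with stand-in types rather than the real mimalloc ones:

#include <stdbool.h>
#include <stddef.h>

typedef struct segment_s {
  size_t used;                 // pages in use inside this segment
  bool   is_for_large_pages;   // large-page segments are skipped by this scan
} segment_t;

typedef struct page_s {
  segment_t*     segment;
  struct page_s* next;
} page_t;

// Inspect at most `limit` full pages and keep the segment with the highest
// `used` count, mirroring the loop in mi_heap_get_segment_to_drop above.
static segment_t* pick_segment_to_drop(page_t* full_pages, int limit) {
  segment_t* best = NULL;
  int seen = 0;
  for (page_t* p = full_pages; p != NULL; p = p->next) {
    segment_t* s = p->segment;
    if (s->is_for_large_pages) continue;       // handled via the large_segment cache
    if (best == NULL || s->used > best->used) best = s;
    if (++seen > limit) break;                 // bound the scan, as the commit does
  }
  return best;  // NULL: caller falls back to the slice-based search
}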
@@ -745,7 +756,7 @@ void mi_heap_drop_segment(mi_heap_t* heap, size_t targetSegmentCount, size_t all
 
     // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
     // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
-    _mi_abandoned_collect(heap, false /* force? */, &heap->tld->segments);
+    _mi_abandoned_collect(heap, true /* force? */, &heap->tld->segments);
     }
   }
 }
@@ -137,7 +137,7 @@ mi_decl_cache_align static const mi_tld_t tld_empty = {
   0,
   false,
   NULL, NULL,
-  { MI_SEGMENT_SPAN_QUEUES_EMPTY, MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments
+  { MI_SEGMENT_SPAN_QUEUES_EMPTY, MI_SEGMENT_SPAN_QUEUES_EMPTY, NULL, 0, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments
   { 0, tld_empty_stats }, // os
   { MI_STATS_NULL } // stats
 };
@@ -154,7 +154,7 @@ extern mi_heap_t _mi_heap_main;
 static mi_tld_t tld_main = {
   0, false,
   &_mi_heap_main, & _mi_heap_main,
-  { MI_SEGMENT_SPAN_QUEUES_EMPTY, MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments
+  { MI_SEGMENT_SPAN_QUEUES_EMPTY, MI_SEGMENT_SPAN_QUEUES_EMPTY, NULL, 0, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments
   { 0, &tld_main.stats }, // os
   { MI_STATS_NULL } // stats
 };
@@ -1059,6 +1059,9 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   // initialize initial free pages
   if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page
     mi_assert_internal(huge_page==NULL);
+    if (segment->is_for_large_pages) {
+      tld->large_segment = segment;
+    }
     mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't purge */, tld);
   }
   else {
@@ -1103,6 +1106,10 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   // stats
   _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment));
 
+  if (segment == tld->large_segment) {
+    tld->large_segment = NULL;
+  }
+
   // return it to the OS
   mi_segment_os_free(segment, tld);
   mi_segment_increment_freed_stats();
@@ -1275,6 +1282,10 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   }
 
   mi_atomic_or_acq_rel(&segment->free_space_mask, free_space_mask);
 
+  if (segment == tld->large_segment) {
+    tld->large_segment = NULL;
+  }
   _mi_arena_segment_mark_abandoned(segment);
 }
 
@@ -1428,6 +1439,9 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
     return NULL;
   }
   else {
+    if (segment->is_for_large_pages) {
+      tld->large_segment = segment;
+    }
     return segment;
   }
 }
@@ -1474,6 +1488,10 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
 
   mi_segment_t* segment;
   size_t free_space_mask = mi_free_space_mask_from_blocksize(block_size);
+  bool is_large_allocation = block_size > MI_MEDIUM_OBJ_SIZE_MAX;
+  mi_segment_t* best_candidate_segment = NULL;
+  int candidates_to_check = 5;
+
   mi_arena_field_cursor_t current; _mi_arena_field_cursor_init2(heap, &current, free_space_mask);
   while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
   {
@@ -1495,10 +1513,31 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
       // found a large enough free span, or a page of the right block_size with free space
       // we return the result of reclaim (which is usually `segment`) as it might free
       // the segment due to concurrent frees (in which case `NULL` is returned).
-      mi_segment_t* segmentToReturn = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
-      if (segmentToReturn != NULL) {
-        mi_segment_increment_reclaimed_stats();
-        return segmentToReturn;
+      if (segment->is_for_large_pages == is_large_allocation)
+      {
+        mi_segment_t* segmentToReturn = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
+        if (segmentToReturn != NULL) {
+          if (best_candidate_segment != NULL) {
+            mi_segment_try_purge(best_candidate_segment, true /* force? */, tld->stats);
+            _mi_arena_segment_mark_abandoned(best_candidate_segment);
+          }
+          mi_segment_increment_reclaimed_stats();
+          return segmentToReturn;
+        }
+        continue;
+      }
+
+      if (best_candidate_segment == NULL) {
+        best_candidate_segment = segment;
+      }
+      else {
+        mi_segment_try_purge(segment, true /* force? */, tld->stats); // force purge if needed as we may not visit soon again
+        _mi_arena_segment_mark_abandoned(segment);
+      }
+
+      candidates_to_check--;
+      if (candidates_to_check == 0) {
+        break;
       }
     }
     else if (segment->abandoned_visits > 3 && is_suitable && mi_option_get_size(mi_option_max_segments_per_heap) == 0) {
@@ -1512,6 +1551,11 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
     }
   }
 
+  if (best_candidate_segment != NULL) {
+    mi_segment_increment_reclaimed_stats();
+    return mi_segment_reclaim(best_candidate_segment, heap, block_size, reclaimed, tld);
+  }
+
   mi_segment_increment_reclaim_failed_stats();
   return NULL;
 }
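Taken together, the three mi_segment_try_reclaim hunks above add a kind filter to the reclaim scan: an abandoned segment whose large/non-large kind matches the allocation is reclaimed immediately, the first mismatching segment is remembered as a fallback candidate, later mismatches are purged and re-abandoned, and at most five mismatches are examined; if the scan ends without a match, the remembered candidate is reclaimed instead. A compact sketch of that control flow (the helpers here are stubs standing in for the real mimalloc calls, and the check that the segment actually has usable free space is elided):

#include <stdbool.h>
#include <stddef.h>

typedef struct segment_s { bool is_for_large_pages; } segment_t;

// Stubs standing in for _mi_arena_segment_clear_abandoned_next,
// mi_segment_reclaim, and mi_segment_try_purge + _mi_arena_segment_mark_abandoned.
static segment_t* next_abandoned(void)              { return NULL; }
static segment_t* reclaim(segment_t* s)             { return s; }    // NULL if freed concurrently
static void       purge_and_reabandon(segment_t* s) { (void)s; }

static segment_t* try_reclaim_sketch(bool want_large, int max_tries) {
  segment_t* best_candidate = NULL;
  int candidates_to_check = 5;  // same budget as the commit

  segment_t* s;
  while (max_tries-- > 0 && (s = next_abandoned()) != NULL) {
    if (s->is_for_large_pages == want_large) {
      segment_t* r = reclaim(s);
      if (r != NULL) {
        if (best_candidate != NULL) purge_and_reabandon(best_candidate);
        return r;                  // matched and reclaimed: drop the saved fallback
      }
      continue;                    // reclaim raced with concurrent frees; keep looking
    }
    if (best_candidate == NULL) best_candidate = s;  // first mismatch: keep as fallback
    else purge_and_reabandon(s);                     // later mismatches: back to the pool
    if (--candidates_to_check == 0) break;           // bound the wrong-kind scan
  }
  return (best_candidate != NULL) ? reclaim(best_candidate) : NULL;
}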
src/stats.c (30 changes)
@@ -27,23 +27,32 @@ void mi_init_segment_stats()
   _mi_global_segment_stats.allocated_count = 0;
   _mi_global_segment_stats.freed_count = 0;
 
-  static_assert((MI_BIN_HUGE + 1) == sizeof(_mi_global_segment_stats.alloc_stats) / sizeof(_mi_global_segment_stats.alloc_stats[0]));
-  for (int i = 0; i <= MI_BIN_HUGE; i++)
+  static_assert((MI_BIN_FULL + 1) == sizeof(_mi_global_segment_stats.alloc_stats) / sizeof(_mi_global_segment_stats.alloc_stats[0]));
+  for (int i = 0; i <= MI_BIN_FULL; i++)
   {
     size_t block_size = _mi_bin_size((uint8_t)i);
 
     _mi_global_segment_stats.alloc_stats[i].counter = 0;
     _mi_global_segment_stats.alloc_stats[i].block_size = block_size;
   }
-
-  // (MI_FREE_SPACE_MASK_BIT_COUNT-1) combines multiple block sizes. Set it INT32_MAX to distinguish from the rest.
-  _mi_global_segment_stats.alloc_stats[MI_FREE_SPACE_MASK_BIT_COUNT - 1].block_size = INT32_MAX;
 }
 
+uint8_t mi_counter_index_from_block_size(size_t block_size)
+{
+  uint8_t binIndex = 0;
+
+  if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
+    binIndex = _mi_bin(block_size);
+  }
+  else {
+    binIndex = MI_BIN_FULL; // use the last element for Huge allocations
+  }
+
+  return binIndex;
+}
+
 void mi_segment_increment_alloc_stats(size_t block_size)
 {
-  uint8_t page_queue_index = _mi_bin(block_size);
+  uint8_t page_queue_index = mi_counter_index_from_block_size(block_size);
 
   mi_atomic_increment_relaxed(&_mi_global_segment_stats.alloc_stats[page_queue_index].counter);
   mi_atomic_increment_relaxed(&_mi_global_segment_stats.allocated_count);
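The new mi_counter_index_from_block_size helper clamps any block size above MI_LARGE_OBJ_SIZE_MAX into the MI_BIN_FULL slot, which is why the stats arrays grow from MI_BIN_HUGE+1 to MI_BIN_FULL+1 entries: huge allocations get the last element instead of indexing by bin. A standalone illustration of the clamping idea (the constants and the bin function are simplified stand-ins, not mimalloc's real _mi_bin):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BIN_HUGE      73u           // assumed largest size-class index
#define BIN_FULL      (BIN_HUGE + 1)
#define LARGE_OBJ_MAX (2u << 20)    // assumed large-object threshold

// Toy bin function; the real _mi_bin uses finer logarithmic buckets.
static uint8_t bin_from_size(size_t size) {
  uint8_t bin = 0;
  while (bin < 63 && ((size_t)1 << bin) < size) bin++;
  return bin;
}

// Mirrors mi_counter_index_from_block_size: oversized blocks share one slot.
static uint8_t counter_index(size_t block_size) {
  return (block_size <= LARGE_OBJ_MAX) ? bin_from_size(block_size) : (uint8_t)BIN_FULL;
}

int main(void) {
  size_t sizes[] = { 64, 4096, LARGE_OBJ_MAX, LARGE_OBJ_MAX + 1, (size_t)1 << 30 };
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
    printf("block_size %10zu -> counter index %u\n", sizes[i], counter_index(sizes[i]));
  }
  return 0;  // the two oversized sizes both map to BIN_FULL (74 here)
}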
@@ -117,17 +126,17 @@ int64_t mi_partitioned_counter_get_value(mi_partitioned_counter_t* counter)
   return retVal;
 }
 
-mi_partitioned_counter_t _mi_allocated_memory[MI_BIN_HUGE+1];
+mi_partitioned_counter_t _mi_allocated_memory[MI_BIN_FULL+1];
 
 void mi_allocation_stats_increment(size_t block_size)
 {
-  uint8_t binIndex = _mi_bin(block_size);
+  uint8_t binIndex = mi_counter_index_from_block_size(block_size);
   mi_partitioned_counter_increment(&_mi_allocated_memory[binIndex], block_size);
 }
 
 void mi_allocation_stats_decrement(size_t block_size)
 {
-  uint8_t binIndex = _mi_bin(block_size);
+  uint8_t binIndex = mi_counter_index_from_block_size(block_size);
   mi_partitioned_counter_decrement(&_mi_allocated_memory[binIndex], block_size);
 }
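The _mi_allocated_memory counters are mi_partitioned_counter_t values, this codebase's striped counters; their exact layout is not shown in this diff. A generic sketch of the technique the name implies, with an assumed slot count and padding: updates go to per-slot atomics to avoid contention, and reads sum all slots, which matches the increment/decrement/get_value API used above:

#include <stdatomic.h>
#include <stdint.h>

#define COUNTER_SLOTS 16  // assumption; the real slot count is not shown

typedef struct {
  _Alignas(64) _Atomic int64_t value;  // one cache line per slot: no false sharing
} counter_slot_t;

typedef struct {
  counter_slot_t slots[COUNTER_SLOTS];
} partitioned_counter_t;

// Updates touch a single slot chosen per thread; mostly uncontended.
static void counter_add(partitioned_counter_t* c, unsigned thread_id, int64_t amount) {
  atomic_fetch_add_explicit(&c->slots[thread_id % COUNTER_SLOTS].value,
                            amount, memory_order_relaxed);
}

// Reads sum every slot; the total is approximate under concurrent updates.
static int64_t counter_read(partitioned_counter_t* c) {
  int64_t sum = 0;
  for (int i = 0; i < COUNTER_SLOTS; i++) {
    sum += atomic_load_explicit(&c->slots[i].value, memory_order_relaxed);
  }
  return sum;
}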
@@ -195,6 +204,9 @@ bool mi_get_segment_stats(size_t* abandoned, size_t* reclaimed, size_t* reclaim_
     allocated_memory[i].block_size = allocated_segments[i].block_size;
   }
 
+  // (MI_FREE_SPACE_MASK_BIT_COUNT-1) combines multiple block sizes. Set it INT32_MAX to distinguish from the rest.
+  free_space_in_segments[MI_FREE_SPACE_MASK_BIT_COUNT - 1].block_size = INT32_MAX;
+
   mi_segment_update_free_space_stats(free_space_in_segments);
   mi_update_allocated_memory_stats(allocated_memory, allocated_memory_count);