Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 15:29:31 +03:00)
wip: work on purgable arenas
parent: f4e006fa76
commit: c344bf5c20
2 changed files with 11 additions and 4 deletions
First changed file:

@@ -16,7 +16,7 @@ terms of the MIT license. A copy of the license can be found in the file

 #include "./bitmap.h" // atomic bitmap

-//#define MI_CACHE_DISABLE 1 // define to completely disable the segment cache
+// #define MI_CACHE_DISABLE 1 // define to completely disable the segment cache

 #define MI_CACHE_FIELDS (16)
 #define MI_CACHE_MAX (MI_BITMAP_FIELD_BITS*MI_CACHE_FIELDS) // 1024 on 64-bit
Second changed file:

@@ -11,6 +11,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include <string.h> // memset
 #include <stdio.h>

+#define MI_USE_SEGMENT_CACHE 0
 #define MI_PAGE_HUGE_ALIGN (256*1024)

 static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats);
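
Note: this hunk introduces MI_USE_SEGMENT_CACHE as a compile-time switch, currently hard-wired to 0, so the segment-cache call sites guarded later in this file are removed by the preprocessor rather than skipped at run time. A minimal, stand-alone sketch of that pattern (nothing below is mimalloc code; the printed strings are purely illustrative):

    #include <stdio.h>

    #define MI_USE_SEGMENT_CACHE 0   // flip to 1 to compile the cache path back in

    int main(void) {
    #if MI_USE_SEGMENT_CACHE
      printf("segment cache path compiled in\n");
    #else
      printf("segment cache path compiled out\n");
    #endif
      return 0;
    }
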
@@ -394,8 +395,10 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {

   // _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
   const size_t size = mi_segment_size(segment);
-  if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || segment->kind == MI_SEGMENT_HUGE || // only push regular segments on the cache
-      !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
+  #if MI_USE_SEGMENT_CACHE
+  if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || segment->kind == MI_SEGMENT_HUGE // only push regular segments on the cache
+      || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
+  #endif
   {
     if (!segment->mem_is_pinned) {
       const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
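
The notable shape in this hunk is that only the if-header is wrapped in #if/#endif while the braced block stays in place: with the switch at 0, the block becomes unconditional and a freed segment always goes back to the OS instead of being pushed onto the cache. A simplified, hypothetical model of that control flow (demo_segment_t, demo_cache_push and demo_os_free are stand-ins, not mimalloc API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MI_USE_SEGMENT_CACHE 0   // matches the value set in the diff

    typedef struct demo_segment_s {
      size_t size;
      bool   is_huge;
    } demo_segment_t;

    // Stand-in for a cache push: pretend the cache rejected the segment.
    bool demo_cache_push(demo_segment_t* seg) {
      (void)seg;
      return false;
    }

    // Stand-in for returning the segment's memory to the OS.
    static void demo_os_free(demo_segment_t* seg) {
      printf("freeing segment of %zu bytes to the OS\n", seg->size);
      free(seg);
    }

    static void demo_segment_free(demo_segment_t* seg) {
      // Same shape as the diff: only the 'if' header is guarded, so with the
      // switch at 0 the block below runs unconditionally and the segment is
      // always handed back to the OS instead of being cached.
    #if MI_USE_SEGMENT_CACHE
      if (seg->is_huge || !demo_cache_push(seg))   // only cache regular segments
    #endif
      {
        demo_os_free(seg);
      }
    }

    int main(void) {
      demo_segment_t* seg = malloc(sizeof(demo_segment_t));
      if (seg == NULL) return 1;
      seg->size = 4u * 1024 * 1024;
      seg->is_huge = false;
      demo_segment_free(seg);
      return 0;
    }
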
@@ -809,10 +812,14 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
   const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
   mi_segment_t* segment = NULL;

+  #if MI_USE_SEGMENT_CACHE
   // get from cache?
   if (page_alignment == 0) {
     segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, pcommit_mask, pdecommit_mask, mem_large, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
   }
+  #else
+  MI_UNUSED(pdecommit_mask);
+  #endif

   // get from OS
   if (segment==NULL) {
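
On the allocation side the cache pop is compiled out in the same way, and MI_UNUSED(pdecommit_mask) keeps the now-unreferenced parameter from triggering an unused-parameter warning. A self-contained sketch under the assumption that MI_UNUSED is essentially a void cast; the demo_ names are hypothetical, not mimalloc API:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MI_USE_SEGMENT_CACHE 0
    #define MI_UNUSED(x) (void)(x)   // assumed definition: discard a value to silence warnings

    // Stand-in for a cache pop: pretend the cache is empty.
    void* demo_cache_pop(size_t size, void* pdecommit_mask) {
      (void)size; (void)pdecommit_mask;
      return NULL;
    }

    static void* demo_segment_alloc(size_t segment_size, void* pdecommit_mask) {
      void* segment = NULL;
    #if MI_USE_SEGMENT_CACHE
      // get from cache?
      segment = demo_cache_pop(segment_size, pdecommit_mask);
    #else
      MI_UNUSED(pdecommit_mask);   // the cache pop was its only remaining use
    #endif
      // get from OS
      if (segment == NULL) {
        segment = malloc(segment_size);
      }
      return segment;
    }

    int main(void) {
      void* s = demo_segment_alloc(64 * 1024, NULL);
      printf("segment at %p\n", s);
      free(s);
      return 0;
    }
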