Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 15:29:31 +03:00)

Commit f79ea2461a: merge from dev

6 changed files with 17 additions and 19 deletions
@@ -451,6 +451,7 @@ typedef struct mi_segment_s {
   bool allow_decommit;   // can we decommmit the memory
   bool allow_purge;      // can we purge the memory (reset or decommit)
   size_t segment_size;
+  mi_subproc_t* subproc; // segment belongs to sub process

   // segment fields
   mi_msecs_t purge_expire;  // purge slices in the `purge_mask` after this time
@@ -465,7 +466,6 @@ typedef struct mi_segment_s {
   size_t abandoned_visits;  // count how often this segment is visited during abondoned reclamation (to force reclaim if it takes too long)
   size_t used;              // count of pages in use
   uintptr_t cookie;         // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
-  mi_subproc_t* subproc;    // segment belongs to sub process

   struct mi_segment_s* abandoned_os_next;  // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
   struct mi_segment_s* abandoned_os_prev;
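Taken together, the two hunks above move the `subproc` pointer out of the mutable segment fields (next to `cookie` and the abandoned-OS links) and up beside `segment_size`, into the group of fields that are fixed once the segment is created. A minimal sketch of that split, with hypothetical names (mimalloc's real types are `mi_segment_t` and `mi_subproc_t`); the "constant vs. mutable" grouping is an assumption based on the field comments visible in the diff:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct subproc_s { int id; } subproc_t;

typedef struct segment_s {
  // constant fields: written once when the segment is created and never
  // changed afterwards, so any thread may read them without synchronization
  size_t     segment_size;
  subproc_t* subproc;        // segment belongs to sub process

  // mutable segment fields: change while the segment is in use, abandoned,
  // or reclaimed, and follow the usual ownership rules
  bool       abandoned;
  uintptr_t  cookie;
} segment_t;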
@@ -162,8 +162,9 @@ void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
   mi_arena_t* arena = mi_arena_from_index(arena_idx);
   mi_assert_internal(arena != NULL);
   // set abandonment atomically
+  mi_subproc_t* const subproc = segment->subproc; // don't access the segment after setting it abandoned
   const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL);
-  if (was_unmarked) { mi_atomic_increment_relaxed(&segment->subproc->abandoned_count); }
+  if (was_unmarked) { mi_atomic_increment_relaxed(&subproc->abandoned_count); }
   mi_assert_internal(was_unmarked);
   mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx));
 }
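The reason for the new local is spelled out in the added comment: once the bitmap claim publishes the segment as abandoned, another thread may reclaim it immediately, so `segment->subproc` must not be dereferenced after that point. Reading the pointer into a local first removes the use-after-abandon. A self-contained sketch of the same ordering with hypothetical names, using C11 atomics in place of mimalloc's internal bitmap:

#include <stdatomic.h>
#include <stddef.h>

typedef struct subproc_s {
  _Atomic size_t abandoned_count;
} subproc_t;

typedef struct segment_s {
  subproc_t*  subproc;
  _Atomic int abandoned;   // 0 = owned, 1 = abandoned (visible to reclaimers)
} segment_t;

void mark_abandoned(segment_t* segment) {
  // Cache everything still needed from the segment *before* publishing it:
  // as soon as `abandoned` is set, another thread may reclaim the segment
  // and it must not be touched anymore.
  subproc_t* const subproc = segment->subproc;

  int was_owned = (atomic_exchange(&segment->abandoned, 1) == 0);
  if (was_owned) {
    atomic_fetch_add_explicit(&subproc->abandoned_count, 1, memory_order_relaxed);
  }
}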
@@ -631,6 +631,9 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
   if (size==0) return;
   const bool all_committed = (committed_size == size);
+
+  // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
+  mi_track_mem_undefined(p,size);

   if (mi_memkind_is_os(memid.memkind)) {
     // was a direct OS allocation, pass through
     if (!all_committed && committed_size > 0) {
@@ -660,9 +663,6 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
     return;
   }

-  // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
-  mi_track_mem_undefined(p,size);
-
   // potentially decommit
   if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
     mi_assert_internal(all_committed);
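The two `_mi_arena_free` hunks are one move: the `mi_track_mem_undefined` annotation now runs at the top of the function, before the `mi_memkind_is_os` branch returns early, so memory that goes straight back to the OS is annotated as well. When mimalloc is built with Valgrind tracking, that annotation boils down to a memcheck client request; a rough sketch of the idea, where the wrapper and function names are hypothetical and only `VALGRIND_MAKE_MEM_UNDEFINED` and the `MI_TRACK_VALGRIND` build flag are taken from the real tooling:

#include <stddef.h>
#if defined(MI_TRACK_VALGRIND)
#include <valgrind/memcheck.h>
// Mark [p, p+size) as "definedness unknown" so memcheck does not complain
// when the allocator itself reads previously protected padding bytes.
#define track_mem_undefined(p, size)  VALGRIND_MAKE_MEM_UNDEFINED((p), (size))
#else
#define track_mem_undefined(p, size)  ((void)(p), (void)(size))
#endif

static void arena_free_sketch(void* p, size_t size, int is_os_memory) {
  if (size == 0) return;
  // Annotate before any early return, so the direct-OS path is covered too.
  track_mem_undefined(p, size);
  if (is_os_memory) {
    // ... pass straight back to the OS ...
    return;
  }
  // ... otherwise return the blocks to the arena ...
}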
@@ -193,7 +193,7 @@ static void mi_heap_main_init(void) {
   #endif
   _mi_heap_main.cookie  = _mi_heap_random_next(&_mi_heap_main);
   _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
   _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
   mi_lock_init(&mi_subproc_default.abandoned_os_lock);
   mi_lock_init(&mi_subproc_default.abandoned_os_visit_lock);
 }
@@ -363,7 +363,7 @@ static bool _mi_thread_heap_init(void) {
     mi_heap_t* heap = &td->heap;
     _mi_tld_init(tld, heap);  // must be before `_mi_heap_init`
     _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */);
     _mi_heap_set_default_direct(heap);
   }
   return false;
 }
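Both init.c hunks sit in the startup path for this feature: `mi_heap_main_init` initializes the locks that guard the default sub-process's list of abandoned OS-allocated segments, and `_mi_thread_heap_init` wires a new thread's `tld` and heap together before making it the default heap. On the public side, sub-processes are driven through the `mi_subproc_*` functions; a hedged usage sketch, assuming that API as documented for recent mimalloc releases:

#include <mimalloc.h>

// Sketch: isolate a group of threads in their own sub-process so their
// abandoned segments are never reclaimed by threads outside the group.
static mi_subproc_id_t worker_subproc;

void init_workers(void) {
  worker_subproc = mi_subproc_new();
}

void worker_thread_entry(void* arg) {
  (void)arg;
  mi_subproc_add_current_thread(worker_subproc);  // call early in the thread
  // ... use mi_malloc / mi_free as usual ...
}

void shutdown_workers(void) {
  // after all worker threads have exited
  mi_subproc_delete(worker_subproc);
}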
@@ -857,6 +857,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
   segment->allow_decommit = !memid.is_pinned;
   segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0);
   segment->segment_size = segment_size;
+  segment->subproc = tld->subproc;
   segment->commit_mask = commit_mask;
   segment->purge_expire = 0;
   mi_commit_mask_create_empty(&segment->purge_mask);
@@ -903,7 +904,6 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   segment->segment_info_slices = info_slices;
   segment->thread_id = _mi_thread_id();
   segment->cookie = _mi_ptr_cookie(segment);
-  segment->subproc = tld->subproc;
   segment->slice_entries = slice_entries;
   segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE);

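The segment.c side mirrors the header change: `segment->subproc` is now filled in inside `mi_segment_os_alloc`, together with the other fields that are fixed for the segment's lifetime, rather than later in `mi_segment_alloc`. The freshly mapped segment therefore already knows its owning sub-process before the rest of the initialization runs. A small sketch of that pattern with hypothetical names (not mimalloc's actual allocation code):

#include <stddef.h>
#include <stdlib.h>

typedef struct subproc_s subproc_t;

typedef struct segment_s {
  size_t      segment_size;
  subproc_t*  subproc;      // fixed at creation
  // ... mutable fields follow ...
} segment_t;

// Low-level allocation: fully initialize the constant fields right here,
// so every caller receives a segment that already knows its owner.
static segment_t* segment_os_alloc(size_t size, subproc_t* subproc) {
  segment_t* seg = (segment_t*)calloc(1, sizeof(segment_t));
  if (seg == NULL) return NULL;
  seg->segment_size = size;
  seg->subproc = subproc;   // previously set later, in the caller
  return seg;
}

static segment_t* segment_alloc(size_t size, subproc_t* subproc) {
  segment_t* seg = segment_os_alloc(size, subproc);
  if (seg == NULL) return NULL;
  // ... initialize the mutable fields; no need to touch seg->subproc here ...
  return seg;
}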
@@ -25,17 +25,14 @@ terms of the MIT license.
 // > mimalloc-test-stress [THREADS] [SCALE] [ITER]
 //
 // argument defaults
+#if !defined(MI_TSAN)
 static int THREADS = 32;   // more repeatable if THREADS <= #processors
-static int SCALE = 25;     // scaling factor
-#if defined(MI_TSAN)
-static int ITER = 10;      // N full iterations destructing and re-creating all threads (on tsan reduce for azure pipeline limits)
-#else
-static int ITER = 50;      // N full iterations destructing and re-creating all threads
+#else   // with thread-sanitizer reduce the defaults for azure pipeline limits
+static int THREADS = 8;
 #endif

-// static int THREADS = 8;   // more repeatable if THREADS <= #processors
-// static int SCALE = 100;   // scaling factor
+static int SCALE = 25;     // scaling factor
+static int ITER = 50;      // N full iterations destructing and re-creating all threads

 #define STRESS   // undefine for leak test

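The stress-test hunk folds the thread-sanitizer special case into a single `#if !defined(MI_TSAN)` / `#else` block: only THREADS differs under TSAN (8 instead of 32), SCALE and ITER are defined once, and the stale commented-out alternatives are dropped. Per the usage line in the context, the defaults can still be overridden on the command line; a minimal, hypothetical sketch of how such defaults and positional overrides combine (not the actual test code):

#include <stdio.h>
#include <stdlib.h>

// compile-time defaults, as in the hunk above
#if !defined(MI_TSAN)
static int THREADS = 32;
#else
static int THREADS = 8;
#endif
static int SCALE = 25;
static int ITER  = 50;

int main(int argc, char** argv) {
  // positional overrides: > mimalloc-test-stress [THREADS] [SCALE] [ITER]
  if (argc > 1) THREADS = atoi(argv[1]);
  if (argc > 2) SCALE   = atoi(argv[2]);
  if (argc > 3) ITER    = atoi(argv[3]);
  printf("threads=%d scale=%d iter=%d\n", THREADS, SCALE, ITER);
  return 0;
}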
@@ -133,9 +130,9 @@ static void free_items(void* p) {
   custom_free(p);
 }

 #ifdef HEAP_WALK
 static bool visit_blocks(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
   (void)(heap); (void)(area);
   size_t* total = (size_t*)arg;
   if (block != NULL) {
     *total += block_size;
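The `visit_blocks` callback shown in context has the shape of mimalloc's `mi_block_visit_fun`: it is handed every live block with its size and accumulates a total through the `arg` pointer. A hedged example of driving such a visitor over a heap with the public `mi_heap_visit_blocks` API; the summing logic mirrors the test, while the call site below is illustrative rather than the test's actual use under `HEAP_WALK`:

#include <stdio.h>
#include <mimalloc.h>

static bool sum_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                       void* block, size_t block_size, void* arg) {
  (void)heap; (void)area;
  size_t* total = (size_t*)arg;
  if (block != NULL) { *total += block_size; }
  return true;  // keep visiting
}

int main(void) {
  void* p = mi_malloc(100);
  size_t total = 0;
  // Visit every block in the default heap; `true` asks for individual blocks.
  mi_heap_visit_blocks(mi_heap_get_default(), true, &sum_blocks, &total);
  printf("live bytes: %zu\n", total);
  mi_free(p);
  return 0;
}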
@@ -260,7 +257,7 @@ static void test_leak(void) {

 int main(int argc, char** argv) {
 #ifdef HEAP_WALK
   mi_option_enable(mi_option_visit_abandoned);
 #endif
 #ifndef NDEBUG
   mi_option_set(mi_option_arena_reserve, 32 * 1024 /* in kib = 32MiB */);