Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-09 16:59:32 +03:00)
Merge branch 'dev3' into dev3-bin
commit 63e625d9f8
8 changed files with 39 additions and 28 deletions
@@ -394,8 +394,8 @@ typedef enum mi_option_e {
   mi_option_guarded_precise,        // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
   mi_option_guarded_sample_rate,    // 1 out of N allocations in the min/max range will be guarded (=1000)
   mi_option_guarded_sample_seed,    // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
-  mi_option_page_reclaim_on_free,   // allow to reclaim an abandoned segment on a free (=1)
-  mi_option_page_full_retain,       // retain N full pages per size class (=2)
+  mi_option_page_reclaim_on_free,   // reclaim abandoned pages on a free (=0). -1 disallows always, 0 allows if the page originated from the current heap, 1 allows always
+  mi_option_page_full_retain,       // retain N full (small) pages per size class (=2)
   mi_option_page_max_candidates,    // max candidate pages to consider for allocation (=4)
   mi_option_max_vabits,             // max user space virtual address bits to consider (=48)
   mi_option_pagemap_commit,         // commit the full pagemap (to always catch invalid pointer uses) (=0)
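The -1/0/1 values documented above can be chosen programmatically with mimalloc's public `mi_option_set` API (or via the matching `MIMALLOC_*` environment variables); a minimal sketch, assuming the defaults shown in the comments and not part of this diff:

#include <mimalloc.h>

int main(void) {
  // Sketch only: select the new option values at startup, before allocating.
  mi_option_set(mi_option_page_reclaim_on_free, 1);  // 1 = also reclaim abandoned pages freed by other heaps
  mi_option_set(mi_option_page_full_retain, 2);      // retain up to 2 full (small) pages per size class
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}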
@@ -492,7 +492,7 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 // 2-level page map:
 // double indirection, but low commit and low virtual reserve.
 //
-// the page-map is usually 4 MiB and points to sub maps of 64 KiB.
+// the page-map is usually 4 MiB (for 48 bits virtual addresses) and points to sub maps of 64 KiB.
 // the page-map is committed on-demand (in 64 KiB parts) (and sub-maps are committed on-demand as well)
 // one sub page-map = 64 KiB => covers 2^(16-3) * 2^16 = 2^29 = 512 MiB address space
 // the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 4 MiB size.
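The arithmetic in these comments maps a pointer to a (top-level, sub-map) index pair. A minimal sketch of that split, assuming 48 address bits, 64 KiB slices, and 8-byte pointers (the real `_mi_page_map_index` may differ in details; the names below are illustrative):

#include <stdint.h>
#include <stddef.h>

// One 64 KiB sub-map holds 2^16 / 8 = 2^13 pointers; each entry covers a 64 KiB (2^16 byte)
// slice, so a sub-map spans 2^(13+16) = 2^29 bytes = 512 MiB.
// The top-level map then needs 48 - 29 = 19 bits: 2^19 pointers * 8 bytes = 4 MiB.
#define SLICE_SHIFT  16                              // 64 KiB slices
#define SUB_SHIFT    13                              // 2^13 entries per 64 KiB sub-map
#define SUB_MASK     (((size_t)1 << SUB_SHIFT) - 1)

static size_t page_map_index(const void* p, size_t* sub_idx) {  // hypothetical helper
  const uintptr_t u = (uintptr_t)p;
  *sub_idx = (size_t)(u >> SLICE_SHIFT) & SUB_MASK;   // index inside the 64 KiB sub-map
  return (size_t)(u >> (SLICE_SHIFT + SUB_SHIFT));    // index into the 4 MiB top-level map
}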
@@ -519,7 +519,7 @@ static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p, &sub_idx);
   mi_page_t** const sub = _mi_page_map[idx];
-  if mi_unlikely(sub == NULL) return NULL;
+  if mi_unlikely(sub == NULL) return (mi_page_t*)&_mi_page_empty;
   return sub[sub_idx];
 }
36  src/free.c
@@ -123,6 +123,10 @@ static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, void* p) mi_
 // free a pointer owned by another thread (page parameter comes first for better codegen)
 static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, void* p) mi_attr_noexcept {
   if (p==NULL) return;  // a NULL pointer is seen as abandoned (tid==0) with a full flag set
+  #if !MI_PAGE_MAP_FLAT
+  if (page==&_mi_page_empty) return;  // an invalid pointer may lead to using the empty page
+  #endif
+  mi_assert_internal(p!=NULL && page != NULL && page != &_mi_page_empty);
   mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
   mi_block_check_unguard(page, block, p);
   mi_free_block_mt(page, block);
@@ -135,10 +139,9 @@ void mi_decl_noinline _mi_free_generic(mi_page_t* page, bool is_local, void* p)
 }


-// Get the segment data belonging to a pointer
-// This is just a single `and` in release mode but does further checks in debug mode
-// (and secure mode) to see if this was a valid pointer.
-static inline mi_page_t* mi_checked_ptr_page(const void* p, const char* msg)
+// Get the page belonging to a pointer
+// Does further checks in debug mode to see if this was a valid pointer.
+static inline mi_page_t* mi_validate_ptr_page(const void* p, const char* msg)
 {
   MI_UNUSED_RELEASE(msg);
   #if MI_DEBUG
@@ -146,9 +149,14 @@ static inline mi_page_t* mi_checked_ptr_page(const void* p, const char* msg)
     _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
     return NULL;
   }
-  mi_page_t* const page = _mi_safe_ptr_page(p);
-  if (page == NULL && p != NULL) {
-    _mi_error_message(EINVAL, "%s: invalid pointer: %p\n", msg, p);
+  mi_page_t* page = _mi_safe_ptr_page(p);
+  if (page == NULL) {
+    if (p != NULL) {
+      _mi_error_message(EINVAL, "%s: invalid pointer: %p\n", msg, p);
+    }
+    #if !MI_PAGE_MAP_FLAT
+    page = (mi_page_t*)&_mi_page_empty;
+    #endif
   }
   return page;
   #else
@@ -160,12 +168,13 @@ static inline mi_page_t* mi_checked_ptr_page(const void* p, const char* msg)
 // Fast path written carefully to prevent register spilling on the stack
 void mi_free(void* p) mi_attr_noexcept
 {
-  mi_page_t* const page = mi_checked_ptr_page(p,"mi_free");
+  mi_page_t* const page = mi_validate_ptr_page(p,"mi_free");

-  #if MI_PAGE_MAP_FLAT   // if not flat, NULL will point to `_mi_page_empty` and get to `mi_free_generic_mt`
+  #if MI_PAGE_MAP_FLAT   // if not flat, p==NULL leads to `_mi_page_empty` which leads to `mi_free_generic_mt`
   if mi_unlikely(page==NULL) return;
   #endif
+  mi_assert_internal(page!=NULL);

   const mi_threadid_t xtid = (_mi_prim_thread_id() ^ mi_page_xthread_id(page));
   if mi_likely(xtid == 0) {  // `tid == mi_page_thread_id(page) && mi_page_flags(page) == 0`
     // thread-local, aligned, and not a full page
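The `xtid` test above folds the "is this my page" and "any flags set" checks into a single xor: `mi_page_xthread_id` packs the owning thread id together with the page flags, so the result is zero only for a thread-local page with no flags set (NULL and invalid pointers get the `_mi_page_empty` sentinel, whose tid==0 plus full flag push them onto the slow path into `mi_free_generic_mt`). A sketch of the packing idea, with illustrative names rather than mimalloc's actual fields:

#include <stdint.h>
#include <stdbool.h>

typedef uintptr_t threadid_t;

// Assumption: thread ids have their low bits clear, leaving room to pack the page flags.
#define PAGE_FLAGS_MASK ((uintptr_t)0x3)   // e.g. "in full queue" and "has aligned blocks"

static inline uintptr_t pack_xthread_id(threadid_t owner_tid, uintptr_t flags) {
  return (owner_tid | (flags & PAGE_FLAGS_MASK));
}

static inline bool fast_free_path(threadid_t current_tid, uintptr_t xthread_id) {
  // zero iff current_tid == owner_tid AND no flags are set: one branch instead of two
  return ((current_tid ^ xthread_id) == 0);
}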
@@ -211,7 +220,7 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   if (mi_page_all_free(page))
   {
     // first remove it from the abandoned pages in the arena (if mapped, this waits for any readers to finish)
     _mi_arenas_page_unabandon(page);
     // we can free the page directly
     _mi_arenas_page_free(page);
     return;
@@ -235,8 +244,9 @@ static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t*
   // can we reclaim?
   if (heap != NULL && heap->allow_page_reclaim) {
     if (heap == page->heap ||                          // only reclaim if we were the originating heap,
-       (reclaim_on_free == 1 &&                        // OR if the reclaim option across heaps is enabled
+       (reclaim_on_free == 1 &&                        // OR if the reclaim across heaps is allowed
         !mi_page_is_used_at_frac(page, 8) &&           // and the page is not too full
+        !heap->tld->is_in_threadpool &&                // and not part of a threadpool
         _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena))  // and the memory is suitable
        )
     {
@@ -283,7 +293,7 @@ static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* p
 }

 static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
-  const mi_page_t* const page = mi_checked_ptr_page(p,msg);
+  const mi_page_t* const page = mi_validate_ptr_page(p,msg);
   if mi_unlikely(page==NULL) return 0;
   if mi_likely(!mi_page_has_aligned(page)) {
     const mi_block_t* block = (const mi_block_t*)p;
@@ -181,10 +181,10 @@ void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy,
   heap->tag = heap_tag;
   if (heap->tld->is_in_threadpool) {
     // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap.
-    heap->allow_page_reclaim = false;
-    // .. but abandoning is good in this case: quarter the full page retain (possibly to 0)
+    // this is checked in `free.c:mi_free_try_collect_mt`
+    // .. but abandoning is good in this case: halve the full page retain (possibly to 0)
     // (so blocked threads do not hold on to too much memory)
-    if (heap->page_full_retain >= 0) {
+    if (heap->page_full_retain > 0) {
       heap->page_full_retain = heap->page_full_retain / 4;
     }
   }
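With the default `page_full_retain` of 2 (see the options table below), a heap created inside a thread pool now retains 2 / 4 = 0 full pages, so blocked pool threads abandon full pages right away instead of holding on to them.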
@@ -168,7 +168,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { MI_DEFAULT_GUARDED_SAMPLE_RATE,
         UNINIT, MI_OPTION(guarded_sample_rate)},  // 1 out of N allocations in the min/max range will be guarded (=4000)
   { 0,  UNINIT, MI_OPTION(guarded_sample_seed)},
-  { 0,  UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim an abandoned segment on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
+  { 0,  UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },  // reclaim abandoned pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps
   { 2,  UNINIT, MI_OPTION(page_full_retain) },     // number of (small) pages to retain in the free page queues
   { 4,  UNINIT, MI_OPTION(page_max_candidates) },  // max search to find a best page candidate
   { 0,  UNINIT, MI_OPTION(max_vabits) },           // max virtual address space bits
@@ -206,7 +206,7 @@ bool _mi_page_map_init(void) {
   if (!mi_page_map_memid.initially_committed) {
     _mi_os_commit(&_mi_page_map[0], os_page_size, NULL);  // commit first part of the map
   }
-  _mi_page_map[0] = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size);  // we reserved 2 subs at the end already
+  _mi_page_map[0] = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size);  // we reserved 2 sub maps at the end already
   if (!mi_page_map_memid.initially_committed) {
     _mi_os_commit(_mi_page_map[0], os_page_size, NULL);  // only first OS page
   }
@@ -315,10 +315,10 @@ void _mi_page_map_unregister_range(void* start, size_t size) {
   mi_page_map_set_range(NULL, idx, sub_idx, slice_count);  // todo: avoid committing if not already committed?
 }

-// Return the empty page for the NULL pointer to match the behaviour of `_mi_ptr_page`
+// Return NULL for invalid pointers
 mi_page_t* _mi_safe_ptr_page(const void* p) {
+  if (p==NULL) return NULL;
   if mi_unlikely(p >= mi_page_map_max_address) return NULL;
-  if (p == NULL) return (mi_page_t*)&_mi_page_empty;  // to match `_mi_ptr_page` (see `mi_free` as well)
   size_t sub_idx;
   const size_t idx = _mi_page_map_index(p,&sub_idx);
   if mi_unlikely(!mi_page_map_is_committed(idx,NULL)) return NULL;
@@ -328,7 +328,7 @@ mi_page_t* _mi_safe_ptr_page(const void* p) {
 }

 mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
-  return (p != NULL && _mi_safe_ptr_page(p) != NULL);
+  return (_mi_safe_ptr_page(p) != NULL);
 }

 #endif
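`mi_is_in_heap_region` is part of mimalloc's exported API; with `_mi_safe_ptr_page` now handling NULL itself, the predicate still returns false for NULL. A small usage sketch (the `malloc` result only falls outside mimalloc's region when the allocator is not globally overridden):

#include <mimalloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  void* inside  = mi_malloc(32);
  void* outside = malloc(32);   // assumption: system malloc, no global mimalloc override
  printf("mimalloc ptr: %d\n", mi_is_in_heap_region(inside));    // expected: 1
  printf("system   ptr: %d\n", mi_is_in_heap_region(outside));   // expected: 0
  printf("NULL     ptr: %d\n", mi_is_in_heap_region(NULL));      // expected: 0
  mi_free(inside);
  free(outside);
  return 0;
}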
@@ -358,11 +358,11 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {

   mi_heap_t* heap = mi_page_heap(page);
   if (heap->allow_page_abandon) {
-    // abandon full pages
+    // abandon full pages (this is the usual case in order to allow for sharing of memory between heaps)
     _mi_page_abandon(page, pq);
   }
   else if (!mi_page_is_in_full(page)) {
-    // put full pages in a heap local queue
+    // put full pages in a heap local queue (this is for heaps that cannot abandon, for example, if the heap can be destroyed)
     mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page);
     _mi_page_free_collect(page, false);  // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
   }
@@ -64,7 +64,8 @@ static void* zone_valloc(malloc_zone_t* zone, size_t size) {

 static void zone_free(malloc_zone_t* zone, void* p) {
   MI_UNUSED(zone);
-  mi_cfree(p);
+  // mi_cfree(p);  // checked free as `zone_free` may be called with invalid pointers
+  mi_free(p);      // with the page_map and pagemap_commit=1 we can use the regular free
 }

 static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) {
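The switch from the checked `mi_cfree` to plain `mi_free` relies on the page map being able to classify any pointer handed to `zone_free`; committing the full page map (the `mi_option_pagemap_commit` option added above) keeps that lookup safe even for addresses mimalloc never allocated. A hedged sketch of enabling it from embedder code (whether this is needed depends on the platform defaults):

#include <mimalloc.h>

// Sketch: commit the whole page map up front so a lookup of an arbitrary pointer
// never faults, which is what lets zone_free forward unknown pointers to mi_free.
static void configure_pagemap(void) {
  mi_option_enable(mi_option_pagemap_commit);   // same effect as mi_option_set(..., 1)
}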