diff --git a/cmake/mimalloc-config-version.cmake b/cmake/mimalloc-config-version.cmake
index a44c121d..db881213 100644
--- a/cmake/mimalloc-config-version.cmake
+++ b/cmake/mimalloc-config-version.cmake
@@ -1,6 +1,6 @@
 set(mi_version_major 2)
 set(mi_version_minor 1)
-set(mi_version_patch 2)
+set(mi_version_patch 4)
 set(mi_version ${mi_version_major}.${mi_version_minor})
 
 set(PACKAGE_VERSION ${mi_version})
diff --git a/include/mimalloc.h b/include/mimalloc.h
index 354aee58..11033c95 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #ifndef MIMALLOC_H
 #define MIMALLOC_H
 
-#define MI_MALLOC_VERSION 212 // major + 2 digits minor
+#define MI_MALLOC_VERSION 214 // major + 2 digits minor
 
 // ------------------------------------------------------
 // Compiler specific attributes
@@ -332,18 +332,18 @@ typedef enum mi_option_e {
   mi_option_deprecated_segment_cache,
   mi_option_deprecated_page_reset,
   mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
-  mi_option_deprecated_segment_reset, 
-  mi_option_eager_commit_delay, 
+  mi_option_deprecated_segment_reset,
+  mi_option_eager_commit_delay,
   mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all.
   mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
   mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
   mi_option_os_tag, // tag used for OS logging (macOS only for now)
   mi_option_max_errors, // issue at most N error messages
   mi_option_max_warnings, // issue at most N warning messages
-  mi_option_max_segment_reclaim, 
+  mi_option_max_segment_reclaim,
   mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
   mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit)
-  mi_option_arena_purge_mult, 
+  mi_option_arena_purge_mult,
   mi_option_purge_extend_delay,
   mi_option_abandoned_reclaim_on_free, // reclaim abandoned segments on a free
   _mi_option_last,
@@ -514,7 +514,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
 protected:
   std::shared_ptr<mi_heap_t> heap;
   template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
-  
+
   _mi_heap_stl_allocator_common() {
     mi_heap_t* hp = mi_heap_new();
     this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
@@ -531,7 +531,7 @@ private:
 template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
   using typename _mi_heap_stl_allocator_common<T, false>::size_type;
   mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { } // creates fresh heap that is deleted when the destructor is called
-  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { } // no delete nor destroy on the passed in heap 
+  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { } // no delete nor destroy on the passed in heap
   template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }
 
   mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
@@ -548,7 +548,7 @@ template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x,
 template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
   using typename _mi_heap_stl_allocator_common<T, true>::size_type;
   mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
-  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap 
+  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
   template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }
 
   mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 3d1154f1..a7712112 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -127,6 +127,7 @@ void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
 
 bool _mi_arena_segment_clear_abandoned(mi_memid_t memid);
 void _mi_arena_segment_mark_abandoned(mi_memid_t memid);
+size_t _mi_arena_segment_abandoned_count(void);
 
 typedef struct mi_arena_field_cursor_s { // abstract
   mi_arena_id_t start;
diff --git a/src/arena.c b/src/arena.c
index b4d07603..aa87b323 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -736,14 +736,18 @@ bool _mi_arena_contains(const void* p) {
   This is used to atomically abandon/reclaim segments
   (and crosses the arena API but it is convenient to have here).
   Abandoned segments still have live blocks; they get reclaimed
-  when a thread frees in it, or when a thread needs a fresh
+  when a thread frees a block in it, or when a thread needs a fresh
   segment; these threads scan the abandoned segments through
   the arena bitmaps.
----------------------------------------------------------- */
 
-// Maintain these for debug purposes
+// Maintain a count of all abandoned segments
 static mi_decl_cache_align _Atomic(size_t)abandoned_count;
 
+size_t _mi_arena_segment_abandoned_count(void) {
+  return mi_atomic_load_relaxed(&abandoned_count);
+}
+
 // reclaim a specific abandoned segment; `true` on success.
 bool _mi_arena_segment_clear_abandoned(mi_memid_t memid )
 {
@@ -888,7 +892,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
   // consequetive bitmaps
   arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
   arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap
-  arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandonde bitmap
+  arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap
   arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]); // just after committed bitmap
   // initialize committed bitmap?
   if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
diff --git a/src/options.c b/src/options.c
index 2519f626..4706c8a8 100644
--- a/src/options.c
+++ b/src/options.c
@@ -81,7 +81,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
   { 16,  UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
   { 16,  UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
-  { 16,  UNINIT, MI_OPTION(max_segment_reclaim)}, // max. number of segment reclaims from the abandoned segments per try.
+  { 10,  UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments per try.
   { 0,   UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
 #if (MI_INTPTR_SIZE>4)
   { 1024L * 1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time
diff --git a/src/segment.c b/src/segment.c
index 05c05285..eca50d46 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -1242,7 +1242,13 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
   *reclaimed = false;
   mi_segment_t* segment;
   mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap,&current);
-  long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 1024); // limit the work to bound allocation times
+
+  // limit the tries to 10% (default) of the abandoned segments with at least 8 tries, and at most 1024.
+  const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
+  if (perc <= 0) return NULL;
+  const size_t abandoned_count = _mi_arena_segment_abandoned_count();
+  const size_t relative_count = (abandoned_count > 10000 ? (abandoned_count / 100) * perc : (abandoned_count * perc) / 100); // avoid overflow
+  long max_tries = (long)(relative_count < 8 ? 8 : (relative_count > 1024 ? 1024 : relative_count));
   while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
   {
     segment->abandoned_visits++;
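
The main behavioral change in this patch is that mi_option_max_segment_reclaim is now interpreted as a percentage of the currently abandoned segments (default 10) instead of a fixed number of reclaim attempts. The standalone C sketch below is not part of the patch; reclaim_try_limit is a hypothetical helper written only for illustration. It mirrors the max_tries arithmetic from mi_segment_try_reclaim, including the overflow guard that divides before multiplying once the abandoned count exceeds 10000:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical helper mirroring the new max_tries computation in
   mi_segment_try_reclaim: take `perc` percent of the abandoned segment
   count, clamped to at least 8 and at most 1024 tries. */
static long reclaim_try_limit(size_t abandoned_count, size_t perc) {
  if (perc == 0) return 0;  /* reclaiming on allocation is disabled */
  /* for large counts divide first so that abandoned_count * perc cannot overflow */
  const size_t relative_count = (abandoned_count > 10000
                                   ? (abandoned_count / 100) * perc
                                   : (abandoned_count * perc) / 100);
  return (long)(relative_count < 8 ? 8 : (relative_count > 1024 ? 1024 : relative_count));
}

int main(void) {
  const size_t counts[] = { 0, 50, 500, 20000, 1000000 };
  for (size_t i = 0; i < sizeof(counts)/sizeof(counts[0]); i++) {
    printf("abandoned=%zu -> max_tries=%ld\n", counts[i], reclaim_try_limit(counts[i], 10));
  }
  return 0;
}

With the default of 10%, a process with few abandoned segments still gets at least 8 reclaim attempts per try, while heavily abandoned workloads are capped at 1024 attempts so allocation latency stays bounded; _mi_arena_segment_abandoned_count() (added in arena.c above) supplies the count that this calculation scales against.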