remove os_tld and stats parameters to os interface

daanx 2024-12-08 17:56:13 -08:00
parent d9a2f76ff7
commit c8607a8d01
12 changed files with 270 additions and 294 deletions
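In short: the OS-layer functions no longer take a mi_stats_t* (or mi_os_tld_t*) argument; the OS layer now accounts statistics directly in the process-wide _mi_stats_main. A minimal before/after sketch of a call site (a hypothetical caller inside mimalloc, assuming the internal headers; the function name example_roundtrip is illustrative and not part of this diff):

#include "mimalloc/internal.h"

static void example_roundtrip(size_t size) {
  mi_memid_t memid;
  // before this commit: void* p = _mi_os_alloc(size, &memid, &_mi_stats_main);
  void* p = _mi_os_alloc(size, &memid);  // stats parameter removed
  if (p == NULL) return;
  // before this commit: _mi_os_free(p, size, memid, &_mi_stats_main);
  _mi_os_free(p, size, memid);           // stats are tracked in _mi_stats_main internally
}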

include/mimalloc/internal.h

@@ -53,89 +53,100 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_decl_externc
 #endif
+// "libc.c"
+#include <stdarg.h>
+void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
+void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
+char _mi_toupper(char c);
+int _mi_strnicmp(const char* s, const char* t, size_t n);
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
+void _mi_strlcat(char* dest, const char* src, size_t dest_size);
+size_t _mi_strlen(const char* s);
+size_t _mi_strnlen(const char* s, size_t max_len);
+bool _mi_getenv(const char* name, char* result, size_t result_size);

 // "options.c"
 void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
 void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
 void _mi_warning_message(const char* fmt, ...);
 void _mi_verbose_message(const char* fmt, ...);
 void _mi_trace_message(const char* fmt, ...);
 void _mi_options_init(void);
 long _mi_option_get_fast(mi_option_t option);
 void _mi_error_message(int err, const char* fmt, ...);

 // random.c
 void _mi_random_init(mi_random_ctx_t* ctx);
 void _mi_random_init_weak(mi_random_ctx_t* ctx);
 void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
 void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
 uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
 uintptr_t _mi_heap_random_next(mi_heap_t* heap);
 uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
 static inline uintptr_t _mi_random_shuffle(uintptr_t x);

 // init.c
 extern mi_decl_cache_align mi_stats_t _mi_stats_main;
 extern mi_decl_cache_align const mi_page_t _mi_page_empty;
 void _mi_process_load(void);
 void mi_cdecl _mi_process_done(void);
 bool _mi_is_redirected(void);
 bool _mi_allocator_init(const char** message);
 void _mi_allocator_done(void);
 bool _mi_is_main_thread(void);
 size_t _mi_current_thread_count(void);
 bool _mi_preloading(void); // true while the C runtime is not initialized yet
 void _mi_thread_done(mi_heap_t* heap);
 void _mi_thread_data_collect(void);
 void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
 mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
 mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
 mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
 void _mi_heap_guarded_init(mi_heap_t* heap);

 // os.c
 void _mi_os_init(void); // called from process init
-void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
-void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
-void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
+void* _mi_os_alloc(size_t size, mi_memid_t* memid);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
 size_t _mi_os_page_size(void);
 size_t _mi_os_good_alloc_size(size_t size);
 bool _mi_os_has_overcommit(void);
 bool _mi_os_has_virtual_reserve(void);
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
-bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool _mi_os_reset(void* addr, size_t size);
+bool _mi_os_commit(void* p, size_t size, bool* is_zero);
+bool _mi_os_decommit(void* addr, size_t size);
 bool _mi_os_protect(void* addr, size_t size);
 bool _mi_os_unprotect(void* addr, size_t size);
-bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
-void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);
+bool _mi_os_purge(void* p, size_t size);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset);
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);
 void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
 bool _mi_os_use_large_page(size_t size, size_t alignment);
 size_t _mi_os_large_page_size(void);
 void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
 bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
 bool _mi_arena_contains(const void* p);
-void _mi_arenas_collect(bool force_purge, mi_stats_t* stats);
-void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
+void _mi_arenas_collect(bool force_purge);
+void _mi_arena_unsafe_destroy_all(void);
 bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment);
 void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
 void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
 void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);

 typedef struct mi_arena_field_cursor_s { // abstract struct
   size_t os_list_count; // max entries to visit in the OS abandoned list
@@ -151,63 +162,63 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* pr
 void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current);

 // "segment-map.c"
 void _mi_segment_map_allocated_at(const mi_segment_t* segment);
 void _mi_segment_map_freed_at(const mi_segment_t* segment);

 // "segment.c"
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
 void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
 uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size);

 #if MI_HUGE_PAGE_ABANDON
 void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
 #else
 void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
 #endif

 void _mi_segments_collect(bool force, mi_segments_tld_t* tld);
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
 bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
 bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);

 // "page.c"
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
 void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
 void _mi_page_unfull(mi_page_t* page);
 void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
 void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
 void _mi_page_force_abandon(mi_page_t* page);
 void _mi_heap_delayed_free_all(mi_heap_t* heap);
 bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
 void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
 void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
 bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
 void _mi_deferred_free(mi_heap_t* heap, bool force);
 void _mi_page_free_collect(mi_page_t* page,bool force);
 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
 size_t _mi_bin_size(uint8_t bin); // for stats
 uint8_t _mi_bin(size_t size); // for stats

 // "heap.c"
 void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
 void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
 bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
 void _mi_heap_unsafe_destroy_all(void);
 mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
 void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
 bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);

 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
 mi_msecs_t _mi_clock_now(void);
 mi_msecs_t _mi_clock_end(mi_msecs_t start);
 mi_msecs_t _mi_clock_start(void);
@@ -224,18 +235,6 @@ bool _mi_free_delayed_block(mi_block_t* block);
 void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
 void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);

-// "libc.c"
-#include <stdarg.h>
-void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
-void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
-char _mi_toupper(char c);
-int _mi_strnicmp(const char* s, const char* t, size_t n);
-void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
-void _mi_strlcat(char* dest, const char* src, size_t dest_size);
-size_t _mi_strlen(const char* s);
-size_t _mi_strnlen(const char* s, size_t max_len);
-bool _mi_getenv(const char* name, char* result, size_t result_size);
 #if MI_DEBUG>1
 bool _mi_page_is_valid(mi_page_t* page);
 #endif
@@ -806,13 +805,13 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
 // Optimize numa node access for the common case (= one node)
 // -------------------------------------------------------------------

-int _mi_os_numa_node_get(mi_os_tld_t* tld);
+int _mi_os_numa_node_get(void);
 size_t _mi_os_numa_node_count_get(void);

 extern _Atomic(size_t) _mi_numa_node_count;
-static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
+static inline int _mi_os_numa_node(void) {
   if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
-  else return _mi_os_numa_node_get(tld);
+  else return _mi_os_numa_node_get();
 }
 static inline size_t _mi_os_numa_node_count(void) {
   const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);

include/mimalloc/types.h

@@ -656,12 +656,6 @@ typedef struct mi_segment_queue_s {
   mi_segment_t* last;
 } mi_segment_queue_t;

-// OS thread local data
-typedef struct mi_os_tld_s {
-  size_t region_idx;  // start point for next allocation
-  mi_stats_t* stats;  // points to tld stats
-} mi_os_tld_t;
-
 // Segments thread local data
 typedef struct mi_segments_tld_s {
   mi_segment_queue_t small_free; // queue of segments with free small pages
@@ -674,7 +668,6 @@ typedef struct mi_segments_tld_s {
   size_t reclaim_count; // number of reclaimed (abandoned) segments
   mi_subproc_t* subproc; // sub-process this thread belongs to.
   mi_stats_t* stats; // points to tld stats
-  mi_os_tld_t* os; // points to os tld
 } mi_segments_tld_t;

 // Thread local data
@@ -684,7 +677,6 @@ struct mi_tld_s {
   mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
   mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
   mi_segments_tld_t segments; // segment tld
-  mi_os_tld_t os; // os tld
   mi_stats_t stats; // statistics
 };
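With the stats pointer gone, mi_os_tld_t held nothing the OS layer still needs (region_idx appears to be a leftover of the earlier region-based allocator), so the struct is deleted outright rather than left empty. A hedged sketch of the knock-on effect at a segment-layer call site, following the new _mi_segment_page_alloc declaration above (hypothetical caller, not part of this diff):

// before: _mi_segment_page_alloc(heap, block_size, page_alignment,
//                                &heap->tld->segments, &heap->tld->os);
// after: the trailing mi_os_tld_t* argument is gone
mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment,
                                         &heap->tld->segments);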

src/arena.c

@@ -186,7 +186,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
   if (p != NULL) return p;

   // or fall back to the OS
-  p = _mi_os_alloc(size, memid, &_mi_stats_main);
+  p = _mi_os_alloc(size, memid);
   if (p == NULL) return NULL;

   // zero the OS memory if needed
@@ -199,7 +199,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
 void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
   if (mi_memkind_is_os(memid.memkind)) {
-    _mi_os_free(p, size, memid, &_mi_stats_main);
+    _mi_os_free(p, size, memid);
   }
   else {
     mi_assert(memid.memkind == MI_MEM_STATIC);
@@ -216,10 +216,10 @@ void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
 ----------------------------------------------------------- */

 // claim the `blocks_inuse` bits
-static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
+static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
 {
   size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
-  if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) {
+  if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
     mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
     return true;
   };
@@ -232,13 +232,13 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
 ----------------------------------------------------------- */

 static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
-                                                    bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
+                                                    bool commit, mi_memid_t* memid)
 {
   MI_UNUSED(arena_index);
   mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);

   mi_bitmap_index_t bitmap_index;
-  if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL;
+  if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;

   // claimed it!
   void* p = mi_arena_block_start(arena, bitmap_index);
@@ -268,7 +268,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
     _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
     if (any_uncommitted) {
       bool commit_zero = false;
-      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
+      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero)) {
         memid->initially_committed = false;
       }
       else {
@@ -286,7 +286,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar

 // allocate in a speficic arena
 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
-                                      bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+                                      bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid )
 {
   MI_UNUSED_RELEASE(alignment);
   mi_assert(alignment <= MI_SEGMENT_ALIGN);
@@ -307,7 +307,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
   }

   // try to allocate
-  void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
+  void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid);
   mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
   return p;
 }
@@ -316,7 +316,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
 // allocate from an arena with fallback to the OS
 static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
                                                  bool commit, bool allow_large,
-                                                 mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+                                                 mi_arena_id_t req_arena_id, mi_memid_t* memid )
 {
   MI_UNUSED(alignment);
   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -326,21 +326,21 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
   if (req_arena_id != _mi_arena_id_none()) {
     // try a specific arena if requested
     if (mi_arena_id_index(req_arena_id) < max_arena) {
-      void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+      void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
       if (p != NULL) return p;
     }
   }
   else {
     // try numa affine allocation
     for (size_t i = 0; i < max_arena; i++) {
-      void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+      void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
       if (p != NULL) return p;
     }

     // try from another numa node instead..
     if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arena's have been tried already
       for (size_t i = 0; i < max_arena; i++) {
-        void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+        void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
         if (p != NULL) return p;
       }
     }
@@ -385,18 +385,18 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re

 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
-                              mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+                              mi_arena_id_t req_arena_id, mi_memid_t* memid)
 {
-  mi_assert_internal(memid != NULL && tld != NULL);
+  mi_assert_internal(memid != NULL);
   mi_assert_internal(size > 0);
   *memid = _mi_memid_none();

-  const int numa_node = _mi_os_numa_node(tld); // current numa node
+  const int numa_node = _mi_os_numa_node(); // current numa node

   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed?
     if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
-      void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+      void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
       if (p != NULL) return p;

       // otherwise, try to first eagerly reserve a new arena
@@ -405,7 +405,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
       if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
         // and try allocate in there
         mi_assert_internal(req_arena_id == _mi_arena_id_none());
-        p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+        p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
         if (p != NULL) return p;
       }
     }
@@ -420,16 +420,16 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset

   // finally, fall back to the OS
   if (align_offset > 0) {
-    return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
+    return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid);
   }
   else {
-    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
+    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
   }
 }

-void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid)
 {
-  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
+  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid);
 }
@@ -455,7 +455,7 @@ static long mi_arena_purge_delay(void) {

 // reset or decommit in an arena and update the committed/decommit bitmaps
 // assumes we own the area (i.e. blocks_in_use is claimed by us)
-static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
   mi_assert_internal(arena->blocks_committed != NULL);
   mi_assert_internal(arena->blocks_purge != NULL);
   mi_assert_internal(!arena->memid.is_pinned);
@@ -464,7 +464,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
   bool needs_recommit;
   if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
     // all blocks are committed, we can purge freely
-    needs_recommit = _mi_os_purge(p, size, stats);
+    needs_recommit = _mi_os_purge(p, size);
   }
   else {
     // some blocks are not committed -- this can happen when a partially committed block is freed
@@ -472,7 +472,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
     // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
     // and also undo the decommit stats (as it was already adjusted)
     mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
-    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
+    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */);
     if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); }
   }
@@ -486,14 +486,14 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,

 // Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
 // Note: assumes we (still) own the area as we may purge immediately
-static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
   mi_assert_internal(arena->blocks_purge != NULL);
   const long delay = mi_arena_purge_delay();
   if (delay < 0) return; // is purging allowed at all?

   if (_mi_preloading() || delay == 0) {
     // decommit directly
-    mi_arena_purge(arena, bitmap_idx, blocks, stats);
+    mi_arena_purge(arena, bitmap_idx, blocks);
   }
   else {
     // schedule decommit
@@ -511,7 +511,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
 // purge a range of blocks
 // return true if the full range was purged.
 // assumes we own the area (i.e. blocks_in_use is claimed by us)
-static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
+static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge) {
   const size_t endidx = startidx + bitlen;
   size_t bitidx = startidx;
   bool all_purged = false;
@@ -524,7 +524,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
     if (count > 0) {
       // found range to be purged
       const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
-      mi_arena_purge(arena, range_idx, count, stats);
+      mi_arena_purge(arena, range_idx, count);
       if (count == bitlen) {
         all_purged = true;
       }
@@ -535,7 +535,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
 }

 // returns true if anything was purged
-static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
+static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
 {
   if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
   mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
@@ -571,7 +571,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
       if (bitlen > 0) {
         // read purge again now that we have the in_use bits
         purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
-        if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
+        if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge)) {
           full_purge = false;
         }
         any_purged = true;
@@ -591,7 +591,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
   return any_purged;
 }

-static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
+static void mi_arenas_try_purge( bool force, bool visit_all ) {
   if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled

   const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
@@ -606,7 +606,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
     for (size_t i = 0; i < max_arena; i++) {
       mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
       if (arena != NULL) {
-        if (mi_arena_try_purge(arena, now, force, stats)) {
+        if (mi_arena_try_purge(arena, now, force)) {
           if (max_purge_count <= 1) break;
           max_purge_count--;
         }
@@ -620,8 +620,8 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
   Arena free
 ----------------------------------------------------------- */

-void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
-  mi_assert_internal(size > 0 && stats != NULL);
+void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid) {
+  mi_assert_internal(size > 0);
   mi_assert_internal(committed_size <= size);
   if (p==NULL) return;
   if (size==0) return;
@@ -636,7 +636,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
       // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
       _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
     }
-    _mi_os_free(p, size, memid, stats);
+    _mi_os_free(p, size, memid);
   }
   else if (memid.memkind == MI_MEM_ARENA) {
     // allocated in an arena
@@ -681,7 +681,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
         // works (as we should never reset decommitted parts).
       }
       // (delay) purge the entire range
-      mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
+      mi_arena_schedule_purge(arena, bitmap_idx, blocks);
     }

     // and make it available to others again
@@ -697,7 +697,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
   }

   // purge expired decommits
-  mi_arenas_try_purge(false, false, stats);
+  mi_arenas_try_purge(false, false);
 }

 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
@@ -711,7 +711,7 @@ static void mi_arenas_unsafe_destroy(void) {
       mi_lock_done(&arena->abandoned_visit_lock);
       if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
         mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
-        _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
+        _mi_os_free(arena->start, mi_arena_size(arena), arena->memid);
       }
       else {
         new_max_arena = i;
@@ -726,15 +726,15 @@ static void mi_arenas_unsafe_destroy(void) {
 }

 // Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
-void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) {
-  mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats);
+void _mi_arenas_collect(bool force_purge) {
+  mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
 }

 // destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
 // for dynamic libraries that are unloaded and need to release all their allocated memory.
-void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
+void _mi_arena_unsafe_destroy_all(void) {
   mi_arenas_unsafe_destroy();
-  _mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas
+  _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
 }

 // Is a pointer inside any of our arenas?
@@ -838,11 +838,11 @@ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exc
   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
   size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
   mi_memid_t memid;
-  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
+  void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid);
   if (start == NULL) return ENOMEM;
   const bool is_large = memid.is_pinned; // todo: use separate is_large field?
   if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
-    _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
+    _mi_os_free_ex(start, size, commit, memid);
     _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
     return ENOMEM;
   }
@@ -938,7 +938,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_m
   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
   if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
-    _mi_os_free(p, hsize, memid, &_mi_stats_main);
+    _mi_os_free(p, hsize, memid);
     return ENOMEM;
   }
   return 0;
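Taken together, an arena allocation with OS fallback now needs only the mi_memid_t out-parameter. A sketch of a caller under the new signatures (hypothetical usage, not part of this diff; the still-committed size passed to _mi_arena_free is simplified):

mi_memid_t memid;
void* p = _mi_arena_alloc_aligned(MI_SEGMENT_SIZE, MI_SEGMENT_ALIGN, 0,
                                  true  /* commit */, false /* allow_large */,
                                  _mi_arena_id_none(), &memid);
// _mi_arena_alloc_aligned falls back to _mi_os_alloc_aligned itself (see above),
// so p == NULL means the OS allocation failed as well.
if (p != NULL) {
  _mi_arena_free(p, MI_SEGMENT_SIZE,
                 (memid.initially_committed ? MI_SEGMENT_SIZE : 0), memid);
}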

src/bitmap.c

@@ -182,7 +182,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
 // Try to atomically claim a sequence of `count` bits starting from the field
 // at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
 // Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`)
-static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
+static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
 {
   mi_assert_internal(bitmap_idx != NULL);
@@ -242,7 +242,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
   } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));

   // claimed!
-  mi_stat_counter_increase(stats->arena_crossover_count,1);
+  mi_stat_counter_increase(_mi_stats_main.arena_crossover_count,1);
   *bitmap_idx = mi_bitmap_index_create(idx, initial_idx);
   return true;
@@ -262,10 +262,10 @@ rollback:
       newmap = (map & ~initial_mask);
     } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
   }
-  mi_stat_counter_increase(stats->arena_rollback_count,1);
+  mi_stat_counter_increase(_mi_stats_main.arena_rollback_count,1);
   // retry? (we make a recursive call instead of goto to be able to use const declarations)
   if (retries <= 2) {
-    return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx, stats);
+    return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx);
   }
   else {
     return false;
@@ -275,7 +275,7 @@ rollback:

 // Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
 // Starts at idx, and wraps around to search in all `bitmap_fields` fields.
-bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) {
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
   mi_assert_internal(count > 0);
   if (count <= 2) {
     // we don't bother with crossover fields for small counts
@@ -295,7 +295,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
     }
     */
     // if that fails, then try to claim across fields
-    if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx, stats)) {
+    if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx)) {
       return true;
     }
   }

src/bitmap.h

@@ -94,7 +94,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t

 // Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
 // Starts at idx, and wraps around to search in all `bitmap_fields` fields.
-bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats);
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

 // Set `count` bits at `bitmap_idx` to 0 atomically
 // Returns `true` if all `count` bits were 1 previously.

src/heap.c

@@ -166,7 +166,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   }

   // collect arenas (this is program wide so don't force purges on abandonment of threads)
-  _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats);
+  _mi_arenas_collect(collect == MI_FORCE /* force purge? */);
 }

 void _mi_heap_collect_abandon(mi_heap_t* heap) {

src/init.c

@@ -136,9 +136,8 @@ static mi_decl_cache_align mi_tld_t tld_main = {
   &_mi_heap_main, &_mi_heap_main,
   { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
     0, 0, 0, 0, 0, &mi_subproc_default,
-    &tld_main.stats, &tld_main.os
+    &tld_main.stats
   }, // segments
-  { 0, &tld_main.stats }, // os
   { MI_STATS_NULL } // stats
 };
@@ -320,10 +319,10 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
   // if that fails, allocate as meta data
   if (td == NULL) {
     mi_memid_t memid;
-    td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+    td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
     if (td == NULL) {
       // if this fails, try once more. (issue #257)
-      td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+      td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
       if (td == NULL) {
         // really out of memory
         _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
@@ -353,7 +352,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
     }
   }
   // if that fails, just free it directly
-  _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
+  _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid);
 }

 void _mi_thread_data_collect(void) {
@@ -363,7 +362,7 @@ void _mi_thread_data_collect(void) {
     if (td != NULL) {
       td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
       if (td != NULL) {
-        _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main);
+        _mi_os_free(td, sizeof(mi_thread_data_t), td->memid);
       }
     }
   }
@@ -399,9 +398,7 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
   tld->heap_backing = bheap;
   tld->heaps = NULL;
   tld->segments.subproc = &mi_subproc_default;
   tld->segments.stats = &tld->stats;
-  tld->segments.os = &tld->os;
-  tld->os.stats = &tld->stats;
 }

 // Free the thread local default heap (called from `mi_thread_done`)
@@ -685,7 +682,7 @@ void mi_cdecl _mi_process_done(void) {
   if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
     mi_collect(true /* force */);
     _mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!)
-    _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats);
+    _mi_arena_unsafe_destroy_all();
   }

   if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {

src/os.c

@@ -9,6 +9,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "mimalloc/atomic.h"
 #include "mimalloc/prim.h"

+#define os_stats (&_mi_stats_main)

 /* -----------------------------------------------------------
   Initialization.
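Instead of threading a stats pointer through every function, src/os.c now funnels all accounting through this os_stats alias for the global _mi_stats_main. The pattern used in the hunks below looks like this (illustrative sketch; os_stats_example is a hypothetical helper, not part of this diff):

static void os_stats_example(size_t size) {
  _mi_stat_increase(&os_stats->reserved, size);       // on allocation
  _mi_stat_decrease(&os_stats->committed, size);      // when freeing committed memory
  mi_stat_counter_increase(os_stats->mmap_calls, 1);  // per OS allocation call
}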
@ -85,8 +86,8 @@ void _mi_os_init(void) {
/* ----------------------------------------------------------- /* -----------------------------------------------------------
Util Util
-------------------------------------------------------------- */ -------------------------------------------------------------- */
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); bool _mi_os_decommit(void* addr, size_t size);
bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats); bool _mi_os_commit(void* addr, size_t size, bool* is_zero);
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) { static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0); mi_assert_internal(alignment != 0);
@ -161,23 +162,20 @@ void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
Free memory Free memory
-------------------------------------------------------------- */ -------------------------------------------------------------- */
static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats); static void mi_os_free_huge_os_pages(void* p, size_t size);
static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) { static void mi_os_prim_free(void* addr, size_t size, bool still_committed) {
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
mi_assert_internal((size % _mi_os_page_size()) == 0); mi_assert_internal((size % _mi_os_page_size()) == 0);
if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr) if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
int err = _mi_prim_free(addr, size); int err = _mi_prim_free(addr, size);
if (err != 0) { if (err != 0) {
_mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr); _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
} }
if (still_committed) { _mi_stat_decrease(&stats->committed, size); } if (still_committed) { _mi_stat_decrease(&os_stats->committed, size); }
_mi_stat_decrease(&stats->reserved, size); _mi_stat_decrease(&os_stats->reserved, size);
} }
void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats) { void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
if (stats == NULL) stats = &_mi_stats_main;
if (mi_memkind_is_os(memid.memkind)) { if (mi_memkind_is_os(memid.memkind)) {
size_t csize = _mi_os_good_alloc_size(size); size_t csize = _mi_os_good_alloc_size(size);
void* base = addr; void* base = addr;
@ -191,10 +189,10 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
// free it // free it
if (memid.memkind == MI_MEM_OS_HUGE) { if (memid.memkind == MI_MEM_OS_HUGE) {
mi_assert(memid.is_pinned); mi_assert(memid.is_pinned);
mi_os_free_huge_os_pages(base, csize, stats); mi_os_free_huge_os_pages(base, csize);
} }
else { else {
mi_os_prim_free(base, csize, still_committed, stats); mi_os_prim_free(base, csize, still_committed);
} }
} }
else { else {
@@ -203,9 +201,8 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
} }
} }
void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) { void _mi_os_free(void* p, size_t size, mi_memid_t memid) {
if (stats == NULL) stats = &_mi_stats_main;
_mi_os_free_ex(p, size, true, memid, stats); _mi_os_free_ex(p, size, true, memid);
} }
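
Editor's note: for callers the change is mechanical. The trailing stats argument disappears, and the old `NULL` convention (meaning "use `_mi_stats_main`") is no longer needed because that is now the only behavior. A hypothetical call site, shown for illustration only:

// before: _mi_os_free(p, size, memid, NULL);          // NULL selected _mi_stats_main
// before: _mi_os_free(p, size, memid, &tld->stats);   // or an explicit stats target
// after:  _mi_os_free(p, size, memid);                // main stats are implicit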
@@ -215,7 +212,7 @@ void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. // Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
// Also `hint_addr` is a hint and may be ignored. // Also `hint_addr` is a hint and may be ignored.
static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) { static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(is_zero != NULL); mi_assert_internal(is_zero != NULL);
mi_assert_internal(is_large != NULL); mi_assert_internal(is_large != NULL);
@@ -229,13 +226,13 @@ static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignm
_mi_warning_message("unable to allocate OS memory (error: %d (0x%x), addr: %p, size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, hint_addr, size, try_alignment, commit, allow_large); _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), addr: %p, size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, hint_addr, size, try_alignment, commit, allow_large);
} }
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
mi_stat_counter_increase(stats->mmap_calls, 1); mi_stat_counter_increase(os_stats->mmap_calls, 1);
if (p != NULL) { if (p != NULL) {
_mi_stat_increase(&stats->reserved, size); _mi_stat_increase(&os_stats->reserved, size);
if (commit) { if (commit) {
_mi_stat_increase(&stats->committed, size); _mi_stat_increase(&os_stats->committed, size);
// seems needed for asan (or `mimalloc-test-api` fails) // seems needed for asan (or `mimalloc-test-api` fails)
#ifdef MI_TRACK_ASAN #ifdef MI_TRACK_ASAN
if (*is_zero) { mi_track_mem_defined(p,size); } if (*is_zero) { mi_track_mem_defined(p,size); }
@@ -246,14 +243,14 @@ static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignm
return p; return p;
} }
static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) { static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero, tld_stats); return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero);
} }
// Primitive aligned allocation from the OS. // Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned. // This function guarantees the allocated memory is aligned.
static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) { static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base) {
mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0)); mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(is_large != NULL); mi_assert_internal(is_large != NULL);
@@ -264,7 +261,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
size = _mi_align_up(size, _mi_os_page_size()); size = _mi_align_up(size, _mi_os_page_size());
// try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD) // try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats); void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero);
if (p == NULL) return NULL; if (p == NULL) return NULL;
// aligned already? // aligned already?
@@ -276,13 +273,13 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
#if !MI_TRACK_ASAN #if !MI_TRACK_ASAN
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit); _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
#endif #endif
mi_os_prim_free(p, size, commit, stats); mi_os_prim_free(p, size, commit);
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
const size_t over_size = size + alignment; const size_t over_size = size + alignment;
if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block
// over-allocate uncommitted (virtual) memory // over-allocate uncommitted (virtual) memory
p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats); p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero);
if (p == NULL) return NULL; if (p == NULL) return NULL;
// set p to the aligned part in the full region // set p to the aligned part in the full region
@@ -293,12 +290,12 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
// explicitly commit only the aligned part // explicitly commit only the aligned part
if (commit) { if (commit) {
_mi_os_commit(p, size, NULL, stats); _mi_os_commit(p, size, NULL);
} }
} }
else { // mmap can free inside an allocation else { // mmap can free inside an allocation
// overallocate... // overallocate...
p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats); p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero);
if (p == NULL) return NULL; if (p == NULL) return NULL;
// and selectively unmap parts around the over-allocated area. // and selectively unmap parts around the over-allocated area.
@@ -307,8 +304,8 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
size_t mid_size = _mi_align_up(size, _mi_os_page_size()); size_t mid_size = _mi_align_up(size, _mi_os_page_size());
size_t post_size = over_size - pre_size - mid_size; size_t post_size = over_size - pre_size - mid_size;
mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size); mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size);
if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit, stats); } if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit); }
if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); } if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit); }
// we can return the aligned pointer on `mmap` systems // we can return the aligned pointer on `mmap` systems
p = aligned_p; p = aligned_p;
*base = aligned_p; // since we freed the pre part, `*base == p`. *base = aligned_p; // since we freed the pre part, `*base == p`.
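
Editor's note: the fallback above is the classic over-allocate-and-trim scheme: reserve `size + alignment`, locate the aligned pointer inside the block, and on `mmap`-style systems unmap the unaligned head and tail. A self-contained sketch of the arithmetic (made-up addresses, not mimalloc code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uintptr_t base      = 0x10001000;   // assumed start of the over-allocation
  const size_t    size      = 0x4000;       // requested size (page multiple)
  const size_t    alignment = 0x8000;       // requested alignment (power of two)
  const size_t    over_size = size + alignment;

  const uintptr_t aligned_p = (base + alignment - 1) & ~(uintptr_t)(alignment - 1);
  const size_t pre_size  = (size_t)(aligned_p - base);      // freed before the aligned part
  const size_t mid_size  = size;                            // the part that is kept
  const size_t post_size = over_size - pre_size - mid_size; // freed after it

  printf("pre=0x%zx mid=0x%zx post=0x%zx\n", pre_size, mid_size, post_size);
  return 0;
}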
@@ -324,33 +321,31 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
OS API: alloc and alloc_aligned OS API: alloc and alloc_aligned
----------------------------------------------------------- */ ----------------------------------------------------------- */
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) { void* _mi_os_alloc(size_t size, mi_memid_t* memid) {
*memid = _mi_memid_none(); *memid = _mi_memid_none();
if (size == 0) return NULL; if (size == 0) return NULL;
if (stats == NULL) stats = &_mi_stats_main;
size = _mi_os_good_alloc_size(size); size = _mi_os_good_alloc_size(size);
bool os_is_large = false; bool os_is_large = false;
bool os_is_zero = false; bool os_is_zero = false;
void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats); void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero);
if (p != NULL) { if (p != NULL) {
*memid = _mi_memid_create_os(true, os_is_zero, os_is_large); *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
} }
return p; return p;
} }
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid)
{ {
MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
*memid = _mi_memid_none(); *memid = _mi_memid_none();
if (size == 0) return NULL; if (size == 0) return NULL;
if (stats == NULL) stats = &_mi_stats_main;
size = _mi_os_good_alloc_size(size); size = _mi_os_good_alloc_size(size);
alignment = _mi_align_up(alignment, _mi_os_page_size()); alignment = _mi_align_up(alignment, _mi_os_page_size());
bool os_is_large = false; bool os_is_large = false;
bool os_is_zero = false; bool os_is_zero = false;
void* os_base = NULL; void* os_base = NULL;
void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, stats ); void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base );
if (p != NULL) { if (p != NULL) {
*memid = _mi_memid_create_os(commit, os_is_zero, os_is_large); *memid = _mi_memid_create_os(commit, os_is_zero, os_is_large);
memid->mem.os.base = os_base; memid->mem.os.base = os_base;
@@ -367,29 +362,28 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
to use the actual start of the memory region. to use the actual start of the memory region.
----------------------------------------------------------- */ ----------------------------------------------------------- */
void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) { void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid) {
mi_assert(offset <= MI_SEGMENT_SIZE); mi_assert(offset <= MI_SEGMENT_SIZE);
mi_assert(offset <= size); mi_assert(offset <= size);
mi_assert((alignment % _mi_os_page_size()) == 0); mi_assert((alignment % _mi_os_page_size()) == 0);
*memid = _mi_memid_none(); *memid = _mi_memid_none();
if (stats == NULL) stats = &_mi_stats_main;
if (offset > MI_SEGMENT_SIZE) return NULL; if (offset > MI_SEGMENT_SIZE) return NULL;
if (offset == 0) { if (offset == 0) {
// regular aligned allocation // regular aligned allocation
return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, stats); return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
} }
else { else {
// overallocate to align at an offset // overallocate to align at an offset
const size_t extra = _mi_align_up(offset, alignment) - offset; const size_t extra = _mi_align_up(offset, alignment) - offset;
const size_t oversize = size + extra; const size_t oversize = size + extra;
void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, stats); void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid);
if (start == NULL) return NULL; if (start == NULL) return NULL;
void* const p = (uint8_t*)start + extra; void* const p = (uint8_t*)start + extra;
mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment)); mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
// decommit the overallocation at the start // decommit the overallocation at the start
if (commit && extra > _mi_os_page_size()) { if (commit && extra > _mi_os_page_size()) {
_mi_os_decommit(start, extra, stats); _mi_os_decommit(start, extra);
} }
return p; return p;
} }
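
Editor's note: the offset-aligned path wants `p + offset` (not `p` itself) to be `alignment`-aligned, so it over-allocates by `extra = align_up(offset, alignment) - offset` and returns `start + extra`. A sketch of that arithmetic with made-up values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t align_up(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

int main(void) {
  const uintptr_t start = 0x20000000;   // assumed: block already `alignment`-aligned
  const uintptr_t alignment = 0x10000;
  const uintptr_t offset = 0x2345;
  const uintptr_t extra = align_up(offset, alignment) - offset;
  const uintptr_t p = start + extra;    // what the caller receives
  printf("extra=0x%" PRIxPTR ", (p+offset) mod alignment = 0x%" PRIxPTR "\n",
         extra, (p + offset) % alignment);  // second value prints as 0
  return 0;
}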
@@ -423,12 +417,10 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
return mi_os_page_align_areax(true, addr, size, newsize); return mi_os_page_align_areax(true, addr, size, newsize);
} }
bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) { bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
if (is_zero != NULL) { *is_zero = false; } if (is_zero != NULL) { *is_zero = false; }
_mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit _mi_stat_increase(&os_stats->committed, size); // use size for precise commit vs. decommit
_mi_stat_counter_increase(&stats->commit_calls, 1); _mi_stat_counter_increase(&os_stats->commit_calls, 1);
// page align range // page align range
size_t csize; size_t csize;
@@ -454,11 +446,8 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
return true; return true;
} }
static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) { static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit) {
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
mi_assert_internal(needs_recommit!=NULL); mi_assert_internal(needs_recommit!=NULL);
_mi_stat_decrease(&stats->committed, size); _mi_stat_decrease(&os_stats->committed, size);
// page align // page align
size_t csize; size_t csize;
@@ -475,9 +464,9 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
return (err == 0); return (err == 0);
} }
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) { bool _mi_os_decommit(void* addr, size_t size) {
bool needs_recommit; bool needs_recommit;
return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats); return mi_os_decommit_ex(addr, size, &needs_recommit);
} }
@@ -485,13 +474,13 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
// but may be used later again. This will release physical memory // but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed. // pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset. // We page align to a conservative area inside the range to reset.
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { bool _mi_os_reset(void* addr, size_t size) {
// page align conservatively within the range // page align conservatively within the range
size_t csize; size_t csize;
void* start = mi_os_page_align_area_conservative(addr, size, &csize); void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr) if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr)
_mi_stat_increase(&stats->reset, csize); _mi_stat_increase(&os_stats->reset, csize);
_mi_stat_counter_increase(&stats->reset_calls, 1); _mi_stat_counter_increase(&os_stats->reset_calls, 1);
#if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN
memset(start, 0, csize); // pretend it is eagerly reset memset(start, 0, csize); // pretend it is eagerly reset
@@ -507,22 +496,22 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
// either resets or decommits memory, returns true if the memory needs // either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on. // to be recommitted if it is to be re-used later on.
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset)
{ {
if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed? if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed?
_mi_stat_counter_increase(&stats->purge_calls, 1); _mi_stat_counter_increase(&os_stats->purge_calls, 1);
_mi_stat_increase(&stats->purged, size); _mi_stat_increase(&os_stats->purged, size);
if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit? if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
!_mi_preloading()) // don't decommit during preloading (unsafe) !_mi_preloading()) // don't decommit during preloading (unsafe)
{ {
bool needs_recommit = true; bool needs_recommit = true;
mi_os_decommit_ex(p, size, &needs_recommit, stats); mi_os_decommit_ex(p, size, &needs_recommit);
return needs_recommit; return needs_recommit;
} }
else { else {
if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed
_mi_os_reset(p, size, stats); _mi_os_reset(p, size);
} }
return false; // needs no recommit return false; // needs no recommit
} }
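
Editor's note: `_mi_os_purge_ex` encodes one decision: decommit (lower RSS, but the range must be recommitted before reuse) versus reset (stays committed, contents merely discardable). A simplified, stand-alone sketch of that contract, with stub primitives standing in for the real OS calls:

#include <stdbool.h>
#include <stddef.h>

// stubs standing in for mi_os_decommit_ex / _mi_os_reset
static bool stub_decommit(void* p, size_t size, bool* needs_recommit) {
  (void)p; (void)size; *needs_recommit = true; return true;
}
static bool stub_reset(void* p, size_t size) { (void)p; (void)size; return true; }

// returns true iff the memory must be committed again before reuse
static bool purge(void* p, size_t size, bool allow_reset, bool purge_decommits) {
  if (purge_decommits) {
    bool needs_recommit = true;
    stub_decommit(p, size, &needs_recommit);
    return needs_recommit;
  }
  if (allow_reset) { stub_reset(p, size); }  // e.g. an MADV_FREE-style reset
  return false;                              // still committed: no recommit needed
}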
@@ -530,8 +519,8 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
// either resets or decommits memory, returns true if the memory needs // either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on. // to be recommitted if it is to be re-used later on.
bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) { bool _mi_os_purge(void* p, size_t size) {
return _mi_os_purge_ex(p, size, true, stats); return _mi_os_purge_ex(p, size, true);
} }
@@ -639,15 +628,15 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// no success, issue a warning and break // no success, issue a warning and break
if (p != NULL) { if (p != NULL) {
_mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr); _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main); mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true);
} }
break; break;
} }
// success, record it // success, record it
page++; // increase before timeout check (see issue #711) page++; // increase before timeout check (see issue #711)
_mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); _mi_stat_increase(&os_stats->committed, MI_HUGE_OS_PAGE_SIZE);
_mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE); _mi_stat_increase(&os_stats->reserved, MI_HUGE_OS_PAGE_SIZE);
// check for timeout // check for timeout
if (max_msecs > 0) { if (max_msecs > 0) {
@@ -681,11 +670,11 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// free every huge page in a range individually (as we allocated per page) // free every huge page in a range individually (as we allocated per page)
// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems. // note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) { static void mi_os_free_huge_os_pages(void* p, size_t size) {
if (p==NULL || size==0) return; if (p==NULL || size==0) return;
uint8_t* base = (uint8_t*)p; uint8_t* base = (uint8_t*)p;
while (size >= MI_HUGE_OS_PAGE_SIZE) { while (size >= MI_HUGE_OS_PAGE_SIZE) {
mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats); mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true);
size -= MI_HUGE_OS_PAGE_SIZE; size -= MI_HUGE_OS_PAGE_SIZE;
base += MI_HUGE_OS_PAGE_SIZE; base += MI_HUGE_OS_PAGE_SIZE;
} }
@@ -714,8 +703,7 @@ size_t _mi_os_numa_node_count_get(void) {
return count; return count;
} }
int _mi_os_numa_node_get(mi_os_tld_t* tld) { int _mi_os_numa_node_get() {
MI_UNUSED(tld);
size_t numa_count = _mi_os_numa_node_count(); size_t numa_count = _mi_os_numa_node_count();
if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0 if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
// never more than the node count and >= 0 // never more than the node count and >= 0
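
Editor's note: the comment above describes the clamp applied to the reported node. A plausible version of it, as an assumption only (the exact mimalloc code may differ):

#include <stddef.h>

// assumed sketch: keep a reported NUMA node inside [0, numa_count)
static int clamp_numa_node(long node, size_t numa_count) {
  if (numa_count <= 1) return 0;            // single-node fast path, as above
  if (node < 0) node = 0;
  return (int)((size_t)node % numa_count);  // never more than the node count and >= 0
}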

View file

@@ -276,7 +276,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
mi_assert_internal(mi_heap_contains_queue(heap, pq)); mi_assert_internal(mi_heap_contains_queue(heap, pq));
mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_OBJ_SIZE_MAX || block_size == pq->block_size); mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_OBJ_SIZE_MAX || block_size == pq->block_size);
#endif #endif
mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os); mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments);
if (page == NULL) { if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue) // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
return NULL; return NULL;

View file

@@ -55,11 +55,11 @@ static mi_segmap_part_t* mi_segment_map_index_of(const mi_segment_t* segment, bo
if (part == NULL) { if (part == NULL) {
if (!create_on_demand) return NULL; if (!create_on_demand) return NULL;
mi_memid_t memid; mi_memid_t memid;
part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid, NULL); part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid);
if (part == NULL) return NULL; if (part == NULL) return NULL;
mi_segmap_part_t* expected = NULL; mi_segmap_part_t* expected = NULL;
if (!mi_atomic_cas_ptr_strong_release(mi_segmap_part_t, &mi_segment_map[segindex], &expected, part)) { if (!mi_atomic_cas_ptr_strong_release(mi_segmap_part_t, &mi_segment_map[segindex], &expected, part)) {
_mi_os_free(part, sizeof(mi_segmap_part_t), memid, NULL); _mi_os_free(part, sizeof(mi_segmap_part_t), memid);
part = expected; part = expected;
if (part == NULL) return NULL; if (part == NULL) return NULL;
} }
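
Editor's note: the segment-map hunk keeps the same allocate/CAS/free-on-race shape while dropping the stats argument. Generically, the publish pattern looks like this (C11 atomics with a stand-in allocator, not the mimalloc source):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct part_s { int data; } part_t;

// Install a lazily created part into `slot`; if another thread beats us to
// it, free our copy and adopt the winner's.
static part_t* get_or_create(_Atomic(part_t*)* slot) {
  part_t* part = atomic_load_explicit(slot, memory_order_acquire);
  if (part != NULL) return part;
  part = (part_t*)calloc(1, sizeof(part_t));   // stand-in for _mi_os_alloc
  if (part == NULL) return NULL;
  part_t* expected = NULL;
  if (!atomic_compare_exchange_strong(slot, &expected, part)) {
    free(part);                                // lost the race: stand-in for _mi_os_free
    part = expected;                           // use the part that won
  }
  return part;
}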

View file

@@ -189,7 +189,7 @@ static void mi_segment_protect_range(void* p, size_t size, bool protect) {
} }
} }
static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t* tld) { static void mi_segment_protect(mi_segment_t* segment, bool protect) {
// add/remove guard pages // add/remove guard pages
if (MI_SECURE != 0) { if (MI_SECURE != 0) {
// in secure mode, we set up a protected page in between the segment info and the page data // in secure mode, we set up a protected page in between the segment info and the page data
@@ -207,7 +207,7 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
if (protect && !segment->memid.initially_committed) { if (protect && !segment->memid.initially_committed) {
if (protect) { if (protect) {
// ensure secure page is committed // ensure secure page is committed
if (_mi_os_commit(start, os_psize, NULL, tld->stats)) { // if this fails that is ok (as it is an unaccessible page) if (_mi_os_commit(start, os_psize, NULL)) { // if this fails that is ok (as it is an unaccessible page)
mi_segment_protect_range(start, os_psize, protect); mi_segment_protect_range(start, os_psize, protect);
} }
} }
@@ -241,23 +241,23 @@ static void mi_page_purge(mi_segment_t* segment, mi_page_t* page, mi_segments_tl
if (!segment->allow_purge) return; if (!segment->allow_purge) return;
mi_assert_internal(page->used == 0); mi_assert_internal(page->used == 0);
mi_assert_internal(page->free == NULL); mi_assert_internal(page->free == NULL);
mi_assert_expensive(!mi_pages_purge_contains(page, tld)); mi_assert_expensive(!mi_pages_purge_contains(page, tld)); MI_UNUSED(tld);
size_t psize; size_t psize;
void* start = mi_segment_raw_page_start(segment, page, &psize); void* start = mi_segment_raw_page_start(segment, page, &psize);
const bool needs_recommit = _mi_os_purge(start, psize, tld->stats); const bool needs_recommit = _mi_os_purge(start, psize);
if (needs_recommit) { page->is_committed = false; } if (needs_recommit) { page->is_committed = false; }
} }
static bool mi_page_ensure_committed(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) { static bool mi_page_ensure_committed(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
if (page->is_committed) return true; if (page->is_committed) return true;
mi_assert_internal(segment->allow_decommit); mi_assert_internal(segment->allow_decommit);
mi_assert_expensive(!mi_pages_purge_contains(page, tld)); mi_assert_expensive(!mi_pages_purge_contains(page, tld)); MI_UNUSED(tld);
size_t psize; size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize); uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false; bool is_zero = false;
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0); const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
bool ok = _mi_os_commit(start, psize + gsize, &is_zero, tld->stats); bool ok = _mi_os_commit(start, psize + gsize, &is_zero);
if (!ok) return false; // failed to commit! if (!ok) return false; // failed to commit!
page->is_committed = true; page->is_committed = true;
page->used = 0; page->used = 0;
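
Editor's note: `mi_page_purge` and `mi_page_ensure_committed` form a pair: purging may decommit a page (recorded in `is_committed`), and the next allocation lazily commits it again. A simplified sketch of that handshake, with stub OS calls instead of mimalloc's:

#include <stdbool.h>
#include <stddef.h>

typedef struct { bool is_committed; } page_t;

// stubs standing in for _mi_os_purge / _mi_os_commit
static bool stub_os_purge(void* start, size_t size) { (void)start; (void)size; return true; }
static bool stub_os_commit(void* start, size_t size, bool* is_zero) {
  (void)start; (void)size; if (is_zero) { *is_zero = false; } return true;
}

static void page_purge(page_t* page, void* start, size_t size) {
  const bool needs_recommit = stub_os_purge(start, size);  // true if decommitted
  if (needs_recommit) { page->is_committed = false; }
}

static bool page_ensure_committed(page_t* page, void* start, size_t size) {
  if (page->is_committed) return true;   // fast path: nothing to do
  bool is_zero = false;
  if (!stub_os_commit(start, size, &is_zero)) return false;  // commit failed
  page->is_committed = true;
  return true;
}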
@@ -502,7 +502,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
if (MI_SECURE != 0) { if (MI_SECURE != 0) {
mi_assert_internal(!segment->memid.is_pinned); mi_assert_internal(!segment->memid.is_pinned);
mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set mi_segment_protect(segment, false); // ensure no more guard pages are set
} }
bool fully_committed = true; bool fully_committed = true;
@@ -516,7 +516,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
MI_UNUSED(fully_committed); MI_UNUSED(fully_committed);
mi_assert_internal((fully_committed && committed_size == segment_size) || (!fully_committed && committed_size < segment_size)); mi_assert_internal((fully_committed && committed_size == segment_size) || (!fully_committed && committed_size < segment_size));
_mi_arena_free(segment, segment_size, committed_size, segment->memid, tld->stats); _mi_arena_free(segment, segment_size, committed_size, segment->memid);
} }
// called from `heap_collect`. // called from `heap_collect`.
@@ -537,7 +537,7 @@ void _mi_segments_collect(bool force, mi_segments_tld_t* tld) {
static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignment, mi_arena_id_t req_arena_id, static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignment, mi_arena_id_t req_arena_id,
size_t pre_size, size_t info_size, bool commit, size_t segment_size, size_t pre_size, size_t info_size, bool commit, size_t segment_size,
mi_segments_tld_t* tld, mi_os_tld_t* tld_os) mi_segments_tld_t* tld)
{ {
mi_memid_t memid; mi_memid_t memid;
bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
@@ -549,7 +549,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
segment_size = segment_size + (align_offset - pre_size); // adjust the segment size segment_size = segment_size + (align_offset - pre_size); // adjust the segment size
} }
mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, tld_os); mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid);
if (segment == NULL) { if (segment == NULL) {
return NULL; // failed to allocate return NULL; // failed to allocate
} }
@@ -557,10 +557,10 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
if (!memid.initially_committed) { if (!memid.initially_committed) {
// ensure the initial info is committed // ensure the initial info is committed
mi_assert_internal(!memid.is_pinned); mi_assert_internal(!memid.is_pinned);
bool ok = _mi_os_commit(segment, pre_size, NULL, tld_os->stats); bool ok = _mi_os_commit(segment, pre_size, NULL);
if (!ok) { if (!ok) {
// commit failed; we cannot touch the memory: free the segment directly and return `NULL` // commit failed; we cannot touch the memory: free the segment directly and return `NULL`
_mi_arena_free(segment, segment_size, 0, memid, tld_os->stats); _mi_arena_free(segment, segment_size, 0, memid);
return NULL; return NULL;
} }
} }
@@ -578,7 +578,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` . // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, size_t page_alignment, static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, size_t page_alignment,
mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) mi_arena_id_t req_arena_id, mi_segments_tld_t* tld)
{ {
// required is only > 0 for huge page allocations // required is only > 0 for huge page allocations
mi_assert_internal((required > 0 && page_kind > MI_PAGE_LARGE)|| (required==0 && page_kind <= MI_PAGE_LARGE)); mi_assert_internal((required > 0 && page_kind > MI_PAGE_LARGE)|| (required==0 && page_kind <= MI_PAGE_LARGE));
@@ -610,7 +610,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
const bool init_commit = eager; // || (page_kind >= MI_PAGE_LARGE); const bool init_commit = eager; // || (page_kind >= MI_PAGE_LARGE);
// Allocate the segment from the OS (segment_size can change due to alignment) // Allocate the segment from the OS (segment_size can change due to alignment)
mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, req_arena_id, pre_size, info_size, init_commit, init_segment_size, tld, os_tld); mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, req_arena_id, pre_size, info_size, init_commit, init_segment_size, tld);
if (segment == NULL) return NULL; if (segment == NULL) return NULL;
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
mi_assert_internal(segment->memid.is_pinned ? segment->memid.initially_committed : true); mi_assert_internal(segment->memid.is_pinned ? segment->memid.initially_committed : true);
@@ -638,7 +638,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
segment->cookie = _mi_ptr_cookie(segment); segment->cookie = _mi_ptr_cookie(segment);
// set protection // set protection
mi_segment_protect(segment, true, tld->os); mi_segment_protect(segment, true);
// insert in free lists for small and medium pages // insert in free lists for small and medium pages
if (page_kind <= MI_PAGE_MEDIUM) { if (page_kind <= MI_PAGE_MEDIUM) {
@@ -1142,7 +1142,7 @@ void mi_collect_reduce(size_t target_size) mi_attr_noexcept {
Reclaim or allocate Reclaim or allocate
----------------------------------------------------------- */ ----------------------------------------------------------- */
static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld)
{ {
mi_assert_internal(page_kind <= MI_PAGE_LARGE); mi_assert_internal(page_kind <= MI_PAGE_LARGE);
mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX); mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
@@ -1164,7 +1164,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
return segment; return segment;
} }
// 2. otherwise allocate a fresh segment // 2. otherwise allocate a fresh segment
return mi_segment_alloc(0, page_kind, page_shift, 0, heap->arena_id, tld, os_tld); return mi_segment_alloc(0, page_kind, page_shift, 0, heap->arena_id, tld);
} }
@@ -1203,11 +1203,11 @@ static mi_page_t* mi_segment_page_try_alloc_in_queue(mi_heap_t* heap, mi_page_ki
return NULL; return NULL;
} }
static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld) {
mi_page_t* page = mi_segment_page_try_alloc_in_queue(heap, kind, tld); mi_page_t* page = mi_segment_page_try_alloc_in_queue(heap, kind, tld);
if (page == NULL) { if (page == NULL) {
// possibly allocate or reclaim a fresh segment // possibly allocate or reclaim a fresh segment
mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld); mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld);
if (segment == NULL) return NULL; // return NULL if out-of-memory (or reclaimed) if (segment == NULL) return NULL; // return NULL if out-of-memory (or reclaimed)
mi_assert_internal(segment->page_kind==kind); mi_assert_internal(segment->page_kind==kind);
mi_assert_internal(segment->used < segment->capacity); mi_assert_internal(segment->used < segment->capacity);
@@ -1222,20 +1222,20 @@ static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_p
return page; return page;
} }
static mi_page_t* mi_segment_small_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { static mi_page_t* mi_segment_small_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld) {
return mi_segment_page_alloc(heap, block_size, MI_PAGE_SMALL,MI_SMALL_PAGE_SHIFT,tld,os_tld); return mi_segment_page_alloc(heap, block_size, MI_PAGE_SMALL,MI_SMALL_PAGE_SHIFT,tld);
} }
static mi_page_t* mi_segment_medium_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { static mi_page_t* mi_segment_medium_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld) {
return mi_segment_page_alloc(heap, block_size, MI_PAGE_MEDIUM, MI_MEDIUM_PAGE_SHIFT, tld, os_tld); return mi_segment_page_alloc(heap, block_size, MI_PAGE_MEDIUM, MI_MEDIUM_PAGE_SHIFT, tld);
} }
/* ----------------------------------------------------------- /* -----------------------------------------------------------
large page allocation large page allocation
----------------------------------------------------------- */ ----------------------------------------------------------- */
static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld) {
mi_segment_t* segment = mi_segment_reclaim_or_alloc(heap,block_size,MI_PAGE_LARGE,MI_LARGE_PAGE_SHIFT,tld,os_tld); mi_segment_t* segment = mi_segment_reclaim_or_alloc(heap,block_size,MI_PAGE_LARGE,MI_LARGE_PAGE_SHIFT,tld);
if (segment == NULL) return NULL; if (segment == NULL) return NULL;
mi_page_t* page = mi_segment_find_free(segment, tld); mi_page_t* page = mi_segment_find_free(segment, tld);
mi_assert_internal(page != NULL); mi_assert_internal(page != NULL);
@@ -1245,9 +1245,9 @@ static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size
return page; return page;
} }
static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld)
{ {
mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT + 1, page_alignment, req_arena_id, tld, os_tld); mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT + 1, page_alignment, req_arena_id, tld);
if (segment == NULL) return NULL; if (segment == NULL) return NULL;
mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size); mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size);
#if MI_HUGE_PAGE_ABANDON #if MI_HUGE_PAGE_ABANDON
@@ -1271,7 +1271,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
mi_assert_internal(psize - (aligned_p - start) >= size); mi_assert_internal(psize - (aligned_p - start) >= size);
uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
ptrdiff_t decommit_size = aligned_p - decommit_start; ptrdiff_t decommit_size = aligned_p - decommit_start;
_mi_os_reset(decommit_start, decommit_size, os_tld->stats); // do not decommit as it may be in a region _mi_os_reset(decommit_start, decommit_size); // do not decommit as it may be in a region
} }
return page; return page;
@@ -1318,7 +1318,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
if (usize > sizeof(mi_block_t)) { if (usize > sizeof(mi_block_t)) {
usize = usize - sizeof(mi_block_t); usize = usize - sizeof(mi_block_t);
uint8_t* p = (uint8_t*)block + sizeof(mi_block_t); uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
_mi_os_reset(p, usize, &_mi_stats_main); _mi_os_reset(p, usize);
} }
} }
} }
@@ -1328,26 +1328,26 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
Page allocation Page allocation
----------------------------------------------------------- */ ----------------------------------------------------------- */
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld) {
mi_page_t* page; mi_page_t* page;
if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) { if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
mi_assert_internal(_mi_is_power_of_two(page_alignment)); mi_assert_internal(_mi_is_power_of_two(page_alignment));
mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE); mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
//mi_assert_internal((MI_SEGMENT_SIZE % page_alignment) == 0); //mi_assert_internal((MI_SEGMENT_SIZE % page_alignment) == 0);
if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; } if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld, os_tld); page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld);
} }
else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) { else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
page = mi_segment_small_page_alloc(heap, block_size, tld, os_tld); page = mi_segment_small_page_alloc(heap, block_size, tld);
} }
else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) { else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
page = mi_segment_medium_page_alloc(heap, block_size, tld, os_tld); page = mi_segment_medium_page_alloc(heap, block_size, tld);
} }
else if (block_size <= MI_LARGE_OBJ_SIZE_MAX /* || mi_is_good_fit(block_size, MI_LARGE_PAGE_SIZE - sizeof(mi_segment_t)) */ ) { else if (block_size <= MI_LARGE_OBJ_SIZE_MAX /* || mi_is_good_fit(block_size, MI_LARGE_PAGE_SIZE - sizeof(mi_segment_t)) */ ) {
page = mi_segment_large_page_alloc(heap, block_size, tld, os_tld); page = mi_segment_large_page_alloc(heap, block_size, tld);
} }
else { else {
page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld, os_tld); page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld);
} }
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size); mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
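
Editor's note: the dispatch in `_mi_segment_page_alloc` is unchanged apart from the dropped `os_tld`: an oversized alignment request forces a huge segment, otherwise the block size selects the page kind. Condensed into a sketch (thresholds passed as parameters; the real code uses mimalloc's constants directly):

#include <stddef.h>

typedef enum { PAGE_SMALL, PAGE_MEDIUM, PAGE_LARGE, PAGE_HUGE } page_kind_t;

static page_kind_t pick_page_kind(size_t block_size, size_t page_alignment,
                                  size_t align_max,   // MI_BLOCK_ALIGNMENT_MAX
                                  size_t small_max,   // MI_SMALL_OBJ_SIZE_MAX
                                  size_t medium_max,  // MI_MEDIUM_OBJ_SIZE_MAX
                                  size_t large_max) { // MI_LARGE_OBJ_SIZE_MAX
  if (page_alignment > align_max) return PAGE_HUGE;   // gets its own aligned segment
  if (block_size <= small_max)    return PAGE_SMALL;
  if (block_size <= medium_max)   return PAGE_MEDIUM;
  if (block_size <= large_max)    return PAGE_LARGE;
  return PAGE_HUGE;
}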

View file

@@ -36,7 +36,7 @@ static int ITER = 400;
static int THREADS = 8; static int THREADS = 8;
static int SCALE = 25; static int SCALE = 25;
static int ITER = 20; static int ITER = 20;
#elif defined(xMI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits #elif defined(MI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
static int THREADS = 8; static int THREADS = 8;
static int SCALE = 10; static int SCALE = 10;
static int ITER = 10; static int ITER = 10;
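
Editor's note: the final hunk is a one-character fix. Prefixing a macro name (`xMI_GUARDED`) is a common way to park an `#elif` branch, since the prefixed symbol is never defined; dropping the `x` re-enables the reduced stress-test parameters in MI_GUARDED builds. In general:

// parked:  #elif defined(xMI_GUARDED)   // "xMI_GUARDED" is never defined, branch is dead
// live:    #elif defined(MI_GUARDED)    // branch is selected again in MI_GUARDED builds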