diff --git a/ide/vs2022/mimalloc-override.vcxproj b/ide/vs2022/mimalloc-override.vcxproj
index 4383d886..32bd97d1 100644
--- a/ide/vs2022/mimalloc-override.vcxproj
+++ b/ide/vs2022/mimalloc-override.vcxproj
@@ -265,7 +265,6 @@
-
diff --git a/ide/vs2022/mimalloc-override.vcxproj.filters b/ide/vs2022/mimalloc-override.vcxproj.filters
index a9f66c35..6656c16d 100644
--- a/ide/vs2022/mimalloc-override.vcxproj.filters
+++ b/ide/vs2022/mimalloc-override.vcxproj.filters
@@ -46,9 +46,6 @@
Sources
-
- Sources
-
Sources
diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index 3dd7326f..41fe0b46 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -214,6 +214,12 @@
+
+ true
+ true
+ true
+ true
+
false
@@ -258,7 +264,6 @@
-
diff --git a/ide/vs2022/mimalloc.vcxproj.filters b/ide/vs2022/mimalloc.vcxproj.filters
index 2eed7e90..237ef1ed 100644
--- a/ide/vs2022/mimalloc.vcxproj.filters
+++ b/ide/vs2022/mimalloc.vcxproj.filters
@@ -58,6 +58,9 @@
Sources
+
+ Sources
+
@@ -87,9 +90,6 @@
Headers
-
- Headers
-
Headers
diff --git a/include/mimalloc/bits.h b/include/mimalloc/bits.h
index ad7ea3e6..d6695a00 100644
--- a/include/mimalloc/bits.h
+++ b/include/mimalloc/bits.h
@@ -282,7 +282,7 @@ static inline size_t mi_rotr(size_t x, size_t r) {
#elif (mi_has_builtin(rotateright32) && MI_SIZE_BITS==32)
return mi_builtin(rotateright32)(x,r);
#elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64 || MI_ARCH_ARM32)
- #if MI_BFIELD_SIZE==4
+ #if MI_SIZE_BITS==32
return _lrotr(x,(int)r);
#else
return _rotr64(x,(int)r);
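
For context: the bits.h fix above keys the MSVC rotate intrinsic on MI_SIZE_BITS, so _lrotr is used only when size_t itself is 32 bits wide rather than depending on the bitmap field size. A portable rotate-right doing the same job, as a minimal sketch and not mimalloc's code:

    #include <stddef.h>
    #include <limits.h>

    // portable rotate-right over size_t; shift count reduced modulo the word width
    static inline size_t rotr_portable(size_t x, size_t r) {
      const unsigned bits = (unsigned)(sizeof(size_t) * CHAR_BIT);  // 32 or 64
      r &= (bits - 1);
      if (r == 0) return x;                 // avoid a full-width shift (undefined behavior)
      return (x >> r) | (x << (bits - r));
    }
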
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index d60b0c15..515acfc1 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -140,6 +140,8 @@ void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
mi_page_t* _mi_arena_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment);
void _mi_arena_page_abandon(mi_page_t* page, mi_tld_t* tld);
void _mi_arena_page_free(mi_page_t* page, mi_tld_t* tld);
+bool _mi_arena_try_reclaim(mi_heap_t* heap, mi_page_t* page);
+void _mi_arena_reclaim_all_abandoned(mi_heap_t* heap);
void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);
@@ -567,11 +569,11 @@ static inline bool mi_page_mostly_used(const mi_page_t* page) {
return (page->reserved - page->used <= frac);
}
-static inline bool mi_page_is_abandoned(mi_page_t* page) {
+static inline bool mi_page_is_abandoned(const mi_page_t* page) {
return (mi_page_thread_id(page) == 0);
}
-static inline bool mi_page_is_huge(mi_page_t* page) {
+static inline bool mi_page_is_huge(const mi_page_t* page) {
return (page->block_size > MI_LARGE_MAX_OBJ_SIZE);
}
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index b4da4ded..43dc2d36 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -16,12 +16,12 @@ terms of the MIT license. A copy of the license can be found in the file
// ------------------------------------------------------
static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
- // objects up to `MI_MAX_ALIGN_GUARANTEE` are allocated aligned to their size (see `segment.c:_mi_segment_page_start`).
+ // objects up to `MI_PAGE_ALIGN` are allocated aligned to their size
mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
if (alignment > size) return false;
if (alignment <= MI_MAX_ALIGN_SIZE) return true;
const size_t bsize = mi_good_size(size);
- return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
+ return (bsize <= MI_PAGE_ALIGN && (bsize & (alignment-1)) == 0);
}
#if MI_GUARDED
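
For context: the alloc-aligned.c change above bounds "natural" alignment by MI_PAGE_ALIGN instead of MI_MAX_ALIGN_GUARANTEE. The idea is that no special alignment path is needed when the rounded-up size is a multiple of the requested alignment and stays below the size up to which blocks are guaranteed to start size-aligned. A minimal sketch of that predicate, with assumed constants rather than mimalloc's:

    #include <stdbool.h>
    #include <stddef.h>

    #define ALIGN_GUARANTEE  ((size_t)(64*1024))  // assumed guarantee bound
    #define MAX_ALIGN_SIZE   ((size_t)16)         // alignment every allocation gets anyway

    static bool is_naturally_aligned(size_t rounded_size, size_t alignment) {
      // caller ensures alignment is a nonzero power of two
      if (alignment > rounded_size)    return false;  // size cannot carry the alignment
      if (alignment <= MAX_ALIGN_SIZE) return true;   // small alignments always hold
      return rounded_size <= ALIGN_GUARANTEE          // within the guaranteed range...
          && (rounded_size & (alignment - 1)) == 0;   // ...and size is a multiple of it
    }
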
diff --git a/src/arena.c b/src/arena.c
index c9f8400b..0db8acf3 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -298,7 +298,7 @@ static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_id_t req_are
for (size_t i = 0; i < _max_arena; i++) { \
size_t _idx = i + _start; \
if (_idx >= _max_arena) { _idx -= _max_arena; } \
- const mi_arena_id_t var_arena_id = mi_arena_id_create(_idx); \
+ const mi_arena_id_t var_arena_id = mi_arena_id_create(_idx); MI_UNUSED(var_arena_id);\
mi_arena_t* const var_arena = mi_arena_from_index(_idx); \
if (mi_arena_is_suitable(var_arena,req_arena_id,subproc,-1 /* todo: numa node */,allow_large)) \
{
@@ -341,6 +341,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(
mi_assert(block_count <= MI_ARENA_MAX_OBJ_BLOCKS);
mi_assert(alignment <= MI_ARENA_BLOCK_ALIGN);
+  // try to find free blocks in the arenas
void* p = mi_arena_try_find_free(block_count, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
@@ -354,6 +355,8 @@ static mi_decl_noinline void* mi_arena_try_alloc(
if (p != NULL) return p;
}
}
+
+ return NULL;
}
// Allocate from the OS (if allowed)
@@ -445,7 +448,7 @@ static mi_page_t* mi_arena_page_try_find_abandoned(size_t block_count, size_t bl
}
}
mi_forall_arenas_end();
- return false;
+ return NULL;
}
static mi_page_t* mi_arena_page_alloc_fresh(size_t block_count, size_t block_size, mi_arena_id_t req_arena_id, mi_tld_t* tld)
@@ -455,7 +458,7 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t block_count, size_t block_siz
const size_t alignment = MI_ARENA_BLOCK_ALIGN;
// try to allocate from free space in the arenas
- mi_memid_t memid;
+ mi_memid_t memid = _mi_memid_none();
mi_page_t* page = NULL;
if (_mi_option_get_fast(mi_option_disallow_arena_alloc)==0 && req_arena_id == _mi_arena_id_none()) {
page = (mi_page_t*)mi_arena_try_alloc(block_count, alignment, commit, allow_large, req_arena_id, &memid, tld);
@@ -472,8 +475,8 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t block_count, size_t block_siz
_mi_memzero_aligned(page, sizeof(*page));
mi_assert(MI_PAGE_INFO_SIZE >= _mi_align_up(sizeof(*page), MI_PAGE_ALIGN));
const size_t reserved = (mi_size_of_blocks(block_count) - MI_PAGE_INFO_SIZE) / block_size;
- mi_assert_internal(reserved > 0 && reserved < UINT16_MAX);
- page->reserved = reserved;
+ mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
+ page->reserved = (uint16_t)reserved;
page->page_start = (uint8_t*)page + MI_PAGE_INFO_SIZE;
page->block_size = block_size;
page->memid = memid;
@@ -493,7 +496,7 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t block_count, size_t block_siz
// block_count: arena block count for the page
// block size : page block size
static mi_page_t* mi_arena_page_allocN(mi_heap_t* heap, size_t block_count, size_t block_size) {
- const size_t req_arena_id = heap->arena_id;
+ const mi_arena_id_t req_arena_id = heap->arena_id;
mi_tld_t* const tld = heap->tld;
// 1. look for an abandoned page
@@ -515,6 +518,7 @@ static mi_page_t* mi_arena_page_allocN(mi_heap_t* heap, size_t block_count, size
static mi_page_t* mi_singleton_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment) {
+ MI_UNUSED(heap); MI_UNUSED(block_size); MI_UNUSED(page_alignment);
_mi_error_message(EINVAL, "singleton page is not yet implemented\n");
return NULL;
}
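
For context on the arena.c hunks above: the memid is now value-initialized, the assertion accepts reserved == UINT16_MAX, and the store into the 16-bit page->reserved field is an explicit narrowing cast. A sketch of that computation with hypothetical names:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    // hypothetical helper: how many blocks fit in a page area after its header
    static uint16_t page_reserved_count(size_t area_size, size_t info_size, size_t block_size) {
      const size_t reserved = (area_size - info_size) / block_size;
      assert(reserved > 0 && reserved <= UINT16_MAX);  // must fit the 16-bit field
      return (uint16_t)reserved;                       // explicit narrowing, as in the patch
    }
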
diff --git a/src/bitmap.c b/src/bitmap.c
index 9faa9ae9..24c0d9c9 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -149,7 +149,7 @@ static bool mi_bitmap_chunk_xsetN(mi_bit_t set, mi_bitmap_chunk_t* chunk, size_t
mi_assert_internal(idx + m <= MI_BFIELD_BITS);
mi_assert_internal(field < MI_BITMAP_CHUNK_FIELDS);
const size_t mask = (m == MI_BFIELD_BITS ? ~MI_ZU(0) : ((MI_ZU(1)<<m)-1)<<idx);
mi_bfield_atomic_xset_mask(set, &chunk->bfields[field], mask, &already_xset);
all_already_xset = all_already_xset && already_xset;
// next field
@@ -268,7 +268,6 @@ static inline bool mi_bitmap_chunk_find_and_try_clear(mi_bitmap_chunk_t* chunk,
// try again
}
#else
- size_t idx;
for(int i = 0; i < MI_BITMAP_CHUNK_FIELDS; i++) {
size_t idx;
if mi_unlikely(mi_bfield_find_least_bit(chunk->bfields[i],&idx)) { // find least 1-bit
@@ -306,7 +305,6 @@ static inline bool mi_bitmap_chunk_find_and_try_clear8(mi_bitmap_chunk_t* chunk,
// try again
}
#else
- size_t idx;
for(int i = 0; i < MI_BITMAP_CHUNK_FIELDS; i++) {
const mi_bfield_t x = chunk->bfields[i];
// has_set8 has low bit in each byte set if the byte in x == 0xFF
@@ -374,7 +372,7 @@ static inline bool mi_bitmap_chunk_find_and_try_clearN(mi_bitmap_chunk_t* chunk,
// are all bits in a bitmap chunk set?
-static bool mi_bitmap_chunk_all_are_set(mi_bitmap_chunk_t* chunk) {
+static inline bool mi_bitmap_chunk_all_are_set(mi_bitmap_chunk_t* chunk) {
#if defined(__AVX2__) && (MI_BITMAP_CHUNK_BITS==256)
const __m256i vec = _mm256_load_si256((const __m256i*)chunk->bfields);
return _mm256_test_all_ones(vec);
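
For context: mi_bitmap_chunk_all_are_set is now inline; with AVX2 the whole 256-bit chunk is tested in one _mm256_test_all_ones. A portable equivalent of the same check, as a sketch (field width and count are assumptions, and the actual non-AVX2 path may differ):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CHUNK_FIELDS 4   // e.g. 4 x 64-bit fields = 256 bits per chunk

    static inline bool chunk_all_are_set(const uint64_t bfields[CHUNK_FIELDS]) {
      for (size_t i = 0; i < CHUNK_FIELDS; i++) {
        if (bfields[i] != UINT64_MAX) return false;  // any clear bit fails the check
      }
      return true;
    }
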
diff --git a/src/free.c b/src/free.c
index e1cc9276..224070fe 100644
--- a/src/free.c
+++ b/src/free.c
@@ -233,7 +233,7 @@ static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block
mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initialized (issue #944))
{
// the page is abandoned, try to reclaim it into our heap
- if (_mi_heap_try_reclaim(mi_heap_get_default(), page)) { // TODO: avoid putting it in the full free queue
+ if (_mi_arena_try_reclaim(mi_heap_get_default(), page)) { // TODO: avoid putting it in the full free queue
mi_assert_internal(_mi_thread_id() == mi_page_thread_id(page));
// mi_assert_internal(mi_heap_get_default()->tld->subproc == page->subproc);
mi_free(block); // recursively free as now it will be a local free in our heap
diff --git a/src/heap.c b/src/heap.c
index e4955ba7..8ee66055 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -54,9 +54,7 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
MI_UNUSED(arg1);
MI_UNUSED(arg2);
MI_UNUSED(pq);
- mi_assert_internal(mi_page_heap(page) == heap);
- mi_segment_t* segment = _mi_page_segment(page);
- mi_assert_internal(segment->thread_id == heap->thread_id);
+ mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_expensive(_mi_page_is_valid(page));
return true;
}
@@ -135,7 +133,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
// the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
// if all memory is freed by now, all segments should be freed.
// note: this only collects in the current subprocess
- _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
+ _mi_arena_reclaim_all_abandoned(heap);
}
// if abandoning, mark all pages to no longer add to delayed_free
@@ -155,7 +153,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
// collect segments (purge pages, this can be expensive so don't force on abandonment)
- _mi_segments_collect(collect == MI_FORCE, &heap->tld->segments);
+ // _mi_segments_collect(collect == MI_FORCE, &heap->tld->segments);
// if forced, collect thread data cache on program-exit (or shared library unload)
if (force && is_main_thread && mi_heap_is_backing(heap)) {
@@ -320,13 +318,13 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
// stats
const size_t bsize = mi_page_block_size(page);
- if (bsize > MI_LARGE_OBJ_SIZE_MAX) {
+ if (bsize > MI_LARGE_MAX_OBJ_SIZE) {
mi_heap_stat_decrease(heap, huge, bsize);
}
#if (MI_STAT)
_mi_page_free_collect(page, false); // update used count
const size_t inuse = page->used;
- if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ if (bsize <= MI_LARGE_MAX_OBJ_SIZE) {
mi_heap_stat_decrease(heap, normal, bsize * inuse);
#if (MI_STAT>1)
mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
@@ -343,7 +341,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
// mi_page_free(page,false);
page->next = NULL;
page->prev = NULL;
- _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments);
+ _mi_arena_page_free(page,heap->tld);
return true; // keep going
}
@@ -483,11 +481,8 @@ mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
// static since it is not thread safe to access heaps from other threads.
static mi_heap_t* mi_heap_of_block(const void* p) {
if (p == NULL) return NULL;
- mi_segment_t* segment = _mi_ptr_segment(p);
- bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
- mi_assert_internal(valid);
- if mi_unlikely(!valid) return NULL;
- return mi_page_heap(_mi_segment_page_of(segment,p));
+ mi_page_t* page = _mi_ptr_page(p); // TODO: check pointer validity?
+ return mi_page_heap(page);
}
bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
@@ -562,7 +557,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_
if (page->used == 0) return true;
size_t psize;
- uint8_t* const pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
+ uint8_t* const pstart = mi_page_area(page, &psize);
mi_heap_t* const heap = mi_page_heap(page);
const size_t bsize = mi_page_block_size(page);
const size_t ubsize = mi_page_usable_block_size(page); // without padding
diff --git a/src/init.c b/src/init.c
index 2544f097..215d6be8 100644
--- a/src/init.c
+++ b/src/init.c
@@ -14,8 +14,6 @@ terms of the MIT license. A copy of the license can be found in the file
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
- 0,
- false, false, false, false,
0, // capacity
0, // reserved capacity
{ 0 }, // flags
@@ -33,10 +31,9 @@ const mi_page_t _mi_page_empty = {
#endif
MI_ATOMIC_VAR_INIT(0), // xthread_free
MI_ATOMIC_VAR_INIT(0), // xheap
- NULL, NULL
- #if MI_INTPTR_SIZE==4
- , { NULL }
- #endif
+ MI_ATOMIC_VAR_INIT(0), // xthread_id
+ NULL, NULL, // next, prev
+ { { NULL, 0}, false, false, false, MI_MEM_NONE } // memid
};
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
@@ -63,8 +60,8 @@ const mi_page_t _mi_page_empty = {
QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
- QNULL(MI_LARGE_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
- QNULL(MI_LARGE_OBJ_WSIZE_MAX + 2) /* Full queue */ }
+ QNULL(MI_LARGE_MAX_OBJ_WSIZE + 1 /* 655360, Huge queue */), \
+ QNULL(MI_LARGE_MAX_OBJ_WSIZE + 2) /* Full queue */ }
#define MI_STAT_COUNT_NULL() {0,0,0,0}
@@ -82,8 +79,6 @@ const mi_page_t _mi_page_empty = {
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
- MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
- MI_STAT_COUNT_NULL(), \
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
@@ -101,10 +96,10 @@ const mi_page_t _mi_page_empty = {
mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
NULL,
- MI_ATOMIC_VAR_INIT(NULL),
- 0, // tid
+ MI_ATOMIC_VAR_INIT(NULL), // thread delayed free
+ 0, // thread_id
+ 0, // arena_id
0, // cookie
- 0, // arena id
{ 0, 0 }, // keys
{ {0}, {0}, 0, true }, // random
0, // page count
@@ -124,17 +119,6 @@ mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
return _mi_prim_thread_id();
}
-// Thread sequence number
-static _Atomic(size_t) mi_tcount;
-static mi_decl_thread size_t mi_tseq;
-
-size_t _mi_thread_seq_id(void) mi_attr_noexcept {
- size_t tseq = mi_tseq;
- if (tseq == 0) {
- mi_tseq = tseq = mi_atomic_add_acq_rel(&mi_tcount,1);
- }
- return tseq;
-}
// the thread-local default heap for allocation
mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
@@ -146,12 +130,10 @@ static mi_decl_cache_align mi_subproc_t mi_subproc_default;
static mi_decl_cache_align mi_tld_t tld_main = {
0, false,
&_mi_heap_main, &_mi_heap_main,
- { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
- 0, 0, 0, 0, 0, &mi_subproc_default,
- &tld_main.stats, &tld_main.os
- }, // segments
+ NULL, // subproc
+ 0, // tseq
{ 0, &tld_main.stats }, // os
- { MI_STATS_NULL } // stats
+ { MI_STATS_NULL } // stats
};
mi_decl_cache_align mi_heap_t _mi_heap_main = {
@@ -287,9 +269,9 @@ void mi_subproc_delete(mi_subproc_id_t subproc_id) {
void mi_subproc_add_current_thread(mi_subproc_id_t subproc_id) {
mi_heap_t* heap = mi_heap_get_default();
if (heap == NULL) return;
- mi_assert(heap->tld->segments.subproc == &mi_subproc_default);
- if (heap->tld->segments.subproc != &mi_subproc_default) return;
- heap->tld->segments.subproc = _mi_subproc_from_id(subproc_id);
+ mi_assert(heap->tld->subproc == &mi_subproc_default);
+ if (heap->tld->subproc != &mi_subproc_default) return;
+ heap->tld->subproc = _mi_subproc_from_id(subproc_id);
}
@@ -405,14 +387,16 @@ static bool _mi_thread_heap_init(void) {
return false;
}
+// Thread sequence number
+static _Atomic(size_t) mi_tcount;
+
// initialize thread local data
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
_mi_memzero_aligned(tld,sizeof(mi_tld_t));
tld->heap_backing = bheap;
tld->heaps = NULL;
- tld->segments.subproc = &mi_subproc_default;
- tld->segments.stats = &tld->stats;
- tld->segments.os = &tld->os;
+ tld->subproc = &mi_subproc_default;
+ tld->tseq = mi_atomic_add_acq_rel(&mi_tcount, 1);
tld->os.stats = &tld->stats;
}
@@ -449,8 +433,7 @@ static bool _mi_thread_heap_done(mi_heap_t* heap) {
_mi_stats_done(&heap->tld->stats);
// free if not the main thread
- if (heap != &_mi_heap_main) {
- mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
+ if (heap != &_mi_heap_main) {
mi_thread_data_free((mi_thread_data_t*)heap);
}
else {
diff --git a/src/os.c b/src/os.c
index 83521766..da41d152 100644
--- a/src/os.c
+++ b/src/os.c
@@ -245,7 +245,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
// note: this is dangerous on Windows as VirtualFree needs the actual base pointer
// this is handled though by having the `base` field in the memid's
*base = p; // remember the base
- p = mi_align_up_ptr(p, alignment);
+ p = _mi_align_up_ptr(p, alignment);
// explicitly commit only the aligned part
if (commit) {
@@ -258,7 +258,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
if (p == NULL) return NULL;
// and selectively unmap parts around the over-allocated area.
- void* aligned_p = mi_align_up_ptr(p, alignment);
+ void* aligned_p = _mi_align_up_ptr(p, alignment);
size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
size_t mid_size = _mi_align_up(size, _mi_os_page_size());
size_t post_size = over_size - pre_size - mid_size;
@@ -316,6 +316,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
}
void* _mi_os_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
+ MI_UNUSED(stats);
void* p = _mi_os_alloc(size, memid, &_mi_stats_main);
if (p == NULL) return NULL;
@@ -373,10 +374,10 @@ static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size,
if (size == 0 || addr == NULL) return NULL;
// page align conservatively within the range
- void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size())
+ void* start = (conservative ? _mi_align_up_ptr(addr, _mi_os_page_size())
: mi_align_down_ptr(addr, _mi_os_page_size()));
void* end = (conservative ? mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size())
- : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size()));
+ : _mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size()));
ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start;
if (diff <= 0) return NULL;
diff --git a/src/page-map.c b/src/page-map.c
index cb527886..d70c3ee6 100644
--- a/src/page-map.c
+++ b/src/page-map.c
@@ -74,7 +74,7 @@ void _mi_page_map_register(mi_page_t* page) {
}
// set the offsets
- for (int i = 0; i < block_count; i++) {
+ for (int i = 0; i < (int)block_count; i++) {
mi_assert_internal(i < 128);
_mi_page_map[idx + i] = (signed char)(-i-1);
}
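
For context: the loop above writes small negative offsets (-i-1) into the byte-sized page map so that a lookup from any interior slice can step back to the slice holding the page header; the cast only fixes the signed/unsigned comparison warning. A self-contained sketch of such an encoding and its decode (illustrative names, not the exact _mi_ptr_page logic):

    #include <stddef.h>
    #include <stdint.h>

    // encode: the i-th entry of a page gets -(i+1), so entry 0 is -1, entry 1 is -2, ...
    static void map_register_page(int8_t* map, size_t idx, size_t slice_count) {
      for (size_t i = 0; i < slice_count; i++) {
        map[idx + i] = (int8_t)(-(int)i - 1);
      }
    }

    // decode: recover the index of the page's first slice from any of its entries
    static size_t map_page_start(const int8_t* map, size_t idx) {
      const int8_t ofs = map[idx];          // negative: -(distance from start) - 1
      return idx - (size_t)(-(int)ofs - 1);
    }
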
diff --git a/src/page-queue.c b/src/page-queue.c
index 0a791adb..c6b19985 100644
--- a/src/page-queue.c
+++ b/src/page-queue.c
@@ -38,15 +38,15 @@ terms of the MIT license. A copy of the license can be found in the file
static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
- return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+sizeof(uintptr_t)));
+ return (pq->block_size == (MI_LARGE_MAX_OBJ_SIZE+sizeof(uintptr_t)));
}
static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
- return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
+ return (pq->block_size == (MI_LARGE_MAX_OBJ_SIZE+(2*sizeof(uintptr_t))));
}
static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
- return (pq->block_size > MI_LARGE_OBJ_SIZE_MAX);
+ return (pq->block_size > MI_LARGE_MAX_OBJ_SIZE);
}
/* -----------------------------------------------------------
@@ -76,7 +76,7 @@ static inline uint8_t mi_bin(size_t size) {
bin = (uint8_t)wsize;
}
#endif
- else if (wsize > MI_LARGE_OBJ_WSIZE_MAX) {
+ else if (wsize > MI_LARGE_MAX_OBJ_WSIZE) {
bin = MI_BIN_HUGE;
}
else {
@@ -113,7 +113,7 @@ size_t _mi_bin_size(uint8_t bin) {
// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
- if (size <= MI_LARGE_OBJ_SIZE_MAX) {
+ if (size <= MI_LARGE_MAX_OBJ_SIZE) {
return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE));
}
else {
diff --git a/src/page.c b/src/page.c
index fa006085..122b4324 100644
--- a/src/page.c
+++ b/src/page.c
@@ -36,8 +36,8 @@ static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_sta
return (mi_block_t*)((uint8_t*)page_start + (i * block_size));
}
-static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
-static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld);
+//static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld);
+static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page);
#if (MI_DEBUG>=3)
static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
@@ -83,7 +83,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
mi_assert_internal(page->capacity <= page->reserved);
// const size_t bsize = mi_page_block_size(page);
- uint8_t* start = mi_page_start(page);
+ // uint8_t* start = mi_page_start(page);
//mi_assert_internal(start + page->capacity*page->block_size == page->top);
mi_assert_internal(mi_page_list_is_valid(page,page->free));
@@ -414,6 +414,7 @@ void _mi_page_force_abandon(mi_page_t* page) {
// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
+ MI_UNUSED(force);
mi_assert_internal(page != NULL);
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(pq == mi_page_queue_of(page));
@@ -784,7 +785,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
}
if (page != NULL && !mi_page_immediate_available(page)) {
mi_assert_internal(mi_page_is_expandable(page));
- mi_page_extend_free(heap, page, heap->tld);
+ mi_page_extend_free(heap, page);
}
if (page == NULL) {
diff --git a/src/prim/windows/prim.c b/src/prim/windows/prim.c
index 1d3d6f41..418c950f 100644
--- a/src/prim/windows/prim.c
+++ b/src/prim/windows/prim.c
@@ -127,7 +127,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
ULONGLONG memInKiB = 0;
if (GetPhysicallyInstalledSystemMemory(&memInKiB)) {
if (memInKiB > 0 && memInKiB < (SIZE_MAX / MI_KiB)) {
- config->physical_memory = memInKiB * MI_KiB;
+ config->physical_memory = (size_t)(memInKiB * MI_KiB);
}
}
// get the VirtualAlloc2 function
@@ -175,7 +175,7 @@ int _mi_prim_free(void* addr, size_t size ) {
// the start of the region.
MEMORY_BASIC_INFORMATION info = { 0 };
VirtualQuery(addr, &info, sizeof(info));
- if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) {
+ if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)(4*MI_MiB)) {
errcode = 0;
err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0);
if (err) { errcode = GetLastError(); }
@@ -239,7 +239,7 @@ static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignmen
// success, return the address
return p;
}
- else if (max_retry_msecs > 0 && (try_alignment <= 2*MI_SEGMENT_ALIGN) &&
+ else if (max_retry_msecs > 0 && (try_alignment <= 8*MI_MiB) &&
(flags&MEM_COMMIT) != 0 && (flags&MEM_LARGE_PAGES) == 0 &&
win_is_out_of_memory_error(GetLastError())) {
// if committing regular memory and being out-of-memory,
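
For context: in the prim/windows hunks above, the explicit (size_t) cast makes the narrowing from ULONGLONG intentional; it is the earlier memInKiB < (SIZE_MAX / MI_KiB) range check that keeps the KiB-to-bytes multiply from overflowing size_t on 32-bit targets. The same guard as a sketch with assumed names:

    #include <stddef.h>
    #include <stdint.h>

    #define KIB ((size_t)1024)

    // installed memory in KiB -> bytes, or 0 if it would not fit in size_t
    static size_t physical_memory_bytes(uint64_t mem_in_kib) {
      if (mem_in_kib == 0 || mem_in_kib >= (SIZE_MAX / KIB)) return 0;
      return (size_t)(mem_in_kib * KIB);
    }
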
diff --git a/src/stats.c b/src/stats.c
index 29376ace..14489937 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -90,7 +90,6 @@ static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t
// must be thread safe as it is called from stats_merge
static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
if (stats==src) return;
- mi_stat_add(&stats->segments, &src->segments,1);
mi_stat_add(&stats->pages, &src->pages,1);
mi_stat_add(&stats->reserved, &src->reserved, 1);
mi_stat_add(&stats->committed, &src->committed, 1);
@@ -99,11 +98,9 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
mi_stat_add(&stats->page_committed, &src->page_committed, 1);
mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned, 1);
- mi_stat_add(&stats->segments_abandoned, &src->segments_abandoned, 1);
mi_stat_add(&stats->threads, &src->threads, 1);
mi_stat_add(&stats->malloc, &src->malloc, 1);
- mi_stat_add(&stats->segments_cache, &src->segments_cache, 1);
mi_stat_add(&stats->normal, &src->normal, 1);
mi_stat_add(&stats->huge, &src->huge, 1);
mi_stat_add(&stats->giant, &src->giant, 1);
@@ -329,9 +326,9 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
mi_stat_peak_print(&stats->reset, "reset", 1, out, arg );
mi_stat_peak_print(&stats->purged, "purged", 1, out, arg );
mi_stat_print(&stats->page_committed, "touched", 1, out, arg);
- mi_stat_print(&stats->segments, "segments", -1, out, arg);
- mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg);
- mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg);
+ //mi_stat_print(&stats->segments, "segments", -1, out, arg);
+ //mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg);
+ //mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg);
mi_stat_print(&stats->pages, "pages", -1, out, arg);
mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg);
mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg);