diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index ad948d36..f2fea3f7 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -377,7 +377,7 @@ static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
 
 typedef struct mi_memid_os_info {
   void*         base;               // actual base address of the block (used for offset aligned allocations)
-  size_t        alignment;          // alignment at allocation
+  size_t        size;               // full allocation size
 } mi_memid_os_info_t;
 
 typedef struct mi_memid_arena_info {
diff --git a/src/os.c b/src/os.c
index 32cb11c3..7e7dcb2c 100644
--- a/src/os.c
+++ b/src/os.c
@@ -157,34 +157,42 @@ void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
 }
 #endif
 
-
 /* -----------------------------------------------------------
   Free memory
 -------------------------------------------------------------- */
 
 static void mi_os_free_huge_os_pages(void* p, size_t size);
 
-static void mi_os_prim_free(void* addr, size_t size, bool still_committed) {
+static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) {
   mi_assert_internal((size % _mi_os_page_size()) == 0);
   if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
   int err = _mi_prim_free(addr, size);
   if (err != 0) {
     _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
   }
-  if (still_committed) { _mi_stat_decrease(&os_stats->committed, size); }
+  if (commit_size > 0) {
+    _mi_stat_decrease(&os_stats->committed, commit_size);
+  }
   _mi_stat_decrease(&os_stats->reserved, size);
 }
 
 void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
   if (mi_memkind_is_os(memid.memkind)) {
-    size_t csize = _mi_os_good_alloc_size(size);
+    size_t csize = memid.mem.os.size;
+    if (csize==0) { csize = _mi_os_good_alloc_size(size); }
+    size_t commit_size = (still_committed ? csize : 0);
     void* base = addr;
     // different base? (due to alignment)
-    if (memid.mem.os.base != NULL) {
-      mi_assert(memid.mem.os.base <= addr);
-      mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr);
+    if (memid.mem.os.base != base) {
+      mi_assert(memid.mem.os.base <= addr);
       base = memid.mem.os.base;
-      csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base);
+      const size_t diff = (uint8_t*)addr - (uint8_t*)memid.mem.os.base;
+      if (memid.mem.os.size==0) {
+        csize += diff;
+      }
+      if (still_committed) {
+        commit_size -= diff;  // the (addr-base) part was already un-committed
+      }
     }
     // free it
     if (memid.memkind == MI_MEM_OS_HUGE) {
@@ -192,7 +200,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
       mi_os_free_huge_os_pages(base, csize);
     }
     else {
-      mi_os_prim_free(base, csize, still_committed);
+      mi_os_prim_free(base, csize, (still_committed ? commit_size : 0));
    }
   }
   else {
@@ -273,7 +281,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     #if !MI_TRACK_ASAN
     _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
     #endif
-    mi_os_prim_free(p, size, commit);
+    if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0)); }
     if (size >= (SIZE_MAX - alignment)) return NULL;  // overflow
     const size_t over_size = size + alignment;
@@ -304,8 +312,8 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     size_t mid_size = _mi_align_up(size, _mi_os_page_size());
     size_t post_size = over_size - pre_size - mid_size;
     mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size);
-    if (pre_size > 0)  { mi_os_prim_free(p, pre_size, commit); }
-    if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit); }
+    if (pre_size > 0)  { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0)); }
+    if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0)); }
     // we can return the aligned pointer on `mmap` systems
     p = aligned_p;
     *base = aligned_p; // since we freed the pre part, `*base == p`.
@@ -446,8 +454,13 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
   return true;
 }
 
-static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit) {
-  mi_assert_internal(needs_recommit!=NULL);
-  _mi_stat_decrease(&os_stats->committed, size);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
+  return _mi_os_commit_ex(addr, size, is_zero, size);
+}
+
+static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, size_t stat_size) {
+  mi_assert_internal(needs_recommit!=NULL);
+  _mi_stat_decrease(&os_stats->committed, stat_size);
   // page align
   size_t csize;
@@ -466,7 +479,7 @@ bool _mi_os_decommit(void* addr, size_t size) {
   bool needs_recommit;
-  return mi_os_decommit_ex(addr, size, &needs_recommit);
+  return mi_os_decommit_ex(addr, size, &needs_recommit, size);
 }
@@ -628,7 +641,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
       // no success, issue a warning and break
       if (p != NULL) {
         _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
-        mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true);
+        mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
      }
      break;
    }
@@ -674,7 +687,7 @@ static void mi_os_free_huge_os_pages(void* p, size_t size) {
  if (p==NULL || size==0) return;
  uint8_t* base = (uint8_t*)p;
  while (size >= MI_HUGE_OS_PAGE_SIZE) {
-    mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true);
+    mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
     size -= MI_HUGE_OS_PAGE_SIZE;
     base += MI_HUGE_OS_PAGE_SIZE;
   }
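
Taken together, the patch records the full OS allocation size in the memid and changes mi_os_prim_free from taking a still-committed flag to taking the number of committed bytes being released, so the committed statistic stays exact even for over-aligned allocations whose unaligned prefix was already returned to the OS. The standalone sketch below mirrors that accounting under simplifying assumptions; os_info_t, good_alloc_size, free_sizes, the 4 KiB page size and the example sizes are illustrative stand-ins, not mimalloc identifiers.

/* A minimal sketch (not mimalloc code) mirroring the patched _mi_os_free_ex
   accounting: the recorded full allocation size determines the span to
   release, and the committed statistic is adjusted by a byte count
   (commit_size) rather than a boolean. */
#include <stdint.h>
#include <stdio.h>

typedef struct os_info_s {
  void*  base;   /* actual base address returned by the OS            */
  size_t size;   /* full allocation size, or 0 if it was not recorded */
} os_info_t;

static size_t good_alloc_size(size_t size) {
  const size_t page = 4096;                  /* assume 4 KiB OS pages */
  return (size + page - 1) & ~(page - 1);    /* round up to a page    */
}

/* Compute what a free of `addr` must pass on: the base pointer, the full
   span to release (*csize), and the committed bytes to account for
   (*commit_size). */
static void* free_sizes(void* addr, size_t size, int still_committed,
                        const os_info_t* info,
                        size_t* csize, size_t* commit_size) {
  *csize = (info->size != 0 ? info->size : good_alloc_size(size));
  *commit_size = (still_committed ? *csize : 0);
  void* base = addr;
  if (info->base != addr) {                  /* over-aligned: addr > base */
    const size_t diff = (size_t)((uint8_t*)addr - (uint8_t*)info->base);
    base = info->base;
    if (info->size == 0)  { *csize += diff; }
    if (still_committed)  { *commit_size -= diff; }  /* prefix was already un-committed */
  }
  return base;
}

int main(void) {
  static uint8_t block[64 * 1024];           /* stand-in for an OS allocation */
  os_info_t info = { block, sizeof(block) };
  size_t csize, commit_size;
  void* base = free_sizes(block + 8192, 32 * 1024, 1, &info, &csize, &commit_size);
  /* expect: base == block, csize == 65536, commit == 57344 */
  printf("base=%p csize=%zu commit=%zu\n", base, csize, commit_size);
  return 0;
}

Passing a byte count instead of a flag is what allows the free path to subtract only the still-committed middle portion when the freed address differs from the recorded base.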