merge from dev

daan 2019-09-17 17:49:56 -07:00
commit 8857f0a5ef
67 changed files with 1912 additions and 934 deletions


@@ -14,39 +14,47 @@ terms of the MIT license. A copy of the license can be found in the file
// Aligned Allocation
// ------------------------------------------------------
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept {
// note: we don't require `size > offset`, we just guarantee that
// the address at offset is aligned regardless of the allocated size.
mi_assert(alignment > 0 && alignment % sizeof(uintptr_t) == 0);
if (alignment <= sizeof(uintptr_t)) return _mi_heap_malloc_zero(heap,size,zero);
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
// try if there is a current small block with just the right alignment
if (size <= MI_SMALL_SIZE_MAX) {
mi_assert(alignment > 0 && alignment % sizeof(void*) == 0);
if (mi_unlikely(size > PTRDIFF_MAX)) return NULL; // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) return NULL; // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
// try if there is a small block available with just the right alignment
if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
mi_page_t* page = _mi_heap_get_free_small_page(heap,size);
if (page->free != NULL &&
(((uintptr_t)page->free + offset) % alignment) == 0)
const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
if (mi_likely(page->free != NULL && is_aligned))
{
#if MI_STAT>1
mi_heap_stat_increase( heap, malloc, size);
mi_heap_stat_increase( heap, malloc, size);
#endif
void* p = _mi_page_malloc(heap,page,size);
void* p = _mi_page_malloc(heap,page,size); // TODO: inline _mi_page_malloc
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
if (zero) memset(p,0,size);
if (zero) _mi_block_zero_init(page,p,size);
return p;
}
}
// use regular allocation if it is guaranteed to fit the alignment constraints
if (offset==0 && alignment<=size && size<=MI_MEDIUM_OBJ_SIZE_MAX && (size&align_mask)==0) {
void* p = _mi_heap_malloc_zero(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
return p;
}
// otherwise over-allocate
void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero);
if (p == NULL) return NULL;
// .. and align within the allocation
mi_page_set_has_aligned( _mi_ptr_page(p), true );
uintptr_t adjust = alignment - (((uintptr_t)p + offset) % alignment);
uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
mi_assert_internal(adjust % sizeof(uintptr_t) == 0);
void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true);
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
mi_assert_internal( p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p),_mi_ptr_page(aligned_p),aligned_p) );
return aligned_p;
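
An editorial aside: the `align_mask` trick above relies on the alignment being a power of two. A minimal standalone sketch of the same over-allocate-and-adjust computation (the `align_at` helper is hypothetical, not part of mimalloc):

#include <assert.h>
#include <stdint.h>

// For a power-of-two `alignment`, (x & (alignment-1)) == (x % alignment),
// so the distance from `p + offset` to the next aligned address can be
// computed with a single mask instead of a division.
static void* align_at(void* p, uintptr_t alignment, uintptr_t offset) {
  const uintptr_t align_mask = alignment - 1;
  uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
  if (adjust == alignment) adjust = 0;  // `p + offset` was already aligned
  assert((((uintptr_t)p + adjust + offset) & align_mask) == 0);
  return (void*)((uintptr_t)p + adjust);
}
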
@@ -117,9 +125,16 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
if (newp != NULL) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
memset((uint8_t*)newp + start, 0, newsize - start);
const mi_page_t* page = _mi_ptr_page(newp);
if (page->flags.is_zero) {
// already zero initialized
mi_assert_expensive(mi_mem_is_zero(newp,newsize));
}
else {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
memset((uint8_t*)newp + start, 0, newsize - start);
}
}
memcpy(newp, p, (newsize > size ? size : newsize));
mi_free(p); // only free if successful
@@ -143,6 +158,26 @@ void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t a
return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}
void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}
void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}
void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_mul_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}
void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
size_t total;
if (mi_mul_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}
void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
@@ -151,13 +186,19 @@ void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noex
return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
void* mi_aligned_offset_recalloc(void* p, size_t size, size_t newcount, size_t alignment, size_t offset) mi_attr_noexcept {
size_t newsize;
if (mi_mul_overflow(size,newcount,&newsize)) return NULL;
return mi_heap_realloc_zero_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset, true );
void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
void* mi_aligned_recalloc(void* p, size_t size, size_t newcount, size_t alignment) mi_attr_noexcept {
size_t newsize;
if (mi_mul_overflow(size, newcount, &newsize)) return NULL;
return mi_heap_realloc_zero_aligned(mi_get_default_heap(), p, newsize, alignment, true );
void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}
void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}
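
All `recalloc` variants above guard the `count * size` multiplication with `mi_mul_overflow`. A portable sketch of such a check, using compiler builtins where available (an illustration, not mimalloc's exact internal definition):

#include <stdbool.h>
#include <stddef.h>

// Returns true on overflow; otherwise stores count*size in *total.
static bool mul_overflow(size_t count, size_t size, size_t* total) {
#if defined(__GNUC__) || defined(__clang__)
  return __builtin_mul_overflow(count, size, total);
#else
  *total = count * size;                         // unsigned wraparound is defined
  return (size != 0 && *total / size != count);  // division-based fallback
#endif
}
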


@@ -16,6 +16,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include <psapi.h>
#include <stdlib.h> // getenv
#include <stdio.h> // _setmaxstdio
#include <string.h> // strstr


@@ -10,7 +10,7 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
#if defined(MI_MALLOC_OVERRIDE) && defined(_WIN32) && !(defined(MI_SHARED_LIB) && defined(_DLL))
#error "It is only possible to override "malloc" on Windows when building as a 64-bit DLL (and linking the C runtime as a DLL)"
#error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)"
#endif
#if defined(MI_MALLOC_OVERRIDE) && !defined(_WIN32)


@@ -48,17 +48,13 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept
// <http://man7.org/linux/man-pages/man3/posix_memalign.3.html>
if (p == NULL) return EINVAL;
if (alignment % sizeof(void*) != 0) return EINVAL; // natural alignment
if ((alignment & (alignment - 1)) != 0) return EINVAL; // not a power of 2
if (!_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2
void* q = mi_malloc_aligned(size, alignment);
if (q==NULL && size != 0) return ENOMEM;
*p = q;
return 0;
}
int mi__posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept {
return mi_posix_memalign(p, alignment, size);
}
void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
return mi_malloc_aligned(size, alignment);
}
@@ -75,6 +71,8 @@ void* mi_pvalloc(size_t size) mi_attr_noexcept {
}
void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
if (alignment==0 || !_mi_is_power_of_two(alignment)) return NULL;
if ((size&(alignment-1)) != 0) return NULL; // C11 requires integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
return mi_malloc_aligned(size, alignment);
}
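
For illustration, the C11 rules enforced above play out as follows (a hedged sketch; whether the valid call actually succeeds depends on available memory):

#include <mimalloc.h>

void aligned_alloc_examples(void) {
  void* ok  = mi_aligned_alloc(64, 128);  // 128 is a multiple of 64: valid
  void* bad = mi_aligned_alloc(64, 100);  // 100 % 64 != 0: NULL per C11
  void* np2 = mi_aligned_alloc(48, 96);   // 48 is not a power of two: NULL
  mi_free(ok); mi_free(bad); mi_free(np2);  // mi_free(NULL) is a no-op
}
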
@@ -90,12 +88,6 @@ void* mi__expand(void* p, size_t newsize) mi_attr_noexcept { // Microsoft
return res;
}
void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept { // Microsoft
size_t total;
if (mi_mul_overflow(count, size, &total)) return NULL;
return _mi_heap_realloc_zero(mi_get_default_heap(), p, total, true);
}
unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
if (s==NULL) return NULL;
size_t len;
@@ -149,3 +141,11 @@ int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name)
return 0;
#endif
}
void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft
return mi_recalloc_aligned_at(p, newcount, size, alignment, offset);
}
void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft
return mi_recalloc_aligned(p, newcount, size, alignment);
}


@@ -33,7 +33,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->used++;
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
#if (MI_DEBUG)
memset(block, MI_DEBUG_UNINIT, size);
if (!page->flags.is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
#elif (MI_SECURE)
block->next = 0;
#endif
@@ -89,9 +89,29 @@ extern inline void* mi_malloc(size_t size) mi_attr_noexcept {
return mi_heap_malloc(mi_get_default_heap(), size);
}
void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
// note: we need to initialize the whole block to zero, not just size
// or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
UNUSED(size);
mi_assert_internal(p != NULL);
mi_assert_internal(size > 0 && page->block_size >= size);
mi_assert_internal(_mi_ptr_page(p)==page);
if (page->flags.is_zero) {
// already zero initialized memory?
((mi_block_t*)p)->next = 0; // clear the free list pointer
mi_assert_expensive(mi_mem_is_zero(p,page->block_size));
}
else {
// otherwise memset
memset(p, 0, page->block_size);
}
}
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
void* p = mi_heap_malloc(heap,size);
if (zero && p != NULL) memset(p,0,size);
if (zero && p != NULL) {
_mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
}
return p;
}
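
Why `_mi_block_zero_init` clears the whole block rather than just `size` (issue #63) can be made concrete with a small sketch, not mimalloc code: a later `rezalloc` that still fits the block can then grow in place without touching memory, because bytes beyond the previously requested size are already zero.

#include <stddef.h>

// Sketch: in-place growth is only safe when the *entire* usable block was
// zeroed at allocation time, not merely the first `used` bytes.
typedef struct { unsigned char data[64]; size_t used; } block_t;

static void* rezalloc_in_place(block_t* b, size_t newsize) {
  if (newsize > sizeof(b->data)) return NULL;  // would need a real realloc
  // no memset needed: bytes [b->used, newsize) are already zero because
  // the whole 64-byte block was cleared when it was first handed out
  b->used = newsize;
  return b->data;
}
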
@@ -127,6 +147,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
mi_block_set_next(page, block, page->free);
page->free = block;
page->used--;
page->flags.is_zero = false;
_mi_segment_page_free(page,true,&heap->tld->segments);
}
return;
@@ -254,7 +275,7 @@ void mi_free(void* p) mi_attr_noexcept
// huge page stat is accounted for in `_mi_page_retire`
#endif
if (mi_likely(tid == segment->thread_id && page->flags.value == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
mi_block_t* block = (mi_block_t*)p;
mi_block_set_next(page, block, page->local_free);
@@ -405,6 +426,17 @@ void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcep
return newp;
}
void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, true);
}
void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_mul_overflow(count, size, &total)) return NULL;
return mi_heap_rezalloc(heap, p, total);
}
void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_realloc(mi_get_default_heap(),p,newsize);
}
@@ -418,6 +450,16 @@ void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
}
void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}
void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}
// ------------------------------------------------------
// strdup, strndup, and realpath
// ------------------------------------------------------


@@ -108,10 +108,9 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
_mi_deferred_free(heap,collect > NORMAL);
if (!mi_heap_is_initialized(heap)) return;
_mi_deferred_free(heap, collect > NORMAL);
// collect (some) abandoned pages
if (collect >= NORMAL && !heap->no_reclaim) {
if (collect == NORMAL) {


@@ -12,7 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
0, false, false, false, 0, 0,
0, false, false, false, false, 0, 0,
{ 0 },
NULL, // free
#if MI_SECURE
@@ -96,7 +96,7 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
#define tld_main_stats ((mi_stats_t*)((uint8_t*)&tld_main + offsetof(mi_tld_t,stats)))
static mi_tld_t tld_main = {
0,
0, false,
&_mi_heap_main,
{ { NULL, NULL }, {NULL ,NULL}, 0, 0, 0, 0, 0, 0, NULL, tld_main_stats }, // segments
{ 0, tld_main_stats }, // os
@@ -352,9 +352,7 @@ void mi_thread_init(void) mi_attr_noexcept
pthread_setspecific(mi_pthread_key, (void*)(_mi_thread_id()|1)); // set to a dummy value so that `mi_pthread_done` is called
#endif
#if (MI_DEBUG>0) && !defined(NDEBUG) // not in release mode as that leads to crashes on Windows dynamic override
_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id());
#endif
//_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id());
}
void mi_thread_done(void) mi_attr_noexcept {
@@ -367,11 +365,9 @@ void mi_thread_done(void) mi_attr_noexcept {
// abandon the thread local heap
if (_mi_heap_done()) return; // returns true if already ran
#if (MI_DEBUG>0)
if (!_mi_is_main_thread()) {
_mi_verbose_message("thread done: 0x%zx\n", _mi_thread_id());
}
#endif
//if (!_mi_is_main_thread()) {
// _mi_verbose_message("thread done: 0x%zx\n", _mi_thread_id());
//}
}
@@ -388,14 +384,26 @@ bool _mi_preloading() {
return os_preloading;
}
bool mi_is_redirected() mi_attr_noexcept {
return mi_redirected;
}
// Communicate with the redirection module on Windows
#if 0
#if defined(_WIN32) && defined(MI_SHARED_LIB)
#ifdef __cplusplus
extern "C" {
#endif
mi_decl_export void _mi_redirect_init() {
// called on redirection
mi_redirected = true;
mi_decl_export void _mi_redirect_entry(DWORD reason) {
// called on redirection; careful as this may be called before DllMain
if (reason == DLL_PROCESS_ATTACH) {
mi_redirected = true;
}
else if (reason == DLL_PROCESS_DETACH) {
mi_redirected = false;
}
else if (reason == DLL_THREAD_DETACH) {
mi_thread_done();
}
}
__declspec(dllimport) bool mi_allocator_init(const char** message);
__declspec(dllimport) void mi_allocator_done();
@@ -424,7 +432,9 @@ static void mi_process_load(void) {
// show message from the redirector (if present)
const char* msg = NULL;
mi_allocator_init(&msg);
if (msg != NULL) _mi_verbose_message(msg);
if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) {
_mi_fputs(NULL,NULL,msg);
}
if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
@@ -483,9 +493,7 @@ static void mi_process_done(void) {
#if defined(_WIN32) && defined(MI_SHARED_LIB)
// Windows DLL: easy to hook into process_init and thread_done
#include <windows.h>
// Windows DLL: easy to hook into process_init and thread_done
__declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
UNUSED(reserved);
UNUSED(inst);
@@ -493,7 +501,7 @@ static void mi_process_done(void) {
mi_process_load();
}
else if (reason==DLL_THREAD_DETACH) {
mi_thread_done();
if (!mi_is_redirected()) mi_thread_done();
}
return TRUE;
}


@@ -17,7 +17,7 @@ We need this memory layer between the raw OS calls because of:
to reuse memory effectively.
2. It turns out that for large objects, between 1MiB and 32MiB (?), the cost of
an OS allocation/free is still (much) too expensive relative to the accesses in that
object :-( (`mallloc-large` tests this). This means we need a cheaper way to
object :-( (`malloc-large` tests this). This means we need a cheaper way to
reuse memory.
3. This layer can help with a NUMA aware allocation in the future.
@@ -39,14 +39,16 @@ Possible issues:
// Internal raw OS interface
size_t _mi_os_large_page_size();
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_commit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* p, size_t size, mi_stats_t* stats);
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, mi_os_tld_t* tld);
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld);
void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment);
bool _mi_os_is_huge_reserved(void* p);
// Constants
#if (MI_INTPTR_SIZE==8)
@@ -66,11 +68,25 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, mi_os_tld
#define MI_REGION_MAP_FULL UINTPTR_MAX
typedef uintptr_t mi_region_info_t;
static inline mi_region_info_t mi_region_info_create(void* start, bool is_large, bool is_committed) {
return ((uintptr_t)start | ((is_large?1:0) << 1) | (is_committed?1:0));
}
static inline void* mi_region_info_read(mi_region_info_t info, bool* is_large, bool* is_committed) {
if (is_large) *is_large = ((info&0x02) != 0);
if (is_committed) *is_committed = ((info&0x01) != 0);
return (void*)(info & ~0x03);
}
// A region owns a chunk of REGION_SIZE (256MiB) (virtual) memory with
// a bit map with one bit per MI_SEGMENT_SIZE (4MiB) block.
typedef struct mem_region_s {
volatile _Atomic(uintptr_t) map; // in-use bit per MI_SEGMENT_SIZE block
volatile _Atomic(void*) start; // start of virtual memory area
volatile _Atomic(uintptr_t) map; // in-use bit per MI_SEGMENT_SIZE block
volatile _Atomic(mi_region_info_t) info; // start of virtual memory area, and flags
volatile _Atomic(uintptr_t) dirty_mask; // bit per block if the contents are not zero'd
} mem_region_t;
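
Since a region's `start` address is `MI_SEGMENT_ALIGN` (4MiB) aligned, its two low bits are always zero and can carry the `is_committed` and `is_large` flags, which is exactly what `mi_region_info_create`/`mi_region_info_read` do above. A self-contained sketch of that encode/decode pair:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uintptr_t region_info_t;

static region_info_t info_create(void* start, bool is_large, bool is_committed) {
  // `start` is 4MiB aligned, so bits 0 and 1 are free to hold the flags
  return ((uintptr_t)start | ((is_large ? 1 : 0) << 1) | (is_committed ? 1 : 0));
}

static void* info_read(region_info_t info, bool* is_large, bool* is_committed) {
  if (is_large)     *is_large     = ((info & 0x02) != 0);
  if (is_committed) *is_committed = ((info & 0x01) != 0);
  return (void*)(info & ~((uintptr_t)0x03));
}

int info_roundtrip(void) {
  void* start = (void*)((uintptr_t)1 << 22);  // a 4MiB-aligned address
  bool large, committed;
  void* p = info_read(info_create(start, true, false), &large, &committed);
  assert(p == start && large && !committed);
  return 0;
}
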
@@ -108,7 +124,7 @@ bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
if (p==NULL) return false;
size_t count = mi_atomic_read_relaxed(&regions_count);
for (size_t i = 0; i < count; i++) {
uint8_t* start = (uint8_t*)mi_atomic_read_ptr_relaxed(&regions[i].start);
uint8_t* start = (uint8_t*)mi_region_info_read( mi_atomic_read_relaxed(&regions[i].info), NULL, NULL);
if (start != NULL && (uint8_t*)p >= start && (uint8_t*)p < start + MI_REGION_SIZE) return true;
}
return false;
@@ -123,7 +139,8 @@ Commit from a region
// Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
// if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
// (not being able to claim is not considered an error so check for `p != NULL` afterwards).
static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld)
static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks,
size_t size, bool* commit, bool* allow_large, bool* is_zero, void** p, size_t* id, mi_os_tld_t* tld)
{
size_t mask = mi_region_block_mask(blocks,bitidx);
mi_assert_internal(mask != 0);
@@ -131,10 +148,21 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
mi_assert_internal(&regions[idx] == region);
// ensure the region is reserved
void* start = mi_atomic_read_ptr(&region->start);
if (start == NULL)
mi_region_info_t info = mi_atomic_read(&region->info);
if (info == 0)
{
start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, mi_option_is_enabled(mi_option_eager_region_commit), tld);
bool region_commit = mi_option_is_enabled(mi_option_eager_region_commit);
bool region_large = *allow_large;
void* start = NULL;
if (region_large) {
start = _mi_os_try_alloc_from_huge_reserved(MI_REGION_SIZE, MI_SEGMENT_ALIGN);
if (start != NULL) { region_commit = true; }
}
if (start == NULL) {
start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, region_commit, &region_large, tld);
}
mi_assert_internal(!(region_large && !*allow_large));
if (start == NULL) {
// failure to allocate from the OS! unclaim the blocks and fail
size_t map;
@@ -145,7 +173,8 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
}
// set the newly allocated region
if (mi_atomic_cas_ptr_strong(&region->start, start, NULL)) {
info = mi_region_info_create(start,region_large,region_commit);
if (mi_atomic_cas_strong(&region->info, info, 0)) {
// update the region count
mi_atomic_increment(&regions_count);
}
@@ -153,30 +182,52 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
// failed, another thread allocated just before us!
// we assign it to a later slot instead (up to 4 tries).
for(size_t i = 1; i <= 4 && idx + i < MI_REGION_MAX; i++) {
if (mi_atomic_cas_ptr_strong(&regions[idx+i].start, start, NULL)) {
if (mi_atomic_cas_strong(&regions[idx+i].info, info, 0)) {
mi_atomic_increment(&regions_count);
start = NULL;
break;
}
}
}
if (start != NULL) {
// free it if we didn't succeed to save it to some other region
_mi_os_free(start, MI_REGION_SIZE, tld->stats);
_mi_os_free_ex(start, MI_REGION_SIZE, region_commit, tld->stats);
}
// and continue with the memory at our index
start = mi_atomic_read_ptr(&region->start);
info = mi_atomic_read(&region->info);
}
}
mi_assert_internal(start == mi_atomic_read_ptr(&region->start));
mi_assert_internal(start != NULL);
mi_assert_internal(info == mi_atomic_read(&region->info));
mi_assert_internal(info != 0);
// Commit the blocks to memory
bool region_is_committed = false;
bool region_is_large = false;
void* start = mi_region_info_read(info,&region_is_large,&region_is_committed);
mi_assert_internal(!(region_is_large && !*allow_large));
mi_assert_internal(start!=NULL);
// set dirty bits
uintptr_t m;
do {
m = mi_atomic_read(&region->dirty_mask);
} while (!mi_atomic_cas_weak(&region->dirty_mask, m | mask, m));
*is_zero = ((m & mask) == 0); // no dirty bit set in our claimed range?
void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
if (commit && !mi_option_is_enabled(mi_option_eager_region_commit)) {
_mi_os_commit(blocks_start, mi_good_commit_size(size), tld->stats); // only commit needed size (unless using large OS pages)
if (*commit && !region_is_committed) {
// ensure commit
bool commit_zero = false;
_mi_os_commit(blocks_start, mi_good_commit_size(size), &commit_zero, tld->stats); // only commit needed size (unless using large OS pages)
if (commit_zero) *is_zero = true;
}
else if (!*commit && region_is_committed) {
// but even when no commit is requested, we might have committed anyway (in a huge OS page for example)
*commit = true;
}
// and return the allocation
mi_assert_internal(blocks_start != NULL);
*allow_large = region_is_large;
*p = blocks_start;
*id = (idx*MI_REGION_MAP_BITS) + bitidx;
return true;
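
The dirty-bit update above is a compare-and-swap loop implementing an atomic fetch-or: it both publishes the claimed blocks as dirty and learns whether they were all still clean (hence known to be zero-filled). In portable C11 atomics the same pattern looks roughly like this (a sketch, not mimalloc's `mi_atomic_*` wrappers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Atomically set `mask` in *dirty and report whether all claimed bits
// were previously clear (i.e. the claimed range is zero-initialized).
static bool mark_dirty(_Atomic(uintptr_t)* dirty, uintptr_t mask) {
  uintptr_t m = atomic_load(dirty);
  while (!atomic_compare_exchange_weak(dirty, &m, m | mask)) {
    // on failure `m` is reloaded with the current value; just retry
  }
  return (m & mask) == 0;  // no dirty bit was set in our claimed range
}
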
@@ -220,7 +271,8 @@ static inline size_t mi_bsr(uintptr_t x) {
// Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
// if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
// (not being able to claim is not considered an error so check for `p != NULL` afterwards).
static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld)
static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size,
bool* commit, bool* allow_large, bool* is_zero, void** p, size_t* id, mi_os_tld_t* tld)
{
mi_assert_internal(p != NULL && id != NULL);
mi_assert_internal(blocks < MI_REGION_MAP_BITS);
@@ -228,6 +280,7 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
const uintptr_t mask = mi_region_block_mask(blocks, 0);
const size_t bitidx_max = MI_REGION_MAP_BITS - blocks;
uintptr_t map = mi_atomic_read(&region->map);
if (map==MI_REGION_MAP_FULL) return true;
#ifdef MI_HAVE_BITSCAN
size_t bitidx = mi_bsf(~map); // quickly find the first zero bit if possible
@@ -242,7 +295,7 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
mi_assert_internal((m >> bitidx) == mask); // no overflow?
uintptr_t newmap = map | m;
mi_assert_internal((newmap^map) >> bitidx == mask);
if (!mi_atomic_cas_weak(&region->map, newmap, map)) {
if (!mi_atomic_cas_weak(&region->map, newmap, map)) { // TODO: use strong cas here?
// no success, another thread claimed concurrently.. keep going
map = mi_atomic_read(&region->map);
continue;
@@ -250,7 +303,8 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
else {
// success, we claimed the bits
// now commit the block memory -- this can still fail
return mi_region_commit_blocks(region, idx, bitidx, blocks, size, commit, p, id, tld);
return mi_region_commit_blocks(region, idx, bitidx, blocks,
size, commit, allow_large, is_zero, p, id, tld);
}
}
else {
@@ -273,18 +327,32 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
// Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
// if the blocks were successfully claimed so ensure they are initialized to NULL/0 before the call.
// (not being able to claim is not considered an error so check for `p != NULL` afterwards).
static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld)
static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size,
bool* commit, bool* allow_large, bool* is_zero,
void** p, size_t* id, mi_os_tld_t* tld)
{
// check if there are available blocks in the region..
mi_assert_internal(idx < MI_REGION_MAX);
mem_region_t* region = &regions[idx];
uintptr_t m = mi_atomic_read_relaxed(&region->map);
if (m != MI_REGION_MAP_FULL) { // some bits are zero
return mi_region_alloc_blocks(region, idx, blocks, size, commit, p, id, tld);
}
else {
return true; // no error, but no success either
bool ok = (*commit || *allow_large); // committing or allow-large is always ok
if (!ok) {
// otherwise skip incompatible regions if possible.
// this is not guaranteed due to multiple threads allocating at the same time but
// that's ok. In secure mode, large is never allowed for any thread, so that works out;
// otherwise we might just not be able to reset/decommit individual pages sometimes.
mi_region_info_t info = mi_atomic_read_relaxed(&region->info);
bool is_large;
bool is_committed;
void* start = mi_region_info_read(info,&is_large,&is_committed);
ok = (start == NULL || (*commit || !is_committed) || (*allow_large || !is_large)); // Todo: test with one bitmap operation?
}
if (ok) {
return mi_region_alloc_blocks(region, idx, blocks, size, commit, allow_large, is_zero, p, id, tld);
}
}
return true; // no error, but no success either
}
/* ----------------------------------------------------------------------------
@@ -293,15 +361,20 @@ static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, b
// Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`.
// (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t* id, mi_os_tld_t* tld)
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero,
size_t* id, mi_os_tld_t* tld)
{
mi_assert_internal(id != NULL && tld != NULL);
mi_assert_internal(size > 0);
*id = SIZE_MAX;
*is_zero = false;
bool default_large = false;
if (large==NULL) large = &default_large; // ensure `large != NULL`
// use direct OS allocation for huge blocks or alignment (with `id = SIZE_MAX`)
if (size > MI_REGION_MAX_ALLOC_SIZE || alignment > MI_SEGMENT_ALIGN) {
return _mi_os_alloc_aligned(mi_good_commit_size(size), alignment, true, tld); // round up size
*is_zero = true;
return _mi_os_alloc_aligned(mi_good_commit_size(size), alignment, *commit, large, tld); // round up size
}
// always round size to OS page size multiple (so commit/decommit go over the entire range)
@@ -315,27 +388,29 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t*
// find a range of free blocks
void* p = NULL;
size_t count = mi_atomic_read(&regions_count);
size_t idx = tld->region_idx; // start index is per-thread to reduce contention
size_t idx = tld->region_idx; // start at 0 to reuse low addresses? Or, use tld->region_idx to reduce contention?
for (size_t visited = 0; visited < count; visited++, idx++) {
if (idx >= count) idx = 0; // wrap around
if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, &p, id, tld)) return NULL; // error
if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, large, is_zero, &p, id, tld)) return NULL; // error
if (p != NULL) break;
}
if (p == NULL) {
// no free range in existing regions -- try to extend beyond the count.. but at most 4 regions
for (idx = count; idx < count + 4 && idx < MI_REGION_MAX; idx++) {
if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, &p, id, tld)) return NULL; // error
// no free range in existing regions -- try to extend beyond the count.. but at most 8 regions
for (idx = count; idx < mi_atomic_read_relaxed(&regions_count) + 8 && idx < MI_REGION_MAX; idx++) {
if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, large, is_zero, &p, id, tld)) return NULL; // error
if (p != NULL) break;
}
}
if (p == NULL) {
// we could not find a place to allocate, fall back to the os directly
p = _mi_os_alloc_aligned(size, alignment, commit, tld);
_mi_warning_message("unable to allocate from region: size %zu\n", size);
*is_zero = true;
p = _mi_os_alloc_aligned(size, alignment, commit, large, tld);
}
else {
tld->region_idx = idx; // next start of search
tld->region_idx = idx; // next start of search? currently not used as we use first-fit
}
mi_assert_internal( p == NULL || (uintptr_t)p % alignment == 0);
@@ -343,10 +418,6 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t*
}
// Allocate `size` memory. Return non NULL on success, with a given memory `id`.
void* _mi_mem_alloc(size_t size, bool commit, size_t* id, mi_os_tld_t* tld) {
return _mi_mem_alloc_aligned(size,0,commit,id,tld);
}
/* ----------------------------------------------------------------------------
Free
@@ -374,7 +445,10 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
mi_assert_internal(idx < MI_REGION_MAX); if (idx >= MI_REGION_MAX) return; // or `abort`?
mem_region_t* region = &regions[idx];
mi_assert_internal((mi_atomic_read_relaxed(&region->map) & mask) == mask ); // claimed?
void* start = mi_atomic_read_ptr(&region->start);
mi_region_info_t info = mi_atomic_read(&region->info);
bool is_large;
bool is_eager_committed;
void* start = mi_region_info_read(info,&is_large,&is_eager_committed);
mi_assert_internal(start != NULL);
void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
mi_assert_internal(blocks_start == p); // not a pointer in our area?
@@ -385,18 +459,20 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
// TODO: implement delayed decommit/reset as these calls are too expensive
// if the memory is reused soon.
// reset: 10x slowdown on malloc-large, decommit: 17x slowdown on malloc-large
if (!mi_option_is_enabled(mi_option_large_os_pages)) {
if (mi_option_is_enabled(mi_option_eager_region_commit)) {
//_mi_os_reset(p, size, stats);
}
else {
//_mi_os_decommit(p, size, stats);
if (!is_large) {
if (mi_option_is_enabled(mi_option_segment_reset)) {
_mi_os_reset(p, size, stats); //
// _mi_os_decommit(p,size,stats); // if !is_eager_committed (and clear dirty bits)
}
// else { _mi_os_reset(p,size,stats); }
}
if (!is_eager_committed) {
// adjust commit statistics as we commit again when re-using the same slot
_mi_stat_decrease(&stats->committed, mi_good_commit_size(size));
}
// TODO: should we free empty regions? currently only done _mi_mem_collect.
// this frees up virtual address space which
// might be useful on 32-bit systems?
// this frees up virtual address space which might be useful on 32-bit systems?
// and unclaim
uintptr_t map;
@@ -416,17 +492,21 @@ void _mi_mem_collect(mi_stats_t* stats) {
// free every region that has no segments in use.
for (size_t i = 0; i < regions_count; i++) {
mem_region_t* region = &regions[i];
if (mi_atomic_read_relaxed(&region->map) == 0 && region->start != NULL) {
if (mi_atomic_read_relaxed(&region->map) == 0) {
// if no segments used, try to claim the whole region
uintptr_t m;
do {
m = mi_atomic_read_relaxed(&region->map);
} while(m == 0 && !mi_atomic_cas_weak(&region->map, ~((uintptr_t)0), 0 ));
if (m == 0) {
// on success, free the whole region
if (region->start != NULL) _mi_os_free((void*)region->start, MI_REGION_SIZE, stats);
// on success, free the whole region (unless it was huge reserved)
bool is_eager_committed;
void* start = mi_region_info_read(mi_atomic_read(&region->info), NULL, &is_eager_committed);
if (start != NULL && !_mi_os_is_huge_reserved(start)) {
_mi_os_free_ex(start, MI_REGION_SIZE, is_eager_committed, stats);
}
// and release
mi_atomic_write_ptr(&region->start,NULL);
mi_atomic_write(&region->info,0);
mi_atomic_write(&region->map,0);
}
}
@@ -437,8 +517,8 @@
Other
-----------------------------------------------------------------------------*/
bool _mi_mem_commit(void* p, size_t size, mi_stats_t* stats) {
return _mi_os_commit(p, size, stats);
bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats) {
return _mi_os_commit(p, size, is_zero, stats);
}
bool _mi_mem_decommit(void* p, size_t size, mi_stats_t* stats) {
@@ -449,8 +529,8 @@ bool _mi_mem_reset(void* p, size_t size, mi_stats_t* stats) {
return _mi_os_reset(p, size, stats);
}
bool _mi_mem_unreset(void* p, size_t size, mi_stats_t* stats) {
return _mi_os_unreset(p, size, stats);
bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats) {
return _mi_os_unreset(p, size, is_zero, stats);
}
bool _mi_mem_protect(void* p, size_t size) {


@@ -51,16 +51,10 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(show_stats) },
{ 0, UNINIT, MI_OPTION(verbose) },
#if MI_SECURE
{ MI_SECURE, INITIALIZED, MI_OPTION(secure) }, // in a secure build the environment setting is ignored
#else
{ 0, UNINIT, MI_OPTION(secure) },
#endif
// the following options are experimental and not all combinations make sense.
{ 1, UNINIT, MI_OPTION(eager_commit) }, // note: if eager_region_commit is on, this should be on too.
{ 1, UNINIT, MI_OPTION(eager_commit) }, // note: needs to be on when eager_region_commit is enabled
#ifdef _WIN32 // and BSD?
{ 1, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
{ 0, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
#else
{ 1, UNINIT, MI_OPTION(eager_region_commit) },
#endif
@@ -69,7 +63,10 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
{ 0, UNINIT, MI_OPTION(page_reset) },
{ 0, UNINIT, MI_OPTION(cache_reset) },
{ 0, UNINIT, MI_OPTION(reset_decommits) } // note: cannot enable this if secure is on
{ 0, UNINIT, MI_OPTION(reset_decommits) }, // note: cannot enable this if secure is on
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
{ 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free
{ 100, UNINIT, MI_OPTION(os_tag) } // only apple specific for now but might serve more or less related purpose
};
static void mi_option_init(mi_option_desc_t* desc);
@@ -77,7 +74,12 @@ static void mi_option_init(mi_option_desc_t* desc);
void _mi_options_init(void) {
// called on process load
for(int i = 0; i < _mi_option_last; i++ ) {
mi_option_get((mi_option_t)i); // initialize
mi_option_t option = (mi_option_t)i;
mi_option_get(option); // initialize
if (option != mi_option_verbose) {
mi_option_desc_t* desc = &options[option];
_mi_verbose_message("option '%s': %ld\n", desc->name, desc->value);
}
}
}
@@ -86,10 +88,7 @@ long mi_option_get(mi_option_t option) {
mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
if (mi_unlikely(desc->init == UNINIT)) {
mi_option_init(desc);
if (option != mi_option_verbose) {
_mi_verbose_message("option '%s': %ld\n", desc->name, desc->value);
}
mi_option_init(desc);
}
return desc->value;
}
@@ -131,6 +130,35 @@ void mi_option_disable(mi_option_t option) {
}
static void mi_out_stderr(const char* msg) {
#ifdef _WIN32
// on windows with redirection, the C runtime cannot handle locale dependent output
// after the main thread closes so we use direct console output.
_cputs(msg);
#else
fputs(msg, stderr);
#endif
}
// --------------------------------------------------------
// Default output handler
// --------------------------------------------------------
// Should be atomic but gives errors on many platforms as generally we cannot cast a function pointer to a uintptr_t.
// For now, don't register output from multiple threads.
#pragma warning(suppress:4180)
static mi_output_fun* volatile mi_out_default; // = NULL
static mi_output_fun* mi_out_get_default(void) {
mi_output_fun* out = mi_out_default;
return (out == NULL ? &mi_out_stderr : out);
}
void mi_register_output(mi_output_fun* out) mi_attr_noexcept {
mi_out_default = out;
}
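
A minimal usage sketch of the new output hook (assuming `mi_output_fun` is the exported typedef taking just the message string, as the `out(prefix); out(message);` calls below suggest; the file sink is a toy example):

#include <mimalloc.h>
#include <stdio.h>

static void log_to_file(const char* msg) {
  FILE* f = fopen("mimalloc.log", "a");  // toy sink; a real one would keep the handle open
  if (f != NULL) { fputs(msg, f); fclose(f); }
}

int main(void) {
  mi_register_output(&log_to_file);  // mimalloc messages now go to the file
  void* p = mi_malloc(32);
  mi_free(p);
  return 0;
}
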
// --------------------------------------------------------
// Messages
// --------------------------------------------------------
@@ -141,33 +169,30 @@ static volatile _Atomic(uintptr_t) error_count; // = 0; // when MAX_ERROR_COUNT
// inside the C runtime causes another message.
static mi_decl_thread bool recurse = false;
// Define our own limited `fprintf` that avoids memory allocation.
// We do this using `snprintf` with a limited buffer.
static void mi_vfprintf( FILE* out, const char* prefix, const char* fmt, va_list args ) {
char buf[256];
if (fmt==NULL) return;
void _mi_fputs(mi_output_fun* out, const char* prefix, const char* message) {
if (_mi_preloading() || recurse) return;
if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) out = mi_out_get_default();
recurse = true;
if (out==NULL) out = stdout;
vsnprintf(buf,sizeof(buf)-1,fmt,args);
#ifdef _WIN32
// on windows with redirection, the C runtime cannot handle locale dependent output
// after the main thread closes so use direct console output.
if (out==stderr) {
if (prefix != NULL) _cputs(prefix);
_cputs(buf);
}
else
#endif
{
if (prefix != NULL) fputs(prefix,out);
fputs(buf,out);
}
if (prefix != NULL) out(prefix);
out(message);
recurse = false;
return;
}
void _mi_fprintf( FILE* out, const char* fmt, ... ) {
// Define our own limited `fprintf` that avoids memory allocation.
// We do this using `snprintf` with a limited buffer.
static void mi_vfprintf( mi_output_fun* out, const char* prefix, const char* fmt, va_list args ) {
char buf[512];
if (fmt==NULL) return;
if (_mi_preloading() || recurse) return;
recurse = true;
vsnprintf(buf,sizeof(buf)-1,fmt,args);
recurse = false;
_mi_fputs(out,prefix,buf);
}
void _mi_fprintf( mi_output_fun* out, const char* fmt, ... ) {
va_list args;
va_start(args,fmt);
mi_vfprintf(out,NULL,fmt,args);
@@ -178,7 +203,7 @@ void _mi_trace_message(const char* fmt, ...) {
if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher
va_list args;
va_start(args, fmt);
mi_vfprintf(stderr, "mimalloc: ", fmt, args);
mi_vfprintf(NULL, "mimalloc: ", fmt, args);
va_end(args);
}
@@ -186,7 +211,7 @@ void _mi_verbose_message(const char* fmt, ...) {
if (!mi_option_is_enabled(mi_option_verbose)) return;
va_list args;
va_start(args,fmt);
mi_vfprintf(stderr, "mimalloc: ", fmt, args);
mi_vfprintf(NULL, "mimalloc: ", fmt, args);
va_end(args);
}
@@ -195,7 +220,7 @@ void _mi_error_message(const char* fmt, ...) {
if (mi_atomic_increment(&error_count) > MAX_ERROR_COUNT) return;
va_list args;
va_start(args,fmt);
mi_vfprintf(stderr, "mimalloc: error: ", fmt, args);
mi_vfprintf(NULL, "mimalloc: error: ", fmt, args);
va_end(args);
mi_assert(false);
}
@@ -205,14 +230,14 @@ void _mi_warning_message(const char* fmt, ...) {
if (mi_atomic_increment(&error_count) > MAX_ERROR_COUNT) return;
va_list args;
va_start(args,fmt);
mi_vfprintf(stderr, "mimalloc: warning: ", fmt, args);
mi_vfprintf(NULL, "mimalloc: warning: ", fmt, args);
va_end(args);
}
#if MI_DEBUG
void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) {
_mi_fprintf(stderr,"mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion);
_mi_fprintf(NULL,"mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion);
abort();
}
#endif

src/os.c

@@ -35,10 +35,9 @@ terms of the MIT license. A copy of the license can be found in the file
On windows initializes support for aligned allocation and
large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
----------------------------------------------------------- */
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
static bool mi_os_is_huge_reserved(void* p);
static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit);
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_is_huge_reserved(void* p);
void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment);
static void* mi_align_up_ptr(void* p, size_t alignment) {
return (void*)_mi_align_up((uintptr_t)p, alignment);
@@ -77,11 +76,16 @@ static bool use_large_os_page(size_t size, size_t alignment) {
return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0);
}
// round to a good allocation size
static size_t mi_os_good_alloc_size(size_t size, size_t alignment) {
UNUSED(alignment);
if (size >= (SIZE_MAX - os_alloc_granularity)) return size; // possible overflow?
return _mi_align_up(size, os_alloc_granularity);
// round to a good OS allocation size (bounded by max 12.5% waste)
size_t _mi_os_good_alloc_size(size_t size) {
size_t align_size;
if (size < 512*KiB) align_size = _mi_os_page_size();
else if (size < 2*MiB) align_size = 64*KiB;
else if (size < 8*MiB) align_size = 256*KiB;
else if (size < 32*MiB) align_size = 1*MiB;
else align_size = 4*MiB;
if (size >= (SIZE_MAX - align_size)) return size; // possible overflow?
return _mi_align_up(size, align_size);
}
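
The tier boundaries give the stated bound because each `align_size` is one eighth of its tier's lower bound (64KiB/512KiB = 256KiB/2MiB = 12.5%, and so on). A small check of the same tiering, mirroring the function above with 4KiB assumed as the OS page size:

#include <assert.h>
#include <stddef.h>

#define KiB ((size_t)1024)
#define MiB (KiB*KiB)

static size_t good_alloc_size(size_t size) {
  size_t align_size;
  if      (size < 512*KiB) align_size = 4*KiB;   // assume 4KiB OS pages
  else if (size <   2*MiB) align_size = 64*KiB;
  else if (size <   8*MiB) align_size = 256*KiB;
  else if (size <  32*MiB) align_size = 1*MiB;
  else                     align_size = 4*MiB;
  return (size + align_size - 1) & ~(align_size - 1);
}

int waste_bound_check(void) {
  assert(good_alloc_size(600*KiB) == 640*KiB);  // waste 40KiB, about 6.7%
  assert(good_alloc_size(3*MiB)   == 3*MiB);    // already a 256KiB multiple
  return 0;
}
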
#if defined(_WIN32)
@@ -95,6 +99,41 @@ typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*
static PVirtualAlloc2 pVirtualAlloc2 = NULL;
static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
static bool mi_win_enable_large_os_pages()
{
if (large_os_page_size > 0) return true;
// Try to see if large OS pages are supported
// To use large pages on Windows, we first need access permission
// Set "Lock pages in memory" permission in the group policy editor
// <https://devblogs.microsoft.com/oldnewthing/20110128-00/?p=11643>
unsigned long err = 0;
HANDLE token = NULL;
BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
if (ok) {
TOKEN_PRIVILEGES tp;
ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid);
if (ok) {
tp.PrivilegeCount = 1;
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
if (ok) {
err = GetLastError();
ok = (err == ERROR_SUCCESS);
if (ok) {
large_os_page_size = GetLargePageMinimum();
}
}
}
CloseHandle(token);
}
if (!ok) {
if (err == 0) err = GetLastError();
_mi_warning_message("cannot enable large OS page support, error %lu\n", err);
}
return (ok!=0);
}
void _mi_os_init(void) {
// get the page size
SYSTEM_INFO si;
@@ -111,40 +150,12 @@ void _mi_os_init(void) {
FreeLibrary(hDll);
}
hDll = LoadLibrary(TEXT("ntdll.dll"));
if (hDll != NULL) {
if (hDll != NULL) {
pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
FreeLibrary(hDll);
}
// Try to see if large OS pages are supported
unsigned long err = 0;
bool ok = mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages);
if (ok) {
// To use large pages on Windows, we first need access permission
// Set "Lock pages in memory" permission in the group policy editor
// <https://devblogs.microsoft.com/oldnewthing/20110128-00/?p=11643>
HANDLE token = NULL;
ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token) != 0;
if (ok) {
TOKEN_PRIVILEGES tp;
ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid) != 0;
if (ok) {
tp.PrivilegeCount = 1;
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0) != 0;
if (ok) {
err = GetLastError();
ok = (err == ERROR_SUCCESS);
if (ok) {
large_os_page_size = GetLargePageMinimum();
}
}
}
CloseHandle(token);
}
if (!ok) {
if (err == 0) err = GetLastError();
_mi_warning_message("cannot enable large OS page support, error %lu\n", err);
}
if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
mi_win_enable_large_os_pages();
}
}
#elif defined(__wasi__)
@@ -171,9 +182,9 @@ void _mi_os_init() {
Raw allocation on Windows (VirtualAlloc) and Unix's (mmap).
----------------------------------------------------------- */
static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats_t* stats)
{
if (addr == NULL || size == 0 || mi_os_is_huge_reserved(addr)) return true;
if (addr == NULL || size == 0 || _mi_os_is_huge_reserved(addr)) return true;
bool err = false;
#if defined(_WIN32)
err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
@@ -182,7 +193,7 @@ static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
#else
err = (munmap(addr, size) == -1);
#endif
_mi_stat_decrease(&stats->committed, size); // TODO: what if never committed?
if (was_committed) _mi_stat_decrease(&stats->committed, size);
_mi_stat_decrease(&stats->reserved, size);
if (err) {
#pragma warning(suppress:4996)
@@ -194,12 +205,14 @@ static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
}
}
static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size);
#ifdef _WIN32
static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
// on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages
if ((size % ((uintptr_t)1 << 30)) == 0 /* 1GiB multiple */
&& (flags & MEM_LARGE_PAGES) != 0 && (flags & MEM_COMMIT) != 0
&& (flags & MEM_LARGE_PAGES) != 0 && (flags & MEM_COMMIT) != 0 && (flags & MEM_RESERVE) != 0
&& (addr != NULL || try_alignment == 0 || try_alignment % _mi_os_page_size() == 0)
&& pNtAllocateVirtualMemoryEx != NULL)
{
@@ -211,7 +224,7 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
param.ULong64 = MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
SIZE_T psize = size;
void* base = addr;
NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags | MEM_RESERVE, PAGE_READWRITE, &param, 1);
NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, &param, 1);
if (err == 0) {
return base;
}
@@ -221,19 +234,14 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
}
}
#endif
#if (MI_INTPTR_SIZE >= 8)
// on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations
static volatile _Atomic(intptr_t) aligned_base = ATOMIC_VAR_INIT((intptr_t)4 << 40); // starting at 4TiB
if (addr == NULL && try_alignment > 0 &&
try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE) == 0)
{
intptr_t hint = mi_atomic_add(&aligned_base, size);
if (hint%try_alignment == 0) {
return VirtualAlloc((void*)hint, size, flags, PAGE_READWRITE);
}
#if (MI_INTPTR_SIZE >= 8)
// on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations
void* hint;
if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) {
return VirtualAlloc(hint, size, flags, PAGE_READWRITE);
}
#endif
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
// on modern Windows try use VirtualAlloc2 for aligned allocation
if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
MEM_ADDRESS_REQUIREMENTS reqs = { 0 };
@@ -247,10 +255,12 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
}
static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only) {
static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
mi_assert_internal(!(large_only && !allow_large));
static volatile _Atomic(uintptr_t) large_page_try_ok; // = 0;
void* p = NULL;
if (large_only || use_large_os_page(size, try_alignment)) {
if ((large_only || use_large_os_page(size, try_alignment))
&& allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
if (!large_only && try_ok > 0) {
// if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
@@ -259,7 +269,8 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
}
else {
// large OS pages must always reserve and commit.
p = mi_win_virtual_allocx(addr, size, try_alignment, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE | flags);
*is_large = true;
p = mi_win_virtual_allocx(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
if (large_only) return p;
// fall back to non-large page allocation on error (`p == NULL`).
if (p == NULL) {
@@ -268,6 +279,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
}
}
if (p == NULL) {
*is_large = ((flags&MEM_LARGE_PAGES) != 0);
p = mi_win_virtual_allocx(addr, size, try_alignment, flags);
}
if (p == NULL) {
@@ -295,14 +307,13 @@ static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int pr
void* p = NULL;
#if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
// on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations
static volatile _Atomic(intptr_t) aligned_base = ATOMIC_VAR_INIT((intptr_t)1 << 42); // starting at 4TiB
if (addr==NULL && try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) {
intptr_t hint = mi_atomic_add(&aligned_base,size);
if (hint%try_alignment == 0) {
p = mmap((void*)hint,size,protect_flags,flags,fd,0);
if (p==MAP_FAILED) p = NULL; // fall back to regular mmap
}
void* hint;
if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment, size)) != NULL) {
p = mmap(hint,size,protect_flags,flags,fd,0);
if (p==MAP_FAILED) p = NULL; // fall back to regular mmap
}
#else
UNUSED(try_alignment);
#endif
if (p==NULL) {
p = mmap(addr,size,protect_flags,flags,fd,0);
@@ -311,7 +322,7 @@ static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int pr
return p;
}
static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only) {
static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
void* p = NULL;
#if !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
@@ -331,9 +342,11 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
#endif
#if defined(VM_MAKE_TAG)
// macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99)
fd = VM_MAKE_TAG(100);
int os_tag = (int)mi_option_get(mi_option_os_tag);
if (os_tag < 100 || os_tag > 255) os_tag = 100;
fd = VM_MAKE_TAG(os_tag);
#endif
if (large_only || use_large_os_page(size, try_alignment)) {
if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) {
static volatile _Atomic(uintptr_t) large_page_try_ok; // = 0;
uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
if (!large_only && try_ok > 0) {
@@ -368,6 +381,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
#endif
if (large_only || lflags != flags) {
// try large OS page allocation
*is_large = true;
p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
#ifdef MAP_HUGE_1GB
if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) {
@@ -384,7 +398,8 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
}
}
if (p == NULL) {
p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
*is_large = false;
p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
#if defined(MADV_HUGEPAGE)
// Many Linux systems don't allow MAP_HUGETLB but they support instead
// transparent huge pages (THP). It is not required to call `madvise` with MADV_HUGE
@@ -392,8 +407,10 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
// in that case -- in particular for our large regions (in `memory.c`).
// However, some systems only allow TPH if called with explicit `madvise`, so
// when large OS pages are enabled for mimalloc, we call `madvise` anyway.
if (use_large_os_page(size, try_alignment)) {
madvise(p, size, MADV_HUGEPAGE);
if (allow_large && use_large_os_page(size, try_alignment)) {
if (madvise(p, size, MADV_HUGEPAGE) == 0) {
*is_large = true; // possibly
};
}
#endif
}
@@ -401,29 +418,67 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
}
#endif
// On 64-bit systems, we can do efficient aligned allocation by using
// the 4TiB to 30TiB area to allocate them.
#if (MI_INTPTR_SIZE >= 8) && (defined(_WIN32) || (defined(MI_OS_USE_MMAP) && !defined(MAP_ALIGNED)))
static volatile _Atomic(intptr_t) aligned_base;
// Return a 4MiB aligned address that is probably available
static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
if (try_alignment == 0 || try_alignment > MI_SEGMENT_SIZE) return NULL;
if ((size%MI_SEGMENT_SIZE) != 0) return NULL;
intptr_t hint = mi_atomic_add(&aligned_base, size);
if (hint == 0 || hint > ((intptr_t)30<<40)) { // try to wrap around after 30TiB (area after 32TiB is used for huge OS pages)
intptr_t init = ((intptr_t)4 << 40); // start at 4TiB area
#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode
uintptr_t r = _mi_random_init((uintptr_t)&mi_os_get_aligned_hint ^ hint);
init = init + (MI_SEGMENT_SIZE * ((r>>17) & 0xFFFF)); // (randomly 0-64k)*4MiB == 0 to 256GiB
#endif
mi_atomic_cas_strong(mi_atomic_cast(uintptr_t, &aligned_base), init, hint + size);
hint = mi_atomic_add(&aligned_base, size); // this may still give 0 or > 30TiB but that is ok, it is a hint after all
}
if (hint%try_alignment != 0) return NULL;
return (void*)hint;
}
#else
static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
UNUSED(try_alignment); UNUSED(size);
return NULL;
}
#endif
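A minimal sketch of how such a hint is typically consumed (assuming the plain POSIX
calls; the kernel is free to ignore the address hint, so callers must still re-check
alignment and fall back to the over-allocation path below):

  void* hint = mi_os_get_aligned_hint(try_alignment, size);
  void* p = mmap(hint, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p != MAP_FAILED && ((uintptr_t)p % try_alignment) != 0) {
    // hint was not honored: the caller over-allocates and trims instead
  }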
// Primitive allocation from the OS.
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_stats_t* stats) {
static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) {
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
if (size == 0) return NULL;
if (!commit) allow_large = false;
void* p = mi_os_alloc_from_huge_reserved(size, try_alignment, commit);
if (p != NULL) return p;
void* p = NULL;
if (commit && allow_large) {
p = _mi_os_try_alloc_from_huge_reserved(size, try_alignment);
if (p != NULL) {
*is_large = true;
return p;
}
}
#if defined(_WIN32)
int flags = MEM_RESERVE;
if (commit) flags |= MEM_COMMIT;
p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false);
p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
#elif defined(__wasi__)
*is_large = false;
p = mi_wasm_heap_grow(size, try_alignment);
#else
int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false);
p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
#endif
_mi_stat_increase(&stats->mmap_calls, 1);
if (p != NULL) {
_mi_stat_increase(&stats->reserved, size);
if (commit) _mi_stat_increase(&stats->committed, size);
if (commit) { _mi_stat_increase(&stats->committed, size); }
}
return p;
}
@ -431,19 +486,20 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_
// Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned.
static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit, mi_stats_t* stats) {
static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) {
mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
if (!commit) allow_large = false;
if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
size = _mi_align_up(size, _mi_os_page_size());
// try first with a hint (this will be aligned directly on Win 10+ or BSD)
void* p = mi_os_mem_alloc(size, alignment, commit, stats);
void* p = mi_os_mem_alloc(size, alignment, commit, allow_large, is_large, stats);
if (p == NULL) return NULL;
// if not aligned, free it, overallocate, and unmap around it
if (((uintptr_t)p % alignment != 0)) {
mi_os_mem_free(p, size, stats);
mi_os_mem_free(p, size, commit, stats);
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
size_t over_size = size + alignment;
@ -457,7 +513,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
if (commit) flags |= MEM_COMMIT;
for (int tries = 0; tries < 3; tries++) {
// over-allocate to determine a virtual memory range
p = mi_os_mem_alloc(over_size, alignment, commit, stats);
p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
if (p == NULL) return NULL; // error
if (((uintptr_t)p % alignment) == 0) {
// if p happens to be aligned, just decommit the left-over area
@ -466,19 +522,19 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
}
else {
// otherwise free and allocate at an aligned address in there
mi_os_mem_free(p, over_size, stats);
mi_os_mem_free(p, over_size, commit, stats);
void* aligned_p = mi_align_up_ptr(p, alignment);
p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false);
p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false, allow_large, is_large);
if (p == aligned_p) break; // success!
if (p != NULL) { // should not happen?
mi_os_mem_free(p, size, stats);
mi_os_mem_free(p, size, commit, stats);
p = NULL;
}
}
}
#else
// overallocate...
p = mi_os_mem_alloc(over_size, alignment, commit, stats);
p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
if (p == NULL) return NULL;
// and selectively unmap parts around the over-allocated area.
void* aligned_p = mi_align_up_ptr(p, alignment);
@ -486,8 +542,8 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
size_t mid_size = _mi_align_up(size, _mi_os_page_size());
size_t post_size = over_size - pre_size - mid_size;
mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
if (pre_size > 0) mi_os_mem_free(p, pre_size, stats);
if (post_size > 0) mi_os_mem_free((uint8_t*)aligned_p + mid_size, post_size, stats);
if (pre_size > 0) mi_os_mem_free(p, pre_size, commit, stats);
if (post_size > 0) mi_os_mem_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats);
// we can return the aligned pointer on `mmap` systems
p = aligned_p;
#endif
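The overallocate-and-trim scheme above can be illustrated with a small standalone
sketch (POSIX only; assumes `size` and `alignment` are multiples of the OS page size,
`alignment` is a power of two, and the name `os_alloc_aligned_sketch` is hypothetical,
not part of mimalloc):

  #include <stddef.h>
  #include <stdint.h>
  #include <sys/mman.h>

  static void* os_alloc_aligned_sketch(size_t size, size_t alignment) {
    size_t over_size = size + alignment;                    // enough slack to contain an aligned block
    uint8_t* p = (uint8_t*)mmap(NULL, over_size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return NULL;
    uint8_t* aligned = (uint8_t*)(((uintptr_t)p + alignment - 1) & ~(uintptr_t)(alignment - 1));
    size_t pre_size  = (size_t)(aligned - p);               // unused head
    size_t post_size = over_size - pre_size - size;         // unused tail
    if (pre_size  > 0) munmap(p, pre_size);                 // trim the head
    if (post_size > 0) munmap(aligned + size, post_size);   // trim the tail
    return aligned;                                         // `size` aligned bytes remain mapped
  }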
@ -503,22 +559,32 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
void* _mi_os_alloc(size_t size, mi_stats_t* stats) {
if (size == 0) return NULL;
size = mi_os_good_alloc_size(size, 0);
return mi_os_mem_alloc(size, 0, true, stats);
size = _mi_os_good_alloc_size(size);
bool is_large = false;
return mi_os_mem_alloc(size, 0, true, false, &is_large, stats);
}
void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats) {
if (size == 0 || p == NULL) return;
size = _mi_os_good_alloc_size(size);
mi_os_mem_free(p, size, was_committed, stats);
}
void _mi_os_free(void* p, size_t size, mi_stats_t* stats) {
if (size == 0 || p == NULL) return;
size = mi_os_good_alloc_size(size, 0);
mi_os_mem_free(p, size, stats);
_mi_os_free_ex(p, size, true, stats);
}
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, mi_os_tld_t* tld)
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld)
{
if (size == 0) return NULL;
size = mi_os_good_alloc_size(size, alignment);
size = _mi_os_good_alloc_size(size);
alignment = _mi_align_up(alignment, _mi_os_page_size());
return mi_os_mem_alloc_aligned(size, alignment, commit, tld->stats);
bool allow_large = false;
if (large != NULL) {
allow_large = *large;
*large = false;
}
return mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, (large!=NULL?large:&allow_large), tld->stats);
}
@ -555,11 +621,12 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
// Commit/Decommit memory.
// Usually commit is aligned liberally, while decommit is aligned conservatively.
// (but not for the reset version where we want commit to be conservative as well)
static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, mi_stats_t* stats) {
static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, bool* is_zero, mi_stats_t* stats) {
// page-align within the range; commit liberally, decommit conservatively
*is_zero = false;
size_t csize;
void* start = mi_os_page_align_areax(conservative, addr, size, &csize);
if (csize == 0 || mi_os_is_huge_reserved(addr)) return true;
if (csize == 0 || _mi_os_is_huge_reserved(addr)) return true;
int err = 0;
if (commit) {
_mi_stat_increase(&stats->committed, csize);
@ -571,6 +638,8 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
#if defined(_WIN32)
if (commit) {
// if the memory was already committed, the call succeeds but it is not zero'd
// *is_zero = true;
void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE);
err = (p == start ? 0 : GetLastError());
}
@ -582,6 +651,7 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
// WebAssembly guests can't control memory protection
#else
err = mprotect(start, csize, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE));
if (err != 0) { err = errno; }
#endif
if (err != 0) {
_mi_warning_message("commit/decommit error: start: 0x%p, csize: 0x%x, err: %i\n", start, csize, err);
@ -590,16 +660,17 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
return (err == 0);
}
bool _mi_os_commit(void* addr, size_t size, mi_stats_t* stats) {
return mi_os_commitx(addr, size, true, false /* conservative? */, stats);
bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
return mi_os_commitx(addr, size, true, false /* conservative? */, is_zero, stats);
}
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) {
return mi_os_commitx(addr, size, false, true /* conservative? */, stats);
bool is_zero;
return mi_os_commitx(addr, size, false, true /* conservative? */, &is_zero, stats);
}
bool _mi_os_commit_unreset(void* addr, size_t size, mi_stats_t* stats) {
return mi_os_commitx(addr, size, true, true /* conservative? */, stats);
bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
return mi_os_commitx(addr, size, true, true /* conservative? */, is_zero, stats);
}
@ -611,13 +682,13 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
// page align conservatively within the range
size_t csize;
void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0 || mi_os_is_huge_reserved(addr)) return true;
if (csize == 0 || _mi_os_is_huge_reserved(addr)) return true;
if (reset) _mi_stat_increase(&stats->reset, csize);
else _mi_stat_decrease(&stats->reset, csize);
if (!reset) return true; // nothing to do on unreset!
#if (MI_DEBUG>1)
if (!mi_option_is_enabled(mi_option_secure)) {
if (MI_SECURE==0) {
memset(start, 0, csize); // pretend it is eagerly reset
}
#endif
@ -626,6 +697,11 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
// Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
mi_assert_internal(p == start);
#if 1
if (p == start) {
VirtualUnlock(start,csize); // VirtualUnlock after MEM_RESET removes the memory from the working set
}
#endif
if (p != start) return false;
#else
#if defined(MADV_FREE)
@ -663,11 +739,12 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
}
}
bool _mi_os_unreset(void* addr, size_t size, mi_stats_t* stats) {
bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
if (mi_option_is_enabled(mi_option_reset_decommits)) {
return _mi_os_commit_unreset(addr, size, stats); // re-commit it (conservatively!)
return _mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!)
}
else {
*is_zero = false;
return mi_os_resetx(addr, size, false, stats);
}
}
@ -679,8 +756,8 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) {
size_t csize = 0;
void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0) return false;
if (mi_os_is_huge_reserved(addr)) {
_mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
if (_mi_os_is_huge_reserved(addr)) {
_mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
}
int err = 0;
#ifdef _WIN32
@ -691,6 +768,7 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) {
err = 0;
#else
err = mprotect(start, csize, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
if (err != 0) { err = errno; }
#endif
if (err != 0) {
_mi_warning_message("mprotect error: start: 0x%p, csize: 0x%x, err: %i\n", start, csize, err);
@ -724,43 +802,44 @@ bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
// we cannot shrink on windows, but we can decommit
return _mi_os_decommit(start, size, stats);
#else
return mi_os_mem_free(start, size, stats);
return mi_os_mem_free(start, size, true, stats);
#endif
}
/* ----------------------------------------------------------------------------
Support for huge OS pages (1GiB) that are reserved up-front and never
released. Only regions are allocated from here (see `memory.c`) so the memory
will be reused.
-----------------------------------------------------------------------------*/
#define MI_HUGE_OS_PAGE_SIZE ((size_t)1 << 30) // 1GiB
typedef struct mi_huge_info_s {
volatile _Atomic(void*) start;
volatile _Atomic(size_t) reserved;
volatile _Atomic(size_t) used;
volatile _Atomic(void*) start; // start of huge page area (32TiB)
volatile _Atomic(size_t) reserved; // total reserved size
volatile _Atomic(size_t) used; // currently allocated
} mi_huge_info_t;
static mi_huge_info_t os_huge_reserved = { NULL, 0, ATOMIC_VAR_INIT(0) };
static bool mi_os_is_huge_reserved(void* p) {
return (mi_atomic_read_ptr(&os_huge_reserved.start) != NULL &&
bool _mi_os_is_huge_reserved(void* p) {
return (mi_atomic_read_ptr(&os_huge_reserved.start) != NULL &&
p >= mi_atomic_read_ptr(&os_huge_reserved.start) &&
(uint8_t*)p < (uint8_t*)mi_atomic_read_ptr(&os_huge_reserved.start) + mi_atomic_read(&os_huge_reserved.reserved));
}
static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit)
void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment)
{
// only allow large aligned allocations
// only allow large aligned allocations (e.g. regions)
if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL;
if (try_alignment > MI_SEGMENT_SIZE) return NULL;
if (!commit) return NULL;
if (mi_atomic_read_ptr(&os_huge_reserved.start)==NULL) return NULL;
if (mi_atomic_read(&os_huge_reserved.used) >= mi_atomic_read(&os_huge_reserved.reserved)) return NULL; // already full
// always aligned
mi_assert_internal(mi_atomic_read(&os_huge_reserved.used) % MI_SEGMENT_SIZE == 0 );
mi_assert_internal( (uintptr_t)mi_atomic_read_ptr(&os_huge_reserved.start) % MI_SEGMENT_SIZE == 0 );
// try to reserve space
size_t base = mi_atomic_addu( &os_huge_reserved.used, size );
if ((base + size) > os_huge_reserved.reserved) {
@ -791,37 +870,45 @@ static void mi_os_free_huge_reserved() {
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
UNUSED(pages); UNUSED(max_secs);
if (pages_reserved != NULL) *pages_reserved = 0;
return ENOMEM; // cannot allocate
return ENOMEM;
}
#else
int mi_reserve_huge_os_pages( size_t pages, double max_secs, size_t* pages_reserved ) mi_attr_noexcept
{
if (pages_reserved != NULL) *pages_reserved = 0;
if (max_secs==0) return ETIMEDOUT; // timeout
if (max_secs==0) return ETIMEDOUT; // timeout
if (pages==0) return 0; // ok
if (!mi_atomic_cas_ptr_strong(&os_huge_reserved.start,(void*)1,NULL)) return -2; // already reserved
if (!mi_atomic_cas_ptr_strong(&os_huge_reserved.start,(void*)1,NULL)) return ETIMEDOUT; // already reserved
// Set the start address after the 32TiB area
uint8_t* start = (uint8_t*)((uintptr_t)32 << 40); // 32TiB virtual start address
#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode
uintptr_t r = _mi_random_init((uintptr_t)&mi_reserve_huge_os_pages);
start = start + ((uintptr_t)MI_SEGMENT_SIZE * ((r>>17) & 0xFFFF)); // (randomly 0-64k)*4MiB == 0 to 256GiB
#endif
// Allocate one page at a time, but try to place them contiguously
// We allocate one page at a time to be able to abort if it takes too long
double start_t = _mi_clock_start();
uint8_t* start = (uint8_t*)((uintptr_t)16 << 40); // 16TiB virtual start address
uint8_t* addr = start; // current top of the allocations
for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE ) {
// allocate a page
void* p = NULL;
void* p = NULL;
bool is_large = true;
#ifdef _WIN32
p = mi_win_virtual_alloc(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE, true);
if (page==0) { mi_win_enable_large_os_pages(); }
p = mi_win_virtual_alloc(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE, true, true, &is_large);
#elif defined(MI_OS_USE_MMAP)
p = mi_unix_mmap(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_READ | PROT_WRITE, true);
#else
p = mi_unix_mmap(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_READ | PROT_WRITE, true, true, &is_large);
#else
// always fail
#endif
#endif
// Did we succeed at a contiguous address?
if (p != addr) {
// no success, issue a warning and return with an error
// no success, issue a warning and return with an error
if (p != NULL) {
_mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr);
_mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr);
_mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main );
}
else {
@ -832,30 +919,29 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs, size_t* pages_reser
#endif
_mi_warning_message("could not allocate huge page %zu at 0x%p, error: %i\n", page, addr, err);
}
return ENOMEM;
return ENOMEM;
}
// success, record it
if (page==0) {
mi_atomic_write_ptr(&os_huge_reserved.start, addr);
mi_atomic_write_ptr(&os_huge_reserved.start, addr); // don't switch the order of these writes
mi_atomic_write(&os_huge_reserved.reserved, MI_HUGE_OS_PAGE_SIZE);
}
else {
mi_atomic_addu(&os_huge_reserved.reserved,MI_HUGE_OS_PAGE_SIZE);
}
_mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
_mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
_mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
if (pages_reserved != NULL) { *pages_reserved = page + 1; };
if (pages_reserved != NULL) { *pages_reserved = page + 1; }
// check for timeout
double elapsed = _mi_clock_end(start_t);
if (elapsed > max_secs) return (-1); // timeout
if (elapsed > max_secs) return ETIMEDOUT;
if (page >= 1) {
double estimate = ((elapsed / (double)(page+1)) * (double)pages);
if (estimate > 1.5*max_secs) return (-1); // seems like we are going to timeout
if (estimate > 1.5*max_secs) return ETIMEDOUT; // seems like we are going to timeout
}
}
}
_mi_verbose_message("reserved %zu huge pages\n", pages);
return 0;
}
#endif
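As a usage sketch of the public API above (only the prototype shown in this file is
assumed: `mi_reserve_huge_os_pages(pages, max_secs, &pages_reserved)` returning 0 on
success or an errno-style code such as ENOMEM or ETIMEDOUT):

  #include <mimalloc.h>
  #include <stdio.h>

  int main(void) {
    size_t reserved = 0;
    int err = mi_reserve_huge_os_pages(4, 5.0 /* give up after ~5 seconds */, &reserved);
    if (err == 0) printf("reserved %zu huge (1GiB) OS pages\n", reserved);
    else          printf("reservation incomplete: err=%d, reserved=%zu pages\n", err, reserved);
    return 0;
  }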

View file

@ -81,6 +81,14 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
mi_assert_internal(mi_page_list_is_valid(page,page->free));
mi_assert_internal(mi_page_list_is_valid(page,page->local_free));
#if MI_DEBUG>3 // generally too expensive to check this
if (page->flags.is_zero) {
for(mi_block_t* block = page->free; block != NULL; mi_block_next(page,block)) {
mi_assert_expensive(mi_mem_is_zero(block + 1, page->block_size - sizeof(mi_block_t)));
}
}
#endif
mi_block_t* tfree = mi_tf_block(page->thread_free);
mi_assert_internal(mi_page_list_is_valid(page, tfree));
size_t tfree_count = mi_page_list_count(page, tfree);
@ -180,10 +188,11 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
// and the local free list
if (page->local_free != NULL) {
if (mi_unlikely(page->free == NULL)) {
if (mi_likely(page->free == NULL)) {
// usual case
page->free = page->local_free;
page->local_free = NULL;
page->flags.is_zero = false;
}
else if (force) {
// append -- only on shutdown (force) as this is a linear operation
@ -195,7 +204,8 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
mi_block_set_next(page, tail, page->free);
page->free = page->local_free;
page->local_free = NULL;
}
page->flags.is_zero = false;
}
}
mi_assert_internal(!force || page->local_free == NULL);
@ -400,7 +410,7 @@ void _mi_page_retire(mi_page_t* page) {
// if its neighbours are almost fully used.
if (mi_likely(page->block_size <= (MI_SMALL_SIZE_MAX/4))) {
if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
_mi_stat_counter_increase(&_mi_stats_main.page_no_retire,1);
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
return; // don't retire after all
}
}
@ -472,7 +482,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* heap, mi_page_t* page, si
heap->random = _mi_random_shuffle(rnd);
}
static void mi_page_free_list_extend( mi_page_t* page, size_t extend, mi_stats_t* stats)
static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* page, size_t extend, mi_stats_t* stats)
{
UNUSED(stats);
mi_assert_internal(page->free == NULL);
@ -481,15 +491,15 @@ static void mi_page_free_list_extend( mi_page_t* page, size_t extend, mi_stats_t
void* page_area = _mi_page_start(_mi_page_segment(page), page, NULL );
size_t bsize = page->block_size;
mi_block_t* start = mi_page_block_at(page, page_area, page->capacity);
// initialize a sequential free list
mi_block_t* last = mi_page_block_at(page, page_area, page->capacity + extend - 1);
mi_block_t* last = mi_page_block_at(page, page_area, page->capacity + extend - 1);
mi_block_t* block = start;
while(block <= last) {
mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize);
mi_block_set_next(page,block,next);
block = next;
}
}
mi_block_set_next(page, last, NULL);
page->free = start;
}
@ -520,11 +530,11 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
size_t page_size;
_mi_page_start(_mi_page_segment(page), page, &page_size);
_mi_stat_increase(&stats->pages_extended, 1);
mi_stat_increase(stats->pages_extended, 1);
// calculate the extend count
size_t extend = page->reserved - page->capacity;
size_t max_extend = MI_MAX_EXTEND_SIZE/page->block_size;
size_t max_extend = (page->block_size >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)page->block_size);
if (max_extend < MI_MIN_EXTEND) max_extend = MI_MIN_EXTEND;
if (extend > max_extend) {
@ -537,7 +547,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
mi_assert_internal(extend < (1UL<<16));
// and append the extend to the free list
if (extend < MI_MIN_SLICES || !mi_option_is_enabled(mi_option_secure)) {
if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
mi_page_free_list_extend(page, extend, stats );
}
else {
@ -545,8 +555,12 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
}
// enable the new free list
page->capacity += (uint16_t)extend;
_mi_stat_increase(&stats->page_committed, extend * page->block_size);
mi_stat_increase(stats->page_committed, extend * page->block_size);
// extension into zero-initialized memory preserves the zeroed free list
if (!page->is_zero_init) {
page->flags.is_zero = false;
}
mi_assert_expensive(mi_page_is_valid_init(page));
}
@ -565,6 +579,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
#if MI_SECURE
page->cookie = _mi_heap_random(heap) | 1;
#endif
page->flags.is_zero = page->is_zero_init;
mi_assert_internal(page->capacity == 0);
mi_assert_internal(page->free == NULL);
@ -638,7 +653,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
page = next;
} // for each page
_mi_stat_counter_increase(&heap->tld->stats.searches,count);
mi_stat_counter_increase(heap->tld->stats.searches,count);
if (page == NULL) {
page = rpage;
@ -664,7 +679,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
mi_page_queue_t* pq = mi_page_queue(heap,size);
mi_page_t* page = pq->first;
if (page != NULL) {
if (mi_option_get(mi_option_secure) >= 3 && page->capacity < page->reserved && ((_mi_heap_random(heap) & 1) == 1)) {
if ((MI_SECURE >= 3) && page->capacity < page->reserved && ((_mi_heap_random(heap) & 1) == 1)) {
// in secure mode, we extend half the time to increase randomness
mi_page_extend_free(heap, page, &heap->tld->stats);
mi_assert_internal(mi_page_immediate_available(page));
@ -687,12 +702,14 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
a certain number of allocations.
----------------------------------------------------------- */
static mi_deferred_free_fun* deferred_free = NULL;
static mi_deferred_free_fun* volatile deferred_free = NULL;
void _mi_deferred_free(mi_heap_t* heap, bool force) {
heap->tld->heartbeat++;
if (deferred_free != NULL) {
if (deferred_free != NULL && !heap->tld->recurse) {
heap->tld->recurse = true;
deferred_free(force, heap->tld->heartbeat);
heap->tld->recurse = false;
}
}
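A usage sketch of this hook; the two-argument callback signature follows the
`deferred_free(force, heartbeat)` call above, and `flush_app_free_queue` is a
hypothetical application function:

  #include <mimalloc.h>
  #include <stdbool.h>

  static void app_deferred_free(bool force, unsigned long long heartbeat) {
    // called from inside the allocator every so often (and with force=true on collect)
    if (force || (heartbeat % 64) == 0) {
      // flush_app_free_queue();   // hypothetical: release application-side cached blocks
    }
  }

  // during startup:
  //   mi_register_deferred_free(&app_deferred_free);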
@ -710,7 +727,7 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
size_t block_size = _mi_wsize_from_size(size) * sizeof(uintptr_t);
size_t block_size = _mi_os_good_alloc_size(size);
mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
if (page != NULL) {

View file

@ -151,22 +151,22 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
uint8_t* p = (uint8_t*)segment + page->segment_idx*psize;
if (page->segment_idx == 0) {
// the first page starts after the segment info (and possible guard page)
p += segment->segment_info_size;
psize -= segment->segment_info_size;
// for small and medium objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
if (block_size > 0 && segment->page_kind <= MI_PAGE_MEDIUM) {
size_t adjust = block_size - ((uintptr_t)p % block_size);
if (adjust < block_size) {
p += adjust;
psize -= adjust;
}
mi_assert_internal((uintptr_t)p % block_size == 0);
}
if (page->segment_idx == 0) {
// the first page starts after the segment info (and possible guard page)
p += segment->segment_info_size;
psize -= segment->segment_info_size;
// for small and medium objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
if (block_size > 0 && segment->page_kind <= MI_PAGE_MEDIUM) {
size_t adjust = block_size - ((uintptr_t)p % block_size);
if (adjust < block_size) {
p += adjust;
psize -= adjust;
}
mi_assert_internal((uintptr_t)p % block_size == 0);
}
}
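// Worked example with illustrative numbers: if block_size == 320 and the page start is
// 4096 bytes into the segment, then 4096 % 320 == 256, so adjust == 64 and the start
// moves to 4160, a multiple of 320. If the start is already block-aligned, adjust
// equals block_size and the `adjust < block_size` test leaves it unchanged.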
long secure = mi_option_get(mi_option_secure);
if (secure > 1 || (secure == 1 && page->segment_idx == segment->capacity - 1)) {
if (MI_SECURE > 1 || (MI_SECURE == 1 && page->segment_idx == segment->capacity - 1)) {
// secure == 1: the last page has an os guard page at the end
// secure > 1: every page has an os guard page
psize -= _mi_os_page_size();
@ -186,18 +186,18 @@ static size_t mi_segment_size(size_t capacity, size_t required, size_t* pre_size
capacity = MI_SMALL_PAGES_PER_SEGMENT;
}
*/
size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
size_t guardsize = 0;
size_t isize = 0;
if (!mi_option_is_enabled(mi_option_secure)) {
if (MI_SECURE == 0) {
// normally no guard pages
isize = _mi_align_up(minsize, (16 > MI_MAX_ALIGN_SIZE ? 16 : MI_MAX_ALIGN_SIZE));
isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
}
else {
// in secure mode, we set up a protected page in between the segment info
// and the page data (and one at the end of the segment)
size_t page_size = _mi_os_page_size();
const size_t page_size = _mi_os_page_size();
isize = _mi_align_up(minsize, page_size);
guardsize = page_size;
required = _mi_align_up(required, page_size);
@ -228,7 +228,8 @@ static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
segment->thread_id = 0;
mi_segments_track_size(-((long)segment_size),tld);
if (mi_option_is_enabled(mi_option_secure)) {
if (MI_SECURE != 0) {
mi_assert_internal(!segment->mem_is_fixed);
_mi_mem_unprotect(segment, segment->segment_size); // ensure no more guard pages are set
}
_mi_mem_free(segment, segment_size, segment->memid, tld->stats);
@ -277,7 +278,7 @@ static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld)
return false;
}
mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
if (mi_option_is_enabled(mi_option_cache_reset)) {
if (!segment->mem_is_fixed && mi_option_is_enabled(mi_option_cache_reset)) {
_mi_mem_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->stats);
}
segment->next = tld->cache;
@ -325,11 +326,15 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);
// Try to get it from our thread local cache first
bool commit = mi_option_is_enabled(mi_option_eager_commit) || (page_kind > MI_PAGE_MEDIUM);
bool eager_delay = (tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
bool commit = eager || (page_kind > MI_PAGE_MEDIUM);
bool protection_still_good = false;
bool is_zero = false;
mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
if (segment != NULL) {
if (mi_option_is_enabled(mi_option_secure)) {
if (MI_SECURE!=0) {
mi_assert_internal(!segment->mem_is_fixed);
if (segment->page_kind != page_kind) {
_mi_mem_unprotect(segment, segment->segment_size); // reset protection if the page kind differs
}
@ -337,44 +342,49 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
protection_still_good = true; // otherwise, the guard pages are still in place
}
}
if (!mi_option_is_enabled(mi_option_eager_commit)) {
if (page_kind > MI_PAGE_MEDIUM) {
_mi_mem_commit(segment, segment->segment_size, tld->stats);
}
else {
// ok, commit (and unreset) on demand again
}
if (!segment->mem_is_committed && page_kind > MI_PAGE_MEDIUM) {
mi_assert_internal(!segment->mem_is_fixed);
_mi_mem_commit(segment, segment->segment_size, &is_zero, tld->stats);
segment->mem_is_committed = true;
}
else if (mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset)) {
_mi_mem_unreset(segment, segment->segment_size, tld->stats);
if (!segment->mem_is_fixed &&
(mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset))) {
bool reset_zero = false;
_mi_mem_unreset(segment, segment->segment_size, &reset_zero, tld->stats);
if (reset_zero) is_zero = true;
}
}
else {
// Allocate the segment from the OS
size_t memid;
segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, commit, &memid, os_tld);
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_zero, &memid, os_tld);
if (segment == NULL) return NULL; // failed to allocate
if (!commit) {
_mi_mem_commit(segment, info_size, tld->stats);
// ensure the initial info is committed
bool commit_zero = false;
_mi_mem_commit(segment, info_size, &commit_zero, tld->stats);
if (commit_zero) is_zero = true;
}
segment->memid = memid;
segment->mem_is_fixed = mem_large;
segment->mem_is_committed = commit;
mi_segments_track_size((long)segment_size, tld);
}
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
// zero the segment info
{ size_t memid = segment->memid;
memset(segment, 0, info_size);
segment->memid = memid;
}
// zero the segment info (but not the `mem` fields)
ptrdiff_t ofs = offsetof(mi_segment_t,next);
memset((uint8_t*)segment + ofs, 0, info_size - ofs);
if (mi_option_is_enabled(mi_option_secure) && !protection_still_good) {
// guard pages
if ((MI_SECURE != 0) && !protection_still_good) {
// in secure mode, we set up a protected page in between the segment info
// and the page data
mi_assert_internal( info_size == pre_size - _mi_os_page_size() && info_size % _mi_os_page_size() == 0);
_mi_mem_protect( (uint8_t*)segment + info_size, (pre_size - info_size) );
size_t os_page_size = _mi_os_page_size();
if (mi_option_get(mi_option_secure) <= 1) {
if (MI_SECURE <= 1) {
// and protect the last page too
_mi_mem_protect( (uint8_t*)segment + segment_size - os_page_size, os_page_size );
}
@ -386,6 +396,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
}
}
// initialize
segment->page_kind = page_kind;
segment->capacity = capacity;
segment->page_shift = page_shift;
@ -397,6 +408,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
segment->pages[i].segment_idx = i;
segment->pages[i].is_reset = false;
segment->pages[i].is_committed = commit;
segment->pages[i].is_zero_init = is_zero;
}
_mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
//fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
@ -453,15 +465,20 @@ static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_stats_t* stats)
if (!page->segment_in_use) {
if (page->is_reset || !page->is_committed) {
size_t psize;
uint8_t* start = _mi_page_start(segment, page, &psize);
mi_assert_internal(!(page->is_reset && !page->is_committed));
uint8_t* start = _mi_page_start(segment, page, &psize);
if (!page->is_committed) {
mi_assert_internal(!segment->mem_is_fixed);
page->is_committed = true;
_mi_mem_commit(start,psize,stats);
bool is_zero = false;
_mi_mem_commit(start,psize,&is_zero,stats);
if (is_zero) page->is_zero_init = true;
}
if (page->is_reset) {
mi_assert_internal(!segment->mem_is_fixed);
page->is_reset = false;
_mi_mem_unreset(start, psize, stats);
bool is_zero = false;
_mi_mem_unreset(start, psize, &is_zero, stats);
if (is_zero) page->is_zero_init = true;
}
}
return page;
@ -486,24 +503,20 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
size_t inuse = page->capacity * page->block_size;
_mi_stat_decrease(&stats->page_committed, inuse);
_mi_stat_decrease(&stats->pages, 1);
// reset the page memory to reduce memory pressure?
if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
if (!segment->mem_is_fixed && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
size_t psize;
uint8_t* start = _mi_page_start(segment, page, &psize);
page->is_reset = true;
_mi_mem_reset(start, psize, stats);
}
// zero the page data
uint8_t idx = page->segment_idx; // don't clear the index
bool is_reset = page->is_reset; // don't clear the reset flag
bool is_committed = page->is_committed; // don't clear the commit flag
memset(page, 0, sizeof(*page));
page->segment_idx = idx;
// zero the page data, but not the segment fields
page->is_zero_init = false;
ptrdiff_t ofs = offsetof(mi_page_t,capacity);
memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
page->segment_in_use = false;
page->is_reset = is_reset;
page->is_committed = is_committed;
segment->used--;
}

View file

@ -8,22 +8,10 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"
#include <stdio.h> // fputs, stderr
#include <string.h> // memset
/* -----------------------------------------------------------
Merge thread statistics with the main one.
----------------------------------------------------------- */
static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src);
void _mi_stats_done(mi_stats_t* stats) {
if (stats == &_mi_stats_main) return;
mi_stats_add(&_mi_stats_main, stats);
memset(stats,0,sizeof(*stats));
}
/* -----------------------------------------------------------
Statistics operations
----------------------------------------------------------- */
@ -85,6 +73,7 @@ static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64
mi_atomic_add64( &stat->allocated, src->allocated * unit);
mi_atomic_add64( &stat->current, src->current * unit);
mi_atomic_add64( &stat->freed, src->freed * unit);
// peak scores do not work across threads.
mi_atomic_add64( &stat->peak, src->peak * unit);
}
@ -132,7 +121,7 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
Display statistics
----------------------------------------------------------- */
static void mi_printf_amount(int64_t n, int64_t unit, FILE* out, const char* fmt) {
static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, const char* fmt) {
char buf[32];
int len = 32;
const char* suffix = (unit <= 0 ? " " : "b");
@ -153,16 +142,16 @@ static void mi_printf_amount(int64_t n, int64_t unit, FILE* out, const char* fmt
}
static void mi_print_amount(int64_t n, int64_t unit, FILE* out) {
static void mi_print_amount(int64_t n, int64_t unit, mi_output_fun* out) {
mi_printf_amount(n,unit,out,NULL);
}
static void mi_print_count(int64_t n, int64_t unit, FILE* out) {
static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out) {
if (unit==1) _mi_fprintf(out,"%11s"," ");
else mi_print_amount(n,0,out);
}
static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, FILE* out ) {
static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out ) {
_mi_fprintf(out,"%10s:", msg);
if (unit>0) {
mi_print_amount(stat->peak, unit, out);
@ -191,24 +180,24 @@ static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t
}
}
static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, FILE* out ) {
static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out ) {
_mi_fprintf(out, "%10s:", msg);
mi_print_amount(stat->total, -1, out);
_mi_fprintf(out, "\n");
}
static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, FILE* out) {
static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out) {
double avg = (stat->count == 0 ? 0.0 : (double)stat->total / (double)stat->count);
_mi_fprintf(out, "%10s: %7.1f avg\n", msg, avg);
}
static void mi_print_header( FILE* out ) {
static void mi_print_header(mi_output_fun* out ) {
_mi_fprintf(out,"%10s: %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "unit ", "count ");
}
#if MI_STAT>1
static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bins, size_t max, const char* fmt, FILE* out) {
static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bins, size_t max, const char* fmt, mi_output_fun* out) {
bool found = false;
char buf[64];
for (size_t i = 0; i <= max; i++) {
@ -232,8 +221,7 @@ static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bin
static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit);
static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_noexcept {
if (out == NULL) out = stderr;
static void _mi_stats_print(mi_stats_t* stats, double secs, mi_output_fun* out) mi_attr_noexcept {
mi_print_header(out);
#if MI_STAT>1
mi_stat_count_t normal = { 0,0,0,0 };
@ -293,6 +281,13 @@ static mi_stats_t* mi_stats_get_default(void) {
return &heap->tld->stats;
}
static void mi_stats_merge_from(mi_stats_t* stats) {
if (stats != &_mi_stats_main) {
mi_stats_add(&_mi_stats_main, stats);
memset(stats, 0, sizeof(mi_stats_t));
}
}
void mi_stats_reset(void) mi_attr_noexcept {
mi_stats_t* stats = mi_stats_get_default();
if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); }
@ -300,19 +295,25 @@ void mi_stats_reset(void) mi_attr_noexcept {
mi_time_start = _mi_clock_start();
}
static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) {
if (stats != &_mi_stats_main) {
mi_stats_add(&_mi_stats_main,stats);
memset(stats,0,sizeof(mi_stats_t));
}
void mi_stats_merge(void) mi_attr_noexcept {
mi_stats_merge_from( mi_stats_get_default() );
}
void _mi_stats_done(mi_stats_t* stats) { // called from `mi_thread_done`
mi_stats_merge_from(stats);
}
static void mi_stats_print_ex(mi_stats_t* stats, double secs, mi_output_fun* out) {
mi_stats_merge_from(stats);
_mi_stats_print(&_mi_stats_main, secs, out);
}
void mi_stats_print(FILE* out) mi_attr_noexcept {
void mi_stats_print(mi_output_fun* out) mi_attr_noexcept {
mi_stats_print_ex(mi_stats_get_default(),_mi_clock_end(mi_time_start),out);
}
void mi_thread_stats_print(FILE* out) mi_attr_noexcept {
void mi_thread_stats_print(mi_output_fun* out) mi_attr_noexcept {
_mi_stats_print(mi_stats_get_default(), _mi_clock_end(mi_time_start), out);
}
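A usage sketch of the new output hook (assuming `mi_output_fun` receives a single
formatted message string, matching the `_mi_fprintf(out, ...)` calls above; passing
NULL keeps the default output):

  #include <mimalloc.h>
  #include <stdio.h>

  static void stats_to_stdout(const char* msg) {
    fputs(msg, stdout);               // forward each formatted chunk to stdout (or a logger)
  }

  void report_stats(void) {
    mi_stats_merge();                 // fold this thread's statistics into the main stats
    mi_stats_print(&stats_to_stdout); // print through the custom output function
  }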