merge dev-win

daan 2019-08-20 17:31:46 -07:00
commit cd52d0a6d9
19 changed files with 412 additions and 206 deletions

View file

@ -146,7 +146,6 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
@ -155,6 +154,7 @@
<WholeProgramOptimization>false</WholeProgramOptimization>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<CompileAs>Default</CompileAs>
<BufferSecurityCheck>false</BufferSecurityCheck>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
@ -173,7 +173,6 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
@ -182,6 +181,7 @@
<WholeProgramOptimization>false</WholeProgramOptimization>
<RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
<CompileAs>Default</CompileAs>
<BufferSecurityCheck>false</BufferSecurityCheck>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>

View file

@ -141,8 +141,6 @@
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
@ -150,11 +148,9 @@
<AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
<WholeProgramOptimization>false</WholeProgramOptimization>
<BufferSecurityCheck>false</BufferSecurityCheck>
<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
<FavorSizeOrSpeed>Neither</FavorSizeOrSpeed>
<OmitFramePointers>false</OmitFramePointers>
<EnableFiberSafeOptimizations>false</EnableFiberSafeOptimizations>
<InlineFunctionExpansion>Default</InlineFunctionExpansion>
<CompileAs>CompileAsCpp</CompileAs>
<IntrinsicFunctions>true</IntrinsicFunctions>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
@ -172,8 +168,6 @@
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
@ -181,11 +175,9 @@
<AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
<WholeProgramOptimization>false</WholeProgramOptimization>
<BufferSecurityCheck>false</BufferSecurityCheck>
<InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>
<FavorSizeOrSpeed>Neither</FavorSizeOrSpeed>
<OmitFramePointers>false</OmitFramePointers>
<EnableFiberSafeOptimizations>false</EnableFiberSafeOptimizations>
<InlineFunctionExpansion>Default</InlineFunctionExpansion>
<CompileAs>CompileAsCpp</CompileAs>
<IntrinsicFunctions>true</IntrinsicFunctions>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>

View file

@ -68,6 +68,9 @@ static inline void* mi_atomic_exchange_ptr(volatile void** p, void* exchange) {
return (void*)mi_atomic_exchange((volatile uintptr_t*)p, (uintptr_t)exchange);
}
static inline intptr_t mi_atomic_iread(volatile intptr_t* p) {
return (intptr_t)mi_atomic_read( (volatile uintptr_t*)p );
}
#ifdef _MSC_VER
#define WIN32_LEAN_AND_MEAN
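The new `mi_atomic_iread` simply routes a signed read through the unsigned atomic primitive. For readers unfamiliar with the wrapper layer, the equivalent with standard C11 atomics is sketched below (illustrative only, not mimalloc's actual implementation):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  _Atomic intptr_t used = 0;          // e.g. the `used` counter of a reserved arena
  atomic_fetch_add(&used, 4096);
  intptr_t v = atomic_load(&used);    // what mi_atomic_iread provides over a raw intptr_t*
  printf("used = %ld\n", (long)v);
  return 0;
}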

View file

@ -102,6 +102,8 @@ uintptr_t _mi_heap_random(mi_heap_t* heap);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
double _mi_clock_end(double start);
double _mi_clock_start(void);
// "alloc.c"
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
@ -161,15 +163,15 @@ bool _mi_page_is_valid(mi_page_t* page);
// Overflow detecting multiply
#define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
static inline bool mi_mul_overflow(size_t size, size_t count, size_t* total) {
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5
#if (MI_INTPTR_SIZE == 4)
return __builtin_umul_overflow(size, count, total);
return __builtin_umul_overflow(count, size, total);
#else
return __builtin_umull_overflow(size, count, total);
return __builtin_umull_overflow(count, size, total);
#endif
#else /* __builtin_umul_overflow is unavailable */
*total = size * count;
*total = count * size;
return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
&& size > 0 && (SIZE_MAX / size) < count);
#endif
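The reordering above makes the helper read like `calloc(count, size)`. A small self-contained version of the same overflow check, using the portable fallback path shown above (a sketch; the real code prefers the compiler builtins when available):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)

static bool mul_overflow(size_t count, size_t size, size_t* total) {
  *total = count * size;
  // only pay for the division when either operand is >= sqrt(SIZE_MAX)
  return ((size >= MUL_NO_OVERFLOW || count >= MUL_NO_OVERFLOW)
          && size > 0 && (SIZE_MAX / size) < count);
}

int main(void) {
  size_t total;
  printf("%d\n", mul_overflow(SIZE_MAX/2, 4, &total)); // 1: overflows
  printf("%d\n", mul_overflow(1000, 8, &total));       // 0: total == 8000
  return 0;
}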

View file

@ -84,18 +84,19 @@ terms of the MIT license. A copy of the license can be found in the file
// Derived constants
#define MI_SEGMENT_SIZE ((size_t)1<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_MASK (MI_SEGMENT_SIZE - 1)
#define MI_SEGMENT_SLICE_SIZE ((size_t)1 << MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024
#define MI_SMALL_PAGE_SIZE (1<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE (1<<MI_MEDIUM_PAGE_SHIFT)
#define MI_MEDIUM_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128kb on 64-bit
#define MI_MEDIUM_WSIZE_MAX (MI_MEDIUM_SIZE_MAX/MI_INTPTR_SIZE) // 64kb on 64-bit
#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) // 16kb on 64-bit
#define MI_LARGE_SIZE_MAX (MI_SEGMENT_SIZE/4) // 16mb on 64-bit
#define MI_LARGE_WSIZE_MAX (MI_LARGE_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128kb on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE) // 64kb on 64-bit
#define MI_LARGE_OBJ_SIZE_MAX (MI_SEGMENT_SIZE/4) // 16mb on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
// Minimal alignment necessary. On most platforms 16 bytes are needed
// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
@ -104,7 +105,8 @@ terms of the MIT license. A copy of the license can be found in the file
// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE (73U)
#if (MI_MEDIUM_WSIZE_MAX >= 655360)
#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "define more bins"
#endif
@ -170,7 +172,7 @@ typedef struct mi_page_s {
#endif
mi_page_flags_t flags;
size_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`)
mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
volatile uintptr_t thread_freed; // at least this number of blocks are in `thread_free`
volatile mi_thread_free_t thread_free; // list of deferred free blocks freed by other threads
@ -213,7 +215,7 @@ typedef struct mi_segment_s {
struct mi_segment_s* prev;
struct mi_segment_s* abandoned_next; // abandoned segment stack: `used == abandoned`
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
size_t used; // count of pages in use
size_t segment_size;// for huge pages this may be different from `MI_SEGMENT_SIZE`
size_t segment_info_size; // space we are using from the first page for segment meta-data and possible guard pages.
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
@ -221,7 +223,7 @@ typedef struct mi_segment_s {
bool all_committed;
// layout like this to optimize access in `mi_free`
mi_segment_kind_t kind;
uintptr_t thread_id;
size_t slice_count; // slices in this segment (at most MI_SLICES_PER_SEGMENT)
mi_slice_t slices[MI_SLICES_PER_SEGMENT];
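With the default shifts on 64-bit (assumed here from the commented sizes above: 64KiB small pages, 512KiB medium pages, 64MiB segments), the renamed object-size limits work out as below; a quick sketch that prints them:

#include <stdio.h>
#include <stddef.h>

// shift values are assumptions consistent with the comments above (64-bit defaults)
#define SMALL_PAGE_SHIFT   16  // 64KiB small pages
#define MEDIUM_PAGE_SHIFT  19  // 512KiB medium pages
#define SEGMENT_SHIFT      26  // 64MiB segments

int main(void) {
  size_t small  = ((size_t)1 << SMALL_PAGE_SHIFT)  / 4;  // MI_SMALL_OBJ_SIZE_MAX
  size_t medium = ((size_t)1 << MEDIUM_PAGE_SHIFT) / 4;  // MI_MEDIUM_OBJ_SIZE_MAX
  size_t large  = ((size_t)1 << SEGMENT_SHIFT)     / 4;  // MI_LARGE_OBJ_SIZE_MAX
  printf("small: %zu KiB, medium: %zu KiB, large: %zu MiB\n",
         small/1024, medium/1024, large/(1024*1024));    // 16 KiB, 128 KiB, 16 MiB
  return 0;
}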

View file

@ -195,7 +195,7 @@ typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_
mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs) mi_attr_noexcept;
// ------------------------------------------------------
// Convenience
@ -220,25 +220,27 @@ mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
typedef enum mi_option_e {
// stable options
mi_option_show_stats,
mi_option_show_errors,
mi_option_show_stats,
mi_option_verbose,
// the following options are experimental
mi_option_secure,
mi_option_eager_commit,
mi_option_eager_region_commit,
mi_option_large_os_pages, // implies eager commit
mi_option_reserve_huge_os_pages,
mi_option_page_reset,
mi_option_cache_reset,
mi_option_reset_decommits,
mi_option_reset_discards,
_mi_option_last
} mi_option_t;
mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option, bool enable);
mi_decl_export void mi_option_enable_default(mi_option_t option, bool enable);
mi_decl_export void mi_option_enable(mi_option_t option);
mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);
mi_decl_export long mi_option_get(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
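The split of the old `mi_option_enable(option, bool)` into explicit `mi_option_enable`/`mi_option_disable` plus `mi_option_set_enabled` makes call sites read unambiguously. Assuming this header, typical usage looks like:

#include <mimalloc.h>

int main(void) {
  mi_option_enable(mi_option_show_stats);            // was: mi_option_enable(opt, true)
  mi_option_disable(mi_option_verbose);              // was: mi_option_enable(opt, false)
  mi_option_set_enabled(mi_option_page_reset, true); // boolean form still available
  mi_option_set(mi_option_reserve_huge_os_pages, 4); // numeric options unchanged
  return 0;
}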

View file

@ -666,7 +666,7 @@ static void mi_patches_at_quick_exit(void) {
mi_patches_enable_term(); // enter termination phase and patch realloc/free with a no-op
}
__declspec(dllexport) BOOL WINAPI DllEntry(HINSTANCE inst, DWORD reason, LPVOID reserved) {
BOOL WINAPI DllEntry(HINSTANCE inst, DWORD reason, LPVOID reserved) {
if (reason == DLL_PROCESS_ATTACH) {
__security_init_cookie();
}

View file

@ -38,7 +38,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
block->next = 0;
#endif
#if (MI_STAT>1)
if(size <= MI_LARGE_SIZE_MAX) {
if(size <= MI_LARGE_OBJ_SIZE_MAX) {
size_t bin = _mi_bin(size);
mi_heap_stat_increase(heap,normal[bin], 1);
}
@ -230,7 +230,7 @@ void mi_free(void* p) mi_attr_noexcept
#if (MI_STAT>1)
mi_heap_t* heap = mi_heap_get_default();
mi_heap_stat_decrease( heap, malloc, mi_usable_size(p));
if (page->block_size <= MI_LARGE_SIZE_MAX) {
if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease( heap, normal[_mi_bin(page->block_size)], 1);
}
// huge page stat is accounted for in `_mi_page_retire`

View file

@ -130,19 +130,19 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
// if abandoning, mark all pages to no longer add to delayed_free
if (collect == ABANDON) {
//for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) {
// _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE
//}
mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
}
// free thread delayed blocks.
// (if abandoning, after this there are no more local references into the pages.)
_mi_heap_delayed_free(heap);
// collect all pages owned by this thread
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
mi_assert_internal( collect != ABANDON || heap->thread_delayed_free == NULL );
// collect segment caches
if (collect >= FORCE) {
_mi_segment_thread_collect(&heap->tld->segments);
@ -172,7 +172,7 @@ void mi_collect(bool force) mi_attr_noexcept {
----------------------------------------------------------- */
mi_heap_t* mi_heap_get_default(void) {
mi_thread_init();
return mi_get_default_heap();
}
@ -221,7 +221,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
static void mi_heap_free(mi_heap_t* heap) {
mi_assert_internal(mi_heap_is_initialized(heap));
if (mi_heap_is_backing(heap)) return; // don't free the backing heap
// reset default
if (mi_heap_is_default(heap)) {
_mi_heap_default = heap->tld->heap_backing;
@ -242,11 +242,11 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
UNUSED(pq);
// ensure no more thread_delayed_free will be added
_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE);
// stats
if (page->block_size > MI_MEDIUM_SIZE_MAX) {
if (page->block_size <= MI_LARGE_SIZE_MAX) {
if (page->block_size > MI_MEDIUM_OBJ_SIZE_MAX) {
if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
_mi_stat_decrease(&heap->tld->stats.large,page->block_size);
}
else {
@ -255,7 +255,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
}
#if (MI_STAT>1)
size_t inuse = page->used - page->thread_freed;
if (page->block_size <= MI_LARGE_SIZE_MAX) {
if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap,normal[_mi_bin(page->block_size)], inuse);
}
mi_heap_stat_decrease(heap,malloc, page->block_size * inuse); // todo: off for aligned blocks...
@ -303,20 +303,20 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
mi_assert_internal(heap!=NULL);
if (from==NULL || from->page_count == 0) return;
// unfull all full pages
mi_page_t* page = heap->pages[MI_BIN_FULL].first;
// unfull all full pages in the `from` heap
mi_page_t* page = from->pages[MI_BIN_FULL].first;
while (page != NULL) {
mi_page_t* next = page->next;
_mi_page_unfull(page);
page = next;
}
mi_assert_internal(heap->pages[MI_BIN_FULL].first == NULL);
mi_assert_internal(from->pages[MI_BIN_FULL].first == NULL);
// free outstanding thread delayed free blocks
_mi_heap_delayed_free(from);
// transfer all pages by appending the queues; this will set
// a new heap field which is ok as all pages are unfull'd and thus
// other threads won't access this field anymore (see `mi_free_block_mt`)
for (size_t i = 0; i < MI_BIN_FULL; i++) {
mi_page_queue_t* pq = &heap->pages[i];
@ -327,7 +327,7 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
}
mi_assert_internal(from->thread_delayed_free == NULL);
mi_assert_internal(from->page_count == 0);
// and reset the `from` heap
mi_heap_reset_pages(from);
}
@ -525,4 +525,3 @@ bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_vis
mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
}

View file

@ -43,8 +43,8 @@ const mi_page_t _mi_page_empty = {
QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
QNULL(MI_MEDIUM_WSIZE_MAX + 1 /* 655360, Huge queue */), \
QNULL(MI_MEDIUM_WSIZE_MAX + 2) /* Full queue */ }
QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }
#define MI_STAT_COUNT_NULL() {0,0,0,0}
@ -116,14 +116,14 @@ mi_heap_t _mi_heap_main = {
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY,
NULL,
0,
0,
0, // thread id
#if MI_INTPTR_SIZE==8 // the cookie of the main heap can be fixed (unlike page cookies that need to be secure!)
0xCDCDCDCDCDCDCDCDUL,
#else
0xCDCDCDCDUL,
#endif
0,
0, // random
0, // page count
false // can reclaim
};
@ -432,6 +432,12 @@ static void mi_process_load(void) {
const char* msg = NULL;
mi_allocator_init(&msg);
if (msg != NULL) _mi_verbose_message(msg);
if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
double max_secs = (double)pages / 5.0; // 0.2s per page
mi_reserve_huge_os_pages(pages, max_secs);
}
}
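The hook above reserves huge OS pages at load time when the option is set, budgeting 0.2s per 1GiB page. Calling the same API directly (a sketch; return codes taken from the os.c implementation later in this commit):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // mirror of the load-time hook: 0.2s budget per 1GiB page
  size_t pages = 4;
  double max_secs = (double)pages / 5.0;
  int rc = mi_reserve_huge_os_pages(pages, max_secs);
  // 0 = ok, -1 = timed out, -2 = could not allocate
  printf("reserve huge pages: %d\n", rc);
  return 0;
}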
// Initialize the process; called by thread_init or the process loader

View file

@ -128,6 +128,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
size_t mask = mi_region_block_mask(blocks,bitidx);
mi_assert_internal(mask != 0);
mi_assert_internal((mask & mi_atomic_read(&region->map)) == mask);
mi_assert_internal(&regions[idx] == region);
// ensure the region is reserved
void* start = mi_atomic_read_ptr(&region->start);
@ -149,9 +150,23 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
mi_atomic_increment(&regions_count);
}
else {
// failed, another thread allocated just before us, free our allocated memory
// TODO: should we keep the allocated memory and assign it to some other region?
_mi_os_free(start, MI_REGION_SIZE, tld->stats);
// failed, another thread allocated just before us!
// we assign it to a later slot instead (up to 4 tries).
// note: we don't need to increment the region count, this will happen on another allocation
for(size_t i = 1; i <= 4 && idx + i < MI_REGION_MAX; i++) {
void* s = mi_atomic_read_ptr(&regions[idx+i].start);
if (s == NULL) { // quick test
if (mi_atomic_compare_exchange_ptr(&regions[idx+i].start, start, s)) {
start = NULL;
break;
}
}
}
if (start != NULL) {
// free it if we didn't succeed to save it to some other region
_mi_os_free(start, MI_REGION_SIZE, tld->stats);
}
// and continue with the memory at our index
start = mi_atomic_read_ptr(&region->start);
}
}
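The new failure path donates the already-allocated OS memory to one of the next few region slots instead of freeing it. The same claim-by-CAS pattern in isolation (hypothetical standalone names; C11 atomics standing in for mimalloc's wrappers):

#include <stdatomic.h>
#include <stddef.h>
#include <stdbool.h>

#define REGION_MAX 256
static _Atomic(void*) region_start[REGION_MAX];

// try to donate `mem` to one of the next 4 empty slots after `idx`;
// returns true if a slot took ownership, false if the caller must free it
static bool donate_region(size_t idx, void* mem) {
  for (size_t i = 1; i <= 4 && idx + i < REGION_MAX; i++) {
    void* expected = NULL;  // quick test: only try visibly empty slots
    if (atomic_load(&region_start[idx + i]) == NULL &&
        atomic_compare_exchange_strong(&region_start[idx + i], &expected, mem)) {
      return true;
    }
  }
  return false;
}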

View file

@ -34,34 +34,38 @@ typedef enum mi_init_e {
typedef struct mi_option_desc_s {
long value; // the value
mi_init_t init; // is it initialized yet? (from the environment)
mi_option_t option; // for debugging: the option index should match the option
const char* name; // option name without `mimalloc_` prefix
} mi_option_desc_t;
#define MI_OPTION(opt) mi_option_##opt, #opt
#define MI_OPTION_DESC(opt) {0, UNINIT, MI_OPTION(opt) }
static mi_option_desc_t options[_mi_option_last] =
{
// stable options
{ 0, UNINIT, "show_stats" },
{ MI_DEBUG, UNINIT, "show_errors" },
{ 0, UNINIT, "verbose" },
{ MI_DEBUG, UNINIT, MI_OPTION(show_errors) },
{ 0, UNINIT, MI_OPTION(show_stats) },
{ 0, UNINIT, MI_OPTION(verbose) },
#if MI_SECURE
{ MI_SECURE, INITIALIZED, "secure" }, // in a secure build the environment setting is ignored
{ MI_SECURE, INITIALIZED, MI_OPTION(secure) }, // in a secure build the environment setting is ignored
#else
{ 0, UNINIT, "secure" },
{ 0, UNINIT, MI_OPTION(secure) },
#endif
// the following options are experimental and not all combinations make sense.
{ 1, UNINIT, "eager_commit" }, // note: if eager_region_commit is on, this should be on too.
{ 1, UNINIT, MI_OPTION(eager_commit) }, // note: if eager_region_commit is on, this should be on too.
#ifdef _WIN32 // and BSD?
{ 0, UNINIT, "eager_region_commit" }, // don't commit too eagerly on windows (just for looks...)
{ 1, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
#else
{ 1, UNINIT, "eager_region_commit" },
{ 1, UNINIT, MI_OPTION(eager_region_commit) },
#endif
{ 0, UNINIT, "large_os_pages" }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
{ 0, UNINIT, "page_reset" },
{ 0, UNINIT, "cache_reset" },
{ 0, UNINIT, "reset_decommits" }, // note: cannot enable this if secure is on
{ 0, UNINIT, "reset_discards" } // note: cannot enable this if secure is on
{ 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
{ 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
{ 0, UNINIT, MI_OPTION(page_reset) },
{ 0, UNINIT, MI_OPTION(cache_reset) },
{ 0, UNINIT, MI_OPTION(reset_decommits) } // note: cannot enable this if secure is on
};
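The `MI_OPTION(opt)` macro expands to both the enum value and its stringized name, which is what lets the new `desc->option == option` assertions catch a table/enum mismatch. Expanded by hand:

#define MI_OPTION(opt) mi_option_##opt, #opt
// { MI_DEBUG, UNINIT, MI_OPTION(show_errors) }
//   => { MI_DEBUG, UNINIT, mi_option_show_errors, "show_errors" }
// the enum value is the self-check index; the string is the option's
// name (without the `mimalloc_` prefix) used for environment lookup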
static void mi_option_init(mi_option_desc_t* desc);
@ -69,6 +73,7 @@ static void mi_option_init(mi_option_desc_t* desc);
long mi_option_get(mi_option_t option) {
mi_assert(option >= 0 && option < _mi_option_last);
mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
if (mi_unlikely(desc->init == UNINIT)) {
mi_option_init(desc);
if (option != mi_option_verbose) {
@ -81,6 +86,7 @@ long mi_option_get(mi_option_t option) {
void mi_option_set(mi_option_t option, long value) {
mi_assert(option >= 0 && option < _mi_option_last);
mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
desc->value = value;
desc->init = INITIALIZED;
}
@ -97,14 +103,23 @@ bool mi_option_is_enabled(mi_option_t option) {
return (mi_option_get(option) != 0);
}
void mi_option_enable(mi_option_t option, bool enable) {
void mi_option_set_enabled(mi_option_t option, bool enable) {
mi_option_set(option, (enable ? 1 : 0));
}
void mi_option_enable_default(mi_option_t option, bool enable) {
void mi_option_set_enabled_default(mi_option_t option, bool enable) {
mi_option_set_default(option, (enable ? 1 : 0));
}
void mi_option_enable(mi_option_t option) {
mi_option_set_enabled(option,true);
}
void mi_option_disable(mi_option_t option) {
mi_option_set_enabled(option,false);
}
// --------------------------------------------------------
// Messages
// --------------------------------------------------------

src/os.c (270 lines changed)
View file

@ -22,6 +22,9 @@ terms of the MIT license. A copy of the license can be found in the file
#else
#include <sys/mman.h> // mmap
#include <unistd.h> // sysconf
#if defined(__linux__)
#include <linux/mman.h> // linux mmap flags
#endif
#if defined(__APPLE__)
#include <mach/vm_statistics.h>
#endif
@ -34,6 +37,9 @@ terms of the MIT license. A copy of the license can be found in the file
----------------------------------------------------------- */
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
static bool mi_os_is_huge_reserved(void* p);
static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit);
static void* mi_align_up_ptr(void* p, size_t alignment) {
return (void*)_mi_align_up((uintptr_t)p, alignment);
}
@ -67,7 +73,7 @@ size_t _mi_os_large_page_size() {
static bool use_large_os_page(size_t size, size_t alignment) {
// if we have access, check the size and alignment requirements
if (large_os_page_size == 0) return false;
if (large_os_page_size == 0 || !mi_option_is_enabled(mi_option_large_os_pages)) return false;
return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0);
}
@ -81,11 +87,13 @@ static size_t mi_os_good_alloc_size(size_t size, size_t alignment) {
#if defined(_WIN32)
// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016.
// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility)
// Same for DiscardVirtualMemory. (hide MEM_EXTENDED_PARAMETER to compile with older SDK's)
typedef PVOID(__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
typedef DWORD(__stdcall *PDiscardVirtualMemory)(PVOID,SIZE_T);
// NtAllocateVirtualMemoryEx is used for huge OS page allocation (1GiB)
// We hide MEM_EXTENDED_PARAMETER to compile with older SDK's.
#include <winternl.h>
typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ PVOID, ULONG);
static PVirtualAlloc2 pVirtualAlloc2 = NULL;
static PDiscardVirtualMemory pDiscardVirtualMemory = NULL;
static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
void _mi_os_init(void) {
// get the page size
@ -100,12 +108,16 @@ void _mi_os_init(void) {
// use VirtualAlloc2FromApp if possible as it is available to Windows store apps
pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2FromApp");
if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2");
pDiscardVirtualMemory = (PDiscardVirtualMemory)GetProcAddress(hDll, "DiscardVirtualMemory");
FreeLibrary(hDll);
}
hDll = LoadLibrary(TEXT("ntdll.dll"));
if (hDll != NULL) {
pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
FreeLibrary(hDll);
}
// Try to see if large OS pages are supported
unsigned long err = 0;
bool ok = mi_option_is_enabled(mi_option_large_os_pages);
bool ok = mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages);
if (ok) {
// To use large pages on Windows, we first need access permission
// Set "Lock pages in memory" permission in the group policy editor
@ -161,7 +173,7 @@ void _mi_os_init() {
static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
{
if (addr == NULL || size == 0) return true;
if (addr == NULL || size == 0 || mi_os_is_huge_reserved(addr)) return true;
bool err = false;
#if defined(_WIN32)
err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
@ -185,25 +197,49 @@ static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
#ifdef _WIN32
static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
// on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages
if ((size % ((uintptr_t)1 << 30)) == 0 /* 1GiB multiple */
&& (flags & MEM_LARGE_PAGES) != 0 && (flags & MEM_COMMIT) != 0
&& (addr != NULL || try_alignment == 0 || try_alignment % _mi_os_page_size() == 0)
&& pNtAllocateVirtualMemoryEx != NULL)
{
#ifndef MEM_EXTENDED_PARAMETER_NONPAGED_HUGE
#define MEM_EXTENDED_PARAMETER_NONPAGED_HUGE (0x10)
#endif
MEM_EXTENDED_PARAMETER param = { 0, 0 };
param.Type = 5; // == MemExtendedParameterAttributeFlags;
param.ULong64 = MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
SIZE_T psize = size;
void* base = addr;
NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags | MEM_RESERVE, PAGE_READWRITE, &param, 1);
if (err == 0) {
return base;
}
else {
// else fall back to regular large OS pages
_mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) page instead (error %lx)\n", err);
}
}
// on modern Windows try use VirtualAlloc2 for aligned allocation
if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
MEM_ADDRESS_REQUIREMENTS reqs = { 0 };
reqs.Alignment = try_alignment;
MEM_EXTENDED_PARAMETER param = { 0 };
param.Type = MemExtendedParameterAddressRequirements;
param.Pointer = &reqs;
return (*pVirtualAlloc2)(addr, NULL, size, flags, PAGE_READWRITE, &param, 1);
return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
}
#endif
return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
}
static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags) {
static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only) {
static volatile uintptr_t large_page_try_ok = 0;
void* p = NULL;
if (use_large_os_page(size, try_alignment)) {
if (large_only || use_large_os_page(size, try_alignment)) {
uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
if (try_ok > 0) {
if (!large_only && try_ok > 0) {
// if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
// therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
mi_atomic_compare_exchange(&large_page_try_ok, try_ok - 1, try_ok);
@ -211,6 +247,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
else {
// large OS pages must always reserve and commit.
p = mi_win_virtual_allocx(addr, size, try_alignment, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE | flags);
if (large_only) return p;
// fall back to non-large page allocation on error (`p == NULL`).
if (p == NULL) {
mi_atomic_write(&large_page_try_ok,10); // on error, don't try again for the next N allocations
@ -237,12 +274,13 @@ static void* mi_wasm_heap_grow(size_t size, size_t try_alignment) {
return (void*)aligned_base;
}
#else
static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
#define MI_OS_USE_MMAP
static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
void* p = NULL;
#if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
// on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations
static volatile intptr_t aligned_base = ((intptr_t)1 << 42); // starting at 4TiB
if (try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) {
static volatile intptr_t aligned_base = ((intptr_t)4 << 40); // starting at 4TiB
if (addr==NULL && try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) {
intptr_t hint = mi_atomic_add(&aligned_base,size) - size;
if (hint%try_alignment == 0) {
p = mmap((void*)hint,size,protect_flags,flags,fd,0);
@ -251,12 +289,13 @@ static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags,
}
#endif
if (p==NULL) {
p = mmap(NULL,size,protect_flags,flags,fd,0);
p = mmap(addr,size,protect_flags,flags,fd,0);
if (p==MAP_FAILED) p = NULL;
}
return p;
}
static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags) {
static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only) {
void* p = NULL;
#if !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
@ -278,10 +317,10 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
// macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99)
fd = VM_MAKE_TAG(100);
#endif
if (use_large_os_page(size, try_alignment)) {
if (large_only || use_large_os_page(size, try_alignment)) {
static volatile uintptr_t large_page_try_ok = 0;
uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
if (try_ok > 0) {
if (!large_only && try_ok > 0) {
// If the OS is not configured for large OS pages, or the user does not have
// enough permission, the `mmap` will always fail (but it might also fail for other reasons).
// Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times
@ -297,27 +336,32 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
#ifdef MAP_HUGETLB
lflags |= MAP_HUGETLB;
#endif
#ifdef MAP_HUGE_2MB
lflags |= MAP_HUGE_2MB;
#ifdef MAP_HUGE_1GB
if ((size % ((uintptr_t)1 << 30)) == 0) {
lflags |= MAP_HUGE_1GB;
}
else
#endif
{
#ifdef MAP_HUGE_2MB
lflags |= MAP_HUGE_2MB;
#endif
}
#ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
#endif
if (lflags != flags) {
if (large_only || lflags != flags) {
// try large OS page allocation
p = mi_unix_mmapx(size, try_alignment, protect_flags, lflags, lfd);
if (p == MAP_FAILED) {
p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
if (large_only) return p;
if (p == NULL) {
mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
p = NULL; // and fall back to regular mmap
}
}
}
}
if (p == NULL) {
p = mi_unix_mmapx(size, try_alignment, protect_flags, flags, fd);
if (p == MAP_FAILED) {
p = NULL;
}
p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
#if defined(MADV_HUGEPAGE)
// Many Linux systems don't allow MAP_HUGETLB but they support instead
// transparent huge pages (THP). It is not required to call `madvise` with MADV_HUGE
@ -325,7 +369,7 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
// in that case -- in particular for our large regions (in `memory.c`).
// However, some systems only allow THP if called with explicit `madvise`, so
// when large OS pages are enabled for mimalloc, we call `madvise` anyway.
else if (use_large_os_page(size, try_alignment)) {
if (use_large_os_page(size, try_alignment)) {
madvise(p, size, MADV_HUGEPAGE);
}
#endif
@ -340,17 +384,19 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
if (size == 0) return NULL;
void* p = NULL;
#if defined(_WIN32)
int flags = MEM_RESERVE;
if (commit) flags |= MEM_COMMIT;
p = mi_win_virtual_alloc(NULL, size, try_alignment, flags);
#elif defined(__wasi__)
p = mi_wasm_heap_grow(size, try_alignment);
#else
int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
p = mi_unix_mmap(size, try_alignment, protect_flags);
#endif
void* p = mi_os_alloc_from_huge_reserved(size, try_alignment, commit);
if (p != NULL) return p;
#if defined(_WIN32)
int flags = MEM_RESERVE;
if (commit) flags |= MEM_COMMIT;
p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false);
#elif defined(__wasi__)
p = mi_wasm_heap_grow(size, try_alignment);
#else
int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false);
#endif
_mi_stat_increase(&stats->mmap_calls, 1);
if (p != NULL) {
_mi_stat_increase(&stats->reserved, size);
@ -399,7 +445,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
// otherwise free and allocate at an aligned address in there
mi_os_mem_free(p, over_size, stats);
void* aligned_p = mi_align_up_ptr(p, alignment);
p = mi_win_virtual_alloc(aligned_p, size, alignment, flags);
p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false);
if (p == aligned_p) break; // success!
if (p != NULL) { // should not happen?
mi_os_mem_free(p, size, stats);
@ -555,17 +601,9 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
#if defined(_WIN32)
// Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
// (but this is for an access pattern that immediately reuses the memory)
if (mi_option_is_enabled(mi_option_reset_discards) && pDiscardVirtualMemory != NULL) {
DWORD ok = (*pDiscardVirtualMemory)(start, csize);
mi_assert_internal(ok == ERROR_SUCCESS);
if (ok != ERROR_SUCCESS) return false;
}
else {
void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
mi_assert_internal(p == start);
if (p != start) return false;
}
void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
mi_assert_internal(p == start);
if (p != start) return false;
#else
#if defined(MADV_FREE)
static int advice = MADV_FREE;
@ -664,3 +702,127 @@ bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
return mi_os_mem_free(start, size, stats);
#endif
}
/* ----------------------------------------------------------------------------
-----------------------------------------------------------------------------*/
#define MI_HUGE_OS_PAGE_SIZE ((size_t)1 << 30) // 1GiB
typedef struct mi_huge_info_s {
uint8_t* start;
ptrdiff_t reserved;
volatile ptrdiff_t used;
} mi_huge_info_t;
static mi_huge_info_t os_huge_reserved = { NULL, 0, 0 };
static bool mi_os_is_huge_reserved(void* p) {
return (os_huge_reserved.start != NULL &&
(uint8_t*)p >= os_huge_reserved.start &&
(uint8_t*)p < os_huge_reserved.start + os_huge_reserved.reserved);
}
static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit)
{
// only allow large aligned allocations
if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL;
if (try_alignment > MI_SEGMENT_SIZE) return NULL;
if (!commit) return NULL;
if (os_huge_reserved.start==NULL) return NULL;
if (mi_atomic_iread(&os_huge_reserved.used) >= os_huge_reserved.reserved) return NULL; // already full
// always aligned
mi_assert_internal( os_huge_reserved.used % MI_SEGMENT_SIZE == 0 );
mi_assert_internal( (uintptr_t)os_huge_reserved.start % MI_SEGMENT_SIZE == 0 );
// try to reserve space
ptrdiff_t next = mi_atomic_add( &os_huge_reserved.used, (ptrdiff_t)size );
if (next > os_huge_reserved.reserved) {
// "free" our over-allocation
mi_atomic_add( &os_huge_reserved.used, -((ptrdiff_t)size) );
return NULL;
}
// success!
uint8_t* p = os_huge_reserved.start + next - (ptrdiff_t)size;
mi_assert_internal( (uintptr_t)p % MI_SEGMENT_SIZE == 0 );
return p;
}
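`mi_os_alloc_from_huge_reserved` is a lock-free bump allocator over the reserved arena: advance `used`, and roll back if the advance overshot `reserved`. The core in isolation (a sketch with hypothetical names; note C11 `atomic_fetch_add` returns the old value, whereas mimalloc's `mi_atomic_add` returns the new one):

#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

typedef struct arena_s {
  uint8_t* start;
  ptrdiff_t reserved;
  _Atomic ptrdiff_t used;
} arena_t;

static void* arena_alloc(arena_t* a, size_t size) {
  // `next` is the new top of the arena after our claim
  ptrdiff_t next = atomic_fetch_add(&a->used, (ptrdiff_t)size) + (ptrdiff_t)size;
  if (next > a->reserved) {
    atomic_fetch_sub(&a->used, (ptrdiff_t)size);  // "free" the over-allocation
    return NULL;
  }
  return a->start + next - (ptrdiff_t)size;       // base of our claimed range
}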
/*
static void mi_os_free_huge_reserved() {
uint8_t* addr = os_huge_reserved.start;
size_t total = os_huge_reserved.reserved;
os_huge_reserved.reserved = 0;
os_huge_reserved.start = NULL;
for( size_t current = 0; current < total; current += MI_HUGE_OS_PAGE_SIZE) {
_mi_os_free(addr + current, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
}
}
*/
#if !(MI_INTPTR_SIZE >= 8 && (defined(_WIN32) || defined(MI_OS_USE_MMAP)))
int mi_reserve_huge_os_pages(size_t pages, double max_secs) mi_attr_noexcept {
return -2; // cannot allocate
}
#else
int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept
{
if (max_secs==0) return -1; // timeout
if (pages==0) return 0; // ok
if (os_huge_reserved.start != NULL) return -2; // already reserved
// Allocate one page at a time but try to place them contiguously
// We allocate one page at a time to be able to abort if it takes too long
double start_t = _mi_clock_start();
uint8_t* start = (uint8_t*)((uintptr_t)8 << 40); // 8TiB virtual start address
uint8_t* addr = start; // current top of the allocations
for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE ) {
// allocate huge pages
void* p = NULL;
#ifdef _WIN32
p = mi_win_virtual_alloc(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE, true);
#elif defined(MI_OS_USE_MMAP)
p = mi_unix_mmap(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_READ | PROT_WRITE, true);
#else
// always fail
#endif
// Did we succeed at a contiguous address?
if (p != addr) {
if (p != NULL) {
_mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr);
_mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main );
}
else {
#ifdef _WIN32
int err = GetLastError();
#else
int err = errno;
#endif
_mi_warning_message("could not allocate huge page %zu at 0x%p, error: %i\n", page, addr, err);
}
return -2;
}
// success, record it
if (page==0) {
os_huge_reserved.start = addr;
}
os_huge_reserved.reserved += MI_HUGE_OS_PAGE_SIZE;
_mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
_mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
// check for timeout
double elapsed = _mi_clock_end(start_t);
if (elapsed > max_secs) return (-1); // timeout
if (page >= 1) {
double estimate = ((elapsed / (double)(page+1)) * (double)pages);
if (estimate > 1.5*max_secs) return (-1); // seems like we are going to timeout
}
}
_mi_verbose_message("reserved %zu huge pages\n", pages);
return 0;
}
#endif
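The early-abort heuristic above extrapolates total time from the pages allocated so far and gives up when the projection exceeds 1.5x the budget; worked through in isolation:

#include <stdio.h>

int main(void) {
  double max_secs = 1.0;    // budget passed by the caller
  size_t pages = 8;         // pages requested
  size_t page = 2;          // loop index, so page+1 = 3 pages done
  double elapsed = 0.9;     // seconds spent so far
  double estimate = (elapsed / (double)(page + 1)) * (double)pages; // 2.4s
  if (estimate > 1.5 * max_secs) {
    printf("projected %.1fs > %.1fs, abort early\n", estimate, 1.5 * max_secs);
  }
  return 0;
}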

View file

@ -34,15 +34,15 @@ terms of the MIT license. A copy of the license can be found in the file
static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
return (pq->block_size == (MI_MEDIUM_SIZE_MAX+sizeof(uintptr_t)));
return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}
static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
return (pq->block_size == (MI_MEDIUM_SIZE_MAX+(2*sizeof(uintptr_t))));
return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}
static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
return (pq->block_size > MI_MEDIUM_SIZE_MAX);
return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
/* -----------------------------------------------------------
@ -116,11 +116,11 @@ extern inline uint8_t _mi_bin(size_t size) {
bin = (uint8_t)wsize;
}
#endif
else if (wsize > MI_MEDIUM_WSIZE_MAX) {
else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
bin = MI_BIN_HUGE;
}
else {
#if defined(MI_ALIGN4W)
if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
#endif
wsize--;
@ -147,7 +147,7 @@ size_t _mi_bin_size(uint8_t bin) {
// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
if (size <= MI_MEDIUM_SIZE_MAX) {
if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
return _mi_bin_size(_mi_bin(size));
}
else {
@ -245,7 +245,7 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(queue, page));
mi_assert_internal(page->block_size == queue->block_size || (page->block_size > MI_MEDIUM_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_assert_internal(page->block_size == queue->block_size || (page->block_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == queue->last) queue->last = page->prev;
@ -268,7 +268,7 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
mi_assert_internal(page->heap == NULL);
mi_assert_internal(!mi_page_queue_contains(queue, page));
mi_assert_internal(page->block_size == queue->block_size ||
(page->block_size > MI_MEDIUM_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
(page->block_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@ -297,8 +297,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_assert_internal((page->block_size == to->block_size && page->block_size == from->block_size) ||
(page->block_size == to->block_size && mi_page_queue_is_full(from)) ||
(page->block_size == from->block_size && mi_page_queue_is_full(to)) ||
(page->block_size > MI_MEDIUM_SIZE_MAX && mi_page_queue_is_huge(to)) ||
(page->block_size > MI_MEDIUM_SIZE_MAX && mi_page_queue_is_full(to)));
(page->block_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
(page->block_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;

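The huge and full queues are distinguished purely by sentinel block sizes just past `MI_MEDIUM_OBJ_SIZE_MAX`, as the predicates above encode; spelled out as a standalone sketch (the 128KiB value is the 64-bit constant from mimalloc-types.h above):

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#define MEDIUM_OBJ_SIZE_MAX ((size_t)128*1024)

// huge and full pages get sentinel sizes just past the largest real medium
// size, so no genuine block size can ever collide with them
static bool queue_is_huge(size_t block_size)    { return block_size == MEDIUM_OBJ_SIZE_MAX + sizeof(uintptr_t); }
static bool queue_is_full(size_t block_size)    { return block_size == MEDIUM_OBJ_SIZE_MAX + 2*sizeof(uintptr_t); }
static bool queue_is_special(size_t block_size) { return block_size > MEDIUM_OBJ_SIZE_MAX; }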
View file

@ -71,7 +71,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
mi_assert_internal(page->block_size > 0);
mi_assert_internal(page->used <= page->capacity);
mi_assert_internal(page->capacity <= page->reserved);
mi_segment_t* segment = _mi_page_segment(page);
uint8_t* start = _mi_page_start(segment,page,NULL);
mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
@ -102,7 +102,7 @@ bool _mi_page_is_valid(mi_page_t* page) {
mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == page->heap->thread_id);
mi_page_queue_t* pq = mi_page_queue_of(page);
mi_assert_internal(mi_page_queue_contains(pq, page));
mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_MEDIUM_SIZE_MAX || mi_page_is_in_full(page));
mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
}
return true;
@ -170,7 +170,7 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
void _mi_page_free_collect(mi_page_t* page, bool force) {
mi_assert_internal(page!=NULL);
// collect the thread free list
if (force || mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation
_mi_page_thread_free_collect(page);
@ -193,7 +193,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
mi_block_set_next(page, tail, page->free);
page->free = page->local_free;
page->local_free = NULL;
}
}
}
mi_assert_internal(!force || page->local_free == NULL);
@ -356,8 +356,8 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
mi_page_set_has_aligned(page, false);
// account for huge pages here
if (page->block_size > MI_MEDIUM_SIZE_MAX) {
if (page->block_size <= MI_LARGE_SIZE_MAX) {
if (page->block_size > MI_MEDIUM_OBJ_SIZE_MAX) {
if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
_mi_stat_decrease(&page->heap->tld->stats.large, page->block_size);
}
else {
@ -394,7 +394,7 @@ void _mi_page_retire(mi_page_t* page) {
// is the only page left with free blocks. It is not clear
// how to check this efficiently though... for now we just check
// if its neighbours are almost fully used.
if (mi_likely(page->block_size <= MI_MEDIUM_SIZE_MAX)) {
if (mi_likely(page->block_size <= MI_SMALL_SIZE_MAX)) {
if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
_mi_stat_counter_increase(&_mi_stats_main.page_no_retire,1);
return; // don't retire after all
@ -713,7 +713,7 @@ static mi_page_t* mi_large_page_alloc(mi_heap_t* heap, size_t size) {
if (page != NULL) {
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(page->block_size == block_size);
if (page->block_size <= MI_LARGE_SIZE_MAX) {
if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) {
_mi_stat_increase(&heap->tld->stats.large, block_size);
_mi_stat_counter_increase(&heap->tld->stats.large_count, 1);
}
@ -740,14 +740,14 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
// call potential deferred free routines
_mi_deferred_free(heap, false);
// free delayed frees from other threads
_mi_heap_delayed_free(heap);
// huge allocation?
mi_page_t* page;
if (mi_unlikely(size > MI_MEDIUM_SIZE_MAX)) {
if (mi_unlikely(size >= (SIZE_MAX - MI_MAX_ALIGN_SIZE))) {
if (mi_unlikely(size > MI_MEDIUM_OBJ_SIZE_MAX)) {
if (mi_unlikely(size > PTRDIFF_MAX)) {
page = NULL;
}
else {

View file

@ -21,7 +21,6 @@ static void mi_segment_map_freed_at(const mi_segment_t* segment);
/* -----------------------------------------------------------
Segment allocation
In any case the memory for a segment is virtual and only
committed on demand (i.e. we are careful to not touch the memory
@ -113,7 +112,7 @@ static void mi_page_queue_enqueue(mi_page_queue_t* pq, mi_page_t* page) {
pq->first = page;
if (page->next != NULL) page->next->prev = page;
else pq->last = page;
page->block_size = 0; // free
}
static mi_page_queue_t* mi_page_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
@ -159,7 +158,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_page_queue_t* pq;
while(slice < &segment->slices[segment->slice_count]) {
mi_assert_internal(slice->slice_count > 0);
mi_assert_internal(slice->slice_offset == 0);
size_t index = mi_slice_index(slice);
size_t maxindex = (index + slice->slice_count >= segment->slice_count ? segment->slice_count : index + slice->slice_count) - 1;
if (slice->block_size > 0) { // a page in use, all slices need their back offset set
@ -179,7 +178,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
pq = mi_page_queue_for(slice->slice_count,tld);
mi_assert_internal(mi_page_queue_contains(pq,mi_slice_to_page(slice)));
}
}
}
slice = &segment->slices[maxindex+1];
}
mi_assert_internal(slice == &segment->slices[segment->slice_count]);
@ -193,7 +192,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
----------------------------------------------------------- */
// Start of the page available memory; can be used on uninitialized pages
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
{
mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
ptrdiff_t idx = slice - segment->slices;
@ -233,7 +232,7 @@ static size_t mi_segment_size(size_t required, size_t* pre_size, size_t* info_si
size_t page_size = _mi_os_page_size();
size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size);
size_t guardsize = 0;
if (mi_option_is_enabled(mi_option_secure)) {
// in secure mode, we set up a protected page in between the segment info
// and the page data (and one at the end of the segment)
@ -296,7 +295,7 @@ static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t
}
static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
if (tld->cache_count < MI_SEGMENT_CACHE_MAX
&& tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))
) { // always allow 1 element cache
return false;
@ -339,7 +338,7 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
/* -----------------------------------------------------------
Slices
----------------------------------------------------------- */
@ -399,11 +398,11 @@ static void mi_segment_page_split(mi_page_t* page, size_t slice_count, mi_segmen
mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
size_t next_index = mi_slice_index(mi_page_to_slice(page)) + slice_count;
size_t next_count = page->slice_count - slice_count;
mi_segment_page_init( segment, next_index, next_count, tld );
page->slice_count = (uint32_t)slice_count;
}
static mi_page_t* mi_segment_page_find(size_t slice_count, mi_segments_tld_t* tld) {
mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_SIZE_MAX);
// search from best fit up
mi_page_queue_t* pq = mi_page_queue_for(slice_count,tld);
@ -422,8 +421,8 @@ static mi_page_t* mi_segment_page_find(size_t slice_count, mi_segments_tld_t* tl
}
pq++;
}
// could not find a page..
return NULL;
}
static void mi_segment_page_delete(mi_slice_t* slice, mi_segments_tld_t* tld) {
@ -452,7 +451,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, m
//mi_assert_internal(pre_size % MI_SEGMENT_SLICE_SIZE == 0);
// Try to get it from our thread local cache first
bool commit = mi_option_is_enabled(mi_option_eager_commit) || mi_option_is_enabled(mi_option_eager_region_commit)
|| required > 0; // huge page
mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
if (segment==NULL) {
@ -482,7 +481,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, m
_mi_os_protect((uint8_t*)segment + info_size, (pre_size - info_size));
size_t os_page_size = _mi_os_page_size();
// and protect the last page too
_mi_os_protect((uint8_t*)segment + segment_size - os_page_size, os_page_size);
slice_count--; // don't use the last slice :-(
}
@ -519,7 +518,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, m
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
mi_assert_internal(segment != NULL);
mi_assert_internal(segment->next == NULL);
mi_assert_internal(segment->prev == NULL);
mi_assert_internal(segment->used == 0);
@ -541,7 +540,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
// stats
_mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);
if (!force && mi_segment_cache_push(segment, tld)) {
// it is put in our cache
}
@ -555,7 +554,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
Page allocation
----------------------------------------------------------- */
static mi_page_t* mi_segment_page_alloc(mi_page_kind_t page_kind, size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
mi_assert_internal(required <= MI_LARGE_SIZE_MAX && page_kind <= MI_PAGE_LARGE);
@ -565,7 +564,7 @@ static mi_page_t* mi_segment_page_alloc(mi_page_kind_t page_kind, size_t require
mi_page_t* page = mi_segment_page_find(slices_needed,tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
if (page==NULL) {
// no free page, allocate a new segment and try again
if (mi_segment_alloc(0, tld, os_tld) == NULL) return NULL; // OOM
return mi_segment_page_alloc(page_kind, required, tld, os_tld);
}
mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size);
@ -581,11 +580,11 @@ static mi_page_t* mi_segment_page_alloc(mi_page_kind_t page_kind, size_t require
if (i > 0) slice->slice_count = 0;
if (!segment->all_committed && !slice->is_committed) {
slice->is_committed = true;
commit = true;
}
if (slice->is_reset) {
slice->is_reset = false;
unreset = true;
}
}
uint8_t* page_start = mi_slice_start(mi_page_to_slice(page));
@ -593,7 +592,7 @@ static mi_page_t* mi_segment_page_alloc(mi_page_kind_t page_kind, size_t require
if(unreset){ _mi_os_unreset(page_start, page_size, tld->stats); }
// initialize the page and return
mi_assert_internal(segment->thread_id == _mi_thread_id());
segment->used++;
mi_page_init_flags(page, segment->thread_id);
return page;
@ -604,7 +603,7 @@ static mi_slice_t* mi_segment_page_free_coalesce(mi_page_t* page, mi_segments_tl
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_internal(segment->used > 0);
segment->used--;
// free and coalesce the page
mi_slice_t* slice = mi_page_to_slice(page);
size_t slice_count = slice->slice_count;
@ -617,7 +616,7 @@ static mi_slice_t* mi_segment_page_free_coalesce(mi_page_t* page, mi_segments_tl
mi_segment_page_delete(next, tld);
}
if (slice > segment->slices) {
mi_slice_t* prev = mi_slice_first(slice - 1);
mi_assert_internal(prev >= segment->slices);
if (prev->block_size==0) {
// free previous slice -- remove it from free and merge
@ -627,7 +626,7 @@ static mi_slice_t* mi_segment_page_free_coalesce(mi_page_t* page, mi_segments_tl
slice = prev;
}
}
// and add the new free page
mi_segment_page_init(segment, mi_slice_index(slice), slice_count, tld);
mi_assert_expensive(mi_segment_is_valid(segment,tld));
@ -649,7 +648,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
size_t inuse = page->capacity * page->block_size;
_mi_stat_decrease(&tld->stats->page_committed, inuse);
_mi_stat_decrease(&tld->stats->pages, 1);
// reset the page memory to reduce memory pressure?
if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
size_t psize;
@ -696,7 +695,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
else if (segment->used == segment->abandoned) {
// only abandoned pages; remove from free list and abandon
mi_segment_abandon(segment,tld);
}
}
}
@ -718,7 +717,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_expensive(mi_segment_is_valid(segment,tld));
// remove the free pages from our lists
mi_slice_t* slice = &segment->slices[0];
while (slice <= mi_segment_last_slice(segment)) {
mi_assert_internal(slice->slice_count > 0);
mi_assert_internal(slice->slice_offset == 0);
@ -807,14 +806,14 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
segment->abandoned--;
if (mi_page_all_free(page)) {
// if everything free by now, free the page
slice = mi_segment_page_clear(page, tld); // set slice again due to coalescing
}
else {
// otherwise reclaim it
mi_page_init_flags(page, segment->thread_id);
_mi_page_reclaim(heap, page);
}
}
}
mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0);
slice = slice + slice->slice_count;
}
@ -824,7 +823,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
mi_segment_free(segment,false,tld);
}
else {
reclaimed++;
}
}
return (reclaimed>0);
@ -847,15 +846,15 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
page = page + initial_count;
page->slice_count = (uint32_t)((segment->segment_size - segment->segment_info_size)/MI_SEGMENT_SLICE_SIZE);
page->slice_offset = 0;
page->block_size = size;
mi_assert_internal(page->slice_count * MI_SEGMENT_SLICE_SIZE >= size);
mi_assert_internal(page->slice_count >= segment->slice_count - initial_count);
// set back pointers
for (size_t i = 1; i <segment->slice_count; i++) {
mi_slice_t* slice = (mi_slice_t*)(page + i);
slice->slice_offset = (uint32_t)(sizeof(mi_page_t)*i);
slice->block_size = 1;
slice->slice_count = 0;
}
mi_page_init_flags(page,segment->thread_id);
return page;
@ -873,13 +872,13 @@ static bool mi_is_good_fit(size_t bsize, size_t size) {
mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
mi_page_t* page;
if (block_size <= MI_SMALL_SIZE_MAX) {// || mi_is_good_fit(block_size,MI_SMALL_PAGE_SIZE)) {
if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {// || mi_is_good_fit(block_size,MI_SMALL_PAGE_SIZE)) {
page = mi_segment_page_alloc(MI_PAGE_SMALL,block_size,tld,os_tld);
}
else if (block_size <= MI_MEDIUM_SIZE_MAX) {// || mi_is_good_fit(block_size, MI_MEDIUM_PAGE_SIZE)) {
else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {// || mi_is_good_fit(block_size, MI_MEDIUM_PAGE_SIZE)) {
page = mi_segment_page_alloc(MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,tld, os_tld);
}
else if (block_size <= MI_LARGE_SIZE_MAX) {
else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
page = mi_segment_page_alloc(MI_PAGE_LARGE,block_size,tld, os_tld);
}
else {
@ -899,7 +898,7 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_
----------------------------------------------------------- */
#if (MI_INTPTR_SIZE==8)
#define MI_MAX_ADDRESS ((size_t)1 << 44) // 16TB
#else
#define MI_MAX_ADDRESS ((size_t)1 << 31) // 2Gb
#endif
@ -911,7 +910,7 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_
static volatile uintptr_t mi_segment_map[MI_SEGMENT_MAP_WSIZE]; // 1KiB per TB with 128MiB segments
static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
mi_assert_internal(_mi_ptr_segment(segment) == segment); // is it aligned on 128MiB?
uintptr_t segindex = ((uintptr_t)segment % MI_MAX_ADDRESS) / MI_SEGMENT_SIZE;
*bitidx = segindex % (8*MI_INTPTR_SIZE);
return (segindex / (8*MI_INTPTR_SIZE));
@ -953,7 +952,7 @@ static mi_segment_t* _mi_segment_of(const void* p) {
return segment; // yes, allocated by us
}
if (index==0) return NULL;
// search downwards for the first segment in case it is an interior pointer
// could be slow but searches in 256MiB steps through valid huge objects
// note: we could maintain a lowest index to speed up the path for invalid pointers?
size_t lobitidx;
@ -1006,5 +1005,3 @@ static void* mi_segment_range_of(const void* p, size_t* size) {
}
}
*/

View file

@ -28,11 +28,14 @@ void _mi_stats_done(mi_stats_t* stats) {
Statistics operations
----------------------------------------------------------- */
static bool mi_is_in_main(void* stat) {
return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
&& (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t)));
}
static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
if (amount == 0) return;
bool in_main = ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
&& (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t)));
if (in_main)
if (mi_is_in_main(stat))
{
// add atomically (for abandoned pages)
int64_t current = mi_atomic_add(&stat->current,amount);
@ -58,11 +61,16 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
}
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
mi_atomic_add( &stat->count, 1 );
mi_atomic_add( &stat->total, (int64_t)amount );
if (mi_is_in_main(stat)) {
mi_atomic_add( &stat->count, 1 );
mi_atomic_add( &stat->total, (int64_t)amount );
}
else {
stat->count++;
stat->total += amount;
}
}
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) {
mi_stat_update(stat, (int64_t)amount);
}
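The new `mi_is_in_main` gates atomic updates: only the global `_mi_stats_main` can be hit concurrently (e.g. from abandoned pages), while per-thread stats can take the cheap non-atomic path. The dispatch shape reduced to its essentials (a sketch with hypothetical standalone types):

#include <stdatomic.h>
#include <stdint.h>

typedef struct stat_counter_s { _Atomic int64_t count, total; } stat_counter_t;

static stat_counter_t stats_main;  // shared across threads

static void counter_increase(stat_counter_t* stat, int64_t amount) {
  if (stat == &stats_main) {
    // main stats can race: use a real read-modify-write
    atomic_fetch_add_explicit(&stat->total, amount, memory_order_relaxed);
    atomic_fetch_add_explicit(&stat->count, 1, memory_order_relaxed);
  }
  else {
    // thread-local stats are single-writer: plain load/store is enough
    atomic_store_explicit(&stat->total,
      atomic_load_explicit(&stat->total, memory_order_relaxed) + amount,
      memory_order_relaxed);
    atomic_store_explicit(&stat->count,
      atomic_load_explicit(&stat->count, memory_order_relaxed) + 1,
      memory_order_relaxed);
  }
}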
@ -276,8 +284,8 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_n
_mi_fprintf(out,"\n");
}
static double mi_clock_end(double start);
static double mi_clock_start(void);
double _mi_clock_end(double start);
double _mi_clock_start(void);
static double mi_time_start = 0.0;
static mi_stats_t* mi_stats_get_default(void) {
@ -289,7 +297,7 @@ void mi_stats_reset(void) mi_attr_noexcept {
mi_stats_t* stats = mi_stats_get_default();
if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); }
memset(&_mi_stats_main, 0, sizeof(mi_stats_t));
mi_time_start = mi_clock_start();
mi_time_start = _mi_clock_start();
}
static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) {
@ -301,11 +309,11 @@ static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) {
}
void mi_stats_print(FILE* out) mi_attr_noexcept {
mi_stats_print_ex(mi_stats_get_default(),mi_clock_end(mi_time_start),out);
mi_stats_print_ex(mi_stats_get_default(),_mi_clock_end(mi_time_start),out);
}
void mi_thread_stats_print(FILE* out) mi_attr_noexcept {
_mi_stats_print(mi_stats_get_default(), mi_clock_end(mi_time_start), out);
_mi_stats_print(mi_stats_get_default(), _mi_clock_end(mi_time_start), out);
}
@ -350,7 +358,7 @@ static double mi_clock_now(void) {
static double mi_clock_diff = 0.0;
static double mi_clock_start(void) {
double _mi_clock_start(void) {
if (mi_clock_diff == 0.0) {
double t0 = mi_clock_now();
mi_clock_diff = mi_clock_now() - t0;
@ -358,7 +366,7 @@ static double mi_clock_start(void) {
return mi_clock_now();
}
static double mi_clock_end(double start) {
double _mi_clock_end(double start) {
double end = mi_clock_now();
return (end - start - mi_clock_diff);
}
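Promoting `mi_clock_start`/`mi_clock_end` to `_mi_`-prefixed internals lets os.c time the huge-page loop with the same self-calibrating clock. The calibration idea in miniature (a portable sketch using C11 `timespec_get`, not mimalloc's platform-specific clock):

#include <time.h>
#include <stdio.h>

static double clock_now(void) {
  struct timespec ts;
  timespec_get(&ts, TIME_UTC);
  return (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9;
}

static double clock_diff = 0.0;

static double clock_start(void) {
  if (clock_diff == 0.0) {
    double t0 = clock_now();
    clock_diff = clock_now() - t0;  // measure the cost of reading the clock itself
  }
  return clock_now();
}

static double clock_end(double start) {
  return clock_now() - start - clock_diff;  // subtract the measurement overhead
}

int main(void) {
  double t = clock_start();
  for (volatile int i = 0; i < 1000000; i++) { }
  printf("elapsed: %.6fs\n", clock_end(t));
  return 0;
}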

View file

@ -66,7 +66,7 @@ bool test_heap2();
// Main testing
// ---------------------------------------------------------------------------
int main() {
mi_option_enable(mi_option_verbose,false);
mi_option_disable(mi_option_verbose);
// ---------------------------------------------------
// Malloc

View file

@ -154,6 +154,9 @@ int main(int argc, char** argv) {
if (n > 0) N = n;
}
printf("start with %i threads with a %i%% load-per-thread\n", THREADS, N);
//int res = mi_reserve_huge_os_pages(4,1);
//printf("(reserve huge: %i\n)", res);
//bench_start_program();
memset((void*)transfer, 0, TRANSFERS*sizeof(void*));
run_os_threads(THREADS);