mirror of https://github.com/microsoft/mimalloc.git
synced 2025-05-06 23:39:31 +03:00

merge from dev3

This commit is contained in commit b920fc1b72.
14 changed files with 103 additions and 42 deletions.

@@ -190,7 +190,7 @@
     <SDLCheck>true</SDLCheck>
     <ConformanceMode>Default</ConformanceMode>
     <AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
-    <PreprocessorDefinitions>MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);</PreprocessorDefinitions>
+    <PreprocessorDefinitions>MI_DEBUG=3;MI_GUARDED=1;%(PreprocessorDefinitions);</PreprocessorDefinitions>
     <CompileAs>CompileAsCpp</CompileAs>
     <SupportJustMyCode>false</SupportJustMyCode>
     <LanguageStandard>stdcpp20</LanguageStandard>

@@ -398,8 +398,8 @@ typedef enum mi_option_e {
   mi_option_reclaim_on_free,            // allow to reclaim an abandoned segment on a free (=1)
   mi_option_full_page_retain,           // retain N full pages per size class (=2)
   mi_option_max_page_candidates,        // max candidate pages to consider for allocation (=4)
-  mi_option_max_vabits,                 // max virtual address bits to consider in user space (=48)
-  mi_option_debug_commit_full_pagemap,  // commit the full pagemap to catch invalid pointer uses (=0)
+  mi_option_max_vabits,                 // max user space virtual address bits to consider (=48)
+  mi_option_pagemap_commit,             // commit the full pagemap (to always catch invalid pointer uses) (=0)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,

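Note: `mi_option_max_vabits` only gets a clearer comment, while `mi_option_debug_commit_full_pagemap` is renamed to `mi_option_pagemap_commit`. A minimal sketch of querying the renamed option through mimalloc's public option API (the environment variable follows the usual MIMALLOC_<OPTION> naming):

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      // can also be enabled in the environment, e.g. MIMALLOC_PAGEMAP_COMMIT=1
      if (mi_option_is_enabled(mi_option_pagemap_commit)) {
        printf("page map is committed upfront\n");
      }
      return 0;
    }
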
@@ -120,7 +120,7 @@ typedef int32_t mi_ssize_t;

 // use a flat page-map (or a 2-level one)
 #ifndef MI_PAGE_MAP_FLAT
-#if MI_MAX_VABITS <= 40
+#if MI_MAX_VABITS <= 40 && !defined(__APPLE__)
 #define MI_PAGE_MAP_FLAT 1
 #else
 #define MI_PAGE_MAP_FLAT 0

@@ -445,6 +445,7 @@ static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t si
 #if MI_PAGE_MAP_FLAT

 // flat page-map committed on demand
+// single indirection and low commit, but large initial virtual reserve (4 GiB with 48 bit virtual addresses)
 extern uint8_t* _mi_page_map;

 static inline size_t _mi_page_map_index(const void* p) {

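The flat map keeps one entry per arena slice, so with 48-bit addresses and 64 KiB slices the reserve is 2^48 / 2^16 = 2^32 entries, i.e. the 4 GiB mentioned in the new comment. A sketch of the index computation, assuming 64 KiB slices (the shift value is an assumption for illustration):

    #include <stdint.h>
    #include <stddef.h>

    #define SLICE_SHIFT 16   // assumption: 64 KiB arena slices

    // one flat-map entry per 64 KiB slice of the address space
    static inline size_t page_map_index(const void* p) {
      return (size_t)((uintptr_t)p >> SLICE_SHIFT);
    }
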
@@ -471,6 +472,8 @@ static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) {
 #else

 // 2-level page map:
+// double indirection but low commit and low virtual reserve.
+//
 // The page-map is usually 4 MiB and points to sub maps of 64 KiB.
 // The page-map is committed on-demand (in 64 KiB parts) (and sub-maps are committed on-demand as well)
 // One sub page-map = 64 KiB => covers 2^13 * 2^16 = 2^29 = 512 MiB address space

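With 8-byte entries, one 64 KiB sub-map holds 2^16 / 2^3 = 2^13 entries, each covering a 64 KiB slice, hence 2^13 * 2^16 = 2^29 = 512 MiB per sub-map. A sketch of the double indirection (constants are assumptions for illustration):

    #include <stdint.h>
    #include <stddef.h>

    #define SLICE_SHIFT 16                                 // 64 KiB slices
    #define SUB_SHIFT   13                                 // 2^13 pointers per 64 KiB sub-map
    #define SUB_MASK    (((uintptr_t)1 << SUB_SHIFT) - 1)

    static inline void page_map_index2(const void* p, size_t* idx, size_t* sub_idx) {
      const uintptr_t slice = (uintptr_t)p >> SLICE_SHIFT;   // slice number
      *sub_idx = (size_t)(slice & SUB_MASK);                 // entry within one sub-map
      *idx     = (size_t)(slice >> SUB_SHIFT);               // which sub-map (512 MiB each)
    }
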
@@ -46,11 +46,13 @@ terms of the MIT license. A copy of the license can be found in the file
 // Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
 // #define MI_STAT 1

-// Define MI_SECURE to enable security mitigations
-// #define MI_SECURE 1  // guard page around metadata
-// #define MI_SECURE 2  // guard page around each mimalloc page
-// #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
-// #define MI_SECURE 4  // checks for double free. (may be more expensive)
+// Define MI_SECURE to enable security mitigations. The lowest two have minimal performance impact:
+// #define MI_SECURE 1  // guard page around metadata
+// #define MI_SECURE 2  // guard page around each mimalloc page (can fragment VMA's with large heaps..)
+//
+// The next two levels can have more performance cost:
+// #define MI_SECURE 3  // randomize allocations, encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
+// #define MI_SECURE 4  // checks for double free. (may be more expensive)

 #if !defined(MI_SECURE)
 #define MI_SECURE 0

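The same level thresholds gate the individual mitigations at compile time throughout this commit; a minimal sketch of the pattern (illustrative macro, not from the sources):

    #ifndef MI_SECURE
    #define MI_SECURE 0
    #endif

    #if MI_SECURE >= 2            // levels 2+: guard a page at the end of each area
    #define GUARD_PAGE_SIZE (4*1024)
    #else
    #define GUARD_PAGE_SIZE (0)
    #endif
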
@@ -674,7 +674,7 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
     #if MI_STAT>1
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
     #endif
-    _mi_stat_counter_increase(&heap->tld->stats.guarded_alloc_count, 1);
+    mi_heap_stat_counter_increase(heap, guarded_alloc_count, 1);
   }
   #if MI_DEBUG>3
   if (p != NULL && zero) {

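For reference, guarded allocations (the MI_GUARDED build toggled in the project file above) can be tuned at runtime; a sketch, assuming the guarded-option names from recent mimalloc v3 sources (verify against your version):

    #include <mimalloc.h>

    int main(void) {
      // sample every eligible allocation next to a guard page (assumed option name);
      // can also be set via the environment, e.g. MIMALLOC_GUARDED_SAMPLE_RATE=1
      mi_option_set(mi_option_guarded_sample_rate, 1);
      void* p = mi_malloc(16*1024);
      mi_free(p);
      return 0;
    }
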
@@ -25,6 +25,12 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_META_PAGE_SIZE         MI_ARENA_SLICE_SIZE
 #define MI_META_PAGE_ALIGN        MI_ARENA_SLICE_ALIGN

+#if MI_SECURE
+#define MI_META_PAGE_GUARD_SIZE   (4*MI_KiB)
+#else
+#define MI_META_PAGE_GUARD_SIZE   (0)
+#endif
+
 #define MI_META_BLOCK_SIZE        (128)   // large enough such that META_MAX_SIZE > 4k (even on 32-bit)
 #define MI_META_BLOCK_ALIGN       MI_META_BLOCK_SIZE
 #define MI_META_BLOCKS_PER_PAGE   (MI_ARENA_SLICE_SIZE / MI_META_BLOCK_SIZE)  // 1024

@@ -41,7 +47,7 @@ static mi_decl_cache_align _Atomic(mi_meta_page_t*) mi_meta_pages = MI_ATOMIC_V

 #if MI_DEBUG > 1
 static mi_meta_page_t* mi_meta_page_of_ptr(void* p, size_t* block_idx) {
-  mi_meta_page_t* mpage = (mi_meta_page_t*)mi_align_down_ptr(p,MI_META_PAGE_ALIGN);
+  mi_meta_page_t* mpage = (mi_meta_page_t*)((uint8_t*)mi_align_down_ptr(p,MI_META_PAGE_ALIGN) + MI_META_PAGE_GUARD_SIZE);
   if (block_idx != NULL) {
     *block_idx = ((uint8_t*)p - (uint8_t*)mpage) / MI_META_BLOCK_SIZE;
   }

@@ -54,9 +60,9 @@ static mi_meta_page_t* mi_meta_page_next( mi_meta_page_t* mpage ) {
 }

 static void* mi_meta_block_start( mi_meta_page_t* mpage, size_t block_idx ) {
-  mi_assert_internal(_mi_is_aligned(mpage,MI_META_PAGE_ALIGN));
+  mi_assert_internal(_mi_is_aligned((uint8_t*)mpage - MI_META_PAGE_GUARD_SIZE, MI_META_PAGE_ALIGN));
   mi_assert_internal(block_idx < MI_META_BLOCKS_PER_PAGE);
-  void* p = ((uint8_t*)mpage + (block_idx * MI_META_BLOCK_SIZE));
+  void* p = ((uint8_t*)mpage - MI_META_PAGE_GUARD_SIZE + (block_idx * MI_META_BLOCK_SIZE));
   mi_assert_internal(mpage == mi_meta_page_of_ptr(p,NULL));
   return p;
 }

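Under MI_SECURE a meta page is now laid out as [guard | page struct + bitmap | blocks... | guard], with block indices still relative to the raw slice base; that is why `mi_meta_block_start` subtracts the guard size again. A sketch of the arithmetic (sizes are assumptions):

    #include <stdint.h>
    #include <stddef.h>

    #define META_GUARD_SIZE (4*1024)
    #define META_BLOCK_SIZE (128)

    // mpage points just past the front guard; block 0 coincides with the slice
    // base, so valid indices begin after the guard and the page-info blocks.
    static inline void* meta_block_start(void* mpage, size_t block_idx) {
      return (uint8_t*)mpage - META_GUARD_SIZE + (block_idx * META_BLOCK_SIZE);
    }
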
@@ -66,22 +72,32 @@ static mi_meta_page_t* mi_meta_page_zalloc(void) {
   // allocate a fresh arena slice
   // note: careful with _mi_subproc as it may recurse into mi_tld and meta_page_zalloc again..
   mi_memid_t memid;
-  mi_meta_page_t* mpage = (mi_meta_page_t*)_mi_arena_alloc_aligned(_mi_subproc(), MI_ARENA_SLICE_SIZE, MI_ARENA_SLICE_ALIGN, 0,
-                                                                   true /* commit*/, true /* allow large */,
+  uint8_t* base = (uint8_t*)_mi_arena_alloc_aligned(_mi_subproc(), MI_META_PAGE_SIZE, MI_META_PAGE_ALIGN, 0,
+                                                    true /* commit*/, (MI_SECURE==0) /* allow large? */,
                                                     NULL /* req arena */, 0 /* thread_seq */, &memid);
-  if (mpage == NULL) return NULL;
-  mi_assert_internal(_mi_is_aligned(mpage,MI_META_PAGE_ALIGN));
+  if (base == NULL) return NULL;
+  mi_assert_internal(_mi_is_aligned(base,MI_META_PAGE_ALIGN));
   if (!memid.initially_zero) {
-    _mi_memzero_aligned(mpage, MI_ARENA_SLICE_SIZE);
+    _mi_memzero_aligned(base, MI_ARENA_SLICE_SIZE);
   }

-  // initialize the page
+  // guard pages
+  #if MI_SECURE
+  if (!memid.is_pinned) {
+    _mi_os_decommit(base, MI_META_PAGE_GUARD_SIZE);
+    _mi_os_decommit(base + MI_META_PAGE_SIZE - MI_META_PAGE_GUARD_SIZE, MI_META_PAGE_GUARD_SIZE);
+  }
+  #endif
+
+  // initialize the page and free block bitmap
+  mi_meta_page_t* mpage = (mi_meta_page_t*)(base + MI_META_PAGE_GUARD_SIZE);
   mpage->memid = memid;
   mi_bbitmap_init(&mpage->blocks_free, MI_META_BLOCKS_PER_PAGE, true /* already_zero */);
   const size_t mpage_size  = offsetof(mi_meta_page_t,blocks_free) + mi_bbitmap_size(MI_META_BLOCKS_PER_PAGE, NULL);
   const size_t info_blocks = _mi_divide_up(mpage_size,MI_META_BLOCK_SIZE);
-  mi_assert_internal(info_blocks < MI_META_BLOCKS_PER_PAGE);
-  mi_bbitmap_unsafe_setN(&mpage->blocks_free, info_blocks, MI_META_BLOCKS_PER_PAGE - info_blocks);
+  const size_t guard_blocks = _mi_divide_up(MI_META_PAGE_GUARD_SIZE, MI_META_BLOCK_SIZE);
+  mi_assert_internal(info_blocks + 2*guard_blocks < MI_META_BLOCKS_PER_PAGE);
+  mi_bbitmap_unsafe_setN(&mpage->blocks_free, info_blocks + guard_blocks, MI_META_BLOCKS_PER_PAGE - info_blocks - 2*guard_blocks);

   // push atomically in front of the meta page list
   // (note: there is no ABA issue since we never free meta-pages)

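Decommitting the first and last 4 KiB turns them into guard pages: the address range stays reserved but any access faults. A stand-alone POSIX sketch of the same technique (plain mmap/mprotect, not mimalloc code):

    #include <stdint.h>
    #include <stddef.h>
    #include <sys/mman.h>

    #define PAGE 4096
    #define AREA (64*1024)

    static uint8_t* alloc_with_guards(void) {
      uint8_t* base = mmap(NULL, AREA, PROT_READ|PROT_WRITE,
                           MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return NULL;
      // an overflow out of the usable middle region now faults instead of
      // silently corrupting a neighboring allocation
      mprotect(base, PAGE, PROT_NONE);
      mprotect(base + AREA - PAGE, PAGE, PROT_NONE);
      return base + PAGE;   // usable: AREA - 2*PAGE bytes
    }
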
src/arena.c (45 changed lines)

@@ -581,10 +581,16 @@ static mi_page_t* mi_arena_page_try_find_abandoned(mi_subproc_t* subproc, size_t
   return NULL;
 }

+#if MI_SECURE < 2
+#define MI_ARENA_GUARD_PAGE_SIZE  (0)
+#else
+#define MI_ARENA_GUARD_PAGE_SIZE  (4*MI_KiB)
+#endif
+
 static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_count, size_t block_size, size_t block_alignment,
                                             mi_arena_t* req_arena, size_t tseq)
 {
-  const bool allow_large = true;
+  const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page
   const bool commit = true;
   const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
   const size_t page_alignment = MI_ARENA_SLICE_ALIGN;

@@ -619,6 +625,14 @@ static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_
   mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN));
   mi_assert_internal(!os_align || _mi_is_aligned((uint8_t*)page + page_alignment, block_alignment));

+  // guard page at the end
+  const size_t page_body_size = mi_size_of_slices(slice_count) - MI_ARENA_GUARD_PAGE_SIZE;
+  #if MI_SECURE >= 2
+  if (memid.initially_committed && !memid.is_pinned) {
+    _mi_os_decommit((uint8_t*)page + page_body_size, MI_ARENA_GUARD_PAGE_SIZE);
+  }
+  #endif
+
   // claimed free slices: initialize the page partly
   if (!memid.initially_zero) {
     mi_track_mem_undefined(page, slice_count * MI_ARENA_SLICE_SIZE);

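Large/huge OS pages are disabled at level 2+ (`allow_large = (MI_SECURE < 2)`), presumably because a 4 KiB tail cannot be decommitted inside a 2 MiB large page. The usable body of an arena page then shrinks by one guard page; a sketch of the computation (slice size is an assumption):

    #include <stddef.h>

    #define SLICE_SIZE      (64*1024)   // assumption for illustration
    #define GUARD_PAGE_SIZE (4*1024)

    // the final 4 KiB of the last slice is decommitted, so blocks may only be
    // carved out of the body that precedes it
    static size_t page_body_size(size_t slice_count) {
      return slice_count * SLICE_SIZE - GUARD_PAGE_SIZE;
    }
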
@@ -629,7 +643,7 @@ static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_
   }
   #if MI_DEBUG > 1
   if (memid.initially_zero) {
-    if (!mi_mem_is_zero(page, mi_size_of_slices(slice_count))) {
+    if (!mi_mem_is_zero(page, page_body_size)) {
       _mi_error_message(EFAULT, "internal error: page memory was not zero initialized.\n");
       memid.initially_zero = false;
       _mi_memzero_aligned(page, sizeof(*page));

@@ -659,7 +673,7 @@ static mi_page_t* mi_arena_page_alloc_fresh(mi_subproc_t* subproc, size_t slice_
     // otherwise start after the info
     block_start = mi_page_info_size();
   }
-  const size_t reserved = (os_align ? 1 : (mi_size_of_slices(slice_count) - block_start) / block_size);
+  const size_t reserved = (os_align ? 1 : (page_body_size - block_start) / block_size);
   mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
   page->reserved = (uint16_t)reserved;
   page->page_start = (uint8_t*)page + block_start;

@@ -712,7 +726,11 @@ static mi_page_t* mi_singleton_page_alloc(mi_heap_t* heap, size_t block_size, si
   mi_tld_t* const tld = heap->tld;
   const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
   const size_t info_size = (os_align ? MI_PAGE_ALIGN : mi_page_info_size());
+  #if MI_ARENA_GUARD_PAGE_SIZE == 0
   const size_t slice_count = mi_slice_count_of_size(info_size + block_size);
+  #else
+  const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, MI_ARENA_GUARD_PAGE_SIZE) + MI_ARENA_GUARD_PAGE_SIZE);
+  #endif

   mi_page_t* page = mi_arena_page_alloc_fresh(tld->subproc, slice_count, block_size, block_alignment, req_arena, tld->thread_seq);
   if (page == NULL) return NULL;

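For singleton (huge) pages the payload is rounded up to guard-page granularity and one extra guard page is appended before converting to a slice count; an illustrative re-computation (sizes are assumptions):

    #include <stddef.h>

    #define SLICE_SIZE      (64*1024)
    #define GUARD_PAGE_SIZE (4*1024)

    static size_t align_up(size_t n, size_t a) { return ((n + a - 1) / a) * a; }

    static size_t singleton_slice_count(size_t info_size, size_t block_size) {
      // with guard pages: round the payload to 4 KiB, then add the guard itself
      size_t needed = align_up(info_size + block_size, GUARD_PAGE_SIZE) + GUARD_PAGE_SIZE;
      return (needed + SLICE_SIZE - 1) / SLICE_SIZE;   // like mi_slice_count_of_size
    }
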
@@ -721,6 +739,7 @@ static mi_page_t* mi_singleton_page_alloc(mi_heap_t* heap, size_t block_size, si
   mi_assert(page->reserved == 1);
   mi_assert_internal(_mi_ptr_page(page)==page);
   mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
+  _mi_page_init(heap, page);

   return page;
 }

@@ -778,6 +797,13 @@ void _mi_arena_page_free(mi_page_t* page) {
   }
   #endif

+  // recommit guard page at the end?
+  #if MI_SECURE >= 2
+  if (!page->memid.is_pinned) {
+    _mi_os_commit((uint8_t*)page + mi_memid_size(page->memid) - MI_ARENA_GUARD_PAGE_SIZE, MI_ARENA_GUARD_PAGE_SIZE, NULL);
+  }
+  #endif
+
   // unregister page
   _mi_page_map_unregister(page);
   if (page->memid.memkind == MI_MEM_ARENA) {

@@ -1124,12 +1150,17 @@ static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t s
   mi_arena_t* arena = (mi_arena_t*)start;

   // commit & zero if needed
-  bool is_zero = memid.initially_zero;
+  const size_t os_page_size = _mi_os_page_size();
   if (!memid.initially_committed) {
-    _mi_os_commit(arena, mi_size_of_slices(info_slices), NULL);
+    // security: always leave a guard OS page decommitted at the end (already part of info_slices)
+    _mi_os_commit(arena, mi_size_of_slices(info_slices) - os_page_size, NULL);
   }
-  if (!is_zero) {
-    _mi_memzero(arena, mi_size_of_slices(info_slices));
+  else if (!memid.is_pinned) {
+    // security: decommit a guard OS page at the end of the arena info
+    _mi_os_decommit((uint8_t*)arena + mi_size_of_slices(info_slices) - os_page_size, os_page_size);
+  }
+  if (!memid.initially_zero) {
+    _mi_memzero(arena, mi_size_of_slices(info_slices) - os_page_size);
   }

   // init

@@ -102,6 +102,14 @@ typedef struct mi_option_desc_s {
 #endif
 #endif

+#ifndef MI_DEFAULT_PAGEMAP_COMMIT
+#if defined(__APPLE__)
+#define MI_DEFAULT_PAGEMAP_COMMIT  1
+#else
+#define MI_DEFAULT_PAGEMAP_COMMIT  0
+#endif
+#endif
+

 static mi_option_desc_t options[_mi_option_last] =
 {

@@ -165,7 +173,8 @@ static mi_option_desc_t options[_mi_option_last] =
   { 2, UNINIT, MI_OPTION(full_page_retain) },
   { 4, UNINIT, MI_OPTION(max_page_candidates) },
   { 0, UNINIT, MI_OPTION(max_vabits) },
-  { 0, UNINIT, MI_OPTION(debug_commit_full_pagemap) },
+  { MI_DEFAULT_PAGEMAP_COMMIT,
+       UNINIT, MI_OPTION(pagemap_commit) },            // commit the full pagemap upfront?
 };

 static void mi_option_init(mi_option_desc_t* desc);

src/os.c (4 changed lines)

@@ -536,8 +536,8 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
   start = huge_start;
   if (start == 0) {
     // Initialize the start address after the 32TiB area
-    start = ((uintptr_t)32 << 40);  // 32TiB virtual start address
+    start = ((uintptr_t)8 << 40);   // 8TiB virtual start address
     #if (MI_SECURE>0 || MI_DEBUG==0)  // security: randomize start of huge pages unless in debug mode
     uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
     start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF));  // (randomly 12bits)*1GiB == between 0 to 4TiB
     #endif

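The randomization takes 12 bits of the heap RNG and scales them by the 1 GiB huge-page size, placing the huge-page area at 8 TiB plus 0..4 TiB. A tiny stand-alone check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define HUGE_OS_PAGE_SIZE ((uintptr_t)1 << 30)   // 1 GiB

    int main(void) {
      uintptr_t start = (uintptr_t)8 << 40;           // 8 TiB base
      uintptr_t r = 0x0123456789abcdefULL;            // stand-in for _mi_heap_random_next()
      start += HUGE_OS_PAGE_SIZE * ((r >> 17) & 0x0FFF);  // 12 bits * 1 GiB = 0..(4 TiB - 1 GiB)
      printf("huge page area at 0x%llx\n", (unsigned long long)start);
      return 0;
    }
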
@@ -42,7 +42,7 @@ bool _mi_page_map_init(void) {
   // Allocate the page map and commit bits
   mi_page_map_max_address = (void*)(MI_PU(1) << vbits);
   const size_t page_map_size = (MI_ZU(1) << (vbits - MI_ARENA_SLICE_SHIFT));
-  const bool commit = (page_map_size <= 1*MI_MiB || mi_option_is_enabled(mi_option_debug_commit_full_pagemap));  // _mi_os_has_overcommit(); // commit on-access on Linux systems?
+  const bool commit = (page_map_size <= 1*MI_MiB || mi_option_is_enabled(mi_option_pagemap_commit));  // _mi_os_has_overcommit(); // commit on-access on Linux systems?
   const size_t commit_bits = _mi_divide_up(page_map_size, MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT);
   const size_t bitmap_size = (commit ? 0 : mi_bitmap_size(commit_bits, NULL));
   const size_t reserve_size = bitmap_size + page_map_size;

@@ -187,7 +187,7 @@ bool _mi_page_map_init(void) {
   const size_t os_page_size = _mi_os_page_size();
   const size_t page_map_size = _mi_align_up( page_map_count * sizeof(mi_page_t**), os_page_size);
   const size_t reserve_size = page_map_size + os_page_size;
-  const bool commit = page_map_size <= 64*MI_KiB || mi_option_is_enabled(mi_option_debug_commit_full_pagemap);  // _mi_os_has_overcommit(); // commit on-access on Linux systems?
+  const bool commit = page_map_size <= 64*MI_KiB || mi_option_is_enabled(mi_option_pagemap_commit);  // _mi_os_has_overcommit(); // commit on-access on Linux systems?
   _mi_page_map = (mi_page_t***)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid);
   if (_mi_page_map==NULL) {
     _mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB);

src/page.c (14 changed lines)

@@ -475,7 +475,7 @@ static mi_decl_noinline void mi_heap_generic_collect(mi_heap_t* heap) {

 static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
   MI_UNUSED(stats);
-  #if (MI_SECURE<=2)
+  #if (MI_SECURE<3)
   mi_assert_internal(page->free == NULL);
   mi_assert_internal(page->local_free == NULL);
   #endif

@@ -533,7 +533,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats)
 {
   MI_UNUSED(stats);
-  #if (MI_SECURE <= 2)
+  #if (MI_SECURE<3)
   mi_assert_internal(page->free == NULL);
   mi_assert_internal(page->local_free == NULL);
   #endif

@@ -561,7 +561,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 ----------------------------------------------------------- */

 #define MI_MAX_EXTEND_SIZE    (4*1024)       // heuristic, one OS page seems to work well.
-#if (MI_SECURE>0)
+#if (MI_SECURE>=3)
 #define MI_MIN_EXTEND         (8*MI_SECURE)  // extend at least by this many
 #else
 #define MI_MIN_EXTEND         (1)

@@ -574,7 +574,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 // extra test in malloc? or cache effects?)
 static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
-  #if (MI_SECURE<=2)
+  #if (MI_SECURE<3)
   mi_assert(page->free == NULL);
   mi_assert(page->local_free == NULL);
   if (page->free != NULL) return;

@@ -605,7 +605,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_internal(extend < (1UL<<16));

   // and append the extend the free list
-  if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
+  if (extend < MI_MIN_SLICES || MI_SECURE<3) { //!mi_option_is_enabled(mi_option_secure)) {
     mi_page_free_list_extend(page, bsize, extend, &heap->tld->stats );
   }
   else {

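At MI_SECURE >= 3 the free list of a page extension is built in randomized order (the `mi_page_free_list_extend_secure` path). A generic sketch of such a shuffled extension; this is not mimalloc's exact algorithm:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct block_s { struct block_s* next; } block_t;

    // link n fresh blocks of size bsize in a shuffled order so the address of
    // the next block cannot be predicted from the current one
    static block_t* extend_free_list_random(uint8_t* area, size_t bsize, size_t n) {
      size_t idx[256];                    // sketch only: assume n <= 256
      for (size_t i = 0; i < n; i++) idx[i] = i;
      for (size_t i = n; i > 1; i--) {    // Fisher-Yates shuffle
        size_t j = (size_t)rand() % i;    // a real allocator uses its own CSPRNG
        size_t t = idx[i-1]; idx[i-1] = idx[j]; idx[j] = t;
      }
      block_t* head = NULL;
      for (size_t i = 0; i < n; i++) {
        block_t* b = (block_t*)(area + idx[i] * bsize);
        b->next = head; head = b;
      }
      return head;
    }
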
@@ -621,7 +621,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) {
 void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
   mi_assert(page != NULL);
   mi_page_set_heap(page, heap);

   size_t page_size;
   uint8_t* page_start = mi_page_area(page, &page_size); MI_UNUSED(page_start);
   mi_track_mem_noaccess(page_start,page_size);

@@ -740,7 +740,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
   } // for each page

   mi_debug_heap_stat_counter_increase(heap, searches, count);

   // set the page to the best candidate
   if (page_candidate != NULL) {
     page = page_candidate;

@@ -412,7 +412,7 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
   int err = 0;
   // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
   err = unix_madvise(start, size, MADV_DONTNEED);
-  #if !MI_DEBUG && !MI_SECURE
+  #if !MI_DEBUG && MI_SECURE<=2
   *needs_recommit = false;
   #else
   *needs_recommit = true;

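Reporting `*needs_recommit = true` matters because in the debug/secure branch the decommitted range is typically also protected, so it must be explicitly recommitted before reuse. A simplified POSIX sketch of the two strategies (the STRICT toggle is illustrative, not a mimalloc flag):

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    static int decommit(void* p, size_t size, bool* needs_recommit) {
      int err = madvise(p, size, MADV_DONTNEED);  // rss drops; pages refault as zero
    #if !defined(STRICT)
      *needs_recommit = false;        // memory stays readable/writable
    #else
      mprotect(p, size, PROT_NONE);   // any use-after-decommit now faults
      *needs_recommit = true;         // caller must make it accessible again
    #endif
      return err;
    }
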
@@ -35,7 +35,7 @@ int main() {
   // corrupt_free();
   // block_overflow1();
   // block_overflow2();
-  // test_canary_leak();
+  test_canary_leak();
   // test_aslr();
   // invalid_free();
   // test_reserved();