Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-03 14:09:31 +03:00)
Merge branch 'dev-guarded' into dev-slice-guarded
commit 6a21db1017
2 changed files with 34 additions and 20 deletions
@@ -98,7 +98,7 @@
     <PreprocessorDefinitions>MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);</PreprocessorDefinitions>
     <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
     <SupportJustMyCode>false</SupportJustMyCode>
-    <CompileAs>Default</CompileAs>
+    <CompileAs>CompileAsCpp</CompileAs>
   </ClCompile>
   <Link>
     <AdditionalDependencies>$(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies)</AdditionalDependencies>
@@ -126,7 +126,7 @@
     <PreprocessorDefinitions>MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);</PreprocessorDefinitions>
     <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
     <SupportJustMyCode>false</SupportJustMyCode>
-    <CompileAs>Default</CompileAs>
+    <CompileAs>CompileAsCpp</CompileAs>
   </ClCompile>
   <Link>
     <AdditionalDependencies>$(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies)</AdditionalDependencies>
@@ -157,7 +157,7 @@
     <AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
     <WholeProgramOptimization>false</WholeProgramOptimization>
     <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
-    <CompileAs>Default</CompileAs>
+    <CompileAs>CompileAsCpp</CompileAs>
     <BufferSecurityCheck>false</BufferSecurityCheck>
   </ClCompile>
   <Link>
@@ -189,7 +189,7 @@
     <AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
     <WholeProgramOptimization>false</WholeProgramOptimization>
     <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
-    <CompileAs>Default</CompileAs>
+    <CompileAs>CompileAsCpp</CompileAs>
     <BufferSecurityCheck>false</BufferSecurityCheck>
   </ClCompile>
   <Link>
src/alloc.c (46 changed lines)
@@ -31,18 +31,22 @@ terms of the MIT license. A copy of the license can be found in the file
 extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
 {
   mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);

   // check the free list
   mi_block_t* const block = page->free;
   if mi_unlikely(block == NULL) {
     return _mi_malloc_generic(heap, size, zero, 0);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);

   // pop from the free list
   page->free = mi_block_next(page, block);
   page->used++;
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
   mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE));

   #if MI_DEBUG>3
   if (page->free_is_zero && size > sizeof(*block)) {
     mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
   }
   #endif
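The fast path of _mi_page_malloc_zero above is an intrusive free-list pop: a free block stores the link to the next free block in its own first bytes, so allocation on the hot path is one load and two stores. A minimal standalone model of that pattern (simplified stand-in types, not mimalloc's actual definitions):

    #include <stddef.h>

    // Simplified model of mimalloc's per-page intrusive free list.
    typedef struct block_s { struct block_s* next; } block_t;

    typedef struct page_s {
      block_t* free;   // head of the free list
      size_t   used;   // number of blocks handed out
    } page_t;

    static void* page_pop(page_t* page) {
      block_t* const block = page->free;
      if (block == NULL) return NULL;   // mimalloc falls back to _mi_malloc_generic here
      page->free = block->next;         // pop from the free list
      page->used++;
      return block;
    }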
@@ -118,26 +122,26 @@ extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t siz
 }

 #if MI_DEBUG_GUARDED
 // forward declaration
-static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept;
+static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment);
+static inline bool mi_heap_malloc_small_use_guarded(size_t size);
 #endif

 static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
   mi_assert(heap != NULL);
-  mi_assert(size <= MI_SMALL_SIZE_MAX);
   #if MI_DEBUG
   const uintptr_t tid = _mi_thread_id();
   mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
   #endif
+  mi_assert(size <= MI_SMALL_SIZE_MAX);
   #if (MI_PADDING || MI_DEBUG_GUARDED)
   if (size == 0) { size = sizeof(void*); }
   #endif
   #if MI_DEBUG_GUARDED
-  if (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max) && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min)) {
-    return mi_heap_malloc_guarded(heap, size, zero, 0);
-  }
+  if (mi_heap_malloc_small_use_guarded(size)) { return mi_heap_malloc_guarded(heap, size, zero); }
   #endif

   // get page in constant time, and allocate from it
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
   void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
   mi_track_malloc(p,size,zero);
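The two options consulted above, mi_option_debug_guarded_min and mi_option_debug_guarded_max, bound the sizes that take the guarded path. A hedged usage sketch, assuming a build of this branch with MI_DEBUG_GUARDED enabled and the standard mi_option_set API; the threshold values are illustrative only:

    #include <mimalloc.h>
    #include <string.h>

    int main(void) {
      // Route allocations between 16 and 1024 bytes through guard pages
      // (option names as they appear in this branch).
      mi_option_set(mi_option_debug_guarded_min, 16);
      mi_option_set(mi_option_debug_guarded_max, 1024);

      char* p = (char*)mi_malloc(100);  // within [min,max]: allocated with a guard page
      memset(p, 0, 100);                // in-bounds writes are fine
      // p[mi_usable_size(p)] = 0;      // writing past the usable size would fault immediately
      mi_free(p);
      return 0;
    }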
@@ -167,24 +171,21 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si

 // The main allocation function
 extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
   // fast path for small objects
   if mi_likely(size <= MI_SMALL_SIZE_MAX) {
     mi_assert_internal(huge_alignment == 0);
     return mi_heap_malloc_small_zero(heap, size, zero);
   }
   #if MI_DEBUG_GUARDED
-  else if ( huge_alignment == 0 &&                                  // guarded pages do not work with huge alignments at the moment
-            _mi_option_get_fast(mi_option_debug_guarded_max) > 0 && // guarded must be enabled
-            ((size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min) && size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max))
-             || ((mi_good_size(size) & (_mi_os_page_size()-1)) == 0)) ) // page-size multiples are always guarded so we can have a correct `mi_usable_size`.
-  {
-    return mi_heap_malloc_guarded(heap, size, zero, 0);
-  }
+  else if (mi_heap_malloc_use_guarded(size,huge_alignment>0)) { return mi_heap_malloc_guarded(heap, size, zero); }
   #endif
   else {
     // regular allocation
     mi_assert(heap!=NULL);
     mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
     void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
     mi_track_malloc(p,size,zero);

     #if MI_STAT>1
     if (p != NULL) {
       if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
@@ -601,7 +602,20 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
 }

 #if MI_DEBUG_GUARDED
-static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
+static inline bool mi_heap_malloc_small_use_guarded(size_t size) {
+  return (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max)
+          && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min));
+}
+
+static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment) {
+  return (!has_huge_alignment                                      // guarded pages do not work with huge alignments at the moment
+          && _mi_option_get_fast(mi_option_debug_guarded_max) > 0  // guarded must be enabled
+          && (mi_heap_malloc_small_use_guarded(size)
+              || ((mi_good_size(size) & (_mi_os_page_size() - 1)) == 0)) // page-size multiples are always guarded so we can have a correct `mi_usable_size`.
+         );
+}
+
+static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
 {
   #if defined(MI_PADDING_SIZE)
   mi_assert(MI_PADDING_SIZE==0);
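Note the second disjunct in mi_heap_malloc_use_guarded: any size whose rounded "good" size is an exact multiple of the OS page size is always guarded, which keeps mi_usable_size exact for guarded blocks. The mask test relies on the page size being a power of two; a small sketch with a local stand-in (not the mimalloc internals):

    #include <stdbool.h>
    #include <stddef.h>

    #define OS_PAGE_SIZE ((size_t)4096)  // illustrative stand-in for _mi_os_page_size()

    // For a power-of-two page size, (n & (page-1)) == 0 is equivalent to n % page == 0.
    static bool is_page_multiple(size_t good_size) {
      return (good_size & (OS_PAGE_SIZE - 1)) == 0;
    }

    // is_page_multiple(4096) and is_page_multiple(8192) are true;
    // is_page_multiple(4100) is false.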
@@ -610,7 +624,7 @@ static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t siz
   const size_t obj_size = _mi_align_up(size, MI_MAX_ALIGN_SIZE); // ensure minimal alignment requirement
   const size_t os_page_size = _mi_os_page_size();
   const size_t req_size = _mi_align_up(obj_size + os_page_size, os_page_size);
-  void* const block = _mi_malloc_generic(heap, req_size, zero, huge_alignment);
+  void* const block = _mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
   if (block==NULL) return NULL;
   mi_page_t* page = _mi_ptr_page(block);
   mi_segment_t* segment = _mi_page_segment(page);
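The request-size computation in this last hunk reserves room for the object plus one full OS page that will serve as the guard. A worked sketch of the arithmetic with a local align_up and illustrative constants (the real code uses _mi_align_up, _mi_os_page_size, and MI_MAX_ALIGN_SIZE):

    #include <stdio.h>
    #include <stddef.h>

    // Local re-implementation of power-of-two round-up.
    static size_t align_up(size_t n, size_t align) {
      return (n + align - 1) & ~(align - 1);
    }

    int main(void) {
      const size_t os_page_size = 4096;  // illustrative OS page size
      const size_t max_align    = 16;    // illustrative stand-in for MI_MAX_ALIGN_SIZE

      const size_t size     = 100;
      const size_t obj_size = align_up(size, max_align);                        // 112
      const size_t req_size = align_up(obj_size + os_page_size, os_page_size);  // 8192

      // Two pages total: the first holds the aligned object, the second is the
      // guard page placed right after it.
      printf("obj=%zu req=%zu pages=%zu\n", obj_size, req_size, req_size / os_page_size);
      return 0;
    }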