Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-19 13:39:31 +03:00)
Merge branch 'dev-slice' into fix-passing-heap-v2
Commit: 8e3d7add99
19 changed files with 929 additions and 820 deletions
|
@ -84,6 +84,17 @@ endif()
|
||||||
# Process options
|
# Process options
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# put -Wall early so other warnings can be disabled selectively
|
||||||
|
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang")
|
||||||
|
list(APPEND mi_cflags -Wall -Wextra -Wpedantic)
|
||||||
|
endif()
|
||||||
|
if(CMAKE_C_COMPILER_ID MATCHES "GNU")
|
||||||
|
list(APPEND mi_cflags -Wall -Wextra)
|
||||||
|
endif()
|
||||||
|
if(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
||||||
|
list(APPEND mi_cflags -Wall)
|
||||||
|
endif()
|
||||||
|
|
||||||
if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel")
|
if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel")
|
||||||
set(MI_USE_CXX "ON")
|
set(MI_USE_CXX "ON")
|
||||||
endif()
|
endif()
|
||||||
|
@ -186,6 +197,10 @@ endif()
|
||||||
if(MI_SEE_ASM)
|
if(MI_SEE_ASM)
|
||||||
message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)")
|
message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)")
|
||||||
list(APPEND mi_cflags -save-temps)
|
list(APPEND mi_cflags -save-temps)
|
||||||
|
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang")
|
||||||
|
message(STATUS "No GNU Line marker")
|
||||||
|
list(APPEND mi_cflags -Wno-gnu-line-marker)
|
||||||
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(MI_CHECK_FULL)
|
if(MI_CHECK_FULL)
|
||||||
|
@ -279,17 +294,17 @@ endif()
|
||||||
|
|
||||||
# Compiler flags
|
# Compiler flags
|
||||||
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
|
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
|
||||||
list(APPEND mi_cflags -Wall -Wextra -Wno-unknown-pragmas -fvisibility=hidden)
|
list(APPEND mi_cflags -Wno-unknown-pragmas -fvisibility=hidden)
|
||||||
if(NOT MI_USE_CXX)
|
if(NOT MI_USE_CXX)
|
||||||
list(APPEND mi_cflags -Wstrict-prototypes)
|
list(APPEND mi_cflags -Wstrict-prototypes)
|
||||||
endif()
|
endif()
|
||||||
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang")
|
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang")
|
||||||
list(APPEND mi_cflags -Wpedantic -Wno-static-in-inline)
|
list(APPEND mi_cflags -Wno-static-in-inline)
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
if(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
||||||
list(APPEND mi_cflags -Wall -fvisibility=hidden)
|
list(APPEND mi_cflags -fvisibility=hidden)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM_NAME MATCHES "Haiku")
|
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM_NAME MATCHES "Haiku")
|
||||||
|
|
|
@ -168,7 +168,7 @@ void* mi_expand(void* p, size_t newsize);
|
||||||
/// @returns A pointer to a block of \a count * \a size bytes, or \a NULL
|
/// @returns A pointer to a block of \a count * \a size bytes, or \a NULL
|
||||||
/// if out of memory or if \a count * \a size overflows.
|
/// if out of memory or if \a count * \a size overflows.
|
||||||
///
|
///
|
||||||
/// If there is no overflow, it behaves exactly like `mi_malloc(p,count*size)`.
|
/// If there is no overflow, it behaves exactly like `mi_malloc(count*size)`.
|
||||||
/// @see mi_calloc()
|
/// @see mi_calloc()
|
||||||
/// @see mi_zallocn()
|
/// @see mi_zallocn()
|
||||||
void* mi_mallocn(size_t count, size_t size);
|
void* mi_mallocn(size_t count, size_t size);
|
||||||
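For illustration, a minimal sketch of the overflow guard this contract implies (the helper below is hypothetical and simply wraps `malloc`; it is not the mimalloc implementation):

#include <stdint.h>   // SIZE_MAX
#include <stdlib.h>   // malloc, size_t

// Return NULL when count * size would overflow, otherwise allocate count * size bytes.
static void* mallocn_checked(size_t count, size_t size) {
  if (size != 0 && count > SIZE_MAX / size) return NULL;  // count * size would overflow
  return malloc(count * size);
}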
|
@ -499,11 +499,11 @@ void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_m
|
||||||
/// \{
|
/// \{
|
||||||
|
|
||||||
/// The maximum supported alignment size (currently 1MiB).
|
/// The maximum supported alignment size (currently 1MiB).
|
||||||
#define MI_ALIGNMENT_MAX (1024*1024UL)
|
#define MI_BLOCK_ALIGNMENT_MAX (1024*1024UL)
|
||||||
|
|
||||||
/// Allocate \a size bytes aligned by \a alignment.
|
/// Allocate \a size bytes aligned by \a alignment.
|
||||||
/// @param size number of bytes to allocate.
|
/// @param size number of bytes to allocate.
|
||||||
/// @param alignment the minimal alignment of the allocated memory. Must be less than #MI_ALIGNMENT_MAX.
|
/// @param alignment the minimal alignment of the allocated memory. Must be less than #MI_BLOCK_ALIGNMENT_MAX.
|
||||||
/// @returns pointer to the allocated memory or \a NULL if out of memory.
|
/// @returns pointer to the allocated memory or \a NULL if out of memory.
|
||||||
/// The returned pointer is aligned by \a alignment, i.e.
|
/// The returned pointer is aligned by \a alignment, i.e.
|
||||||
/// `(uintptr_t)p % alignment == 0`.
|
/// `(uintptr_t)p % alignment == 0`.
|
||||||
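A short usage sketch of the documented guarantee (assumes mimalloc is installed and `<mimalloc.h>` is on the include path):

#include <mimalloc.h>
#include <assert.h>
#include <stdint.h>

int main(void) {
  void* p = mi_malloc_aligned(1000, 4096);          // 1000 bytes, 4 KiB aligned
  assert(p == NULL || ((uintptr_t)p % 4096) == 0);  // the documented alignment guarantee
  mi_free(p);                                       // mi_free(NULL) is a no-op
  return 0;
}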
|
|
|
@ -217,6 +217,12 @@
|
||||||
<ClCompile Include="..\..\src\bitmap.c">
|
<ClCompile Include="..\..\src\bitmap.c">
|
||||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
|
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
|
||||||
</ClCompile>
|
</ClCompile>
|
||||||
|
<ClCompile Include="..\..\src\free.c">
|
||||||
|
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||||
|
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||||
|
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||||
|
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||||
|
</ClCompile>
|
||||||
<ClCompile Include="..\..\src\heap.c" />
|
<ClCompile Include="..\..\src\heap.c" />
|
||||||
<ClCompile Include="..\..\src\init.c" />
|
<ClCompile Include="..\..\src\init.c" />
|
||||||
<ClCompile Include="..\..\src\libc.c" />
|
<ClCompile Include="..\..\src\libc.c" />
|
||||||
|
|
|
@ -58,6 +58,9 @@
|
||||||
<ClCompile Include="..\..\src\libc.c">
|
<ClCompile Include="..\..\src\libc.c">
|
||||||
<Filter>Sources</Filter>
|
<Filter>Sources</Filter>
|
||||||
</ClCompile>
|
</ClCompile>
|
||||||
|
<ClCompile Include="..\..\src\free.c">
|
||||||
|
<Filter>Sources</Filter>
|
||||||
|
</ClCompile>
|
||||||
</ItemGroup>
|
</ItemGroup>
|
||||||
<ItemGroup>
|
<ItemGroup>
|
||||||
<ClInclude Include="..\..\src\bitmap.h">
|
<ClInclude Include="..\..\src\bitmap.h">
|
||||||
|
|
|
@ -30,7 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
#define mi_decl_noinline __declspec(noinline)
|
#define mi_decl_noinline __declspec(noinline)
|
||||||
#define mi_decl_thread __declspec(thread)
|
#define mi_decl_thread __declspec(thread)
|
||||||
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
|
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
|
||||||
#define mi_decl_weak
|
#define mi_decl_weak
|
||||||
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
|
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
|
||||||
#define mi_decl_noinline __attribute__((noinline))
|
#define mi_decl_noinline __attribute__((noinline))
|
||||||
#define mi_decl_thread __thread
|
#define mi_decl_thread __thread
|
||||||
|
@ -40,7 +40,7 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
#define mi_decl_noinline
|
#define mi_decl_noinline
|
||||||
#define mi_decl_thread __thread // hope for the best :-)
|
#define mi_decl_thread __thread // hope for the best :-)
|
||||||
#define mi_decl_cache_align
|
#define mi_decl_cache_align
|
||||||
#define mi_decl_weak
|
#define mi_decl_weak
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
|
#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
|
||||||
|
@ -133,8 +133,8 @@ void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
|
||||||
size_t _mi_arena_segment_abandoned_count(void);
|
size_t _mi_arena_segment_abandoned_count(void);
|
||||||
|
|
||||||
typedef struct mi_arena_field_cursor_s { // abstract
|
typedef struct mi_arena_field_cursor_s { // abstract
|
||||||
mi_arena_id_t start;
|
mi_arena_id_t start;
|
||||||
int count;
|
int count;
|
||||||
size_t bitmap_idx;
|
size_t bitmap_idx;
|
||||||
} mi_arena_field_cursor_t;
|
} mi_arena_field_cursor_t;
|
||||||
void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current);
|
void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current);
|
||||||
|
@ -203,9 +203,9 @@ void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool
|
||||||
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
|
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
|
||||||
void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
|
void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
|
||||||
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
|
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
|
||||||
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
|
mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p);
|
||||||
bool _mi_free_delayed_block(mi_block_t* block);
|
bool _mi_free_delayed_block(mi_block_t* block);
|
||||||
void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
|
void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
|
||||||
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
|
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
|
||||||
|
|
||||||
// "libc.c"
|
// "libc.c"
|
||||||
|
@ -437,9 +437,14 @@ static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t si
|
||||||
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
|
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
|
||||||
// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
|
// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
|
||||||
// therefore we align one byte before `p`.
|
// therefore we align one byte before `p`.
|
||||||
|
// We check for NULL afterwards on 64-bit systems to improve codegen for `mi_free`.
|
||||||
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
|
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
|
||||||
mi_assert_internal(p != NULL);
|
mi_segment_t* const segment = (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
|
||||||
return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
|
#if MI_INTPTR_SIZE <= 4
|
||||||
|
return (p==NULL ? NULL : segment);
|
||||||
|
#else
|
||||||
|
return ((intptr_t)segment <= 0 ? NULL : segment);
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
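To see why the code aligns down from one byte before `p`, here is a standalone sketch of the mask computation (the 32 MiB segment size is an assumption for the example, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_SIZE ((uintptr_t)1 << 25)   // assumption: 32 MiB segments for the example
#define SEGMENT_MASK (SEGMENT_SIZE - 1)

static uintptr_t segment_of(uintptr_t p) {
  return (p - 1) & ~SEGMENT_MASK;           // align "one byte before p" downwards
}

int main(void) {
  uintptr_t base = 4 * SEGMENT_SIZE;
  printf("%d\n", segment_of(base + 100) == base);           // interior pointer -> 1
  printf("%d\n", segment_of(base + SEGMENT_SIZE) == base);  // block exactly at the next boundary -> 1
  return 0;
}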
|
|
||||||
static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
|
static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
|
||||||
|
@ -454,6 +459,7 @@ static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
|
||||||
|
|
||||||
// Segment belonging to a page
|
// Segment belonging to a page
|
||||||
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
|
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
|
||||||
|
mi_assert_internal(page!=NULL);
|
||||||
mi_segment_t* segment = _mi_ptr_segment(page);
|
mi_segment_t* segment = _mi_ptr_segment(page);
|
||||||
mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
|
mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
|
||||||
return segment;
|
return segment;
|
||||||
|
@ -482,31 +488,28 @@ static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const
|
||||||
}
|
}
|
||||||
|
|
||||||
// Quick page start for initialized pages
|
// Quick page start for initialized pages
|
||||||
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
|
static inline uint8_t* mi_page_start(const mi_page_t* page) {
|
||||||
return _mi_segment_page_start(segment, page, page_size);
|
mi_assert_internal(page->page_start != NULL);
|
||||||
|
mi_assert_expensive(_mi_segment_page_start(_mi_page_segment(page),page,NULL) == page->page_start);
|
||||||
|
return page->page_start;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the page containing the pointer
|
// Get the page containing the pointer
|
||||||
static inline mi_page_t* _mi_ptr_page(void* p) {
|
static inline mi_page_t* _mi_ptr_page(void* p) {
|
||||||
|
mi_assert_internal(p!=NULL);
|
||||||
return _mi_segment_page_of(_mi_ptr_segment(p), p);
|
return _mi_segment_page_of(_mi_ptr_segment(p), p);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the block size of a page (special case for huge objects)
|
// Get the block size of a page (special case for huge objects)
|
||||||
static inline size_t mi_page_block_size(const mi_page_t* page) {
|
static inline size_t mi_page_block_size(const mi_page_t* page) {
|
||||||
const size_t bsize = page->xblock_size;
|
mi_assert_internal(page->block_size > 0);
|
||||||
mi_assert_internal(bsize > 0);
|
return page->block_size;
|
||||||
if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
|
|
||||||
return bsize;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
size_t psize;
|
|
||||||
_mi_segment_page_start(_mi_page_segment(page), page, &psize);
|
|
||||||
return psize;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool mi_page_is_huge(const mi_page_t* page) {
|
static inline bool mi_page_is_huge(const mi_page_t* page) {
|
||||||
return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
|
mi_assert_internal((page->is_huge && _mi_page_segment(page)->kind == MI_SEGMENT_HUGE) ||
|
||||||
|
(!page->is_huge && _mi_page_segment(page)->kind != MI_SEGMENT_HUGE));
|
||||||
|
return page->is_huge;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the usable block size of a page without fixed padding.
|
// Get the usable block size of a page without fixed padding.
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/* ----------------------------------------------------------------------------
|
/* ----------------------------------------------------------------------------
|
||||||
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
|
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
|
||||||
This is free software; you can redistribute it and/or modify it under the
|
This is free software; you can redistribute it and/or modify it under the
|
||||||
terms of the MIT license. A copy of the license can be found in the file
|
terms of the MIT license. A copy of the license can be found in the file
|
||||||
"LICENSE" at the root of this distribution.
|
"LICENSE" at the root of this distribution.
|
||||||
|
@ -13,9 +13,12 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
// mi_heap_t : all data for a thread-local heap, contains
|
// mi_heap_t : all data for a thread-local heap, contains
|
||||||
// lists of all managed heap pages.
|
// lists of all managed heap pages.
|
||||||
// mi_segment_t : a larger chunk of memory (32GiB) from where pages
|
// mi_segment_t : a larger chunk of memory (32GiB) from where pages
|
||||||
// are allocated.
|
// are allocated. A segment is divided into slices (64KiB) from
|
||||||
// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from
|
// which pages are allocated.
|
||||||
|
// mi_page_t : a "mimalloc" page (usually 64KiB or 512KiB) from
|
||||||
// where objects are allocated.
|
// where objects are allocated.
|
||||||
|
// Note: we write "OS page" for OS memory pages while
|
||||||
|
// using plain "page" for mimalloc pages (`mi_page_t`).
|
||||||
// --------------------------------------------------------------------------
|
// --------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@ -89,10 +92,11 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
// We used to abandon huge pages but to eagerly deallocate if freed from another thread,
|
// We used to abandon huge pages in order to eagerly deallocate them if freed from another thread.
|
||||||
// but that makes it not possible to visit them during a heap walk or include them in a
|
// Unfortunately, that makes it not possible to visit them during a heap walk or include them in a
|
||||||
// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
|
// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks nowadays if freed from
|
||||||
// another thread so most memory is available until it gets properly freed by the owning thread.
|
// another thread so the memory becomes "virtually" available (and eventually gets properly freed by
|
||||||
|
// the owning thread).
|
||||||
// #define MI_HUGE_PAGE_ABANDON 1
|
// #define MI_HUGE_PAGE_ABANDON 1
|
||||||
|
|
||||||
|
|
||||||
|
@ -192,17 +196,14 @@ typedef int32_t mi_ssize_t;
|
||||||
#error "mimalloc internal: define more bins"
|
#error "mimalloc internal: define more bins"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// Maximum slice offset (15)
|
|
||||||
#define MI_MAX_SLICE_OFFSET ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
|
|
||||||
|
|
||||||
// Used as a special value to encode block sizes in 32 bits.
|
|
||||||
#define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB))
|
|
||||||
|
|
||||||
// blocks up to this size are always allocated aligned
|
// blocks up to this size are always allocated aligned
|
||||||
#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
|
#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
|
||||||
|
|
||||||
// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
|
// Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
|
||||||
#define MI_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
|
#define MI_BLOCK_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
|
||||||
|
|
||||||
|
// Maximum slice count (255) for which we can find the page for interior pointers
|
||||||
|
#define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
|
||||||
|
|
||||||
|
|
||||||
// ------------------------------------------------------
|
// ------------------------------------------------------
|
||||||
|
@ -227,7 +228,7 @@ typedef enum mi_delayed_e {
|
||||||
MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list
|
MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list
|
||||||
MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap
|
MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap
|
||||||
MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
|
MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
|
||||||
MI_NEVER_DELAYED_FREE = 3 // sticky, only resets on page reclaim
|
MI_NEVER_DELAYED_FREE = 3 // sticky: used for abandoned pages without an owning heap; this only resets on page reclaim
|
||||||
} mi_delayed_t;
|
} mi_delayed_t;
|
||||||
|
|
||||||
|
|
||||||
|
@ -266,7 +267,6 @@ typedef uintptr_t mi_thread_free_t;
|
||||||
// implement a monotonic heartbeat. The `thread_free` list is needed for
|
// implement a monotonic heartbeat. The `thread_free` list is needed for
|
||||||
// avoiding atomic operations in the common case.
|
// avoiding atomic operations in the common case.
|
||||||
//
|
//
|
||||||
//
|
|
||||||
// `used - |thread_free|` == actual blocks that are in use (alive)
|
// `used - |thread_free|` == actual blocks that are in use (alive)
|
||||||
// `used - |thread_free| + |free| + |local_free| == capacity`
|
// `used - |thread_free| + |free| + |local_free| == capacity`
|
||||||
//
|
//
|
||||||
|
@ -274,16 +274,13 @@ typedef uintptr_t mi_thread_free_t;
|
||||||
// the number of memory accesses in the `mi_page_all_free` function(s).
|
// the number of memory accesses in the `mi_page_all_free` function(s).
|
||||||
//
|
//
|
||||||
// Notes:
|
// Notes:
|
||||||
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
|
// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
|
||||||
// - Using `uint16_t` does not seem to slow things down
|
// - Using `uint16_t` does not seem to slow things down
|
||||||
// - The size is 8 words on 64-bit which helps the page index calculations
|
// - The size is 12 words on 64-bit which helps the page index calculations
|
||||||
// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
|
// (and 14 words on 32-bit, and encoded free lists add 2 words)
|
||||||
// and 12 are still good for address calculation)
|
// - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
|
||||||
// - To limit the structure size, the `xblock_size` is 32-bits only; for
|
|
||||||
// blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
|
|
||||||
// - `thread_free` uses the bottom bits as a delayed-free flags to optimize
|
|
||||||
// concurrent frees where only the first concurrent free adds to the owning
|
// concurrent frees where only the first concurrent free adds to the owning
|
||||||
// heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
|
// heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
|
||||||
// The invariant is that no-delayed-free is only set if there is
|
// The invariant is that no-delayed-free is only set if there is
|
||||||
// at least one block that will be added, or as already been added, to
|
// at least one block that will be added, or as already been added, to
|
||||||
// the owning heap `thread_delayed_free` list. This guarantees that pages
|
// the owning heap `thread_delayed_free` list. This guarantees that pages
|
||||||
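A sketch of the tagging scheme the `xthread_free` comment above describes: because blocks are at least pointer-aligned, the two low bits of the list head can carry the delayed-free state (names below are illustrative, not the mimalloc definitions):

#include <stdint.h>

typedef uintptr_t thread_free_t;   // block pointer with a 2-bit flag packed in the low bits

static thread_free_t tf_make(void* block, unsigned flag /* 0..3 */) {
  return ((uintptr_t)block & ~(uintptr_t)3) | (flag & 3);
}
static void*    tf_block(thread_free_t tf) { return (void*)(tf & ~(uintptr_t)3); }
static unsigned tf_flag(thread_free_t tf)  { return (unsigned)(tf & 3); }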
|
@ -292,20 +289,24 @@ typedef struct mi_page_s {
|
||||||
// "owned" by the segment
|
// "owned" by the segment
|
||||||
uint32_t slice_count; // slices in this page (0 if not a page)
|
uint32_t slice_count; // slices in this page (0 if not a page)
|
||||||
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
|
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
|
||||||
uint8_t is_committed : 1; // `true` if the page virtual memory is committed
|
uint8_t is_committed:1; // `true` if the page virtual memory is committed
|
||||||
uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized
|
uint8_t is_zero_init:1; // `true` if the page was initially zero initialized
|
||||||
|
uint8_t is_huge:1; // `true` if the page is in a huge segment (`segment->kind == MI_SEGMENT_HUGE`)
|
||||||
|
// padding
|
||||||
// layout like this to optimize access in `mi_malloc` and `mi_free`
|
// layout like this to optimize access in `mi_malloc` and `mi_free`
|
||||||
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
|
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
|
||||||
uint16_t reserved; // number of blocks reserved in memory
|
uint16_t reserved; // number of blocks reserved in memory
|
||||||
mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
|
mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
|
||||||
uint8_t free_is_zero : 1; // `true` if the blocks in the free list are zero initialized
|
uint8_t free_is_zero:1; // `true` if the blocks in the free list are zero initialized
|
||||||
uint8_t retire_expire : 7; // expiration count for retired blocks
|
uint8_t retire_expire:7; // expiration count for retired blocks
|
||||||
|
|
||||||
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
|
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
|
||||||
uint32_t used; // number of blocks in use (including blocks in `thread_free`)
|
|
||||||
uint32_t xblock_size; // size available in each block (always `>0`)
|
|
||||||
mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
|
mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
|
||||||
|
uint16_t used; // number of blocks in use (including blocks in `thread_free`)
|
||||||
|
uint8_t block_size_shift; // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
|
||||||
|
// padding
|
||||||
|
size_t block_size; // size available in each block (always `>0`)
|
||||||
|
uint8_t* page_start; // start of the page area containing the blocks
|
||||||
|
|
||||||
#if (MI_ENCODE_FREELIST || MI_PADDING)
|
#if (MI_ENCODE_FREELIST || MI_PADDING)
|
||||||
uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
|
uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
|
||||||
|
@ -317,10 +318,8 @@ typedef struct mi_page_s {
|
||||||
struct mi_page_s* next; // next page owned by this thread with the same `block_size`
|
struct mi_page_s* next; // next page owned by this thread with the same `block_size`
|
||||||
struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
|
struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
|
||||||
|
|
||||||
// 64-bit 9 words, 32-bit 12 words, (+2 for secure)
|
// 64-bit 11 words, 32-bit 13 words, (+2 for secure)
|
||||||
#if MI_INTPTR_SIZE==8
|
void* padding[1];
|
||||||
uintptr_t padding[1];
|
|
||||||
#endif
|
|
||||||
} mi_page_t;
|
} mi_page_t;
|
||||||
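The new `block_size_shift` field enables the fast "unalign" path mentioned in its comment: for power-of-two block sizes, finding the start of the block containing an interior pointer is a mask rather than a division. A hypothetical sketch (not the `_mi_page_ptr_unalign` code itself):

#include <stdint.h>

static void* block_start_of(uint8_t* page_start, void* p, uint8_t block_size_shift) {
  uintptr_t diff   = (uintptr_t)((uint8_t*)p - page_start);
  uintptr_t adjust = diff & (((uintptr_t)1 << block_size_shift) - 1);  // diff % block_size
  return (uint8_t*)p - adjust;                                         // start of the containing block
}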
|
|
||||||
|
|
||||||
|
@ -331,14 +330,15 @@ typedef struct mi_page_s {
|
||||||
|
|
||||||
typedef enum mi_page_kind_e {
|
typedef enum mi_page_kind_e {
|
||||||
MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
|
MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
|
||||||
MI_PAGE_MEDIUM, // medium blocks go into medium pages inside a segment
|
MI_PAGE_MEDIUM, // medium blocks go into 512KiB pages inside a segment
|
||||||
MI_PAGE_LARGE, // larger blocks go into a page of just one block
|
MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment
|
||||||
MI_PAGE_HUGE, // huge blocks (> 16 MiB) are put into a single page in a single segment.
|
MI_PAGE_HUGE // a huge page is a single page in a segment of variable size
|
||||||
|
// used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
|
||||||
} mi_page_kind_t;
|
} mi_page_kind_t;
|
||||||
|
|
||||||
typedef enum mi_segment_kind_e {
|
typedef enum mi_segment_kind_e {
|
||||||
MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
|
MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
|
||||||
MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
|
MI_SEGMENT_HUGE, // segment with just one huge page inside.
|
||||||
} mi_segment_kind_t;
|
} mi_segment_kind_t;
|
||||||
|
|
||||||
// ------------------------------------------------------
|
// ------------------------------------------------------
|
||||||
|
@ -371,13 +371,17 @@ typedef mi_page_t mi_slice_t;
|
||||||
typedef int64_t mi_msecs_t;
|
typedef int64_t mi_msecs_t;
|
||||||
|
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------
|
||||||
|
// a memory id tracks the provenance of arena/OS allocated memory
|
||||||
|
// ---------------------------------------------------------------
|
||||||
|
|
||||||
// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
|
// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
|
||||||
typedef enum mi_memkind_e {
|
typedef enum mi_memkind_e {
|
||||||
MI_MEM_NONE, // not allocated
|
MI_MEM_NONE, // not allocated
|
||||||
MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
|
MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
|
||||||
MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example)
|
MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example)
|
||||||
MI_MEM_OS, // allocated from the OS
|
MI_MEM_OS, // allocated from the OS
|
||||||
MI_MEM_OS_HUGE, // allocated as huge os pages
|
MI_MEM_OS_HUGE, // allocated as huge OS pages (usually 1GiB, pinned to physical memory)
|
||||||
MI_MEM_OS_REMAP, // allocated in a remapable area (i.e. using `mremap`)
|
MI_MEM_OS_REMAP, // allocated in a remapable area (i.e. using `mremap`)
|
||||||
MI_MEM_ARENA // allocated from an arena (the usual case)
|
MI_MEM_ARENA // allocated from an arena (the usual case)
|
||||||
} mi_memkind_t;
|
} mi_memkind_t;
|
||||||
|
@ -394,7 +398,7 @@ typedef struct mi_memid_os_info {
|
||||||
typedef struct mi_memid_arena_info {
|
typedef struct mi_memid_arena_info {
|
||||||
size_t block_index; // index in the arena
|
size_t block_index; // index in the arena
|
||||||
mi_arena_id_t id; // arena id (>= 1)
|
mi_arena_id_t id; // arena id (>= 1)
|
||||||
bool is_exclusive; // the arena can only be used for specific arena allocations
|
bool is_exclusive; // this arena can only be used for specific arena allocations
|
||||||
} mi_memid_arena_info_t;
|
} mi_memid_arena_info_t;
|
||||||
|
|
||||||
typedef struct mi_memid_s {
|
typedef struct mi_memid_s {
|
||||||
|
@ -402,46 +406,56 @@ typedef struct mi_memid_s {
|
||||||
mi_memid_os_info_t os; // only used for MI_MEM_OS
|
mi_memid_os_info_t os; // only used for MI_MEM_OS
|
||||||
mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
|
mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
|
||||||
} mem;
|
} mem;
|
||||||
bool is_pinned; // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
|
bool is_pinned; // `true` if we cannot decommit/reset/protect this memory (e.g. when allocated using large (2MiB) or huge (1GiB) OS pages)
|
||||||
bool initially_committed;// `true` if the memory was originally allocated as committed
|
bool initially_committed;// `true` if the memory was originally allocated as committed
|
||||||
bool initially_zero; // `true` if the memory was originally zero initialized
|
bool initially_zero; // `true` if the memory was originally zero initialized
|
||||||
mi_memkind_t memkind;
|
mi_memkind_t memkind;
|
||||||
} mi_memid_t;
|
} mi_memid_t;
|
||||||
|
|
||||||
|
|
||||||
// Segments are large allocated memory blocks (8mb on 64 bit) from
|
// -----------------------------------------------------------------------------------------
|
||||||
// the OS. Inside segments we allocated fixed size _pages_ that
|
// Segments are large allocated memory blocks (8MiB on 64-bit) from arenas or the OS.
|
||||||
// contain blocks.
|
//
|
||||||
|
// Inside segments we allocate fixed size mimalloc pages (`mi_page_t`) that contain blocks.
|
||||||
|
// The start of a segment is this structure with a fixed number of slice entries (`slices`)
|
||||||
|
// usually followed by a guard OS page and the actual allocation area with pages.
|
||||||
|
// While a page is not allocated, we view its data as a `mi_slice_t` (instead of a `mi_page_t`).
|
||||||
|
// Of any free area, the first slice has the info and `slice_offset == 0`; for any subsequent
|
||||||
|
// slices part of the area, the `slice_offset` is the byte offset back to the first slice
|
||||||
|
// (so we can quickly find the page info on a free, `internal.h:_mi_segment_page_of`).
|
||||||
|
// For slices, the `block_size` field is repurposed to signify if a slice is used (`1`) or not (`0`).
|
||||||
|
// Small and medium pages use a fixed amount of slices to reduce slice fragmentation, while
|
||||||
|
// large and huge pages span a variable amount of slices.
|
||||||
typedef struct mi_segment_s {
|
typedef struct mi_segment_s {
|
||||||
// constant fields
|
// constant fields
|
||||||
mi_memid_t memid; // memory id for arena allocation
|
mi_memid_t memid; // memory id for arena/OS allocation
|
||||||
bool allow_decommit;
|
bool allow_decommit; // can we decommit the memory
|
||||||
bool allow_purge;
|
bool allow_purge; // can we purge the memory (reset or decommit)
|
||||||
size_t segment_size;
|
size_t segment_size;
|
||||||
|
|
||||||
// segment fields
|
// segment fields
|
||||||
mi_msecs_t purge_expire;
|
mi_msecs_t purge_expire; // purge slices in the `purge_mask` after this time
|
||||||
mi_commit_mask_t purge_mask;
|
mi_commit_mask_t purge_mask; // slices that can be purged
|
||||||
mi_commit_mask_t commit_mask;
|
mi_commit_mask_t commit_mask; // slices that are currently committed
|
||||||
|
|
||||||
// from here is zero initialized
|
// from here is zero initialized
|
||||||
struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
|
struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
|
||||||
bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation)
|
bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation)
|
||||||
|
|
||||||
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
|
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
|
||||||
size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
|
size_t abandoned_visits; // count how often this segment is visited during abandoned reclamation (to force reclaim if it takes too long)
|
||||||
size_t used; // count of pages in use
|
size_t used; // count of pages in use
|
||||||
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
|
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
|
||||||
|
|
||||||
size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
|
size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
|
||||||
size_t segment_info_slices; // initial slices we are using segment info and possible guard pages.
|
size_t segment_info_slices; // initial count of slices that we are using for segment info and possible guard pages.
|
||||||
|
|
||||||
// layout like this to optimize access in `mi_free`
|
// layout like this to optimize access in `mi_free`
|
||||||
mi_segment_kind_t kind;
|
mi_segment_kind_t kind;
|
||||||
size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
|
size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
|
||||||
_Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
|
_Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
|
||||||
|
|
||||||
mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one more for huge blocks with large alignment
|
mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one extra final entry for huge blocks with large alignment
|
||||||
} mi_segment_t;
|
} mi_segment_t;
|
||||||
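The `slice_offset` back-pointer described in the comment above can be illustrated with a small sketch (hypothetical minimal types, not the mimalloc structs):

#include <stdint.h>

typedef struct slice_s {
  uint32_t slice_count;    // > 0 only in the first slice of an area
  uint32_t slice_offset;   // byte offset back to that first slice (0 if this is the first)
} slice_t;

// From any interior slice, step back to the slice that holds the page/area info.
static slice_t* first_slice_of(slice_t* s) {
  return (slice_t*)((uint8_t*)s - s->slice_offset);
}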
|
|
||||||
|
|
||||||
|
@ -498,8 +512,6 @@ typedef struct mi_padding_s {
|
||||||
// A heap owns a set of pages.
|
// A heap owns a set of pages.
|
||||||
struct mi_heap_s {
|
struct mi_heap_s {
|
||||||
mi_tld_t* tld;
|
mi_tld_t* tld;
|
||||||
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
|
|
||||||
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
|
|
||||||
_Atomic(mi_block_t*) thread_delayed_free;
|
_Atomic(mi_block_t*) thread_delayed_free;
|
||||||
mi_threadid_t thread_id; // thread this heap belongs too
|
mi_threadid_t thread_id; // thread this heap belongs too
|
||||||
mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
|
mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
|
||||||
|
@ -511,6 +523,8 @@ struct mi_heap_s {
|
||||||
size_t page_retired_max; // largest retired index into the `pages` array.
|
size_t page_retired_max; // largest retired index into the `pages` array.
|
||||||
mi_heap_t* next; // list of heaps per thread
|
mi_heap_t* next; // list of heaps per thread
|
||||||
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
|
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
|
||||||
|
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
|
||||||
|
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
|
||||||
};
|
};
|
||||||
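Moving `pages_free_direct` keeps the small-size fast path a direct table lookup; a sketch of that lookup (the table size and rounding below are assumptions for illustration, not the mimalloc constants):

#include <stddef.h>

#define WSIZE        sizeof(void*)
#define PAGES_DIRECT 129                     // assumed to cover small sizes up to 128 words

typedef struct page_s page_t;                // opaque here

static page_t* pages_free_direct[PAGES_DIRECT];

// Map a small request size to a page that may have free blocks, without a bin search.
static page_t* get_free_small_page(size_t size) {
  size_t idx = (size + WSIZE - 1) / WSIZE;   // round up to a word count
  return (idx < PAGES_DIRECT ? pages_free_direct[idx] : NULL);
}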
|
|
||||||
|
|
||||||
|
@ -626,6 +640,7 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
|
||||||
#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
|
#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
|
||||||
#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
|
#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
|
||||||
|
|
||||||
|
|
||||||
// ------------------------------------------------------
|
// ------------------------------------------------------
|
||||||
// Thread Local data
|
// Thread Local data
|
||||||
// ------------------------------------------------------
|
// ------------------------------------------------------
|
||||||
|
|
|
@ -33,7 +33,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
|
||||||
|
|
||||||
void* p;
|
void* p;
|
||||||
size_t oversize;
|
size_t oversize;
|
||||||
if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
|
if mi_unlikely(alignment > MI_BLOCK_ALIGNMENT_MAX) {
|
||||||
// use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
|
// use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
|
||||||
// This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
|
// This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
|
||||||
// first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
|
// first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
|
||||||
|
@ -47,7 +47,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
|
||||||
oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
|
oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
|
||||||
p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
|
p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
|
||||||
// zero afterwards as only the area from the aligned_p may be committed!
|
// zero afterwards as only the area from the aligned_p may be committed!
|
||||||
if (p == NULL) return NULL;
|
if (p == NULL) return NULL;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
// otherwise over-allocate
|
// otherwise over-allocate
|
||||||
|
@ -69,13 +69,13 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
|
||||||
// todo: expand padding if overallocated ?
|
// todo: expand padding if overallocated ?
|
||||||
|
|
||||||
mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
|
mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
|
||||||
mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
|
mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
|
||||||
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
|
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
|
||||||
mi_assert_internal(mi_usable_size(aligned_p)>=size);
|
mi_assert_internal(mi_usable_size(aligned_p)>=size);
|
||||||
mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
|
mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
|
||||||
|
|
||||||
// now zero the block if needed
|
// now zero the block if needed
|
||||||
if (alignment > MI_ALIGNMENT_MAX) {
|
if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
|
||||||
// for the tracker, on huge aligned allocations only from the start of the large block is defined
|
// for the tracker, on huge aligned allocations only from the start of the large block is defined
|
||||||
mi_track_mem_undefined(aligned_p, size);
|
mi_track_mem_undefined(aligned_p, size);
|
||||||
if (zero) {
|
if (zero) {
|
||||||
|
@ -85,7 +85,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
|
||||||
|
|
||||||
if (p != aligned_p) {
|
if (p != aligned_p) {
|
||||||
mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
|
mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
|
||||||
}
|
}
|
||||||
return aligned_p;
|
return aligned_p;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
src/alloc.c (554 changed lines)
|
@ -1,5 +1,5 @@
|
||||||
/* ----------------------------------------------------------------------------
|
/* ----------------------------------------------------------------------------
|
||||||
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
|
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
|
||||||
This is free software; you can redistribute it and/or modify it under the
|
This is free software; you can redistribute it and/or modify it under the
|
||||||
terms of the MIT license. A copy of the license can be found in the file
|
terms of the MIT license. A copy of the license can be found in the file
|
||||||
"LICENSE" at the root of this distribution.
|
"LICENSE" at the root of this distribution.
|
||||||
|
@ -18,6 +18,7 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
|
|
||||||
#define MI_IN_ALLOC_C
|
#define MI_IN_ALLOC_C
|
||||||
#include "alloc-override.c"
|
#include "alloc-override.c"
|
||||||
|
#include "free.c"
|
||||||
#undef MI_IN_ALLOC_C
|
#undef MI_IN_ALLOC_C
|
||||||
|
|
||||||
// ------------------------------------------------------
|
// ------------------------------------------------------
|
||||||
|
@ -26,16 +27,18 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
|
|
||||||
// Fast allocation in a page: just pop from the free list.
|
// Fast allocation in a page: just pop from the free list.
|
||||||
// Fall back to generic allocation only if the list is empty.
|
// Fall back to generic allocation only if the list is empty.
|
||||||
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
|
// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
|
||||||
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
|
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
|
||||||
|
{
|
||||||
|
mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
|
||||||
mi_block_t* const block = page->free;
|
mi_block_t* const block = page->free;
|
||||||
if mi_unlikely(block == NULL) {
|
if mi_unlikely(block == NULL) {
|
||||||
return _mi_malloc_generic(heap, size, zero, 0);
|
return _mi_malloc_generic(heap, size, zero, 0);
|
||||||
}
|
}
|
||||||
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
|
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
|
||||||
// pop from the free list
|
// pop from the free list
|
||||||
page->used++;
|
|
||||||
page->free = mi_block_next(page, block);
|
page->free = mi_block_next(page, block);
|
||||||
|
page->used++;
|
||||||
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
|
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
|
||||||
#if MI_DEBUG>3
|
#if MI_DEBUG>3
|
||||||
if (page->free_is_zero) {
|
if (page->free_is_zero) {
|
||||||
|
@ -50,54 +53,54 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
|
||||||
|
|
||||||
// zero the block? note: we need to zero the full block size (issue #63)
|
// zero the block? note: we need to zero the full block size (issue #63)
|
||||||
if mi_unlikely(zero) {
|
if mi_unlikely(zero) {
|
||||||
mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
|
mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
|
||||||
mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
|
mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
|
||||||
if (page->free_is_zero) {
|
if (page->free_is_zero) {
|
||||||
block->next = 0;
|
block->next = 0;
|
||||||
mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
|
mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
_mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
|
_mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
|
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
|
||||||
if (!zero && !mi_page_is_huge(page)) {
|
if (!zero && !mi_page_is_huge(page)) {
|
||||||
memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
|
memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
|
||||||
}
|
}
|
||||||
#elif (MI_SECURE!=0)
|
#elif (MI_SECURE!=0)
|
||||||
if (!zero) { block->next = 0; } // don't leak internal data
|
if (!zero) { block->next = 0; } // don't leak internal data
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if (MI_STAT>0)
|
#if (MI_STAT>0)
|
||||||
const size_t bsize = mi_page_usable_block_size(page);
|
const size_t bsize = mi_page_usable_block_size(page);
|
||||||
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
|
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
|
||||||
mi_heap_stat_increase(heap, normal, bsize);
|
mi_heap_stat_increase(heap, normal, bsize);
|
||||||
mi_heap_stat_counter_increase(heap, normal_count, 1);
|
mi_heap_stat_counter_increase(heap, normal_count, 1);
|
||||||
#if (MI_STAT>1)
|
#if (MI_STAT>1)
|
||||||
const size_t bin = _mi_bin(bsize);
|
const size_t bin = _mi_bin(bsize);
|
||||||
mi_heap_stat_increase(heap, normal_bins[bin], 1);
|
mi_heap_stat_increase(heap, normal_bins[bin], 1);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if MI_PADDING // && !MI_TRACK_ENABLED
|
#if MI_PADDING // && !MI_TRACK_ENABLED
|
||||||
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
|
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
|
||||||
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
|
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
|
||||||
#if (MI_DEBUG>=2)
|
#if (MI_DEBUG>=2)
|
||||||
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
|
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
|
||||||
#endif
|
#endif
|
||||||
mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
|
mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
|
||||||
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
|
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
|
||||||
padding->delta = (uint32_t)(delta);
|
padding->delta = (uint32_t)(delta);
|
||||||
#if MI_PADDING_CHECK
|
#if MI_PADDING_CHECK
|
||||||
if (!mi_page_is_huge(page)) {
|
if (!mi_page_is_huge(page)) {
|
||||||
uint8_t* fill = (uint8_t*)padding - delta;
|
uint8_t* fill = (uint8_t*)padding - delta;
|
||||||
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
|
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
|
||||||
for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
|
for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
#endif
|
#endif
|
||||||
#endif
|
|
||||||
|
|
||||||
return block;
|
return block;
|
||||||
}
|
}
|
||||||
|
@ -112,9 +115,11 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
|
||||||
#if (MI_PADDING)
|
#if (MI_PADDING)
|
||||||
if (size == 0) { size = sizeof(void*); }
|
if (size == 0) { size = sizeof(void*); }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
|
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
|
||||||
void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
|
void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
|
||||||
mi_track_malloc(p,size,zero);
|
mi_track_malloc(p,size,zero);
|
||||||
|
|
||||||
#if MI_STAT>1
|
#if MI_STAT>1
|
||||||
if (p != NULL) {
|
if (p != NULL) {
|
||||||
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
|
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
|
||||||
|
@ -190,500 +195,6 @@ mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// ------------------------------------------------------
|
|
||||||
// Check for double free in secure and debug mode
|
|
||||||
// This is somewhat expensive so only enabled for secure mode 4
|
|
||||||
// ------------------------------------------------------
|
|
||||||
|
|
||||||
#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
|
|
||||||
// linear check if the free list contains a specific element
|
|
||||||
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
|
|
||||||
while (list != NULL) {
|
|
||||||
if (elem==list) return true;
|
|
||||||
list = mi_block_next(page, list);
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
|
|
||||||
// The decoded value is in the same page (or NULL).
|
|
||||||
// Walk the free lists to verify positively if it is already freed
|
|
||||||
if (mi_list_contains(page, page->free, block) ||
|
|
||||||
mi_list_contains(page, page->local_free, block) ||
|
|
||||||
mi_list_contains(page, mi_page_thread_free(page), block))
|
|
||||||
{
|
|
||||||
_mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
|
|
||||||
|
|
||||||
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
|
|
||||||
bool is_double_free = false;
|
|
||||||
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
|
|
||||||
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
|
|
||||||
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
|
|
||||||
{
|
|
||||||
// Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free?
|
|
||||||
// (continue in separate function to improve code generation)
|
|
||||||
is_double_free = mi_check_is_double_freex(page, block);
|
|
||||||
}
|
|
||||||
return is_double_free;
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
|
|
||||||
MI_UNUSED(page);
|
|
||||||
MI_UNUSED(block);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
// Check for heap block overflow by setting up padding at the end of the block
|
|
||||||
// ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
#if MI_PADDING // && !MI_TRACK_ENABLED
|
|
||||||
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
|
|
||||||
*bsize = mi_page_usable_block_size(page);
|
|
||||||
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
|
|
||||||
mi_track_mem_defined(padding,sizeof(mi_padding_t));
|
|
||||||
*delta = padding->delta;
|
|
||||||
uint32_t canary = padding->canary;
|
|
||||||
uintptr_t keys[2];
|
|
||||||
keys[0] = page->keys[0];
|
|
||||||
keys[1] = page->keys[1];
|
|
||||||
bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
|
|
||||||
mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
|
|
||||||
return ok;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return the exact usable size of a block.
|
|
||||||
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
|
|
||||||
size_t bsize;
|
|
||||||
size_t delta;
|
|
||||||
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
|
|
||||||
mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
|
|
||||||
return (ok ? bsize - delta : 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  mi_track_mem_defined(padding,sizeof(mi_padding_t));
  padding->delta = (uint32_t)new_delta;
  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
}
#else
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  return mi_page_usable_block_size(page);
}

void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  MI_UNUSED(min_size);
}
#endif

#if MI_PADDING && MI_PADDING_CHECK

static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  if (!mi_page_is_huge(page)) {
    uint8_t* fill = (uint8_t*)block + bsize - delta;
    const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
    mi_track_mem_defined(fill, maxpad);
    for (size_t i = 0; i < maxpad; i++) {
      if (fill[i] != MI_DEBUG_PADDING) {
        *wrong = bsize - delta + i;
        ok = false;
        break;
      }
    }
    mi_track_mem_noaccess(fill, maxpad);
  }
  return ok;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

#else

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
}

#endif

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  #if (MI_STAT < 2)
  MI_UNUSED(block);
  #endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
  #if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc, usize);
  #endif
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, normal, bsize);
    #if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
    #endif
  }
  else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
}
#else
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif

#if MI_HUGE_PAGE_ABANDON
#if (MI_STAT>0)
// maintain stats for huge objects
static void mi_stat_huge_free(const mi_page_t* page) {
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, large, bsize);
  }
  else {
    mi_heap_stat_decrease(heap, huge, bsize);
  }
}
#else
static void mi_stat_huge_free(const mi_page_t* page) {
  MI_UNUSED(page);
}
#endif
#endif

// ------------------------------------------------------
// Free
// ------------------------------------------------------

// multi-threaded free (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
  // first see if the segment was abandoned and we can reclaim it
  mi_segment_t* const segment = _mi_page_segment(page);
  if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
      #if MI_HUGE_PAGE_ABANDON
      segment->page_kind != MI_PAGE_HUGE &&
      #endif
      mi_atomic_load_relaxed(&segment->thread_id) == 0)
  {
    // the segment is abandoned, try to reclaim it into our heap
    mi_heap_t* heap = mi_heap_get_default();
    if (heap->tld != NULL && _mi_segment_attempt_reclaim(heap, segment)) {
      mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
      mi_free(block);  // recursively free as now it will be a local free in our heap
      return;
    }
  }

  // The padding check may access the non-thread-owned page for the key values.
  // That is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);
  _mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small sizes, ensure we can fit the delayed thread pointers without triggering overflow detection

  // huge page segments are always abandoned and can be freed immediately
  if (segment->kind == MI_SEGMENT_HUGE) {
    #if MI_HUGE_PAGE_ABANDON
    // huge page segments are always abandoned and can be freed immediately
    mi_stat_huge_free(page);
    _mi_segment_huge_page_free(segment, page, block);
    return;
    #else
    // huge pages are special as they occupy the entire segment
    // as these are large we reset the memory occupied by the page so it is available to other threads
    // (as the owning thread needs to actually free the memory later).
    _mi_segment_huge_page_reset(segment, page, block);
    #endif
  }

  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
  if (segment->kind != MI_SEGMENT_HUGE) {  // not for huge segments as we just reset the content
    memset(block, MI_DEBUG_FREED, mi_usable_size(block));
  }
  #endif

  // Try to put the block on either the page-local thread free list, or the heap delayed free list.
  mi_thread_free_t tfreex;
  bool use_delayed;
  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
    if mi_unlikely(use_delayed) {
      // unlikely: this only happens on the first concurrent free in a page that is in the full list
      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
    }
    else {
      // usual: directly add to page thread_free list
      mi_block_set_next(page, block, mi_tf_block(tfree));
      tfreex = mi_tf_set_block(tfree,block);
    }
  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

  if mi_unlikely(use_delayed) {
    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
    mi_assert_internal(heap != NULL);
    if (heap != NULL) {
      // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
      mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
      do {
        mi_block_set_nextx(heap,block,dfree, heap->keys);
      } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
    }

    // and reset the MI_DELAYED_FREEING flag
    tfree = mi_atomic_load_relaxed(&page->xthread_free);
    do {
      tfreex = tfree;
      mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
      tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
    } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
  }
}

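// In summary, `page->xthread_free` carries both the thread free list and a delayed-free state:
// MI_USE_DELAYED_FREE means the first concurrent free goes to the owning heap's
// `thread_delayed_free` list (passing through MI_DELAYED_FREEING while it is being pushed),
// after which MI_NO_DELAYED_FREE lets subsequent concurrent frees push directly onto the
// page-local thread free list; the owning thread later collects both lists.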
// regular free
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
  // and push it on the free list
  //const size_t bsize = mi_page_block_size(page);
  if mi_likely(local) {
    // owning thread can free a block directly
    if mi_unlikely(mi_check_is_double_free(page, block)) return;
    mi_check_padding(page, block);
    #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
    if (!mi_page_is_huge(page)) {  // huge page content may be already decommitted
      memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
    }
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
    page->used--;
    if mi_unlikely(mi_page_all_free(page)) {
      _mi_page_retire(page);
    }
    else if mi_unlikely(mi_page_is_in_full(page)) {
      _mi_page_unfull(page);
    }
  }
  else {
    _mi_free_block_mt(page,block);
  }
}

// Adjust a block that was allocated aligned, to the actual start of the block in the page.
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);
  const size_t diff   = (uint8_t*)p - _mi_page_start(segment, page, NULL);
  const size_t adjust = (diff % mi_page_block_size(page));
  return (mi_block_t*)((uintptr_t)p - adjust);
}

void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
  mi_stat_free(page, block);  // stat_free may access the padding
  mi_track_free_size(block, mi_page_usable_size_of(page,block));
  _mi_free_block(page, is_local, block);
}

// Get the segment data belonging to a pointer
// This is just a single `and` in assembly but does further checks in debug mode
// (and secure mode) to see if this was a valid pointer.
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
  MI_UNUSED(msg);
  mi_assert(p != NULL);

  #if (MI_DEBUG>0)
  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
    _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
    return NULL;
  }
  #endif

  mi_segment_t* const segment = _mi_ptr_segment(p);
  mi_assert_internal(segment != NULL);

  #if (MI_DEBUG>0)
  if mi_unlikely(!mi_is_in_heap_region(p)) {
    #if (MI_INTPTR_SIZE == 8 && defined(__linux__))
    if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640)
    #else
    {
    #endif
      _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
        "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
      if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
        _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
      }
    }
  }
  #endif
  #if (MI_DEBUG>0 || MI_SECURE>=4)
  if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
    return NULL;
  }
  #endif

  return segment;
}

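// Roughly speaking (illustrative, not the literal definition), `_mi_ptr_segment` recovers the
// segment by masking the pointer down to the segment alignment, e.g.
//   segment = (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK);
// which is why the release-mode lookup is "just a single `and`".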
// Free a block
// fast path written carefully to prevent spilling on the stack
void mi_free(void* p) mi_attr_noexcept
{
  if mi_unlikely(p == NULL) return;
  mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
  const bool          is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
  mi_page_t* const    page     = _mi_segment_page_of(segment, p);

  if mi_likely(is_local) {                       // thread-local free?
    if mi_likely(page->flags.full_aligned == 0)  // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
    {
      mi_block_t* const block = (mi_block_t*)p;
      if mi_unlikely(mi_check_is_double_free(page, block)) return;
      mi_check_padding(page, block);
      mi_stat_free(page, block);
      #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
      memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
      #endif
      mi_track_free_size(p, mi_page_usable_size_of(page,block));  // faster than mi_usable_size as we already know the page and that p is unaligned
      mi_block_set_next(page, block, page->local_free);
      page->local_free = block;
      if mi_unlikely(--page->used == 0) {  // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
        _mi_page_retire(page);
      }
    }
    else {
      // page is full or contains (inner) aligned blocks; use generic path
      _mi_free_generic(segment, page, true, p);
    }
  }
  else {
    // not thread-local; use generic path
    _mi_free_generic(segment, page, false, p);
  }
}

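// Example (illustrative only, not part of the allocator): a typical round trip through this
// fast path from user code looks like
//   void* p   = mi_malloc(32);
//   size_t n  = mi_usable_size(p);  // >= 32; excludes the internal padding
//   mi_free(p);                     // takes the thread-local fast path when freed on the allocating thread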
// return true if successful
bool _mi_free_delayed_block(mi_block_t* block) {
  // get segment and page
  const mi_segment_t* const segment = _mi_ptr_segment(block);
  mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
  mi_assert_internal(_mi_thread_id() == segment->thread_id);
  mi_page_t* const page = _mi_segment_page_of(segment, block);

  // Clear the no-delayed flag so delayed freeing is used again for this page.
  // This must be done before collecting the free lists on this page -- otherwise
  // some blocks may end up in the page `thread_free` list with no blocks in the
  // heap `thread_delayed_free` list, which may cause the page to never be freed!
  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
  if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never-delayed */)) {
    return false;
  }

  // collect all other non-local frees to ensure an up-to-date `used` count
  _mi_page_free_collect(page, false);

  // and free the block (possibly freeing the page as well since `used` is updated)
  _mi_free_block(page, true, block);
  return true;
}

// Bytes available in a block
mi_decl_noinline static size_t mi_page_usable_aligned_size_of(const mi_segment_t* segment, const mi_page_t* page, const void* p) mi_attr_noexcept {
  const mi_block_t* block = _mi_page_ptr_unalign(segment, page, p);
  const size_t size = mi_page_usable_size_of(page, block);
  const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
  mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
  return (size - adjust);
}

static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
  if (p == NULL) return 0;
  const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
  const mi_page_t* const page = _mi_segment_page_of(segment, p);
  if mi_likely(!mi_page_has_aligned(page)) {
    const mi_block_t* block = (const mi_block_t*)p;
    return mi_page_usable_size_of(page, block);
  }
  else {
    // split out to separate routine for improved code generation
    return mi_page_usable_aligned_size_of(segment, page, p);
  }
}

mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
  return _mi_usable_size(p, "mi_usable_size");
}

// ------------------------------------------------------
// Allocation extensions
// ------------------------------------------------------

void mi_free_size(void* p, size_t size) mi_attr_noexcept {
  MI_UNUSED_RELEASE(size);
  mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
  mi_free(p);
}

void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free_size(p,size);
}

void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
  MI_UNUSED_RELEASE(alignment);
  mi_assert(((uintptr_t)p % alignment) == 0);
  mi_free(p);
}

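// Note: the sized and aligned free variants above only validate their extra arguments with
// `mi_assert` (active only in debug builds); in release builds they compile down to a plain
// `mi_free`, so for example `mi_free_size(p, n)` is just a checked hint that `n` does not
// exceed the usable size of `p`.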
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count,size,&total)) return NULL;

@ -885,7 +396,8 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
  char* rname = realpath(fname, NULL);
  if (rname == NULL) return NULL;
  char* result = mi_heap_strdup(heap, rname);
  free(rname);      // use regular free! (which may be redirected to our free but that's ok)
  mi_cfree(rname);  // use checked free (which may be redirected to our free but that's ok)
  // note: with ASAN realpath is intercepted and mi_cfree may leak the returned pointer :-(
  return result;
}
/*

src/free.c  (new file, 530 lines)
@ -0,0 +1,530 @@
/* ----------------------------------------------------------------------------
|
||||||
|
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
|
||||||
|
This is free software; you can redistribute it and/or modify it under the
|
||||||
|
terms of the MIT license. A copy of the license can be found in the file
|
||||||
|
"LICENSE" at the root of this distribution.
|
||||||
|
-----------------------------------------------------------------------------*/
|
||||||
|
#if !defined(MI_IN_ALLOC_C)
|
||||||
|
#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)"
|
||||||
|
// add includes to help an IDE
|
||||||
|
#include "mimalloc.h"
|
||||||
|
#include "mimalloc/internal.h"
|
||||||
|
#include "mimalloc/atomic.h"
|
||||||
|
#include "mimalloc/prim.h" // _mi_prim_thread_id()
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// forward declarations
|
||||||
|
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block);
|
||||||
|
static bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block);
|
||||||
|
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block);
|
||||||
|
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block);
|
||||||
|
|
||||||
|
|
||||||
|
// ------------------------------------------------------
|
||||||
|
// Free
|
||||||
|
// ------------------------------------------------------
|
||||||
|
|
||||||
|
// forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
|
||||||
|
static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block);
|
||||||
|
|
||||||
|
// regular free of a (thread local) block pointer
|
||||||
|
// fast path written carefully to prevent spilling on the stack
|
||||||
|
static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full)
|
||||||
|
{
|
||||||
|
// checks
|
||||||
|
if mi_unlikely(mi_check_is_double_free(page, block)) return;
|
||||||
|
mi_check_padding(page, block);
|
||||||
|
if (track_stats) { mi_stat_free(page, block); }
|
||||||
|
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
|
||||||
|
if (!mi_page_is_huge(page)) { // huge page content may be already decommitted
|
||||||
|
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster than mi_usable_size as we already know the page and that p is unaligned
|
||||||
|
|
||||||
|
// actual free: push on the local free list
|
||||||
|
mi_block_set_next(page, block, page->local_free);
|
||||||
|
page->local_free = block;
|
||||||
|
if mi_unlikely(--page->used == 0) {
|
||||||
|
_mi_page_retire(page);
|
||||||
|
}
|
||||||
|
else if mi_unlikely(check_full && mi_page_is_in_full(page)) {
|
||||||
|
_mi_page_unfull(page);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adjust a block that was allocated aligned, to the actual start of the block in the page.
|
||||||
|
// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
|
||||||
|
// `page_start` and `block_size` fields; however these are constant and the page won't be
|
||||||
|
// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
|
||||||
|
mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
|
||||||
|
mi_assert_internal(page!=NULL && p!=NULL);
|
||||||
|
|
||||||
|
size_t diff = (uint8_t*)p - page->page_start;
|
||||||
|
size_t adjust;
|
||||||
|
if mi_likely(page->block_size_shift != 0) {
|
||||||
|
adjust = diff & (((size_t)1 << page->block_size_shift) - 1);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
adjust = diff % mi_page_block_size(page);
|
||||||
|
}
|
||||||
|
|
||||||
|
return (mi_block_t*)((uintptr_t)p - adjust);
|
||||||
|
}
|
||||||
|
|
||||||
|
// free a local pointer (page parameter comes first for better codegen)
|
||||||
|
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
|
||||||
|
MI_UNUSED(segment);
|
||||||
|
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
|
||||||
|
mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
|
||||||
|
}
|
||||||
|
|
||||||
|
// free a pointer owned by another thread (page parameter comes first for better codegen)
|
||||||
|
static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
|
||||||
|
mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
|
||||||
|
mi_free_block_mt(page, segment, block);
|
||||||
|
}
|
||||||
|
|
||||||
|
// generic free (for runtime integration)
|
||||||
|
void mi_decl_noinline _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
|
||||||
|
if (is_local) mi_free_generic_local(page,segment,p);
|
||||||
|
else mi_free_generic_mt(page,segment,p);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the segment data belonging to a pointer
|
||||||
|
// This is just a single `and` in release mode but does further checks in debug mode
|
||||||
|
// (and secure mode) to see if this was a valid pointer.
|
||||||
|
static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
|
||||||
|
{
|
||||||
|
MI_UNUSED(msg);
|
||||||
|
|
||||||
|
#if (MI_DEBUG>0)
|
||||||
|
if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
|
||||||
|
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
mi_segment_t* const segment = _mi_ptr_segment(p);
|
||||||
|
if mi_unlikely(segment==NULL) return segment;
|
||||||
|
|
||||||
|
#if (MI_DEBUG>0)
|
||||||
|
if mi_unlikely(!mi_is_in_heap_region(p)) {
|
||||||
|
#if (MI_INTPTR_SIZE == 8 && defined(__linux__))
|
||||||
|
if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640)
|
||||||
|
#else
|
||||||
|
{
|
||||||
|
#endif
|
||||||
|
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
|
||||||
|
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
|
||||||
|
if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
|
||||||
|
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
#if (MI_DEBUG>0 || MI_SECURE>=4)
|
||||||
|
if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
|
||||||
|
_mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
return segment;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free a block
|
||||||
|
// Fast path written carefully to prevent register spilling on the stack
|
||||||
|
void mi_free(void* p) mi_attr_noexcept
|
||||||
|
{
|
||||||
|
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
|
||||||
|
if mi_unlikely(segment==NULL) return;
|
||||||
|
|
||||||
|
const bool is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
|
||||||
|
mi_page_t* const page = _mi_segment_page_of(segment, p);
|
||||||
|
|
||||||
|
if mi_likely(is_local) { // thread-local free?
|
||||||
|
if mi_likely(page->flags.full_aligned == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
|
||||||
|
// thread-local, no aligned blocks, and not a full page
|
||||||
|
mi_block_t* const block = (mi_block_t*)p;
|
||||||
|
mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
// page is full or contains (inner) aligned blocks; use generic path
|
||||||
|
mi_free_generic_local(page, segment, p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
// not thread-local; use generic path
|
||||||
|
mi_free_generic_mt(page, segment, p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// return true if successful
|
||||||
|
bool _mi_free_delayed_block(mi_block_t* block) {
|
||||||
|
// get segment and page
|
||||||
|
mi_assert_internal(block!=NULL);
|
||||||
|
const mi_segment_t* const segment = _mi_ptr_segment(block);
|
||||||
|
mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
|
||||||
|
mi_assert_internal(_mi_thread_id() == segment->thread_id);
|
||||||
|
mi_page_t* const page = _mi_segment_page_of(segment, block);
|
||||||
|
|
||||||
|
// Clear the no-delayed flag so delayed freeing is used again for this page.
|
||||||
|
// This must be done before collecting the free lists on this page -- otherwise
|
||||||
|
// some blocks may end up in the page `thread_free` list with no blocks in the
|
||||||
|
// heap `thread_delayed_free` list which may cause the page to be never freed!
|
||||||
|
// (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
|
||||||
|
if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never-delayed */)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// collect all other non-local frees (move from `thread_free` to `free`) to ensure up-to-date `used` count
|
||||||
|
_mi_page_free_collect(page, false);
|
||||||
|
|
||||||
|
// and free the block (possibly freeing the page as well since `used` is updated)
|
||||||
|
mi_free_block_local(page, block, false /* stats have already been adjusted */, true /* check for a full page */);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------
|
||||||
|
// Multi-threaded Free (`_mt`)
|
||||||
|
// ------------------------------------------------------
|
||||||
|
|
||||||
|
// Push a block that is owned by another thread on its page-local thread free
|
||||||
|
// list or its heap delayed free list. Such blocks are later collected by
|
||||||
|
// the owning thread in `_mi_free_delayed_block`.
|
||||||
|
static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
|
||||||
|
{
|
||||||
|
// Try to put the block on either the page-local thread free list,
|
||||||
|
// or the heap delayed free list (if this is the first non-local free in that page)
|
||||||
|
mi_thread_free_t tfreex;
|
||||||
|
bool use_delayed;
|
||||||
|
mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
|
||||||
|
do {
|
||||||
|
use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
|
||||||
|
if mi_unlikely(use_delayed) {
|
||||||
|
// unlikely: this only happens on the first concurrent free in a page that is in the full list
|
||||||
|
tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
// usual: directly add to page thread_free list
|
||||||
|
mi_block_set_next(page, block, mi_tf_block(tfree));
|
||||||
|
tfreex = mi_tf_set_block(tfree,block);
|
||||||
|
}
|
||||||
|
} while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
|
||||||
|
|
||||||
|
// If this was the first non-local free, we need to push it on the heap delayed free list instead
|
||||||
|
if mi_unlikely(use_delayed) {
|
||||||
|
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
|
||||||
|
mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
|
||||||
|
mi_assert_internal(heap != NULL);
|
||||||
|
if (heap != NULL) {
|
||||||
|
// add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
|
||||||
|
mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
|
||||||
|
do {
|
||||||
|
mi_block_set_nextx(heap,block,dfree, heap->keys);
|
||||||
|
} while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block));
|
||||||
|
}
|
||||||
|
|
||||||
|
// and reset the MI_DELAYED_FREEING flag
|
||||||
|
tfree = mi_atomic_load_relaxed(&page->xthread_free);
|
||||||
|
do {
|
||||||
|
tfreex = tfree;
|
||||||
|
mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING);
|
||||||
|
tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE);
|
||||||
|
} while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
|
||||||
|
static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block)
|
||||||
|
{
|
||||||
|
// first see if the segment was abandoned and if we can reclaim it into our thread
|
||||||
|
if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
|
||||||
|
#if MI_HUGE_PAGE_ABANDON
|
||||||
|
segment->page_kind != MI_PAGE_HUGE &&
|
||||||
|
#endif
|
||||||
|
mi_atomic_load_relaxed(&segment->thread_id) == 0)
|
||||||
|
{
|
||||||
|
// the segment is abandoned, try to reclaim it into our heap
|
||||||
|
if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) {
|
||||||
|
mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
|
||||||
|
mi_free(block); // recursively free as now it will be a local free in our heap
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The padding check may access the non-thread-owned page for the key values.
|
||||||
|
// That is safe as these are constant and the page won't be freed (as the block is not freed yet).
|
||||||
|
mi_check_padding(page, block);
|
||||||
|
|
||||||
|
// adjust stats (after padding check and potentially recursive `mi_free` above)
|
||||||
|
mi_stat_free(page, block); // stat_free may access the padding
|
||||||
|
mi_track_free_size(block, mi_page_usable_size_of(page,block));
|
||||||
|
|
||||||
|
// for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
|
||||||
|
_mi_padding_shrink(page, block, sizeof(mi_block_t));
|
||||||
|
|
||||||
|
if (segment->kind == MI_SEGMENT_HUGE) {
|
||||||
|
#if MI_HUGE_PAGE_ABANDON
|
||||||
|
// huge page segments are always abandoned and can be freed immediately
|
||||||
|
_mi_segment_huge_page_free(segment, page, block);
|
||||||
|
return;
|
||||||
|
#else
|
||||||
|
// huge pages are special as they occupy the entire segment
|
||||||
|
// as these are large we reset the memory occupied by the page so it is available to other threads
|
||||||
|
// (as the owning thread needs to actually free the memory later).
|
||||||
|
_mi_segment_huge_page_reset(segment, page, block);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
|
||||||
|
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
// and finally free the actual block by pushing it on the owning heap
|
||||||
|
// thread_delayed free list (or heap delayed free list)
|
||||||
|
mi_free_block_delayed_mt(page,block);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ------------------------------------------------------
|
||||||
|
// Usable size
|
||||||
|
// ------------------------------------------------------
|
||||||
|
|
||||||
|
// Bytes available in a block
|
||||||
|
static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept {
|
||||||
|
const mi_block_t* block = _mi_page_ptr_unalign(page, p);
|
||||||
|
const size_t size = mi_page_usable_size_of(page, block);
|
||||||
|
const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
|
||||||
|
mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
|
||||||
|
return (size - adjust);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
|
||||||
|
const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
|
||||||
|
if mi_unlikely(segment==NULL) return 0;
|
||||||
|
const mi_page_t* const page = _mi_segment_page_of(segment, p);
|
||||||
|
if mi_likely(!mi_page_has_aligned(page)) {
|
||||||
|
const mi_block_t* block = (const mi_block_t*)p;
|
||||||
|
return mi_page_usable_size_of(page, block);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
// split out to separate routine for improved code generation
|
||||||
|
return mi_page_usable_aligned_size_of(page, p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
|
||||||
|
return _mi_usable_size(p, "mi_usable_size");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ------------------------------------------------------
|
||||||
|
// Free variants
|
||||||
|
// ------------------------------------------------------
|
||||||
|
|
||||||
|
void mi_free_size(void* p, size_t size) mi_attr_noexcept {
|
||||||
|
MI_UNUSED_RELEASE(size);
|
||||||
|
mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
|
||||||
|
mi_free(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
|
||||||
|
MI_UNUSED_RELEASE(alignment);
|
||||||
|
mi_assert(((uintptr_t)p % alignment) == 0);
|
||||||
|
mi_free_size(p,size);
|
||||||
|
}
|
||||||
|
|
||||||
|
void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
|
||||||
|
MI_UNUSED_RELEASE(alignment);
|
||||||
|
mi_assert(((uintptr_t)p % alignment) == 0);
|
||||||
|
mi_free(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ------------------------------------------------------
|
||||||
|
// Check for double free in secure and debug mode
|
||||||
|
// This is somewhat expensive so only enabled for secure mode 4
|
||||||
|
// ------------------------------------------------------
|
||||||
|
|
||||||
|
#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
|
||||||
|
// linear check if the free list contains a specific element
|
||||||
|
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
|
||||||
|
while (list != NULL) {
|
||||||
|
if (elem==list) return true;
|
||||||
|
list = mi_block_next(page, list);
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
// The decoded value is in the same page (or NULL).
|
||||||
|
// Walk the free lists to verify positively if it is already freed
|
||||||
|
if (mi_list_contains(page, page->free, block) ||
|
||||||
|
mi_list_contains(page, page->local_free, block) ||
|
||||||
|
mi_list_contains(page, mi_page_thread_free(page), block))
|
||||||
|
{
|
||||||
|
_mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
|
||||||
|
|
||||||
|
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
bool is_double_free = false;
|
||||||
|
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
|
||||||
|
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
|
||||||
|
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
|
||||||
|
{
|
||||||
|
// Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
|
||||||
|
// (continue in separate function to improve code generation)
|
||||||
|
is_double_free = mi_check_is_double_freex(page, block);
|
||||||
|
}
|
||||||
|
return is_double_free;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
MI_UNUSED(page);
|
||||||
|
MI_UNUSED(block);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Check for heap block overflow by setting up padding at the end of the block
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#if MI_PADDING // && !MI_TRACK_ENABLED
|
||||||
|
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
|
||||||
|
*bsize = mi_page_usable_block_size(page);
|
||||||
|
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
|
||||||
|
mi_track_mem_defined(padding,sizeof(mi_padding_t));
|
||||||
|
*delta = padding->delta;
|
||||||
|
uint32_t canary = padding->canary;
|
||||||
|
uintptr_t keys[2];
|
||||||
|
keys[0] = page->keys[0];
|
||||||
|
keys[1] = page->keys[1];
|
||||||
|
bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
|
||||||
|
mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
|
||||||
|
return ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the exact usable size of a block.
|
||||||
|
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
size_t bsize;
|
||||||
|
size_t delta;
|
||||||
|
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
|
||||||
|
mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
|
||||||
|
return (ok ? bsize - delta : 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// When a non-thread-local block is freed, it becomes part of the thread delayed free
|
||||||
|
// list that is freed later by the owning heap. If the exact usable size is too small to
|
||||||
|
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
|
||||||
|
// so it will later not trigger an overflow error in `mi_free_block`.
|
||||||
|
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
|
||||||
|
size_t bsize;
|
||||||
|
size_t delta;
|
||||||
|
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
|
||||||
|
mi_assert_internal(ok);
|
||||||
|
if (!ok || (bsize - delta) >= min_size) return; // usually already enough space
|
||||||
|
mi_assert_internal(bsize >= min_size);
|
||||||
|
if (bsize < min_size) return; // should never happen
|
||||||
|
size_t new_delta = (bsize - min_size);
|
||||||
|
mi_assert_internal(new_delta < bsize);
|
||||||
|
mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
|
||||||
|
mi_track_mem_defined(padding,sizeof(mi_padding_t));
|
||||||
|
padding->delta = (uint32_t)new_delta;
|
||||||
|
mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
MI_UNUSED(block);
|
||||||
|
return mi_page_usable_block_size(page);
|
||||||
|
}
|
||||||
|
|
||||||
|
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
|
||||||
|
MI_UNUSED(page);
|
||||||
|
MI_UNUSED(block);
|
||||||
|
MI_UNUSED(min_size);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if MI_PADDING && MI_PADDING_CHECK
|
||||||
|
|
||||||
|
static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
|
||||||
|
size_t bsize;
|
||||||
|
size_t delta;
|
||||||
|
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
|
||||||
|
*size = *wrong = bsize;
|
||||||
|
if (!ok) return false;
|
||||||
|
mi_assert_internal(bsize >= delta);
|
||||||
|
*size = bsize - delta;
|
||||||
|
if (!mi_page_is_huge(page)) {
|
||||||
|
uint8_t* fill = (uint8_t*)block + bsize - delta;
|
||||||
|
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
|
||||||
|
mi_track_mem_defined(fill, maxpad);
|
||||||
|
for (size_t i = 0; i < maxpad; i++) {
|
||||||
|
if (fill[i] != MI_DEBUG_PADDING) {
|
||||||
|
*wrong = bsize - delta + i;
|
||||||
|
ok = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mi_track_mem_noaccess(fill, maxpad);
|
||||||
|
}
|
||||||
|
return ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
size_t size;
|
||||||
|
size_t wrong;
|
||||||
|
if (!mi_verify_padding(page,block,&size,&wrong)) {
|
||||||
|
_mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
MI_UNUSED(page);
|
||||||
|
MI_UNUSED(block);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// only maintain stats for smaller objects if requested
|
||||||
|
#if (MI_STAT>0)
|
||||||
|
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
#if (MI_STAT < 2)
|
||||||
|
MI_UNUSED(block);
|
||||||
|
#endif
|
||||||
|
mi_heap_t* const heap = mi_heap_get_default();
|
||||||
|
const size_t bsize = mi_page_usable_block_size(page);
|
||||||
|
#if (MI_STAT>1)
|
||||||
|
const size_t usize = mi_page_usable_size_of(page, block);
|
||||||
|
mi_heap_stat_decrease(heap, malloc, usize);
|
||||||
|
#endif
|
||||||
|
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
|
||||||
|
mi_heap_stat_decrease(heap, normal, bsize);
|
||||||
|
#if (MI_STAT > 1)
|
||||||
|
mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
|
||||||
|
mi_heap_stat_decrease(heap, large, bsize);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
mi_heap_stat_decrease(heap, huge, bsize);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
|
||||||
|
MI_UNUSED(page); MI_UNUSED(block);
|
||||||
|
}
|
||||||
|
#endif
|
src/heap.c  (15 changed lines)
@ -32,7 +32,7 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
|
||||||
#if MI_DEBUG>1
|
#if MI_DEBUG>1
|
||||||
size_t total = heap->page_count;
|
size_t total = heap->page_count;
|
||||||
size_t count = 0;
|
size_t count = 0;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
for (size_t i = 0; i <= MI_BIN_FULL; i++) {
|
for (size_t i = 0; i <= MI_BIN_FULL; i++) {
|
||||||
mi_page_queue_t* pq = &heap->pages[i];
|
mi_page_queue_t* pq = &heap->pages[i];
|
||||||
|
@ -120,11 +120,11 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
|
||||||
{
|
{
|
||||||
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
|
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
|
||||||
|
|
||||||
const bool force = collect >= MI_FORCE;
|
const bool force = collect >= MI_FORCE;
|
||||||
_mi_deferred_free(heap, force);
|
_mi_deferred_free(heap, force);
|
||||||
|
|
||||||
// note: never reclaim on collect but leave it to threads that need storage to reclaim
|
// note: never reclaim on collect but leave it to threads that need storage to reclaim
|
||||||
const bool force_main =
|
const bool force_main =
|
||||||
#ifdef NDEBUG
|
#ifdef NDEBUG
|
||||||
collect == MI_FORCE
|
collect == MI_FORCE
|
||||||
#else
|
#else
|
||||||
|
@ -474,8 +474,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
|
||||||
MI_UNUSED(heap);
|
MI_UNUSED(heap);
|
||||||
MI_UNUSED(pq);
|
MI_UNUSED(pq);
|
||||||
bool* found = (bool*)vfound;
|
bool* found = (bool*)vfound;
|
||||||
mi_segment_t* segment = _mi_page_segment(page);
|
void* start = mi_page_start(page);
|
||||||
void* start = _mi_page_start(segment, page, NULL);
|
|
||||||
void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
|
void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
|
||||||
*found = (p >= start && p < end);
|
*found = (p >= start && p < end);
|
||||||
return (!*found); // continue if not found
|
return (!*found); // continue if not found
|
||||||
|
@ -521,7 +520,7 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
|
||||||
const size_t bsize = mi_page_block_size(page);
|
const size_t bsize = mi_page_block_size(page);
|
||||||
const size_t ubsize = mi_page_usable_block_size(page); // without padding
|
const size_t ubsize = mi_page_usable_block_size(page); // without padding
|
||||||
size_t psize;
|
size_t psize;
|
||||||
uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
|
uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
|
||||||
|
|
||||||
if (page->capacity == 1) {
|
if (page->capacity == 1) {
|
||||||
// optimize page with one block
|
// optimize page with one block
|
||||||
|
@ -588,7 +587,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
|
||||||
xarea.page = page;
|
xarea.page = page;
|
||||||
xarea.area.reserved = page->reserved * bsize;
|
xarea.area.reserved = page->reserved * bsize;
|
||||||
xarea.area.committed = page->capacity * bsize;
|
xarea.area.committed = page->capacity * bsize;
|
||||||
xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
|
xarea.area.blocks = mi_page_start(page);
|
||||||
xarea.area.used = page->used; // number of blocks in use (#553)
|
xarea.area.used = page->used; // number of blocks in use (#553)
|
||||||
xarea.area.block_size = ubsize;
|
xarea.area.block_size = ubsize;
|
||||||
xarea.area.full_block_size = bsize;
|
xarea.area.full_block_size = bsize;
|
||||||
|
|
src/init.c  (26 changed lines)
|
@ -14,16 +14,19 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
|
|
||||||
// Empty page used to initialize the small free pages array
|
// Empty page used to initialize the small free pages array
|
||||||
const mi_page_t _mi_page_empty = {
|
const mi_page_t _mi_page_empty = {
|
||||||
0, false, false, false,
|
0,
|
||||||
|
false, false, false, false,
|
||||||
0, // capacity
|
0, // capacity
|
||||||
0, // reserved capacity
|
0, // reserved capacity
|
||||||
{ 0 }, // flags
|
{ 0 }, // flags
|
||||||
false, // is_zero
|
false, // is_zero
|
||||||
0, // retire_expire
|
0, // retire_expire
|
||||||
NULL, // free
|
NULL, // free
|
||||||
0, // used
|
|
||||||
0, // xblock_size
|
|
||||||
NULL, // local_free
|
NULL, // local_free
|
||||||
|
0, // used
|
||||||
|
0, // block size shift
|
||||||
|
0, // block_size
|
||||||
|
NULL, // page_start
|
||||||
#if (MI_PADDING || MI_ENCODE_FREELIST)
|
#if (MI_PADDING || MI_ENCODE_FREELIST)
|
||||||
{ 0, 0 },
|
{ 0, 0 },
|
||||||
#endif
|
#endif
|
||||||
|
@ -84,8 +87,9 @@ const mi_page_t _mi_page_empty = {
|
||||||
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
|
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
|
||||||
MI_STAT_COUNT_NULL(), \
|
MI_STAT_COUNT_NULL(), \
|
||||||
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
|
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
|
||||||
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
|
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
|
||||||
{ 0, 0 }, { 0, 0 }, { 0, 0 } \
|
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
|
||||||
|
{ 0, 0 } \
|
||||||
MI_STAT_COUNT_END_NULL()
|
MI_STAT_COUNT_END_NULL()
|
||||||
|
|
||||||
|
|
||||||
|
@ -111,8 +115,6 @@ const mi_page_t _mi_page_empty = {
|
||||||
|
|
||||||
mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
|
mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
|
||||||
NULL,
|
NULL,
|
||||||
MI_SMALL_PAGES_EMPTY,
|
|
||||||
MI_PAGE_QUEUES_EMPTY,
|
|
||||||
MI_ATOMIC_VAR_INIT(NULL),
|
MI_ATOMIC_VAR_INIT(NULL),
|
||||||
0, // tid
|
0, // tid
|
||||||
0, // cookie
|
0, // cookie
|
||||||
|
@ -122,7 +124,9 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
|
||||||
0, // page count
|
0, // page count
|
||||||
MI_BIN_FULL, 0, // page retired min/max
|
MI_BIN_FULL, 0, // page retired min/max
|
||||||
NULL, // next
|
NULL, // next
|
||||||
false
|
false,
|
||||||
|
MI_SMALL_PAGES_EMPTY,
|
||||||
|
MI_PAGE_QUEUES_EMPTY
|
||||||
};
|
};
|
||||||
|
|
||||||
#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
|
#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
|
||||||
|
@ -156,8 +160,6 @@ static mi_tld_t tld_main = {
|
||||||
|
|
||||||
mi_heap_t _mi_heap_main = {
|
mi_heap_t _mi_heap_main = {
|
||||||
&tld_main,
|
&tld_main,
|
||||||
MI_SMALL_PAGES_EMPTY,
|
|
||||||
MI_PAGE_QUEUES_EMPTY,
|
|
||||||
MI_ATOMIC_VAR_INIT(NULL),
|
MI_ATOMIC_VAR_INIT(NULL),
|
||||||
0, // thread id
|
0, // thread id
|
||||||
0, // initial cookie
|
0, // initial cookie
|
||||||
|
@ -167,7 +169,9 @@ mi_heap_t _mi_heap_main = {
|
||||||
0, // page count
|
0, // page count
|
||||||
MI_BIN_FULL, 0, // page retired min/max
|
MI_BIN_FULL, 0, // page retired min/max
|
||||||
NULL, // next heap
|
NULL, // next heap
|
||||||
false // can reclaim
|
false, // can reclaim
|
||||||
|
MI_SMALL_PAGES_EMPTY,
|
||||||
|
MI_PAGE_QUEUES_EMPTY
|
||||||
};
|
};
|
||||||
|
|
||||||
bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.
|
bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.
|
||||||
|
|
|
@ -210,7 +210,7 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) {
|
||||||
if (c == 'x' || c == 'u') {
|
if (c == 'x' || c == 'u') {
|
||||||
if (numtype == 'z') x = va_arg(args, size_t);
|
if (numtype == 'z') x = va_arg(args, size_t);
|
||||||
else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t
|
else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t
|
||||||
else if (numtype == 'L') x = va_arg(args, unsigned long long);
|
else if (numtype == 'L') x = (uintptr_t)va_arg(args, unsigned long long);
|
||||||
else x = va_arg(args, unsigned long);
|
else x = va_arg(args, unsigned long);
|
||||||
}
|
}
|
||||||
else if (c == 'p') {
|
else if (c == 'p') {
|
||||||
|
@ -231,7 +231,7 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) {
|
||||||
intptr_t x = 0;
|
intptr_t x = 0;
|
||||||
if (numtype == 'z') x = va_arg(args, intptr_t );
|
if (numtype == 'z') x = va_arg(args, intptr_t );
|
||||||
else if (numtype == 't') x = va_arg(args, ptrdiff_t);
|
else if (numtype == 't') x = va_arg(args, ptrdiff_t);
|
||||||
else if (numtype == 'L') x = va_arg(args, long long);
|
else if (numtype == 'L') x = (intptr_t)va_arg(args, long long);
|
||||||
else x = va_arg(args, long);
|
else x = va_arg(args, long);
|
||||||
char pre = 0;
|
char pre = 0;
|
||||||
if (x < 0) {
|
if (x < 0) {
|
||||||
|
|
src/os.c  (48 changed lines)
|
@ -29,7 +29,7 @@ bool _mi_os_has_overcommit(void) {
|
||||||
return mi_os_mem_config.has_overcommit;
|
return mi_os_mem_config.has_overcommit;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool _mi_os_has_virtual_reserve(void) {
|
bool _mi_os_has_virtual_reserve(void) {
|
||||||
return mi_os_mem_config.has_virtual_reserve;
|
return mi_os_mem_config.has_virtual_reserve;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -165,7 +165,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
// nothing to do
|
// nothing to do
|
||||||
mi_assert(memid.memkind < MI_MEM_OS);
|
mi_assert(memid.memkind < MI_MEM_OS);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -188,25 +188,25 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
|
||||||
if (!commit) { allow_large = false; }
|
if (!commit) { allow_large = false; }
|
||||||
if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning
|
if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning
|
||||||
*is_zero = false;
|
*is_zero = false;
|
||||||
void* p = NULL;
|
void* p = NULL;
|
||||||
int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
|
int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
|
||||||
if (err != 0) {
|
if (err != 0) {
|
||||||
_mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
|
_mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
|
||||||
}
|
}
|
||||||
|
|
||||||
MI_UNUSED(tld_stats);
|
MI_UNUSED(tld_stats);
|
||||||
mi_stats_t* stats = &_mi_stats_main;
|
mi_stats_t* stats = &_mi_stats_main;
|
||||||
mi_stat_counter_increase(stats->mmap_calls, 1);
|
mi_stat_counter_increase(stats->mmap_calls, 1);
|
||||||
if (p != NULL) {
|
if (p != NULL) {
|
||||||
_mi_stat_increase(&stats->reserved, size);
|
_mi_stat_increase(&stats->reserved, size);
|
||||||
if (commit) {
|
if (commit) {
|
||||||
_mi_stat_increase(&stats->committed, size);
|
_mi_stat_increase(&stats->committed, size);
|
||||||
// seems needed for asan (or `mimalloc-test-api` fails)
|
// seems needed for asan (or `mimalloc-test-api` fails)
|
||||||
#ifdef MI_TRACK_ASAN
|
#ifdef MI_TRACK_ASAN
|
||||||
if (*is_zero) { mi_track_mem_defined(p,size); }
|
if (*is_zero) { mi_track_mem_defined(p,size); }
|
||||||
else { mi_track_mem_undefined(p,size); }
|
else { mi_track_mem_undefined(p,size); }
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
@ -243,7 +243,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
|
||||||
// over-allocate uncommitted (virtual) memory
|
// over-allocate uncommitted (virtual) memory
|
||||||
p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
|
p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
|
||||||
if (p == NULL) return NULL;
|
if (p == NULL) return NULL;
|
||||||
|
|
||||||
// set p to the aligned part in the full region
|
// set p to the aligned part in the full region
|
||||||
// note: this is dangerous on Windows as VirtualFree needs the actual base pointer
|
// note: this is dangerous on Windows as VirtualFree needs the actual base pointer
|
||||||
// this is handled though by having the `base` field in the memid's
|
// this is handled though by having the `base` field in the memid's
|
||||||
|
@ -259,7 +259,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
|
||||||
// overallocate...
|
// overallocate...
|
||||||
p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
|
p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
|
||||||
if (p == NULL) return NULL;
|
if (p == NULL) return NULL;
|
||||||
|
|
||||||
// and selectively unmap parts around the over-allocated area. (noop on sbrk)
|
// and selectively unmap parts around the over-allocated area. (noop on sbrk)
|
||||||
void* aligned_p = mi_align_up_ptr(p, alignment);
|
void* aligned_p = mi_align_up_ptr(p, alignment);
|
||||||
size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
|
size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
|
||||||
|
@ -270,7 +270,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
|
||||||
if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
|
if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
|
||||||
// we can return the aligned pointer on `mmap` (and sbrk) systems
|
// we can return the aligned pointer on `mmap` (and sbrk) systems
|
||||||
p = aligned_p;
|
p = aligned_p;
|
||||||
*base = aligned_p; // since we freed the pre part, `*base == p`.
|
*base = aligned_p; // since we freed the pre part, `*base == p`.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
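The two over-allocation paths above (committed and uncommitted) follow the same arithmetic: allocate `size + alignment` bytes, round the start up to the requested alignment, and on mmap/sbrk-style systems give the unused head and tail back to the OS. A rough sketch of that arithmetic, assuming a power-of-two `alignment` and `over_size = size + alignment` (helper names are illustrative, not mimalloc's API):

#include <stddef.h>
#include <stdint.h>

// Align a pointer up to a power-of-two alignment.
static void* toy_align_up_ptr(void* p, size_t alignment) {
  return (void*)(((uintptr_t)p + alignment - 1) & ~(uintptr_t)(alignment - 1));
}

// Compute the head (pre), handed-out middle (mid) and tail (post) of an
// over-allocated region; pre and post can be unmapped on mmap-like systems.
static void toy_split(void* p, size_t size, size_t alignment, size_t over_size,
                      size_t* pre_size, size_t* mid_size, size_t* post_size) {
  uint8_t* aligned_p = (uint8_t*)toy_align_up_ptr(p, alignment);
  *pre_size  = (size_t)(aligned_p - (uint8_t*)p);    // unused head before the aligned start
  *mid_size  = size;                                 // page-rounded in the real code
  *post_size = over_size - *pre_size - *mid_size;    // unused tail after the aligned block
}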
@@ -292,7 +292,7 @@ void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
   void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
   if (p != NULL) {
     *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
   }
   return p;
 }
 
@@ -303,7 +303,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
   alignment = _mi_align_up(alignment, _mi_os_page_size());
 
   bool os_is_large = false;
   bool os_is_zero = false;
   void* os_base = NULL;
@@ -318,7 +318,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
 
 /* -----------------------------------------------------------
   OS aligned allocation with an offset. This is used
-  for large alignments > MI_ALIGNMENT_MAX. We use a large mimalloc
+  for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc
   page where the object can be aligned at an offset from the start of the segment.
   As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
   to use the actual start of the memory region.
@@ -381,7 +381,7 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
 
 bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
   MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   if (is_zero != NULL) { *is_zero = false; }
   _mi_stat_increase(&stats->committed, size);  // use size for precise commit vs. decommit
   _mi_stat_counter_increase(&stats->commit_calls, 1);
@@ -391,21 +391,21 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
   void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize);
   if (csize == 0) return true;
 
   // commit
   bool os_is_zero = false;
   int err = _mi_prim_commit(start, csize, &os_is_zero);
   if (err != 0) {
     _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
     return false;
   }
   if (os_is_zero && is_zero != NULL) {
     *is_zero = true;
     mi_assert_expensive(mi_mem_is_zero(start, csize));
   }
   // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
   #ifdef MI_TRACK_ASAN
   if (os_is_zero) { mi_track_mem_defined(start,csize); }
   else { mi_track_mem_undefined(start,csize); }
   #endif
   return true;
 }
@@ -419,11 +419,11 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
   // page align
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
   if (csize == 0) return true;
 
   // decommit
   *needs_recommit = true;
   int err = _mi_prim_decommit(start,csize,needs_recommit);
   if (err != 0) {
     _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
   }
@@ -441,7 +441,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
 // but may be used later again. This will release physical memory
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
 bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
   // page align conservatively within the range
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
@@ -461,7 +461,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
 }
 
 
 // either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
 {
@@ -474,7 +474,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
   {
     bool needs_recommit = true;
     mi_os_decommit_ex(p, size, &needs_recommit, stats);
     return needs_recommit;
   }
   else {
     if (allow_reset) {  // this can sometimes be not allowed if the range is not fully committed
@@ -484,7 +484,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
     }
   }
 
 // either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) {
   return _mi_os_purge_ex(p, size, true, stats);
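The purge path above makes a policy choice: either decommit the range (cheapest in resident memory, but the caller must recommit before reuse and the function returns `true`), or merely reset it (the memory stays committed and the OS may reclaim the physical pages lazily, e.g. via `MADV_FREE` or `MEM_RESET`). A small sketch of that decision with the OS primitives abstracted as function pointers (illustrative only, not the actual mimalloc code):

#include <stdbool.h>
#include <stddef.h>

typedef bool (*toy_decommit_fn)(void* p, size_t size, bool* needs_recommit);
typedef bool (*toy_reset_fn)(void* p, size_t size);

// Returns true when the caller must recommit the range before reusing it.
static bool toy_purge(void* p, size_t size, bool purge_decommits, bool allow_reset,
                      toy_decommit_fn os_decommit, toy_reset_fn os_reset) {
  if (purge_decommits) {
    bool needs_recommit = true;
    os_decommit(p, size, &needs_recommit);
    return needs_recommit;     // typically true: pages were decommitted
  }
  else if (allow_reset) {      // reset may be disallowed if the range is not fully committed
    os_reset(p, size);
  }
  return false;                // still committed; can be reused directly
}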
@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -11,6 +11,10 @@ terms of the MIT license. A copy of the license can be found in the file
 
 #ifndef MI_IN_PAGE_C
 #error "this file should be included from 'page.c'"
+// include to help an IDE
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
 #endif
 
 /* -----------------------------------------------------------
@@ -137,21 +141,25 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
 }
 #endif
 
-static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
-  mi_heap_t* heap = mi_page_heap(page);
-  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
-  mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size);
-  mi_assert_expensive(mi_page_queue_contains(pq, page));
-  return pq;
+static inline bool mi_page_is_large_or_huge(const mi_page_t* page) {
+  return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page));
 }
 
 static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
+  mi_assert_internal(heap!=NULL);
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
   mi_assert_internal(bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
+  mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
+  return pq;
+}
+
+static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
+  mi_heap_t* heap = mi_page_heap(page);
+  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+  mi_assert_expensive(mi_page_queue_contains(pq, page));
   return pq;
 }
 
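The rework above splits the lookup in two: `mi_heap_page_queue_of` maps a page to its size-class bin (with dedicated bins for full and huge pages) and `mi_page_queue_of` merely resolves the page's heap first. The essential binning idea, in a deliberately simplified form (the real `mi_bin` uses a finer logarithmic spacing; the constants and names below are made up for illustration):

#include <stddef.h>

#define TOY_BIN_HUGE  73u              // hypothetical last size bin
#define TOY_BIN_FULL  (TOY_BIN_HUGE+1) // queue of completely used pages

// Map a block size to a size-class bin using simple power-of-two spacing.
static unsigned toy_bin(size_t size) {
  unsigned bin = 1;
  size_t cap = 8;
  while (cap < size && bin < TOY_BIN_HUGE) { cap *= 2; bin++; }
  return bin;
}

// Queue index for a page: full pages go to the FULL queue, large/huge pages
// to the HUGE queue, and everything else to its size-class bin.
static unsigned toy_queue_index(int is_full, int is_large_or_huge, size_t block_size) {
  if (is_full) return TOY_BIN_FULL;
  if (is_large_or_huge) return TOY_BIN_HUGE;
  return toy_bin(block_size);
}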
@@ -206,7 +214,9 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
 static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(queue, page));
-  mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   mi_heap_t* heap = mi_page_heap(page);
 
   if (page->prev != NULL) page->prev->next = page->next;
@@ -232,8 +242,8 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
   #endif
-  mi_assert_internal(page->xblock_size == queue->block_size ||
-                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
 
   mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -259,12 +269,13 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(from, page));
   mi_assert_expensive(!mi_page_queue_contains(to, page));
-  mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
-                     (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
-                     (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));
+  const size_t bsize = mi_page_block_size(page);
+  MI_UNUSED(bsize);
+  mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
+                     (bsize == to->block_size && mi_page_queue_is_full(from)) ||
+                     (bsize == from->block_size && mi_page_queue_is_full(to)) ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));
 
   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;

102  src/page.c
@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -59,7 +59,7 @@ static inline uint8_t* mi_page_area(const mi_page_t* page) {
 
 static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
   size_t psize;
-  uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize);
+  uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
   mi_block_t* start = (mi_block_t*)page_area;
   mi_block_t* end = (mi_block_t*)(page_area + psize);
   while(p != NULL) {
@@ -78,14 +78,13 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
 }
 
 static bool mi_page_is_valid_init(mi_page_t* page) {
-  mi_assert_internal(page->xblock_size > 0);
+  mi_assert_internal(mi_page_block_size(page) > 0);
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);
 
-  mi_segment_t* segment = _mi_page_segment(page);
-  uint8_t* start = _mi_page_start(segment,page,NULL);
-  mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
-  //const size_t bsize = mi_page_block_size(page);
+  uint8_t* start = mi_page_start(page);
+  mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL));
+  mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE));
   //mi_assert_internal(start + page->capacity*page->block_size == page->top);
 
   mi_assert_internal(mi_page_list_is_valid(page,page->free));
@@ -125,9 +124,9 @@ bool _mi_page_is_valid(mi_page_t* page) {
 
   mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
   #if MI_HUGE_PAGE_ABANDON
   if (segment->kind != MI_SEGMENT_HUGE)
   #endif
   {
     mi_page_queue_t* pq = mi_page_queue_of(page);
     mi_assert_internal(mi_page_queue_contains(pq, page));
     mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
@@ -193,8 +192,8 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
   if (head == NULL) return;
 
   // find the tail -- also to get a proper count (without data races)
-  uint32_t max_count = page->capacity; // cannot collect more than capacity
-  uint32_t count = 1;
+  size_t max_count = page->capacity; // cannot collect more than capacity
+  size_t count = 1;
   mi_block_t* tail = head;
   mi_block_t* next;
   while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
@@ -212,7 +211,7 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
   page->local_free = head;
 
   // update counts now
-  page->used -= count;
+  page->used -= (uint16_t)count;
 }
 
 void _mi_page_free_collect(mi_page_t* page, bool force) {
@@ -263,7 +262,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
   #endif
 
   // TODO: push on full queue immediately if it is full?
   mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
   mi_page_queue_push(heap, pq, page);
@@ -282,11 +281,13 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
     // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
     return NULL;
   }
+  #if MI_HUGE_PAGE_ABANDON
+  mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+  #endif
   mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
-  mi_assert_internal(pq!=NULL || page->xblock_size != 0);
   mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
   // a fresh page was found, initialize it
-  const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+  const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
   mi_assert_internal(full_block_size >= block_size);
   mi_page_init(heap, page, full_block_size, heap->tld);
   mi_heap_stat_increase(heap, pages, 1);
@@ -427,8 +428,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   _mi_segment_page_free(page, force, segments_tld);
 }
 
-// Retire parameters
-#define MI_MAX_RETIRE_SIZE    (MI_MEDIUM_OBJ_SIZE_MAX)
+#define MI_MAX_RETIRE_SIZE    MI_MEDIUM_OBJ_SIZE_MAX   // should be less than size for MI_BIN_HUGE
 #define MI_RETIRE_CYCLES      (16)
 
 // Retire a page with no more used blocks
@@ -441,7 +441,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(mi_page_all_free(page));
 
   mi_page_set_has_aligned(page, false);
 
   // don't retire too often..
@@ -451,10 +451,11 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   // how to check this efficiently though...
   // for now, we don't retire if it is the only page left of this size class.
   mi_page_queue_t* pq = mi_page_queue_of(page);
-  if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) { // not too large && not full or huge queue?
+  const size_t bsize = mi_page_block_size(page);
+  if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
     if (pq->last==page && pq->first==page) { // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
-      page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+      page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
       mi_heap_t* heap = mi_page_heap(page);
       mi_assert_internal(pq >= heap->pages);
       const size_t index = pq - heap->pages;
@@ -516,7 +517,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co
   #endif
   mi_assert_internal(page->capacity + extend <= page->reserved);
   mi_assert_internal(bsize == mi_page_block_size(page));
-  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
+  void* const page_area = mi_page_start(page);
 
   // initialize a randomized free list
   // set up `slice_count` slices to alternate between
@@ -574,7 +575,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
   #endif
   mi_assert_internal(page->capacity + extend <= page->reserved);
   mi_assert_internal(bsize == mi_page_block_size(page));
-  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL );
+  void* const page_area = mi_page_start(page);
 
   mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity);
 
@@ -608,7 +609,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 // allocations but this did not speed up any benchmark (due to an
 // extra test in malloc? or cache effects?)
 static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
   MI_UNUSED(tld);
   mi_assert_expensive(mi_page_is_valid_init(page));
   #if (MI_SECURE<=2)
   mi_assert(page->free == NULL);
@@ -617,16 +618,14 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
   #endif
   if (page->capacity >= page->reserved) return;
 
-  size_t page_size;
-  _mi_page_start(_mi_page_segment(page), page, &page_size);
   mi_stat_counter_increase(tld->stats.pages_extended, 1);
 
   // calculate the extend count
-  const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size);
+  const size_t bsize = mi_page_block_size(page);
   size_t extend = page->reserved - page->capacity;
   mi_assert_internal(extend > 0);
 
-  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
+  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize);
   if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
   mi_assert_internal(max_extend > 0);
 
@@ -660,11 +659,10 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(block_size > 0);
   // set fields
   mi_page_set_heap(page, heap);
-  page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
+  page->block_size = block_size;
   size_t page_size;
-  const void* page_start = _mi_segment_page_start(segment, page, &page_size);
-  MI_UNUSED(page_start);
-  mi_track_mem_noaccess(page_start,page_size);
+  page->page_start = _mi_segment_page_start(segment, page, &page_size);
+  mi_track_mem_noaccess(page->page_start,page_size);
   mi_assert_internal(mi_page_block_size(page) <= page_size);
   mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
   mi_assert_internal(page_size / block_size < (1L<<16));
@@ -677,12 +675,18 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   page->free_is_zero = page->is_zero_init;
   #if MI_DEBUG>2
   if (page->is_zero_init) {
-    mi_track_mem_defined(page_start, page_size);
-    mi_assert_expensive(mi_mem_is_zero(page_start, page_size));
+    mi_track_mem_defined(page->page_start, page_size);
+    mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size));
   }
   #endif
 
   mi_assert_internal(page->is_committed);
+  if (block_size > 0 && _mi_is_power_of_two(block_size)) {
+    page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
+  }
+  else {
+    page->block_size_shift = 0;
+  }
 
   mi_assert_internal(page->capacity == 0);
   mi_assert_internal(page->free == NULL);
   mi_assert_internal(page->used == 0);
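The new `block_size_shift` field initialized above caches `log2(block_size)` for power-of-two block sizes so that later pointer-to-block calculations can use a shift instead of a division. A sketch of how such a field is typically used (field and function names here are illustrative, not the mimalloc internals):

#include <stddef.h>
#include <stdint.h>

typedef struct toy_page_s {
  uint8_t* page_start;        // start of the block area
  size_t   block_size;        // block size in bytes
  uint8_t  block_size_shift;  // log2(block_size) if power of two, else 0
} toy_page_t;

// Index of the block containing `p`: shift on the fast path, divide otherwise.
static size_t toy_block_index(const toy_page_t* page, const void* p) {
  size_t diff = (size_t)((const uint8_t*)p - page->page_start);
  return (page->block_size_shift != 0)
           ? (diff >> page->block_size_shift)
           : (diff / page->block_size);
}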
@@ -695,6 +699,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->keys[0] != 0);
   mi_assert_internal(page->keys[1] != 0);
   #endif
+  mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift)));
   mi_assert_expensive(mi_page_is_valid_init(page));
 
   // initialize an initial free list
@@ -718,7 +723,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   while (page != NULL)
   {
     mi_page_t* next = page->next; // remember next
     #if MI_STAT
     count++;
     #endif
 
@@ -820,11 +825,9 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
 ----------------------------------------------------------- */
 
 // Large and huge page allocation.
-// Huge pages are allocated directly without being in a queue.
-// Because huge pages contain just one block, and the segment contains
-// just that page, we always treat them as abandoned and any thread
-// that frees the block can free the whole page and segment directly.
-// Huge pages are also use if the requested alignment is very large (> MI_ALIGNMENT_MAX).
+// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`).
+// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
+// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
 static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
   mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
@@ -832,25 +835,26 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
   #if MI_HUGE_PAGE_ABANDON
   mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
   #else
-  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_HUGE_BLOCK_SIZE : block_size); // not block_size as that can be low if the page_alignment > 0
+  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_LARGE_OBJ_SIZE_MAX+1 : block_size);
   mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
   #endif
   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
   if (page != NULL) {
     mi_assert_internal(mi_page_immediate_available(page));
 
     if (is_huge) {
+      mi_assert_internal(mi_page_is_huge(page));
       mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
       mi_assert_internal(_mi_page_segment(page)->used==1);
       #if MI_HUGE_PAGE_ABANDON
       mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
       mi_page_set_heap(page, NULL);
       #endif
     }
     else {
-      mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+      mi_assert_internal(!mi_page_is_huge(page));
     }
 
     const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding
     if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
       mi_heap_stat_increase(heap, large, bsize);
@@ -869,7 +873,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
 static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
   // huge allocation?
   const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
   if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
     if mi_unlikely(req_size > PTRDIFF_MAX) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
       _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
@@ -882,7 +886,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
   else {
     // otherwise find a page with free blocks in our size segregated queues
     #if MI_PADDING
     mi_assert_internal(size >= MI_PADDING_SIZE);
     #endif
     return mi_find_free_page(heap, size);
   }
@@ -898,7 +902,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
 
   // initialize if necessary
   if mi_unlikely(!mi_heap_is_initialized(heap)) {
     heap = mi_heap_get_default(); // calls mi_thread_init
     if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
   }
   mi_assert_internal(mi_heap_is_initialized(heap));
@@ -926,7 +930,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_page_block_size(page) >= size);
 
   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
-  if mi_unlikely(zero && page->xblock_size == 0) {
+  if mi_unlikely(zero && page->block_size == 0) {
     // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
     void* p = _mi_page_malloc(heap, page, size, false);
     mi_assert_internal(p != NULL);

131  src/segment.c
@ -1,5 +1,5 @@
|
||||||
/* ----------------------------------------------------------------------------
|
/* ----------------------------------------------------------------------------
|
||||||
Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
|
Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
|
||||||
This is free software; you can redistribute it and/or modify it under the
|
This is free software; you can redistribute it and/or modify it under the
|
||||||
terms of the MIT license. A copy of the license can be found in the file
|
terms of the MIT license. A copy of the license can be found in the file
|
||||||
"LICENSE" at the root of this distribution.
|
"LICENSE" at the root of this distribution.
|
||||||
|
@ -11,7 +11,11 @@ terms of the MIT license. A copy of the license can be found in the file
|
||||||
#include <string.h> // memset
|
#include <string.h> // memset
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
|
|
||||||
#define MI_PAGE_HUGE_ALIGN (256*1024)
|
// -------------------------------------------------------------------
|
||||||
|
// Segments
|
||||||
|
// mimalloc pages reside in segments. See `mi_segment_valid` for invariants.
|
||||||
|
// -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats);
|
static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats);
|
||||||
|
|
||||||
|
@ -146,10 +150,6 @@ size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) {
|
||||||
|
|
||||||
/* --------------------------------------------------------------------------------
|
/* --------------------------------------------------------------------------------
|
||||||
Segment allocation
|
Segment allocation
|
||||||
|
|
||||||
If a thread ends, it "abandons" pages with used blocks
|
|
||||||
and there is an abandoned segment list whose segments can
|
|
||||||
be reclaimed by still running threads, much like work-stealing.
|
|
||||||
-------------------------------------------------------------------------------- */
|
-------------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
|
||||||
|
@ -212,7 +212,7 @@ static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) {
|
||||||
sq->first = slice;
|
sq->first = slice;
|
||||||
if (slice->next != NULL) slice->next->prev = slice;
|
if (slice->next != NULL) slice->next->prev = slice;
|
||||||
else sq->last = slice;
|
else sq->last = slice;
|
||||||
slice->xblock_size = 0; // free
|
slice->block_size = 0; // free
|
||||||
}
|
}
|
||||||
|
|
||||||
static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
|
static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
|
||||||
|
@ -223,7 +223,7 @@ static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t*
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
|
static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
|
||||||
mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0);
|
mi_assert_internal(slice->block_size==0 && slice->slice_count>0 && slice->slice_offset==0);
|
||||||
// should work too if the queue does not contain slice (which can happen during reclaim)
|
// should work too if the queue does not contain slice (which can happen during reclaim)
|
||||||
if (slice->prev != NULL) slice->prev->next = slice->next;
|
if (slice->prev != NULL) slice->prev->next = slice->next;
|
||||||
if (slice == sq->first) sq->first = slice->next;
|
if (slice == sq->first) sq->first = slice->next;
|
||||||
|
@ -231,7 +231,7 @@ static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
|
||||||
if (slice == sq->last) sq->last = slice->prev;
|
if (slice == sq->last) sq->last = slice->prev;
|
||||||
slice->prev = NULL;
|
slice->prev = NULL;
|
||||||
slice->next = NULL;
|
slice->next = NULL;
|
||||||
slice->xblock_size = 1; // no more free
|
slice->block_size = 1; // no more free
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -240,7 +240,7 @@ static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
|
||||||
----------------------------------------------------------- */
|
----------------------------------------------------------- */
|
||||||
|
|
||||||
static bool mi_slice_is_used(const mi_slice_t* slice) {
|
static bool mi_slice_is_used(const mi_slice_t* slice) {
|
||||||
return (slice->xblock_size > 0);
|
return (slice->block_size > 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -268,19 +268,20 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
|
||||||
mi_assert_internal(slice->slice_offset == 0);
|
mi_assert_internal(slice->slice_offset == 0);
|
||||||
size_t index = mi_slice_index(slice);
|
size_t index = mi_slice_index(slice);
|
||||||
size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
|
size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
|
||||||
if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET valid back offsets
|
if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET_COUNT valid back offsets
|
||||||
used_count++;
|
used_count++;
|
||||||
for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET && index + i <= maxindex; i++) {
|
mi_assert_internal(slice->is_huge == (segment->kind == MI_SEGMENT_HUGE));
|
||||||
|
for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET_COUNT && index + i <= maxindex; i++) {
|
||||||
mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
|
mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
|
||||||
mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
|
mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
|
||||||
mi_assert_internal(i==0 || segment->slices[index + i].xblock_size == 1);
|
mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1);
|
||||||
}
|
}
|
||||||
// and the last entry as well (for coalescing)
|
// and the last entry as well (for coalescing)
|
||||||
const mi_slice_t* last = slice + slice->slice_count - 1;
|
const mi_slice_t* last = slice + slice->slice_count - 1;
|
||||||
if (last > slice && last < mi_segment_slices_end(segment)) {
|
if (last > slice && last < mi_segment_slices_end(segment)) {
|
||||||
mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
|
mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
|
||||||
mi_assert_internal(last->slice_count == 0);
|
mi_assert_internal(last->slice_count == 0);
|
||||||
mi_assert_internal(last->xblock_size == 1);
|
mi_assert_internal(last->block_size == 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else { // free range of slices; only last slice needs a valid back offset
|
else { // free range of slices; only last slice needs a valid back offset
|
||||||
|
@ -289,7 +290,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
|
||||||
mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset);
|
mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset);
|
||||||
}
|
}
|
||||||
mi_assert_internal(slice == last || last->slice_count == 0 );
|
mi_assert_internal(slice == last || last->slice_count == 0 );
|
||||||
mi_assert_internal(last->xblock_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->xblock_size==1));
|
mi_assert_internal(last->block_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->block_size==1));
|
||||||
if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned
|
if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned
|
||||||
sq = mi_span_queue_for(slice->slice_count,tld);
|
sq = mi_span_queue_for(slice->slice_count,tld);
|
||||||
mi_assert_internal(mi_span_queue_contains(sq,slice));
|
mi_assert_internal(mi_span_queue_contains(sq,slice));
|
||||||
|
@ -331,8 +332,8 @@ static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, c
|
||||||
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
|
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
|
||||||
{
|
{
|
||||||
const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
|
const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
|
||||||
uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, page->xblock_size, page_size);
|
uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, mi_page_block_size(page), page_size);
|
||||||
mi_assert_internal(page->xblock_size > 0 || _mi_ptr_page(p) == page);
|
mi_assert_internal(mi_page_block_size(page) > 0 || _mi_ptr_page(p) == page);
|
||||||
mi_assert_internal(_mi_ptr_segment(p) == segment);
|
mi_assert_internal(_mi_ptr_segment(p) == segment);
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
@ -600,7 +601,7 @@ static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t*
|
||||||
----------------------------------------------------------- */
|
----------------------------------------------------------- */
|
||||||
|
|
||||||
static bool mi_segment_is_abandoned(mi_segment_t* segment) {
|
static bool mi_segment_is_abandoned(mi_segment_t* segment) {
|
||||||
return (segment->thread_id == 0);
|
return (mi_atomic_load_relaxed(&segment->thread_id) == 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// note: can be called on abandoned segments
|
// note: can be called on abandoned segments
|
||||||
|
@ -620,7 +621,7 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size
|
||||||
mi_slice_t* last = &segment->slices[slice_index + slice_count - 1];
|
mi_slice_t* last = &segment->slices[slice_index + slice_count - 1];
|
||||||
last->slice_count = 0;
|
last->slice_count = 0;
|
||||||
last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
|
last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
|
||||||
last->xblock_size = 0;
|
last->block_size = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
// perhaps decommit
|
// perhaps decommit
|
||||||
|
@ -630,7 +631,7 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size
|
||||||
|
|
||||||
// and push it on the free page queue (if it was not a huge page)
|
// and push it on the free page queue (if it was not a huge page)
|
||||||
if (sq != NULL) mi_span_queue_push( sq, slice );
|
if (sq != NULL) mi_span_queue_push( sq, slice );
|
||||||
else slice->xblock_size = 0; // mark huge page as free anyways
|
else slice->block_size = 0; // mark huge page as free anyways
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -644,7 +645,7 @@ static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) {
|
static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) {
|
||||||
mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->xblock_size==0);
|
mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->block_size==0);
|
||||||
mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE);
|
mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE);
|
||||||
mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld);
|
mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld);
|
||||||
mi_span_queue_delete(sq, slice);
|
mi_span_queue_delete(sq, slice);
|
||||||
|
@ -653,14 +654,14 @@ static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld
 // note: can be called on abandoned segments
 static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) {
   mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0);
-  mi_segment_t* segment = _mi_ptr_segment(slice);
+  mi_segment_t* const segment = _mi_ptr_segment(slice);
-  bool is_abandoned = mi_segment_is_abandoned(segment);
+  const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment);

   // for huge pages, just mark as free but don't add to the queues
   if (segment->kind == MI_SEGMENT_HUGE) {
     // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case)
-    mi_assert_internal((segment->used==0 && slice->xblock_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
+    mi_assert_internal((segment->used==0 && slice->block_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
-    slice->xblock_size = 0; // mark as free anyways
+    slice->block_size = 0; // mark as free anyways
     // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to
     // avoid a possible cache miss (and the segment is about to be freed)
     return slice;
@ -670,7 +671,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
   size_t slice_count = slice->slice_count;
   mi_slice_t* next = slice + slice->slice_count;
   mi_assert_internal(next <= mi_segment_slices_end(segment));
-  if (next < mi_segment_slices_end(segment) && next->xblock_size==0) {
+  if (next < mi_segment_slices_end(segment) && next->block_size==0) {
     // free next block -- remove it from free and merge
     mi_assert_internal(next->slice_count > 0 && next->slice_offset==0);
     slice_count += next->slice_count; // extend
@ -679,7 +680,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
   if (slice > segment->slices) {
     mi_slice_t* prev = mi_slice_first(slice - 1);
     mi_assert_internal(prev >= segment->slices);
-    if (prev->xblock_size==0) {
+    if (prev->block_size==0) {
       // free previous slice -- remove it from free and merge
       mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0);
       slice_count += prev->slice_count;
@ -703,7 +704,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
 static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) {
   mi_assert_internal(slice_index < segment->slice_entries);
   mi_slice_t* const slice = &segment->slices[slice_index];
-  mi_assert_internal(slice->xblock_size==0 || slice->xblock_size==1);
+  mi_assert_internal(slice->block_size==0 || slice->block_size==1);

   // commit before changing the slice data
   if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) {
@ -715,20 +716,20 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   slice->slice_count = (uint32_t)slice_count;
   mi_assert_internal(slice->slice_count == slice_count);
   const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE;
-  slice->xblock_size = (uint32_t)(bsize >= MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : bsize);
+  slice->block_size = bsize;
   mi_page_t* page = mi_slice_to_page(slice);
   mi_assert_internal(mi_page_block_size(page) == bsize);

-  // set slice back pointers for the first MI_MAX_SLICE_OFFSET entries
+  // set slice back pointers for the first MI_MAX_SLICE_OFFSET_COUNT entries
   size_t extra = slice_count-1;
-  if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET;
+  if (extra > MI_MAX_SLICE_OFFSET_COUNT) extra = MI_MAX_SLICE_OFFSET_COUNT;
   if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices

   mi_slice_t* slice_next = slice + 1;
   for (size_t i = 1; i <= extra; i++, slice_next++) {
     slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i);
     slice_next->slice_count = 0;
-    slice_next->xblock_size = 1;
+    slice_next->block_size = 1;
   }

   // and also for the last one (if not set already) (the last one is needed for coalescing and for large alignments)
@ -739,11 +740,12 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   if (last > slice) {
     last->slice_offset = (uint32_t)(sizeof(mi_slice_t) * (last - slice));
     last->slice_count = 0;
-    last->xblock_size = 1;
+    last->block_size = 1;
   }

   // and initialize the page
   page->is_committed = true;
+  page->is_huge = (segment->kind == MI_SEGMENT_HUGE);
   segment->used++;
   return page;
 }
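The `slice_offset` back pointers written in `mi_segment_span_allocate` above are what keep span coalescing cheap: only the first MI_MAX_SLICE_OFFSET_COUNT slices (plus the last slice of the span) carry an explicit byte offset back to the head slice, so a neighbouring slice can be resolved with a single subtraction. A minimal sketch of that lookup, in the spirit of the `mi_slice_first` call used in the coalescing code above; the helper name here is illustrative only and not part of this diff:

    // hedged sketch: resolve an interior slice back to the head slice of its span
    static inline mi_slice_t* mi_slice_span_start(mi_slice_t* slice) {
      // head slices store slice_offset == 0; interior slices store the byte
      // distance back to the head, as written in mi_segment_span_allocate
      return (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
    }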
@ -751,7 +753,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
 static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) {
   mi_assert_internal(_mi_ptr_segment(slice) == segment);
   mi_assert_internal(slice->slice_count >= slice_count);
-  mi_assert_internal(slice->xblock_size > 0); // no more in free queue
+  mi_assert_internal(slice->block_size > 0); // no more in free queue
   if (slice->slice_count <= slice_count) return;
   mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
   size_t next_index = mi_slice_index(slice) + slice_count;
@ -777,7 +779,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
   if (slice->slice_count > slice_count) {
     mi_segment_slice_split(segment, slice, slice_count, tld);
   }
-  mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->xblock_size > 0);
+  mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0);
   mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld);
   if (page == NULL) {
     // commit failed; return NULL but first restore the slice
@ -954,8 +956,8 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   while (slice < end) {
     mi_assert_internal(slice->slice_count > 0);
     mi_assert_internal(slice->slice_offset == 0);
-    mi_assert_internal(mi_slice_index(slice)==0 || slice->xblock_size == 0); // no more used pages ..
+    mi_assert_internal(mi_slice_index(slice)==0 || slice->block_size == 0); // no more used pages ..
-    if (slice->xblock_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
+    if (slice->block_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
       mi_segment_span_remove_from_queue(slice, tld);
     }
     #if MI_DEBUG>1
@ -981,7 +983,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);

 // note: can be called on abandoned pages
 static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) {
-  mi_assert_internal(page->xblock_size > 0);
+  mi_assert_internal(page->block_size > 0);
   mi_assert_internal(mi_page_all_free(page));
   mi_segment_t* segment = _mi_ptr_segment(page);
   mi_assert_internal(segment->used > 0);
@ -993,7 +995,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   // reset the page memory to reduce memory pressure?
   if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) {
     size_t psize;
-    uint8_t* start = _mi_page_start(segment, page, &psize);
+    uint8_t* start = _mi_segment_page_start(segment, page, &psize);
     _mi_os_reset(start, psize, tld->stats);
   }

@ -1001,7 +1003,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   page->is_zero_init = false;
   ptrdiff_t ofs = offsetof(mi_page_t, capacity);
   _mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
-  page->xblock_size = 1;
+  page->block_size = 1;

   // and free it
   mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld);
@ -1048,6 +1050,11 @@ reuse their pages and/or free them eventually. The

 When a block is freed in an abandoned segment, the segment
 is reclaimed into that thread.

+Moreover, if threads are looking for a fresh segment, they
+will first consider abondoned segments -- these can be found
+by scanning the arena memory
+(segments outside arena memoryare only reclaimed by a free).
 ----------------------------------------------------------- */

 // legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list
@ -1071,9 +1078,9 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   while (slice < end) {
     mi_assert_internal(slice->slice_count > 0);
     mi_assert_internal(slice->slice_offset == 0);
-    if (slice->xblock_size == 0) { // a free page
+    if (slice->block_size == 0) { // a free page
       mi_segment_span_remove_from_queue(slice,tld);
-      slice->xblock_size = 0; // but keep it free
+      slice->block_size = 0; // but keep it free
     }
     slice = slice + slice->slice_count;
   }
@ -1120,7 +1127,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
 static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) {
   mi_slice_t* slice = &segment->slices[0];
   *end = mi_segment_slices_end(segment);
-  mi_assert_internal(slice->slice_count>0 && slice->xblock_size>0); // segment allocated page
+  mi_assert_internal(slice->slice_count>0 && slice->block_size>0); // segment allocated page
   slice = slice + slice->slice_count; // skip the first segment allocated page
   return slice;
 }
@ -1128,7 +1135,6 @@ static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice
 // Possibly free pages and check if free space is available
 static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld)
 {
-  mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   mi_assert_internal(mi_segment_is_abandoned(segment));
   bool has_page = false;

@ -1153,11 +1159,9 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
         has_page = true;
       }
     }
-    else {
-      if (page->xblock_size == block_size && mi_page_has_any_available(page)) {
-        // a page has available free blocks of the right size
-        has_page = true;
-      }
-    }
+    else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) {
+      // a page has available free blocks of the right size
+      has_page = true;
+    }
   }
   else {
@ -1211,7 +1215,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
   else {
     // otherwise reclaim it into the heap
     _mi_page_reclaim(heap, page);
-    if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) {
+    if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page)) {
       if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
     }
   }
@ -1259,26 +1263,31 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
 }

 static long mi_segment_get_reclaim_tries(void) {
-  // limit the tries to 10% (default) of the abandoned segments with at least 8 tries, and at most 1024.
+  // limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
   const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
   if (perc <= 0) return 0;
   const size_t total_count = _mi_arena_segment_abandoned_count();
+  if (total_count == 0) return 0;
   const size_t relative_count = (total_count > 10000 ? (total_count / 100) * perc : (total_count * perc) / 100); // avoid overflow
-  long max_tries = (long)(relative_count < 8 ? 8 : (relative_count > 1024 ? 1024 : relative_count));
+  long max_tries = (long)(relative_count <= 1 ? 1 : (relative_count > 1024 ? 1024 : relative_count));
+  if (max_tries < 8 && total_count > 8) { max_tries = 8; }
   return max_tries;
 }

 static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld)
 {
   *reclaimed = false;
-  mi_segment_t* segment;
-  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap,&current);
   long max_tries = mi_segment_get_reclaim_tries();
+  if (max_tries <= 0) return NULL;
+
+  mi_segment_t* segment;
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
   while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
   {
     segment->abandoned_visits++;
-    // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments
-    // and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way?
+    // todo: should we respect numa affinity for abondoned reclaim? perhaps only for the first visit?
+    // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries
+    // Perhaps we can skip non-suitable ones in a better way?
     bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
     bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees)
     if (segment->used == 0) {
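Read together, the new `mi_segment_get_reclaim_tries` logic clamps the reclaim effort to roughly `perc`% of the abandoned-segment count, never more than 1024 tries, and bumps very small results up to 8 as long as more than 8 segments are abandoned. A standalone sketch of that arithmetic with a few worked values (a hedged illustration of the clamp, not the mimalloc code itself):

    #include <stdio.h>

    // hedged sketch of the clamping rule introduced above
    static long reclaim_tries(size_t total_count, size_t perc) {
      if (perc == 0 || total_count == 0) return 0;
      const size_t relative = (total_count > 10000 ? (total_count / 100) * perc
                                                   : (total_count * perc) / 100);  // avoid overflow
      long tries = (long)(relative <= 1 ? 1 : (relative > 1024 ? 1024 : relative));
      if (tries < 8 && total_count > 8) { tries = 8; }  // at least 8 when many segments exist
      return tries;
    }

    int main(void) {
      printf("%ld\n", reclaim_tries(500, 10));      // 50   (10% of 500)
      printf("%ld\n", reclaim_tries(20, 10));       // 8    (2 would be too few; bumped to the minimum)
      printf("%ld\n", reclaim_tries(200000, 10));   // 1024 (capped)
      return 0;
    }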
@ -1337,7 +1346,6 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)

 static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
-  mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);

   // 1. try to reclaim an abandoned segment
@ -1404,11 +1412,12 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   segment->thread_id = 0; // huge segments are immediately abandoned
   #endif

-  // for huge pages we initialize the xblock_size as we may
+  // for huge pages we initialize the block_size as we may
   // overallocate to accommodate large alignments.
   size_t psize;
   uint8_t* start = _mi_segment_page_start(segment, page, &psize);
-  page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  page->block_size = psize;
+  mi_assert_internal(page->is_huge);

   // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
   if (page_alignment > 0 && segment->allow_decommit) {
@ -1439,7 +1448,7 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
   mi_block_set_next(page, block, page->free);
   page->free = block;
   page->used--;
-  page->is_zero = false;
+  page->is_zero_init = false;
   mi_assert(page->used == 0);
   mi_tld_t* tld = heap->tld;
   _mi_segment_page_free(page, true, &tld->segments);
@ -1475,7 +1484,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
 ----------------------------------------------------------- */
 mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
   mi_page_t* page;
-  if mi_unlikely(page_alignment > MI_ALIGNMENT_MAX) {
+  if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
     mi_assert_internal(_mi_is_power_of_two(page_alignment));
     mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
     if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
src/stats.c
@ -175,13 +175,28 @@ static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out, void* ar

 static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg, const char* notok ) {
   _mi_fprintf(out, arg,"%10s:", msg);
-  if (unit > 0) {
-    mi_print_amount(stat->peak, unit, out, arg);
-    mi_print_amount(stat->allocated, unit, out, arg);
-    mi_print_amount(stat->freed, unit, out, arg);
-    mi_print_amount(stat->current, unit, out, arg);
-    mi_print_amount(unit, 1, out, arg);
-    mi_print_count(stat->allocated, unit, out, arg);
+  if (unit != 0) {
+    if (unit > 0) {
+      mi_print_amount(stat->peak, unit, out, arg);
+      mi_print_amount(stat->allocated, unit, out, arg);
+      mi_print_amount(stat->freed, unit, out, arg);
+      mi_print_amount(stat->current, unit, out, arg);
+      mi_print_amount(unit, 1, out, arg);
+      mi_print_count(stat->allocated, unit, out, arg);
+    }
+    else {
+      mi_print_amount(stat->peak, -1, out, arg);
+      mi_print_amount(stat->allocated, -1, out, arg);
+      mi_print_amount(stat->freed, -1, out, arg);
+      mi_print_amount(stat->current, -1, out, arg);
+      if (unit == -1) {
+        _mi_fprintf(out, arg, "%24s", "");
+      }
+      else {
+        mi_print_amount(-unit, 1, out, arg);
+        mi_print_count((stat->allocated / -unit), 0, out, arg);
+      }
+    }
   if (stat->allocated > stat->freed) {
     _mi_fprintf(out, arg, " ");
     _mi_fprintf(out, arg, (notok == NULL ? "not all freed" : notok));
@ -191,23 +206,6 @@ static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64
     _mi_fprintf(out, arg, " ok\n");
   }
   }
-  else if (unit<0) {
-    mi_print_amount(stat->peak, -1, out, arg);
-    mi_print_amount(stat->allocated, -1, out, arg);
-    mi_print_amount(stat->freed, -1, out, arg);
-    mi_print_amount(stat->current, -1, out, arg);
-    if (unit==-1) {
-      _mi_fprintf(out, arg, "%24s", "");
-    }
-    else {
-      mi_print_amount(-unit, 1, out, arg);
-      mi_print_count((stat->allocated / -unit), 0, out, arg);
-    }
-    if (stat->allocated > stat->freed)
-      _mi_fprintf(out, arg, " not all freed!\n");
-    else
-      _mi_fprintf(out, arg, " ok\n");
-  }
   else {
     mi_print_amount(stat->peak, 1, out, arg);
     mi_print_amount(stat->allocated, 1, out, arg);
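These per-line printers feed the public statistics dump; the restructuring above only changes how a line is rendered for size-like units (`unit > 0`) versus count-like units (`unit < 0`). A minimal way to see the resulting output, using only the public API from `mimalloc.h` (a usage sketch, unrelated to the changed internals):

    #include <mimalloc.h>

    int main(void) {
      void* p = mi_malloc(16 * 1024);  // allocate something so the counters are non-zero
      mi_free(p);
      mi_stats_print(NULL);            // print accumulated statistics to the default output
      return 0;
    }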
@ -457,7 +455,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
   pinfo.page_faults = 0;

   _mi_prim_process_info(&pinfo);

   if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX));
   if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX));
   if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.stime : PTRDIFF_MAX));
@ -379,7 +379,7 @@ static void bench_alloc_large(void) {
   static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024;
   std::unique_ptr<char[]> buffers[kNumBuffers];

-  std::random_device rd;
+  std::random_device rd; (void)rd;
   std::mt19937 gen(42); //rd());
   std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize);
   std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1);
@ -34,7 +34,7 @@ we therefore test the API over various inputs. Please add more tests :-)

 #include "mimalloc.h"
 // #include "mimalloc/internal.h"
-#include "mimalloc/types.h" // for MI_DEBUG and MI_ALIGNMENT_MAX
+#include "mimalloc/types.h" // for MI_DEBUG and MI_BLOCK_ALIGNMENT_MAX

 #include "testhelper.h"

@ -64,7 +64,7 @@ bool mem_is_zero(uint8_t* p, size_t size) {
 // ---------------------------------------------------------------------------
 int main(void) {
   mi_option_disable(mi_option_verbose);

   // ---------------------------------------------------
   // Malloc
   // ---------------------------------------------------
@ -159,7 +159,7 @@ int main(void) {
   };
   CHECK_BODY("malloc-aligned6") {
     bool ok = true;
-    for (size_t align = 1; align <= MI_ALIGNMENT_MAX && ok; align *= 2) {
+    for (size_t align = 1; align <= MI_BLOCK_ALIGNMENT_MAX && ok; align *= 2) {
       void* ps[8];
       for (int i = 0; i < 8 && ok; i++) {
         ps[i] = mi_malloc_aligned(align*13 // size
@ -175,16 +175,16 @@ int main(void) {
     result = ok;
   };
   CHECK_BODY("malloc-aligned7") {
-    void* p = mi_malloc_aligned(1024,MI_ALIGNMENT_MAX);
+    void* p = mi_malloc_aligned(1024,MI_BLOCK_ALIGNMENT_MAX);
     mi_free(p);
-    result = ((uintptr_t)p % MI_ALIGNMENT_MAX) == 0;
+    result = ((uintptr_t)p % MI_BLOCK_ALIGNMENT_MAX) == 0;
   };
   CHECK_BODY("malloc-aligned8") {
     bool ok = true;
     for (int i = 0; i < 5 && ok; i++) {
       int n = (1 << i);
-      void* p = mi_malloc_aligned(1024, n * MI_ALIGNMENT_MAX);
+      void* p = mi_malloc_aligned(1024, n * MI_BLOCK_ALIGNMENT_MAX);
-      ok = ((uintptr_t)p % (n*MI_ALIGNMENT_MAX)) == 0;
+      ok = ((uintptr_t)p % (n*MI_BLOCK_ALIGNMENT_MAX)) == 0;
       mi_free(p);
     }
     result = ok;
@ -192,7 +192,7 @@ int main(void) {
   CHECK_BODY("malloc-aligned9") {
     bool ok = true;
     void* p[8];
-    size_t sizes[8] = { 8, 512, 1024 * 1024, MI_ALIGNMENT_MAX, MI_ALIGNMENT_MAX + 1, 2 * MI_ALIGNMENT_MAX, 8 * MI_ALIGNMENT_MAX, 0 };
+    size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };
     for (int i = 0; i < 28 && ok; i++) {
       int align = (1 << i);
       for (int j = 0; j < 8 && ok; j++) {
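The tests above exercise `mi_malloc_aligned` around `MI_BLOCK_ALIGNMENT_MAX`, the renamed bound between the regular aligned-allocation path and the over-aligned huge-page path changed earlier in this diff. Outside the test suite, the basic contract is simply that any power-of-two alignment yields a suitably aligned, individually freeable block; a minimal standalone sketch (not part of the test suite):

    #include <mimalloc.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      const size_t align = (size_t)1 << 20;      // 1 MiB; alignment must be a power of two
      void* p = mi_malloc_aligned(1234, align);  // the size need not be a multiple of the alignment
      assert(p != NULL && ((uintptr_t)p % align) == 0);
      mi_free(p);
      return 0;
    }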