fix lock recursion

Author: daanx
Date:   2024-12-20 22:13:58 -08:00
Parent: a5b7d7f264
Commit: daac75af36
4 changed files with 62 additions and 35 deletions

View file

@@ -279,8 +279,8 @@
   </ClCompile>
 </ItemGroup>
 <ItemGroup>
-  <ProjectReference Include="mimalloc.vcxproj">
-    <Project>{abb5eae7-b3e6-432e-b636-333449892ea6}</Project>
+  <ProjectReference Include="mimalloc-override.vcxproj">
+    <Project>{abb5eae7-b3e6-432e-b636-333449892ea7}</Project>
   </ProjectReference>
 </ItemGroup>
 <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

View file

@@ -408,9 +408,8 @@ static inline void mi_atomic_yield(void) {
 // ----------------------------------------------------------------------
 // Locks
-// These do not have to be recursive and should be light-weight
-// in-process only locks. Only used for reserving arena's and to
-// maintain the abandoned list.
+// These should be light-weight in-process only locks.
+// Only used for reserving arena's and to maintain the abandoned list.
 // ----------------------------------------------------------------------
 #if _MSC_VER
 #pragma warning(disable:26110)  // unlock with holding lock

@@ -418,6 +417,26 @@ static inline void mi_atomic_yield(void) {
 #if defined(_WIN32)
+#define mi_lock_t  CRITICAL_SECTION
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+  return TryEnterCriticalSection(lock);
+}
+static inline bool mi_lock_acquire(mi_lock_t* lock) {
+  EnterCriticalSection(lock);
+  return true;
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+  LeaveCriticalSection(lock);
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+  InitializeCriticalSection(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+  DeleteCriticalSection(lock);
+}
+
+#if 0
 #define mi_lock_t SRWLOCK   // slim reader-writer lock
 static inline bool mi_lock_try_acquire(mi_lock_t* lock) {

@@ -436,7 +455,7 @@ static inline void mi_lock_init(mi_lock_t* lock) {
 static inline void mi_lock_done(mi_lock_t* lock) {
   (void)(lock);
 }
+#endif

 #elif defined(MI_USE_PTHREADS)
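Note: the substance of the fix is in the hunks above. On Windows the lock type switches from SRWLOCK to CRITICAL_SECTION: re-acquiring an SRWLOCK on the thread that already holds it deadlocks (strictly, the behavior is undefined), while a CRITICAL_SECTION keeps a recursion count for its owning thread. A minimal stand-alone sketch of the difference (illustrative only, not part of this commit):

  #include <windows.h>
  #include <stdio.h>

  static CRITICAL_SECTION cs;

  int main(void) {
    InitializeCriticalSection(&cs);
    EnterCriticalSection(&cs);
    EnterCriticalSection(&cs);      // ok: recursive acquire by the owning thread
    LeaveCriticalSection(&cs);
    LeaveCriticalSection(&cs);      // must release once per acquire
    DeleteCriticalSection(&cs);
    printf("CRITICAL_SECTION supports recursion\n");

    SRWLOCK srw = SRWLOCK_INIT;
    AcquireSRWLockExclusive(&srw);
    // AcquireSRWLockExclusive(&srw);  // would deadlock: SRWLOCK is not re-entrant
    ReleaseSRWLockExclusive(&srw);
    return 0;
  }

The old SRWLOCK implementation is kept under #if 0 rather than deleted, presumably so it can be restored if the recursive acquisition is later eliminated.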

View file

@@ -275,6 +275,8 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
 }

+static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id);
+
 // try to reserve a fresh arena space
 static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t* arena_id)
 {

@@ -325,7 +327,7 @@ static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_
   const bool adjust = (overcommit && arena_commit);
   if (adjust) { _mi_stat_adjust_decrease(&_mi_stats_main.committed, arena_reserve, true /* on alloc */); }
   // and try to reserve the arena
-  int err = mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
+  int err = mi_reserve_os_memory_ex2(subproc, arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
   if (err != 0) {
     if (adjust) { _mi_stat_adjust_increase(&_mi_stats_main.committed, arena_reserve, true); }  // roll back
     // failed, try a smaller size?

@@ -1162,14 +1164,14 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
 }

 // Reserve a range of regular OS memory
-int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) {
   if (arena_id != NULL) *arena_id = _mi_arena_id_none();
   size = _mi_align_up(size, MI_ARENA_SLICE_SIZE);  // at least one slice
   mi_memid_t memid;
   void* start = _mi_os_alloc_aligned(size, MI_ARENA_SLICE_ALIGN, commit, allow_large, &memid);
   if (start == NULL) return ENOMEM;
   const bool is_large = memid.is_pinned;  // todo: use separate is_large field?
-  if (!mi_manage_os_memory_ex2(_mi_subproc(), start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
+  if (!mi_manage_os_memory_ex2(subproc, start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
     _mi_os_free_ex(start, size, commit, memid);
     _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
     return ENOMEM;

@@ -1180,6 +1182,11 @@ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exc
   return 0;
 }

+// Reserve a range of regular OS memory
+int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+  return mi_reserve_os_memory_ex2(_mi_subproc(), size, commit, allow_large, exclusive, arena_id);
+}
+
 // Manage a range of regular OS memory
 bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
   return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);

@@ -1289,7 +1296,7 @@ void mi_debug_show_arenas(bool show_pages, bool show_inuse, bool show_committed)
     if (arena == NULL) break;
     mi_assert(arena->subproc == subproc);
     slice_total += arena->slice_count;
-    _mi_output_message("arena %zu at %p: %zu slices (%zu MiB)%s, subproc: %p\n", i, arena, arena->slice_count, mi_size_of_slices(arena->slice_count)/MI_MiB, (arena->memid.is_pinned ? ", pinned" : "", arena->subproc));
+    _mi_output_message("arena %zu at %p: %zu slices (%zu MiB)%s, subproc: %p\n", i, arena, arena->slice_count, mi_size_of_slices(arena->slice_count)/MI_MiB, (arena->memid.is_pinned ? ", pinned" : ""), arena->subproc);
     if (show_inuse) {
       free_total += mi_debug_show_bitmap("in-use slices", arena->slice_count, arena->slices_free, true, NULL);
     }
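The public entry point is unchanged by this refactor: mi_reserve_os_memory_ex now forwards to the internal mi_reserve_os_memory_ex2, which takes the subprocess explicitly so that internal callers such as mi_arena_reserve can pass the subproc they already operate on instead of re-deriving it via _mi_subproc(). A usage sketch of the public API (the size and flag values are illustrative):

  #include <mimalloc.h>
  #include <stdio.h>

  int main(void) {
    mi_arena_id_t arena_id;
    // reserve a 64 MiB arena: uncommitted, large OS pages allowed, not exclusive
    int err = mi_reserve_os_memory_ex(64 * 1024 * 1024, false /* commit */,
                                      true /* allow_large */, false /* exclusive */,
                                      &arena_id);
    if (err != 0) { fprintf(stderr, "reserve failed: %d\n", err); return 1; }
    void* p = mi_malloc(1024);      // later allocations may be served from the arena
    mi_free(p);
    return 0;
  }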

View file

@@ -11,30 +11,31 @@ terms of the MIT license. A copy of the license can be found in the file
 #include <string.h>  // memcpy, memset
 #include <stdlib.h>  // atexit

-#define MI_MEMID_STATIC {{{NULL,0}}, MI_MEM_STATIC, true /* pinned */, true /* committed */, false /* zero */ }
+#define MI_MEMID_INIT(kind) {{{NULL,0}}, kind, true /* pinned */, true /* committed */, false /* zero */ }
+#define MI_MEMID_STATIC MI_MEMID_INIT(MI_MEM_STATIC)

 // Empty page used to initialize the small free pages array
 const mi_page_t _mi_page_empty = {
   MI_ATOMIC_VAR_INIT(0),  // xthread_id
   NULL,                   // free
   0,                      // used
   0,                      // capacity
   0,                      // reserved capacity
   0,                      // block size shift
   0,                      // retire_expire
   NULL,                   // local_free
   MI_ATOMIC_VAR_INIT(0),  // xthread_free
   MI_ATOMIC_VAR_INIT(0),  // xflags
   0,                      // block_size
   NULL,                   // page_start
   0,                      // heap tag
   false,                  // is_zero
   #if (MI_PADDING || MI_ENCODE_FREELIST)
-  { 0, 0 },
+  { 0, 0 },               // keys
   #endif
   NULL,                   // xheap
   NULL, NULL,             // next, prev
   MI_MEMID_STATIC         // memid
 };

 #define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
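The new MI_MEMID_INIT(kind) macro factors out the old MI_MEMID_STATIC initializer so the same layout (pinned, committed, not zeroed) can be instantiated for other memory kinds. A small sketch; MI_MEM_OS is one of mimalloc's mi_memkind_t values, but the MI_MEMID_OS name itself is hypothetical:

  // hypothetical reuse of the macro for another statically initialized memid
  #define MI_MEMID_OS  MI_MEMID_INIT(MI_MEM_OS)

  // MI_MEMID_STATIC still expands to the same initializer as before:
  //   {{{NULL,0}}, MI_MEM_STATIC, true /* pinned */, true /* committed */, false /* zero */ }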
(The remaining hunks in this file appear to change only whitespace alignment, so all lines are shown as context.)

@@ -100,7 +101,7 @@ static mi_decl_cache_align mi_subproc_t subproc_main;
 static mi_decl_cache_align mi_tld_t tld_empty = {
   0,              // thread_id
   0,              // thread_seq
   &subproc_main,  // subproc
   NULL,           // heap_backing
   NULL,           // heaps list
   0,              // heartbeat

@@ -111,7 +112,7 @@ static mi_decl_cache_align mi_tld_t tld_empty = {
 };

 mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   &tld_empty,     // tld
   NULL,           // exclusive_arena
   0,              // cookie
   { 0, 0 },       // keys

@@ -136,9 +137,9 @@ extern mi_heap_t heap_main;
 static mi_decl_cache_align mi_tld_t tld_main = {
   0,              // thread_id
   0,              // thread_seq
   &subproc_main,  // subproc
   &heap_main,     // heap_backing
   &heap_main,     // heaps list
   0,              // heartbeat
   false,          // recurse
   false,          // is_in_threadpool

@@ -147,7 +148,7 @@ static mi_decl_cache_align mi_tld_t tld_main = {
 };

 mi_decl_cache_align mi_heap_t heap_main = {
   &tld_main,      // thread local data
   0,              // initial cookie
   0,              // arena id
   { 0, 0 },       // the key of the main heap can be fixed (unlike page keys that need to be secure!)