diff --git a/ide/vs2022/mimalloc-test-stress.vcxproj b/ide/vs2022/mimalloc-test-stress.vcxproj
index fd88cd8e..672cbb87 100644
--- a/ide/vs2022/mimalloc-test-stress.vcxproj
+++ b/ide/vs2022/mimalloc-test-stress.vcxproj
@@ -279,8 +279,8 @@
-
-      <Project>{abb5eae7-b3e6-432e-b636-333449892ea6}</Project>
+
+      <Project>{abb5eae7-b3e6-432e-b636-333449892ea7}</Project>
diff --git a/include/mimalloc/atomic.h b/include/mimalloc/atomic.h
index ddb5a9a3..ab1e161d 100644
--- a/include/mimalloc/atomic.h
+++ b/include/mimalloc/atomic.h
@@ -408,9 +408,8 @@ static inline void mi_atomic_yield(void) {
// ----------------------------------------------------------------------
// Locks
-// These do not have to be recursive and should be light-weight
-// in-process only locks. Only used for reserving arena's and to
-// maintain the abandoned list.
+// These should be light-weight, in-process-only locks.
+// Only used for reserving arenas and to maintain the abandoned list.
// ----------------------------------------------------------------------
#if _MSC_VER
#pragma warning(disable:26110) // unlock with holding lock
@@ -418,6 +417,26 @@ static inline void mi_atomic_yield(void) {
#if defined(_WIN32)
+#define mi_lock_t CRITICAL_SECTION
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+ return TryEnterCriticalSection(lock);
+}
+static inline bool mi_lock_acquire(mi_lock_t* lock) {
+ EnterCriticalSection(lock);
+ return true;
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+ LeaveCriticalSection(lock);
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+ InitializeCriticalSection(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+ DeleteCriticalSection(lock);
+}
+
+#if 0
#define mi_lock_t SRWLOCK // slim reader-writer lock
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
@@ -436,7 +455,7 @@ static inline void mi_lock_init(mi_lock_t* lock) {
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
-
+#endif
#elif defined(MI_USE_PTHREADS)
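
The Windows lock implementation switches from SRWLOCK to CRITICAL_SECTION here (the SRWLOCK variant is retained under `#if 0`). The practical difference: a CRITICAL_SECTION is recursive (the owning thread may re-acquire it) and requires explicit InitializeCriticalSection/DeleteCriticalSection calls, whereas an SRWLOCK is non-recursive and zero-initialized, which is why mi_lock_init and mi_lock_done were no-ops before this change. A minimal sketch of how a caller consumes this API (the demo_* names are illustrative, not part of mimalloc):

    #include <windows.h>
    #include <stdbool.h>

    #define mi_lock_t CRITICAL_SECTION
    static inline void mi_lock_init(mi_lock_t* lock)    { InitializeCriticalSection(lock); }
    static inline void mi_lock_done(mi_lock_t* lock)    { DeleteCriticalSection(lock); }
    static inline bool mi_lock_acquire(mi_lock_t* lock) { EnterCriticalSection(lock); return true; }
    static inline void mi_lock_release(mi_lock_t* lock) { LeaveCriticalSection(lock); }

    static mi_lock_t demo_lock;   // protects demo_head
    static void*     demo_head;   // stand-in for shared state such as the abandoned list

    void demo_push(void* item) {
      if (mi_lock_acquire(&demo_lock)) {   // always true for a CRITICAL_SECTION
        demo_head = item;                  // ...update the guarded structure...
        mi_lock_release(&demo_lock);
      }
    }
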
diff --git a/src/arena.c b/src/arena.c
index bb846da9..fd914f43 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -275,6 +275,8 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
}
+static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id);
+
// try to reserve a fresh arena space
static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t* arena_id)
{
@@ -325,7 +327,7 @@ static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_
const bool adjust = (overcommit && arena_commit);
if (adjust) { _mi_stat_adjust_decrease(&_mi_stats_main.committed, arena_reserve, true /* on alloc */); }
// and try to reserve the arena
- int err = mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
+ int err = mi_reserve_os_memory_ex2(subproc, arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id);
if (err != 0) {
if (adjust) { _mi_stat_adjust_increase(&_mi_stats_main.committed, arena_reserve, true); } // roll back
// failed, try a smaller size?
@@ -1162,14 +1164,14 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
}
// Reserve a range of regular OS memory
-int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) {
if (arena_id != NULL) *arena_id = _mi_arena_id_none();
size = _mi_align_up(size, MI_ARENA_SLICE_SIZE); // at least one slice
mi_memid_t memid;
void* start = _mi_os_alloc_aligned(size, MI_ARENA_SLICE_ALIGN, commit, allow_large, &memid);
if (start == NULL) return ENOMEM;
const bool is_large = memid.is_pinned; // todo: use separate is_large field?
- if (!mi_manage_os_memory_ex2(_mi_subproc(), start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
+ if (!mi_manage_os_memory_ex2(subproc, start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
_mi_os_free_ex(start, size, commit, memid);
_mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
return ENOMEM;
@@ -1180,6 +1182,11 @@ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exc
return 0;
}
+// Reserve a range of regular OS memory
+int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+ return mi_reserve_os_memory_ex2(_mi_subproc(), size, commit, allow_large, exclusive, arena_id);
+}
+
// Manage a range of regular OS memory
bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
@@ -1289,7 +1296,7 @@ void mi_debug_show_arenas(bool show_pages, bool show_inuse, bool show_committed)
if (arena == NULL) break;
mi_assert(arena->subproc == subproc);
slice_total += arena->slice_count;
- _mi_output_message("arena %zu at %p: %zu slices (%zu MiB)%s, subproc: %p\n", i, arena, arena->slice_count, mi_size_of_slices(arena->slice_count)/MI_MiB, (arena->memid.is_pinned ? ", pinned" : "", arena->subproc));
+ _mi_output_message("arena %zu at %p: %zu slices (%zu MiB)%s, subproc: %p\n", i, arena, arena->slice_count, mi_size_of_slices(arena->slice_count)/MI_MiB, (arena->memid.is_pinned ? ", pinned" : ""), arena->subproc);
if (show_inuse) {
free_total += mi_debug_show_bitmap("in-use slices", arena->slice_count, arena->slices_free, true, NULL);
}
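
Why the ex2 split: mi_arena_reserve already receives an explicit subproc, but it used to reserve through the public mi_reserve_os_memory_ex, which registers the new arena with the calling thread's subprocess via _mi_subproc(); the two could disagree. The body now lives in a static mi_reserve_os_memory_ex2 that takes the subprocess as a parameter, and the public function stays a thin wrapper with an unchanged signature. A sketch of the pattern, using simplified hypothetical names rather than mimalloc's:

    #include <stddef.h>

    typedef struct subproc_s { int id; } subproc_t;    // stand-in for mi_subproc_t

    static subproc_t  subproc_default;
    static subproc_t* current_subproc(void) { return &subproc_default; }  // stand-in for _mi_subproc()

    // internal variant: the caller decides which subprocess owns the reservation
    static int reserve_ex2(subproc_t* subproc, size_t size) {
      (void)subproc; (void)size;   // ...allocate OS memory and register the arena with `subproc`...
      return 0;
    }

    // public entry point: same signature as before, defaults to the caller's subprocess
    int reserve(size_t size) {
      return reserve_ex2(current_subproc(), size);
    }
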
diff --git a/src/init.c b/src/init.c
index a15a9c6c..177ca2bd 100644
--- a/src/init.c
+++ b/src/init.c
@@ -11,30 +11,31 @@ terms of the MIT license. A copy of the license can be found in the file
#include <string.h>  // memcpy, memset
#include <stdlib.h>  // atexit
-#define MI_MEMID_STATIC {{{NULL,0}}, MI_MEM_STATIC, true /* pinned */, true /* committed */, false /* zero */ }
+#define MI_MEMID_INIT(kind) {{{NULL,0}}, kind, true /* pinned */, true /* committed */, false /* zero */ }
+#define MI_MEMID_STATIC MI_MEMID_INIT(MI_MEM_STATIC)
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
- MI_ATOMIC_VAR_INIT(0), // xthread_id
- NULL, // free
- 0, // used
- 0, // capacity
- 0, // reserved capacity
- 0, // block size shift
- 0, // retire_expire
- NULL, // local_free
- MI_ATOMIC_VAR_INIT(0), // xthread_free
- MI_ATOMIC_VAR_INIT(0), // xflags
- 0, // block_size
- NULL, // page_start
- 0, // heap tag
- false, // is_zero
+ MI_ATOMIC_VAR_INIT(0), // xthread_id
+ NULL, // free
+ 0, // used
+ 0, // capacity
+ 0, // reserved capacity
+ 0, // block size shift
+ 0, // retire_expire
+ NULL, // local_free
+ MI_ATOMIC_VAR_INIT(0), // xthread_free
+ MI_ATOMIC_VAR_INIT(0), // xflags
+ 0, // block_size
+ NULL, // page_start
+ 0, // heap tag
+ false, // is_zero
#if (MI_PADDING || MI_ENCODE_FREELIST)
- { 0, 0 },
+ { 0, 0 }, // keys
#endif
- NULL, // xheap
- NULL, NULL, // next, prev
- MI_MEMID_STATIC // memid
+ NULL, // xheap
+ NULL, NULL, // next, prev
+ MI_MEMID_STATIC // memid
};
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
@@ -100,7 +101,7 @@ static mi_decl_cache_align mi_subproc_t subproc_main;
static mi_decl_cache_align mi_tld_t tld_empty = {
0, // thread_id
0, // thread_seq
- &subproc_main, // subproc
+ &subproc_main, // subproc
NULL, // heap_backing
NULL, // heaps list
0, // heartbeat
@@ -111,7 +112,7 @@ static mi_decl_cache_align mi_tld_t tld_empty = {
};
mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
- &tld_empty, // tld
+ &tld_empty, // tld
NULL, // exclusive_arena
0, // cookie
{ 0, 0 }, // keys
@@ -136,9 +137,9 @@ extern mi_heap_t heap_main;
static mi_decl_cache_align mi_tld_t tld_main = {
0, // thread_id
0, // thread_seq
- &subproc_main, // subproc
- &heap_main, // heap_backing
- &heap_main, // heaps list
+ &subproc_main, // subproc
+ &heap_main, // heap_backing
+ &heap_main, // heaps list
0, // heartbeat
false, // recurse
false, // is_in_threadpool
@@ -147,7 +148,7 @@ static mi_decl_cache_align mi_tld_t tld_main = {
};
mi_decl_cache_align mi_heap_t heap_main = {
- &tld_main, // thread local data
+ &tld_main, // thread local data
0, // initial cookie
0, // arena id
{ 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)