Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-06 23:39:31 +03:00)
Merge branch 'dev3' into dev3-bin

Commit e81ddcb786
12 changed files with 51 additions and 34 deletions
bin/minject.exe  (binary file not shown)
include/mimalloc.h

@@ -148,7 +148,6 @@ typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
 mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);
 
 mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
-mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
 mi_decl_export int mi_version(void) mi_attr_noexcept;
 mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
 mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
@@ -291,17 +290,26 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
 #endif
 
 
-// Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
-// Used for example for separate interpreter's in one process.
+// Experimental: allow sub-processes whose memory areas stay separated (and no reclamation between them)
+// Used for example for separate interpreters in one process.
 typedef void* mi_subproc_id_t;
 mi_decl_export mi_subproc_id_t mi_subproc_main(void);
 mi_decl_export mi_subproc_id_t mi_subproc_new(void);
 mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
 mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)
 
-// Experimental: visit abandoned heap areas (from threads that have been terminated)
+// Experimental: visit abandoned heap areas (that are not owned by a specific heap)
 mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
 
+// Experimental: objects followed by a guard page.
+// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
+// A seed of 0 uses a random start point. Only objects within the size bound are eligable for guard pages.
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
+mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max);
+
+// Experimental: communicate that the thread is part of a threadpool
+mi_decl_export void mi_thread_set_in_threadpool(void) mi_attr_noexcept;
+
 // Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread
 // to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will
 // fall back to `mi_heap_delete`.
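A minimal usage sketch of the experimental sub-process and guarded-object APIs declared above. The worker function, the sampling parameters, and the thread setup are illustrative assumptions, not part of this change; the guarded-object calls also assume a build with guarded objects enabled.

#include <mimalloc.h>
#include <pthread.h>
#include <stddef.h>

// Hypothetical worker: run one "interpreter" inside its own sub-process so
// its memory stays separated from other interpreters in the same process.
static void* interpreter_worker(void* arg) {
  mi_subproc_id_t subproc = (mi_subproc_id_t)arg;
  mi_subproc_add_current_thread(subproc);   // right after thread creation, before any allocation

  // Optionally enable guarded objects on this thread's default heap:
  // sample roughly 1 in 1000 allocations between 16 bytes and 16 KiB
  // (only effective in builds with guarded objects enabled).
  mi_heap_t* heap = mi_heap_get_default();
  mi_heap_guarded_set_sample_rate(heap, 1000, 0 /* 0 = random start point */);
  mi_heap_guarded_set_size_bound(heap, 16, 16*1024);

  void* p = mi_malloc(128);   // may get a trailing guard page if sampled
  mi_free(p);
  return NULL;
}

int main(void) {
  mi_subproc_id_t subproc = mi_subproc_new();
  pthread_t t;
  pthread_create(&t, NULL, &interpreter_worker, subproc);
  pthread_join(t, NULL);
  mi_subproc_delete(subproc);
  return 0;
}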
@@ -309,12 +317,8 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id);
 
 // deprecated
 mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
+mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
 
-// Experimental: objects followed by a guard page.
-// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
-// A seed of 0 uses a random start point. Only objects within the size bound are eligable for guard pages.
-mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
-mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max);
 
 
 // experimental
include/mimalloc/internal.h

@@ -127,6 +127,7 @@ mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
 size_t _mi_thread_seq_id(void) mi_attr_noexcept;
+mi_tld_t* _mi_thread_tld(void) mi_attr_noexcept;
 void _mi_heap_guarded_init(mi_heap_t* heap);
 mi_heap_t* _mi_heap_main_get(void);
 
 // os.c
 void _mi_os_init(void); // called from process init
include/mimalloc/prim.h

@@ -22,14 +22,14 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // OS memory configuration
 typedef struct mi_os_mem_config_s {
-  size_t page_size;             // default to 4KiB
-  size_t large_page_size;       // 0 if not supported, usually 2MiB (4MiB on Windows)
-  size_t alloc_granularity;     // smallest allocation size (usually 4KiB, on Windows 64KiB)
-  size_t physical_memory;       // physical memory size
-  size_t virtual_address_bits;  // usually 48 or 56 bits on 64-bit systems. (used to determine secure randomization)
-  bool   has_overcommit;        // can we reserve more memory than can be actually committed?
-  bool   has_partial_free;      // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc)
-  bool   has_virtual_reserve;   // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
+  size_t page_size;               // default to 4KiB
+  size_t large_page_size;         // 0 if not supported, usually 2MiB (4MiB on Windows)
+  size_t alloc_granularity;       // smallest allocation size (usually 4KiB, on Windows 64KiB)
+  size_t physical_memory_in_kib;  // physical memory size in KiB
+  size_t virtual_address_bits;    // usually 48 or 56 bits on 64-bit systems. (used to determine secure randomization)
+  bool   has_overcommit;          // can we reserve more memory than can be actually committed?
+  bool   has_partial_free;        // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc)
+  bool   has_virtual_reserve;     // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
 } mi_os_mem_config_t;
 
 // Initialize
@@ -125,7 +125,7 @@ bool _mi_prim_thread_is_in_threadpool(void);
 //-------------------------------------------------------------------
 // Access to TLS (thread local storage) slots.
 // We need fast access to both a unique thread id (in `free.c:mi_free`) and
-// to a thread-local heap pointer (in `alloc.c:mi_malloc`).
+// to a thread-local heap pointer (in `alloc.c:mi_malloc`).
 // To achieve this we use specialized code for various platforms.
 //-------------------------------------------------------------------
 
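As a rough mental model for the comment above (not mimalloc's actual implementation, which uses specialized per-platform TLS access), the portable fallback amounts to two plain thread-local variables; the names and types below are illustrative only:

#include <stdio.h>
#include <stddef.h>

// Illustrative stand-ins; these are NOT mimalloc's real types or variables.
typedef struct my_heap_s { int dummy; } my_heap_t;

// Generic portable fallback: one thread-local default-heap pointer and one
// thread-local id, read on every allocation/free fast path. The specialized
// per-platform code referred to above replaces exactly these two lookups.
static _Thread_local my_heap_t* tl_default_heap = NULL;
static _Thread_local size_t     tl_thread_id    = 0;

int main(void) {
  static my_heap_t heap = { 0 };
  tl_default_heap = &heap;
  tl_thread_id    = 1;
  printf("heap=%p id=%zu\n", (void*)tl_default_heap, tl_thread_id);
  return 0;
}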
src/arena-meta.c

@@ -25,9 +25,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_META_PAGE_SIZE MI_ARENA_SLICE_SIZE
 #define MI_META_PAGE_ALIGN MI_ARENA_SLICE_ALIGN
 
-#define MI_META_BLOCK_SIZE (128) // large enough such that META_MAX_SIZE > 4k (even on 32-bit)
+#define MI_META_BLOCK_SIZE (128) // large enough such that META_MAX_SIZE >= 4k (even on 32-bit)
 #define MI_META_BLOCK_ALIGN MI_META_BLOCK_SIZE
-#define MI_META_BLOCKS_PER_PAGE (MI_ARENA_SLICE_SIZE / MI_META_BLOCK_SIZE) // 1024
+#define MI_META_BLOCKS_PER_PAGE (MI_META_PAGE_SIZE / MI_META_BLOCK_SIZE) // 512
 #define MI_META_MAX_SIZE (MI_BCHUNK_SIZE * MI_META_BLOCK_SIZE)
 
 typedef struct mi_meta_page_s {
@@ -150,7 +150,7 @@ mi_decl_noinline void _mi_meta_free(void* p, size_t size, mi_memid_t memid) {
   const size_t block_idx = memid.mem.meta.block_index;
   mi_meta_page_t* mpage = (mi_meta_page_t*)memid.mem.meta.meta_page;
   mi_assert_internal(mi_meta_page_of_ptr(p,NULL) == mpage);
-  mi_assert_internal(block_idx + block_count < MI_META_BLOCKS_PER_PAGE);
+  mi_assert_internal(block_idx + block_count <= MI_META_BLOCKS_PER_PAGE);
   mi_assert_internal(mi_bbitmap_is_clearN(&mpage->blocks_free, block_idx, block_count));
   // we zero on free (and on the initial page allocation) so we don't need a "dirty" map
   _mi_memzero_aligned(mi_meta_block_start(mpage, block_idx), block_count*MI_META_BLOCK_SIZE);
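The relaxed assertion above fixes an off-by-one: freeing a run of blocks that ends exactly at the page boundary is valid. A standalone illustration, assuming 512 blocks per meta page as the updated comment suggests:

#include <assert.h>

#define BLOCKS_PER_PAGE 512   // illustrative; the real constant is MI_META_BLOCKS_PER_PAGE

int main(void) {
  // Freeing the last 4 blocks of a meta page:
  const int block_idx = 508, block_count = 4;
  // The old check rejected this valid range (508 + 4 == 512 is not < 512):
  //   assert(block_idx + block_count <  BLOCKS_PER_PAGE);
  // The corrected inclusive bound accepts it:
  assert(block_idx + block_count <= BLOCKS_PER_PAGE);
  return 0;
}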
src/init.c  (15 changed lines)
@@ -138,7 +138,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   MI_MEMID_STATIC
 };
 
-extern mi_heap_t heap_main;
+extern mi_decl_hidden mi_decl_cache_align mi_heap_t heap_main;
 
 static mi_decl_cache_align mi_tld_t tld_main = {
   0, // thread_id
@@ -266,7 +266,7 @@ static void mi_heap_main_init(void) {
   }
 }
 
-mi_heap_t* heap_main_get(void) {
+mi_heap_t* _mi_heap_main_get(void) {
   mi_heap_main_init();
   return &heap_main;
 }
@@ -602,6 +602,12 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) {
   _mi_prim_thread_associate_default_heap(heap);
 }
 
+void mi_thread_set_in_threadpool(void) mi_attr_noexcept {
+  mi_tld_t* tld = mi_tld();
+  if (tld!=NULL) {
+    tld->is_in_threadpool = true;
+  }
+}
 
 // --------------------------------------------------------
 // Run functions on process init/done, and thread init/done
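A hedged sketch of how a thread-pool worker might use the new hint; the pool structure below is hypothetical and only `mi_thread_set_in_threadpool` itself comes from this change:

#include <mimalloc.h>
#include <pthread.h>

// Hypothetical pool worker: mark the thread as part of a threadpool early on
// (this sets the tld->is_in_threadpool flag shown above), then allocate as usual.
static void* pool_worker(void* arg) {
  (void)arg;
  mi_thread_set_in_threadpool();
  void* buf = mi_malloc(256);
  mi_free(buf);
  return NULL;
}

int main(void) {
  pthread_t workers[4];
  for (int i = 0; i < 4; i++) pthread_create(&workers[i], NULL, &pool_worker, NULL);
  for (int i = 0; i < 4; i++) pthread_join(workers[i], NULL);
  return 0;
}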
@@ -613,6 +619,11 @@ bool mi_decl_noinline _mi_preloading(void) {
   return os_preloading;
 }
 
+// Returns true if mimalloc was redirected
+mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept {
+  return _mi_is_redirected();
+}
+
 // Called once by the process loader from `src/prim/prim.c`
 void _mi_process_load(void) {
   mi_heap_main_init();
src/os.c  (10 changed lines)
@@ -15,11 +15,11 @@ terms of the MIT license. A copy of the license can be found in the file
 /* -----------------------------------------------------------
   Initialization.
 ----------------------------------------------------------- */
-#ifndef MI_DEFAULT_PHYSICAL_MEMORY
+#ifndef MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB
 #if MI_INTPTR_SIZE < 8
-#define MI_DEFAULT_PHYSICAL_MEMORY 4*MI_GiB
+#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 4*MI_MiB // 4 GiB
 #else
-#define MI_DEFAULT_PHYSICAL_MEMORY 32*MI_GiB
+#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 32*MI_MiB // 32 GiB
 #endif
 #endif
 
@@ -27,8 +27,8 @@ static mi_os_mem_config_t mi_os_mem_config = {
   4096,  // page size
   0,     // large page size (usually 2MiB)
   4096,  // allocation granularity
-  MI_DEFAULT_PHYSICAL_MEMORY,
-  MI_MAX_VABITS,  // in `bits.h`
+  MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB,
+  MI_MAX_VABITS,  // in `bits.h`
   true,  // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
   false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
   true   // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
src/page-map.c

@@ -40,7 +40,7 @@ bool _mi_page_map_init(void) {
   }
 
   // Allocate the page map and commit bits
-  mi_page_map_max_address = (void*)(MI_PU(1) << vbits);
+  mi_page_map_max_address = (void*)(vbits >= MI_SIZE_BITS ? (SIZE_MAX - MI_ARENA_SLICE_SIZE + 1) : (MI_PU(1) << vbits));
   const size_t page_map_size = (MI_ZU(1) << (vbits - MI_ARENA_SLICE_SHIFT));
   const bool commit = (page_map_size <= 1*MI_MiB || mi_option_is_enabled(mi_option_pagemap_commit)); // _mi_os_has_overcommit(); // commit on-access on Linux systems?
   const size_t commit_bits = _mi_divide_up(page_map_size, MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT);
@@ -183,7 +183,7 @@ bool _mi_page_map_init(void) {
 
   // Allocate the page map and commit bits
   mi_assert(MI_MAX_VABITS >= vbits);
-  mi_page_map_max_address = (void*)(MI_PU(1) << vbits);
+  mi_page_map_max_address = (void*)(vbits >= MI_SIZE_BITS ? (SIZE_MAX - MI_ARENA_SLICE_SIZE + 1) : (MI_PU(1) << vbits));
   const size_t page_map_count = (MI_ZU(1) << (vbits - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT));
   mi_assert(page_map_count <= MI_PAGE_MAP_COUNT);
   const size_t os_page_size = _mi_os_page_size();
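The new ternary presumably guards against shifting by the full word width (undefined behaviour in C) when `vbits` equals `MI_SIZE_BITS`, e.g. on a 32-bit build, and clamps the maximum address instead. A standalone sketch under that assumption; the slice size and helper names are illustrative stand-ins:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

// Illustrative stand-ins for MI_PU(1), MI_SIZE_BITS and MI_ARENA_SLICE_SIZE.
#define PU(x)            ((uintptr_t)(x))
#define SIZE_BITS        (sizeof(uintptr_t) * 8)
#define ARENA_SLICE_SIZE ((uintptr_t)(64 * 1024))   // hypothetical slice size

static uintptr_t max_address(size_t vbits) {
  // Shifting by >= the bit-width of the type is undefined behaviour,
  // so clamp to the top of the address space instead of computing 1 << vbits.
  return (vbits >= SIZE_BITS ? (UINTPTR_MAX - ARENA_SLICE_SIZE + 1)
                             : (PU(1) << vbits));
}

int main(void) {
  printf("48 va-bits:  0x%" PRIxPTR "\n", max_address(48));
  printf("full width:  0x%" PRIxPTR "\n", max_address(SIZE_BITS));
  return 0;
}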
src/prim/unix/prim.c

@@ -143,8 +143,9 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
   config->alloc_granularity = (size_t)psize;
   #if defined(_SC_PHYS_PAGES)
   long pphys = sysconf(_SC_PHYS_PAGES);
-  if (pphys > 0 && (size_t)pphys < (SIZE_MAX/(size_t)psize)) {
-    config->physical_memory = (size_t)pphys * (size_t)psize;
+  const size_t psize_in_kib = (size_t)psize / MI_KiB;
+  if (psize_in_kib > 0 && pphys > 0 && (size_t)pphys <= (SIZE_MAX/psize_in_kib)) {
+    config->physical_memory_in_kib = (size_t)pphys * psize_in_kib;
   }
   #endif
 }
src/prim/windows/prim.c

@@ -173,8 +173,8 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
   if (pGetPhysicallyInstalledSystemMemory != NULL) {
     ULONGLONG memInKiB = 0;
     if ((*pGetPhysicallyInstalledSystemMemory)(&memInKiB)) {
-      if (memInKiB > 0 && memInKiB < (SIZE_MAX / MI_KiB)) {
-        config->physical_memory = (size_t)memInKiB * MI_KiB;
+      if (memInKiB > 0 && memInKiB <= SIZE_MAX) {
+        config->physical_memory_in_kib = (size_t)memInKiB;
       }
     }
   }