mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-06 19:38:41 +03:00)

merge from dev-slice

commit 0fa99d41fc: 27 changed files with 628 additions and 268 deletions
@ -91,6 +91,7 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t*    _mi_heap_main_get(void);   // statically allocated main backing heap
mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
void          _mi_heap_guarded_init(mi_heap_t* heap);

// os.c
void          _mi_os_init(void);         // called from process init
@ -641,16 +642,40 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
  page->flags.x.has_aligned = has_aligned;
}

#if MI_DEBUG_GUARDED
static inline bool mi_page_has_guarded(const mi_page_t* page) {
  return page->flags.x.has_guarded;
}

/* -------------------------------------------------------------------
  Guarded objects
------------------------------------------------------------------- */
#if MI_GUARDED
static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void* p) {
  const ptrdiff_t offset = (uint8_t*)p - (uint8_t*)block;
  return (offset >= (ptrdiff_t)(sizeof(mi_block_t)) && block->next == MI_BLOCK_TAG_GUARDED);
}

static inline void mi_page_set_has_guarded(mi_page_t* page, bool has_guarded) {
  page->flags.x.has_guarded = has_guarded;
}

static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
  // this code is written to result in fast assembly as it is on the hot path for allocation
  const size_t count = heap->guarded_sample_count - 1;  // if the rate was 0, this will underflow and count down for a long time..
  if mi_likely(count != 0) {
    // no sample
    heap->guarded_sample_count = count;
    return false;
  }
  else if (size >= heap->guarded_size_min && size <= heap->guarded_size_max) {
    // use guarded allocation
    heap->guarded_sample_count = heap->guarded_sample_rate;  // reset
    return (heap->guarded_sample_rate != 0);
  }
  else {
    // failed the size criteria, rewind the count (but don't write to an empty heap)
    if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
    return false;
  }
}

mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;

#endif


/* -------------------------------------------------------------------
  Encoding/Decoding the free list next pointers
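The sampler above is the interesting part: `guarded_sample_count` counts down once per allocation, and only when it reaches zero (and the size fits the min/max window) does a guarded allocation happen. Below is a minimal standalone model of that countdown, useful for checking the edge cases; the `sim_*` names and the driver loop are illustrative, not mimalloc code.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// Standalone model of the countdown in mi_heap_malloc_use_guarded above.
// The fields mirror the diff, but this is a simulation, not mimalloc's heap.
typedef struct sim_heap_s {
  size_t guarded_size_min;
  size_t guarded_size_max;
  size_t guarded_sample_rate;   // 0 disables guarded pages
  size_t guarded_sample_count;  // counts down to 0
} sim_heap_t;

static bool sim_use_guarded(sim_heap_t* heap, size_t size) {
  const size_t count = heap->guarded_sample_count - 1;  // underflows when rate is 0
  if (count != 0) {
    heap->guarded_sample_count = count;                 // no sample this time
    return false;
  }
  else if (size >= heap->guarded_size_min && size <= heap->guarded_size_max) {
    heap->guarded_sample_count = heap->guarded_sample_rate;  // reset the window
    return (heap->guarded_sample_rate != 0);
  }
  else {
    // size criteria failed: rewind so the next allocation re-checks
    if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
    return false;
  }
}

int main(void) {
  sim_heap_t heap = { 16, 1024, 100, 100 };  // sample 1 in 100 allocations
  size_t guarded = 0;
  for (size_t i = 0; i < 10000; i++) {
    if (sim_use_guarded(&heap, 64)) guarded++;
  }
  printf("guarded %zu of 10000 allocations\n", guarded);  // expect 100
  return 0;
}
```

Note the miss branch: when the countdown expires on an allocation outside the size window, the count is rewound to 1 so the very next allocation re-checks the size criteria instead of waiting out a full sampling period. And when the rate is 0, the unsigned underflow leaves a huge count, so the fast path stays a single decrement-and-compare.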
@ -25,6 +25,8 @@ typedef struct mi_os_mem_config_s {
  size_t page_size;            // default to 4KiB
  size_t large_page_size;      // 0 if not supported, usually 2MiB (4MiB on Windows)
  size_t alloc_granularity;    // smallest allocation size (usually 4KiB, on Windows 64KiB)
  size_t physical_memory;      // physical memory size
  size_t virtual_address_bits; // usually 48 or 56 bits on 64-bit systems (used to determine secure randomization)
  bool   has_overcommit;       // can we reserve more memory than can actually be committed?
  bool   has_partial_free;     // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc)
  bool   has_virtual_reserve;  // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)

@ -41,9 +43,10 @@ int _mi_prim_free(void* addr, size_t size );
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
// which will later be committed explicitly using `_mi_prim_commit`.
// `is_zero` is set to true if the memory was zero-initialized (as on most OS's).
// The `hint_addr` address is either `NULL` or a preferred allocation address, but can be ignored.
// pre: !commit => !allow_large
//      try_alignment >= _mi_os_page_size() and a power of 2
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);

// Commit memory. Returns error code or 0 on success.
// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
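The `_mi_prim_alloc` change (old signature followed by the new one in the hunk above) threads a preferred address into the OS primitive. Here is a hedged POSIX-style sketch of how a backend could honor `hint_addr` while staying free to ignore it; `my_prim_alloc` is an illustration of the contract, not mimalloc's actual `prim.c` implementation, and alignment retries and large-page support are omitted.

```c
#include <sys/mman.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

// Illustrative sketch only: `hint_addr` is passed as the first argument to
// mmap without MAP_FIXED, so the kernel treats it as a hint and may place
// the mapping elsewhere -- matching the "can be ignored" contract above.
int my_prim_alloc(void* hint_addr, size_t size, size_t try_alignment,
                  bool commit, bool allow_large,
                  bool* is_large, bool* is_zero, void** addr) {
  (void)try_alignment; (void)allow_large;   // alignment/large pages omitted here
  // PROT_NONE reserves the range without access; commit grants read/write
  const int prot = (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE);
  void* p = mmap(hint_addr, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return errno;        // error code on failure
  *is_large = false;
  *is_zero  = true;                         // anonymous mmap memory is zeroed
  *addr     = p;
  return 0;                                 // 0 on success, per the contract
}
```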
@ -75,8 +75,8 @@ terms of the MIT license. A copy of the license can be found in the file

// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
// Padding should be disabled when using guard pages
// #define MI_DEBUG_GUARDED 1
#if defined(MI_DEBUG_GUARDED)
// #define MI_GUARDED 1
#if defined(MI_GUARDED)
#define MI_PADDING 0
#endif
@ -244,6 +244,13 @@ typedef struct mi_block_s {
  mi_encoded_t next;
} mi_block_t;

#if MI_GUARDED
// we always align guarded pointers in a block at an offset
// the block `next` field is then used as a tag to distinguish regular offset aligned blocks from guarded ones
#define MI_BLOCK_TAG_ALIGNED  ((mi_encoded_t)(0))
#define MI_BLOCK_TAG_GUARDED  (~MI_BLOCK_TAG_ALIGNED)
#endif


// The delayed flags are used for efficient multi-threaded free-ing
typedef enum mi_delayed_e {
@ -262,7 +269,6 @@ typedef union mi_page_flags_s {
  struct {
    uint8_t in_full : 1;
    uint8_t has_aligned : 1;
    uint8_t has_guarded : 1;  // only used with MI_DEBUG_GUARDED
  } x;
} mi_page_flags_t;
#else

@ -272,7 +278,6 @@ typedef union mi_page_flags_s {
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
    uint8_t has_guarded;  // only used with MI_DEBUG_GUARDED
  } x;
} mi_page_flags_t;
#endif
@ -556,6 +561,13 @@ struct mi_heap_s {
  mi_heap_t* next;   // list of heaps per thread
  bool no_reclaim;   // `true` if this heap should not reclaim abandoned pages
  uint8_t tag;       // custom tag, can be used to separate heaps by object type
  #if MI_GUARDED
  size_t guarded_size_min;      // minimal size for guarded objects
  size_t guarded_size_max;      // maximal size for guarded objects
  size_t guarded_sample_rate;   // sample rate (set to 0 to disable guarded pages)
  size_t guarded_sample_seed;   // starting sample count
  size_t guarded_sample_count;  // current sample count (counting down to 0)
  #endif
  mi_page_t* pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size
  mi_page_queue_t pages[MI_BIN_FULL + 1];         // queue of pages for each size class (or "bin")
};
@ -649,6 +661,7 @@ typedef struct mi_stats_s {
  mi_stat_counter_t arena_count;
  mi_stat_counter_t arena_crossover_count;
  mi_stat_counter_t arena_rollback_count;
  mi_stat_counter_t guarded_alloc_count;
  #if MI_STAT>1
  mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
  #endif
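Finally, the new `guarded_alloc_count` counter gives the sampler a visible effect in the statistics. A minimal sketch of that bookkeeping with a reduced counter type; mimalloc's real `mi_stat_counter_t` layout and its internal increment helpers are not reproduced here.

```c
#include <stddef.h>
#include <stdio.h>

// Reduced stand-in for mi_stat_counter_t (the real one carries more fields
// and is updated through mimalloc's internal stat helpers).
typedef struct stat_counter_s { size_t count; } stat_counter_t;

typedef struct stats_s {
  stat_counter_t guarded_alloc_count;  // mirrors the field added in this hunk
} stats_t;

// Hypothetical call site: bump the counter whenever the sampler (see
// mi_heap_malloc_use_guarded above) selects a guarded allocation.
static void on_guarded_alloc(stats_t* stats) {
  stats->guarded_alloc_count.count += 1;
}

int main(void) {
  stats_t stats = { { 0 } };
  on_guarded_alloc(&stats);
  printf("guarded allocations: %zu\n", stats.guarded_alloc_count.count);
  return 0;
}
```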