Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-07 03:48:42 +03:00)

commit 56c0a8025a
merge from dev

9 changed files with 78 additions and 68 deletions
@@ -245,11 +245,13 @@ extern "C" {
   int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); }

   // `aligned_alloc` is only available when __USE_ISOC11 is defined.
+  // Note: it seems __USE_ISOC11 is not defined in musl (and perhaps other libc's) so we only check
+  // for it if using glibc.
   // Note: Conda has a custom glibc where `aligned_alloc` is declared `static inline` and we cannot
   // override it, but both _ISOC11_SOURCE and __USE_ISOC11 are undefined in Conda GCC7 or GCC9.
   // Fortunately, in the case where `aligned_alloc` is declared as `static inline` it
   // uses internally `memalign`, `posix_memalign`, or `_aligned_malloc` so we can avoid overriding it ourselves.
-  #if __USE_ISOC11
+  #if !defined(__GLIBC__) || __USE_ISOC11
   void* aligned_alloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
   #endif
 #endif
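For context, a minimal sketch (not part of this commit) of what the gate above means for callers: on glibc the `aligned_alloc` wrapper is only compiled when `__USE_ISOC11` is defined, while the new `!defined(__GLIBC__)` branch enables it on musl and other libcs. Once the override is linked in, both calls below resolve to the `mi_` equivalents:

#include <stdlib.h>
#include <stdio.h>

int main(void) {
  // with the override active, these route to mi_aligned_alloc / mi_posix_memalign
  void* a = aligned_alloc(64, 1024);       // size must be a multiple of the alignment
  void* b = NULL;
  int rc = posix_memalign(&b, 64, 1024);   // alignment must be a power of two >= sizeof(void*)
  printf("a=%p b=%p rc=%d\n", a, b, rc);
  free(a);
  free(b);
  return 0;
}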
src/arena.c: 22 changed lines
@@ -237,33 +237,33 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   }

   // set the dirty bits (todo: no need for an atomic op here?)
-  if (arena->memid.was_zero && arena->blocks_dirty != NULL) {
-    memid->was_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+  if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
+    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   }

   // set commit state
   if (arena->blocks_committed == NULL) {
     // always committed
-    memid->was_committed = true;
+    memid->initially_committed = true;
   }
   else if (commit) {
     // commit requested, but the range may not be committed as a whole: ensure it is committed now
-    memid->was_committed = true;
+    memid->initially_committed = true;
     bool any_uncommitted;
     _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
     if (any_uncommitted) {
       bool commit_zero = false;
       if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
-        memid->was_committed = false;
+        memid->initially_committed = false;
       }
       else {
-        if (commit_zero) { memid->was_zero = true; }
+        if (commit_zero) { memid->initially_zero = true; }
       }
     }
   }
   else {
     // no need to commit, but check if already fully committed
-    memid->was_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
   }

   return p;
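The rename from `was_*` to `initially_*` is purely about naming: the flags record the state of the memory at the moment it was obtained, not a guarantee that still holds later. A minimal sketch with a hypothetical struct (not mimalloc's actual `mi_memid_t` definition) of how such flags are typically consumed:

#include <stdbool.h>
#include <string.h>

typedef struct sketch_memid_s {
  bool initially_committed;  // memory was committed when it was returned
  bool initially_zero;       // memory was zero-filled when it was returned
  bool is_pinned;            // cannot be decommitted or reset (e.g. large OS pages)
} sketch_memid_t;

static void sketch_init(void* p, size_t size, sketch_memid_t memid) {
  if (!memid.initially_zero) {
    memset(p, 0, size);      // only pay for zeroing when the memory was not already zero
  }
}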
@@ -752,7 +752,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
   if (size < MI_ARENA_BLOCK_SIZE) return false;

   if (is_large) {
-    mi_assert_internal(memid.was_committed && memid.is_pinned);
+    mi_assert_internal(memid.initially_committed && memid.is_pinned);
   }

   const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
@@ -781,7 +781,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
   arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
   arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap
   // initialize committed bitmap?
-  if (arena->blocks_committed != NULL && arena->memid.was_committed) {
+  if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
     memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
   }

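From the offsets above, the four bitmaps live in one contiguous `blocks_inuse` allocation; a sketch of the assumed layout, with `fields` bitmap words per map:

// [0*fields .. 1*fields)  in-use bitmap
// [1*fields .. 2*fields)  dirty bitmap
// [2*fields .. 3*fields)  committed bitmap  (NULL when the arena memory is pinned)
// [3*fields .. 4*fields)  purge bitmap      (NULL when the arena memory is pinned)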
@@ -799,8 +799,8 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int

 bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
   mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
-  memid.was_committed = is_committed;
-  memid.was_zero = is_zero;
+  memid.initially_committed = is_committed;
+  memid.initially_zero = is_zero;
   memid.is_pinned = is_large;
   return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
 }
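A minimal sketch (assumed parameter choices, POSIX mmap) of how the public `mi_manage_os_memory_ex` entry point shown above can hand an externally allocated region to mimalloc as an arena. The region has to be at least `MI_ARENA_BLOCK_SIZE` large, otherwise registration fails per the size check earlier in this file:

#include <mimalloc.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static bool register_region(size_t size) {
  void* start = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (start == MAP_FAILED) return false;
  mi_arena_id_t arena_id;
  // anonymous read/write mappings are accessible (committed) and zero-filled;
  // -1 means no preferred NUMA node
  return mi_manage_os_memory_ex(start, size, /*is_committed*/ true, /*is_large*/ false,
                                /*is_zero*/ true, /*numa_node*/ -1, /*exclusive*/ false,
                                &arena_id);
}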
@@ -244,7 +244,7 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
     }
     if (td != NULL) {
       td->memid = memid;
-      is_zero = memid.was_zero;
+      is_zero = memid.initially_zero;
     }
   }

@@ -387,9 +387,11 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
 }

 int _mi_prim_reset(void* start, size_t size) {
-  // We always use MADV_DONTNEED if possible even if it may be a bit more expensive as MADV_FREE
-  // as this guarantees that we see the actual rss reflected in tools like `top`.
-  #if 0 && defined(MADV_FREE)
+  // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it
+  // will not reduce the `rss` stats in tools like `top` even though the memory is available
+  // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by
+  // default `MADV_DONTNEED` is used though.
+  #if defined(MADV_FREE)
   static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
   int oadvice = (int)mi_atomic_load_relaxed(&advice);
   int err;
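As a sketch of the pattern behind such a reset primitive (an assumed standalone helper, not the mimalloc code itself): prefer `MADV_FREE` where the kernel supports it, and fall back to `MADV_DONTNEED`, which also makes the released memory show up as a drop in the `rss` reported by tools like `top`:

#include <errno.h>
#include <stddef.h>
#include <sys/mman.h>

static int sketch_reset(void* start, size_t size) {
#if defined(MADV_FREE)
  if (madvise(start, size, MADV_FREE) == 0) return 0;
  // e.g. Linux kernels before 4.5 report EINVAL for MADV_FREE; retry below
#endif
  return (madvise(start, size, MADV_DONTNEED) == 0 ? 0 : errno);
}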
@@ -285,9 +285,9 @@ int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) {
 int _mi_prim_reset(void* addr, size_t size) {
   void* p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
   mi_assert_internal(p == addr);
-  #if 1
+  #if 0
   if (p != NULL) {
-    VirtualUnlock(addr,size); // VirtualUnlock after MEM_RESET removes the memory from the working set
+    VirtualUnlock(addr,size); // VirtualUnlock after MEM_RESET removes the memory directly from the working set
   }
   #endif
   return (p != NULL ? 0 : (int)GetLastError());
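For reference, a minimal sketch (assumed demo code, not part of the commit) of what `MEM_RESET` means for a caller: the pages stay reserved and committed, but their contents may be discarded by the OS, so the region is undefined until it is written again. The `VirtualUnlock` call would evict the pages from the working set immediately; this change disables it via `#if 0`:

#include <windows.h>
#include <string.h>

static void sketch_reset_demo(void* addr, size_t size) {
  void* p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
  if (p != NULL) {
    // optional: VirtualUnlock(addr, size) would drop the pages from the working set right away
    memset(addr, 0xCD, size);   // writing gives the pages defined content again; old data may be gone
  }
}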
@@ -824,7 +824,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment

   // ensure metadata part of the segment is committed
   mi_commit_mask_t commit_mask;
-  if (memid.was_committed) {
+  if (memid.initially_committed) {
     mi_commit_mask_create_full(&commit_mask);
   }
   else {
@@ -878,7 +878,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
   if (segment == NULL) return NULL;

   // zero the segment info? -- not always needed as it may be zero initialized from the OS
-  if (!segment->memid.was_zero) {
+  if (!segment->memid.initially_zero) {
     ptrdiff_t ofs = offsetof(mi_segment_t, next);
     size_t prefix = offsetof(mi_segment_t, slices) - ofs;
     size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
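A note on the arithmetic above, as a sketch: the zeroing starts at `offsetof(mi_segment_t, next)` rather than at the start of the struct, assuming (as the surrounding code suggests) that fields placed before `next`, such as the `memid` stored earlier, are already initialized and must survive. Only the tail of the segment header plus the variable-length slice array is cleared, and only when the OS did not already hand back zeroed memory. The computed `zsize` would then feed a plain clearing call along these lines:

// sketch only: clear from the `next` field through the slice array
memset((char*)segment + ofs, 0, zsize);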