mirror of https://github.com/microsoft/mimalloc.git (synced 2025-08-24 00:04:48 +03:00)

Compare commits: 10 commits (a067bd42ae ... 6cfc8ad9d0)
Commits in this range, newest first:

- 6cfc8ad9d0
- d8b7456a1f
- d7d6c3b5c3
- e19f08aefe
- 57830a4b25
- eeab42be46
- 2f0540c4f9
- 444902a4c8
- d7431402c5
- 2696627aaf
5 changed files with 62 additions and 44 deletions
@@ -59,7 +59,7 @@ int _mi_prim_commit(void* addr, size_t size, bool* is_zero);

 // pre: needs_recommit != NULL
 int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);

-// Reset memory. The range keeps being accessible but the content might be reset.
+// Reset memory. The range keeps being accessible but the content might be reset to zero at any moment.
 // Returns error code or 0 on success.
 int _mi_prim_reset(void* addr, size_t size);
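The reworded comment tightens the `_mi_prim_reset` contract: the range stays mapped and readable, but the OS may swap its contents for zero pages at any moment, whereas `_mi_prim_decommit` reports through `needs_recommit` whether the caller must commit again before reuse. As a rough illustration of the difference (a sketch only, with hypothetical `sketch_` names, not the upstream implementation), a Unix port could back both with `madvise`:

```c
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

// sketch: keep the range accessible but let the kernel reclaim the pages;
// later reads may observe the old bytes or fresh zero pages
static int sketch_prim_reset(void* addr, size_t size) {
  return (madvise(addr, size, MADV_DONTNEED) == 0 ? 0 : errno);
}

// sketch: decommit, and tell the caller whether an explicit recommit
// is required before the range may be touched again
static int sketch_prim_decommit(void* addr, size_t size, bool* needs_recommit) {
  *needs_recommit = false;  // with MADV_DONTNEED the mapping stays usable
  return (madvise(addr, size, MADV_DONTNEED) == 0 ? 0 : errno);
}
```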
src/arena.c (11 changes)
@@ -200,9 +200,11 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
       // now actually commit
       bool commit_zero = false;
       if (!mi_arena_commit(arena, p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed_count))) {
-        memid->initially_committed = false;
+        // if the commit fails, we roll back
+        _mi_arenas_free( p, mi_size_of_slices(slice_count), *memid); // this will uncommit as well
+        return NULL;
       }
       else {
         // committed
         if (commit_zero) { memid->initially_zero = true; }
         #if MI_DEBUG > 1
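The behavioral change: a failed commit no longer escapes with `initially_committed = false`; the claimed slices are handed back via `_mi_arenas_free` (which also uncommits) and the allocation fails cleanly. The shape of the pattern, in a self-contained sketch using raw `mmap`/`mprotect` instead of mimalloc's arena primitives:

```c
#include <stddef.h>
#include <sys/mman.h>

// sketch: reserve address space with PROT_NONE, "commit" by enabling
// access, and roll the reservation back if the commit step fails,
// so a half-committed range is never returned to the caller
static void* alloc_committed(size_t size) {
  void* p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (p == MAP_FAILED) return NULL;
  if (mprotect(p, size, PROT_READ | PROT_WRITE) != 0) {
    munmap(p, size);  // roll back, analogous to the _mi_arenas_free call
    return NULL;
  }
  return p;
}
```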
@@ -214,7 +216,6 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
       }
       #endif
     }
   }
   else {
     // already fully committed.
     _mi_os_reuse(p, mi_size_of_slices(slice_count));
@@ -233,8 +234,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(
     }
   }
   else {
-    // no need to commit, but check if already fully committed
-    // commit requested, but the range may not be committed as a whole: ensure it is committed now
+    // no need to commit, but check if it is already fully committed
     memid->initially_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count);
     if (!memid->initially_committed) {
       // partly committed.. adjust stats
@@ -247,6 +247,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(

   mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
   if (commit) { mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); }
+  if (commit) { mi_assert_internal(memid->initially_committed); }
   mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count));

   return p;
src/os.c (41 changes)
@@ -188,6 +188,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
   if (mi_memkind_is_os(memid.memkind)) {
     size_t csize = memid.mem.os.size;
+    if (csize==0) { csize = _mi_os_good_alloc_size(size); }
     mi_assert_internal(csize >= size);
     size_t commit_size = (still_committed ? csize : 0);
     void* base = addr;
     // different base? (due to alignment)
@@ -351,9 +352,11 @@ void* _mi_os_alloc(size_t size, mi_memid_t* memid) {
   bool os_is_large = false;
   bool os_is_zero = false;
   void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero);
-  if (p != NULL) {
+  if (p == NULL) return NULL;
+
   *memid = _mi_memid_create_os(p, size, true, os_is_zero, os_is_large);
-  }
+  mi_assert_internal(memid->mem.os.size >= size);
+  mi_assert_internal(memid->initially_committed);
   return p;
 }
@@ -369,24 +372,40 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large
   bool os_is_zero = false;
   void* os_base = NULL;
   void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base );
-  if (p != NULL) {
+  if (p == NULL) return NULL;
+
   *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large);
   memid->mem.os.base = os_base;
   memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned?
+
+  mi_assert_internal(memid->mem.os.size >= size);
+  mi_assert_internal(_mi_is_aligned(p,alignment));
+  if (commit) { mi_assert_internal(memid->initially_committed); }
   return p;
 }

+mi_decl_nodiscard static void* mi_os_ensure_zero(void* p, size_t size, mi_memid_t* memid) {
+  if (p==NULL || size==0) return p;
+  // ensure committed
+  if (!memid->initially_committed) {
+    bool is_zero = false;
+    if (!_mi_os_commit(p, size, &is_zero)) {
+      _mi_os_free(p, size, *memid);
+      return NULL;
+    }
+    memid->initially_committed = true;
+  }
+  // ensure zero'd
+  if (memid->initially_zero) return p;
+  _mi_memzero_aligned(p,size);
+  memid->initially_zero = true;
+  return p;
+}
+
 void* _mi_os_zalloc(size_t size, mi_memid_t* memid) {
   void* p = _mi_os_alloc(size,memid);
   if (p == NULL) return NULL;
-
-  // zero the OS memory if needed
-  if (!memid->initially_zero) {
-    _mi_memzero_aligned(p, size);
-    memid->initially_zero = true;
-  }
-  return p;
+  return mi_os_ensure_zero(p, size, memid);
 }

 /* -----------------------------------------------------------
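`_mi_os_zalloc` now funnels through one helper with a clear postcondition: on success the memory is committed and zeroed (with `initially_zero` recorded so later zero-initialization can be skipped); on failure the memory is freed rather than returned half-initialized. The same idea in a standalone sketch (hypothetical `mem_state_t`/`ensure_zero` names, POSIX calls in place of the `_mi_os_*` layer):

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

typedef struct { bool committed; bool zeroed; } mem_state_t;

// sketch: guarantee committed-and-zeroed memory, or NULL and no leak
static void* ensure_zero(void* p, size_t size, mem_state_t* st) {
  if (p == NULL || size == 0) return p;
  // ensure committed: make the pages accessible again
  if (!st->committed) {
    if (mprotect(p, size, PROT_READ | PROT_WRITE) != 0) {
      munmap(p, size);  // free on failure instead of leaking a dead range
      return NULL;
    }
    st->committed = true;
  }
  // ensure zeroed, and record it so callers do not zero twice
  if (!st->zeroed) {
    memset(p, 0, size);
    st->zeroed = true;
  }
  return p;
}
```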
@@ -297,14 +297,11 @@ static mi_page_t** mi_page_map_ensure_submap_at(size_t idx) {
     mi_memid_t memid;
     mi_page_t** expect = sub;
     const size_t submap_size = MI_PAGE_MAP_SUB_SIZE;
-    sub = (mi_page_t**)_mi_os_alloc(submap_size, &memid);
+    sub = (mi_page_t**)_mi_os_zalloc(submap_size, &memid);
     if (sub == NULL) {
       _mi_error_message(EFAULT, "internal error: unable to extend the page map\n");
       return NULL;
     }
-    if (!memid.initially_zero) {
-      _mi_memzero_aligned(sub, submap_size);
-    }
     if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, &_mi_page_map[idx], &expect, sub)) {
       // another thread already allocated it.. free and continue
       _mi_os_free(sub, submap_size, memid);
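Switching to `_mi_os_zalloc` removes the manual `initially_zero` check; what remains is the lock-free publication: each racing thread zero-allocates a candidate submap, tries to install it with a strong CAS, and the losers free their copy and adopt the winner's. The pattern in self-contained C11 (generic names; mimalloc uses its own `mi_atomic_*` wrappers over the page-map slot):

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

// sketch: lazily create a shared table slot without locks; CAS losers
// free their block and use the winner's instead
static void* ensure_slot(_Atomic(void*)* slot, size_t size) {
  void* cur = atomic_load_explicit(slot, memory_order_acquire);
  if (cur != NULL) return cur;            // fast path: already published
  void* fresh = calloc(1, size);          // zeroed, like _mi_os_zalloc
  if (fresh == NULL) return NULL;
  void* expected = NULL;
  if (atomic_compare_exchange_strong_explicit(slot, &expected, fresh,
        memory_order_acq_rel, memory_order_acquire)) {
    return fresh;                         // we published our block
  }
  free(fresh);                            // another thread won the race
  return expected;                        // CAS stored the winner here
}
```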
@@ -435,6 +435,7 @@ int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
 }

 int _mi_prim_reuse(void* start, size_t size) {
+  MI_UNUSED(start); MI_UNUSED(size);
   #if defined(__APPLE__) && defined(MADV_FREE_REUSE)
   return unix_madvise(start, size, MADV_FREE_REUSE);
   #endif
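The added `MI_UNUSED` markers keep non-Apple builds warning-free, since there the `#if` block compiles away and the parameters go unreferenced. On Darwin, `MADV_FREE_REUSE` is the accounting counterpart of the `MADV_FREE_REUSABLE` used in `_mi_prim_decommit` below: it marks the pages as in use again so they count toward the task's footprint, which is why `_mi_os_reuse` is called on the already-committed path in the arena.c hunk above.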
@@ -443,18 +444,17 @@ int _mi_prim_reuse(void* start, size_t size) {

 int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
   int err = 0;
-  #if !MI_DEBUG && MI_SECURE<=2
-  *needs_recommit = false;
+  #if defined(__APPLE__) && defined(MADV_FREE_REUSABLE)
+  // decommit on macOS: use MADV_FREE_REUSABLE as it does immediate rss accounting (issue #1097)
+  err = unix_madvise(start, size, MADV_FREE_REUSABLE);
+  if (err) { err = unix_madvise(start, size, MADV_DONTNEED); }
+  #else
   // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
   err = unix_madvise(start, size, MADV_DONTNEED);
+  #endif
+  #if !MI_DEBUG && MI_SECURE<=2
+  *needs_recommit = false;
   #else
   // note: don't use MADV_FREE_REUSABLE as the range may contain protected areas
   err = unix_madvise(start, size, MADV_DONTNEED);
   *needs_recommit = true;
   mprotect(start, size, PROT_NONE);
   #endif
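The issue #1097 reference is about Darwin's memory accounting: after a plain `MADV_DONTNEED`/`MADV_FREE`, tools that read the task footprint may keep reporting the pages as resident, while `MADV_FREE_REUSABLE` drops them from the accounting immediately, which is what the commit comment means by "immediate rss accounting". A throwaway macOS-only probe of that effect (my own sketch, not part of mimalloc; the `mach` task-info calls are the real API):

```c
// cc -O2 rss_probe.c && ./a.out   (macOS only)
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#if defined(__APPLE__) && defined(MADV_FREE_REUSABLE)
#include <mach/mach.h>

static size_t rss_bytes(void) {
  struct mach_task_basic_info info;
  mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
  task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &count);
  return (size_t)info.resident_size;
}

int main(void) {
  const size_t n = 64u << 20;  // 64 MiB
  char* p = mmap(NULL, n, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
  memset(p, 1, n);             // fault every page in
  printf("rss after touch:    %zu MiB\n", rss_bytes() >> 20);
  madvise(p, n, MADV_FREE_REUSABLE);   // decommit with immediate accounting
  printf("rss after reusable: %zu MiB\n", rss_bytes() >> 20);
  munmap(p, n);
  return 0;
}
#else
int main(void) { return 0; }   // not macOS: nothing to demonstrate
#endif
```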
@@ -470,8 +470,9 @@ int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {

 int _mi_prim_reset(void* start, size_t size) {
   int err = 0;
-  #if defined(__APPLE__) && defined(MADV_FREE_REUSABLE)
-  // on macOS we try to use MADV_FREE_REUSABLE as it seems the fastest
+
+  // on macOS can use MADV_FREE_REUSABLE (but we disable this for now as it seems slower)
+  #if 0 && defined(__APPLE__) && defined(MADV_FREE_REUSABLE)
   err = unix_madvise(start, size, MADV_FREE_REUSABLE);
   if (err==0) return 0;
   // fall through