Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-07-01)
add _mi_os_zalloc
Commit 2f0540c4f9 (parent d7431402c5)
6 changed files with 51 additions and 34 deletions
@@ -154,6 +154,7 @@ void _mi_heap_guarded_init(mi_heap_t* heap);
 // os.c
 void _mi_os_init(void); // called from process init
 void* _mi_os_alloc(size_t size, mi_memid_t* memid);
+void* _mi_os_zalloc(size_t size, mi_memid_t* memid);
 void _mi_os_free(void* p, size_t size, mi_memid_t memid);
 void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
 
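Note: `_mi_os_zalloc` returns zero-initialized, committed OS memory and reports its provenance through the `mi_memid_t` out-parameter, just like `_mi_os_alloc`. A minimal usage sketch (not part of this commit; `my_part_t` is a placeholder type), mirroring the call sites updated later in this diff:

  // sketch: allocate zeroed OS memory and keep the memid for the eventual free
  mi_memid_t memid;
  my_part_t* part = (my_part_t*)_mi_os_zalloc(sizeof(my_part_t), &memid);
  if (part == NULL) return NULL;        // out of memory
  part->memid = memid;                  // remember provenance
  // ... use part; all bytes start out as zero ...
  _mi_os_free(part, sizeof(my_part_t), part->memid);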
@@ -59,7 +59,7 @@ int _mi_prim_commit(void* addr, size_t size, bool* is_zero);
 // pre: needs_recommit != NULL
 int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
 
-// Reset memory. The range keeps being accessible but the content might be reset.
+// Reset memory. The range keeps being accessible but the content might be reset to zero at any moment.
 // Returns error code or 0 on success.
 int _mi_prim_reset(void* addr, size_t size);
 
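The reworded comment matches how reset typically behaves on POSIX systems: the pages stay mapped, but the kernel may reclaim them lazily and later reads observe zeros. A minimal sketch, assuming an madvise(MADV_FREE) based implementation (the real per-OS prim layer differs and also has fallbacks such as MADV_DONTNEED):

  #include <errno.h>
  #include <sys/mman.h>   // madvise, MADV_FREE

  // Sketch only: the range remains accessible, but its contents may be
  // dropped (and read back as zero) at any moment until written again.
  static int prim_reset_sketch(void* addr, size_t size) {
    return (madvise(addr, size, MADV_FREE) == 0 ? 0 : errno);
  }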
@@ -188,14 +188,9 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
   if (p != NULL) return p;
 
   // or fall back to the OS
-  p = _mi_os_alloc(size, memid);
+  p = _mi_os_zalloc(size, memid);
   if (p == NULL) return NULL;
 
-  // zero the OS memory if needed
-  if (!memid->initially_zero) {
-    _mi_memzero_aligned(p, size);
-    memid->initially_zero = true;
-  }
   return p;
 }
 
src/init.c (28 changed lines)

@@ -298,7 +298,6 @@ static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];
 
 static mi_thread_data_t* mi_thread_data_zalloc(void) {
   // try to find thread metadata in the cache
-  bool is_zero = false;
   mi_thread_data_t* td = NULL;
   for (int i = 0; i < TD_CACHE_SIZE; i++) {
     td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
@@ -306,32 +305,25 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
       // found cached allocation, try use it
       td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
       if (td != NULL) {
-        break;
+        _mi_memzero(td, offsetof(mi_thread_data_t,memid));
+        return td;
       }
     }
   }
 
   // if that fails, allocate as meta data
+  mi_memid_t memid;
+  td = (mi_thread_data_t*)_mi_os_zalloc(sizeof(mi_thread_data_t), &memid);
   if (td == NULL) {
-    mi_memid_t memid;
-    td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
+    // if this fails, try once more. (issue #257)
+    td = (mi_thread_data_t*)_mi_os_zalloc(sizeof(mi_thread_data_t), &memid);
     if (td == NULL) {
-      // if this fails, try once more. (issue #257)
-      td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
-      if (td == NULL) {
-        // really out of memory
-        _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
-      }
-    }
-    if (td != NULL) {
-      td->memid = memid;
-      is_zero = memid.initially_zero;
+      // really out of memory
+      _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
+      return NULL;
     }
   }
-  if (td != NULL && !is_zero) {
-    _mi_memzero_aligned(td, offsetof(mi_thread_data_t,memid));
-  }
+  td->memid = memid;
   return td;
 }
 
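In the cache-reuse path above, `_mi_memzero(td, offsetof(mi_thread_data_t,memid))` clears only the bytes that precede the `memid` field, so the recycled entry is returned zeroed while its OS/meta provenance survives for the eventual free. An illustrative layout sketch (the fields before `memid` are assumptions for illustration, not taken from this diff):

  // assumed layout: per-thread state first, memid deliberately last
  typedef struct mi_thread_data_s {
    mi_heap_t  heap;    // wiped when a cached entry is reused
    mi_tld_t   tld;     // wiped when a cached entry is reused
    mi_memid_t memid;   // survives the memzero; needed to free the block later
  } mi_thread_data_t;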
src/os.c (45 changed lines)

@@ -182,6 +182,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
   if (mi_memkind_is_os(memid.memkind)) {
     size_t csize = memid.mem.os.size;
     if (csize==0) { csize = _mi_os_good_alloc_size(size); }
+    mi_assert_internal(csize >= size);
     size_t commit_size = (still_committed ? csize : 0);
     void* base = addr;
     // different base? (due to alignment)
@@ -341,9 +342,11 @@ void* _mi_os_alloc(size_t size, mi_memid_t* memid) {
   bool os_is_large = false;
   bool os_is_zero = false;
   void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero);
-  if (p != NULL) {
-    *memid = _mi_memid_create_os(p, size, true, os_is_zero, os_is_large);
-  }
+  if (p == NULL) return NULL;
+  *memid = _mi_memid_create_os(p, size, true, os_is_zero, os_is_large);
+  mi_assert_internal(memid->mem.os.size >= size);
+  mi_assert_internal(memid->initially_committed);
   return p;
 }
 
@@ -359,14 +362,40 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
   bool os_is_zero = false;
   void* os_base = NULL;
   void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base );
-  if (p != NULL) {
-    *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large);
-    memid->mem.os.base = os_base;
-    memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned?
-  }
+  if (p == NULL) return NULL;
+  *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large);
+  memid->mem.os.base = os_base;
+  memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned?
 
+  mi_assert_internal(memid->mem.os.size >= size);
+  mi_assert_internal(_mi_is_aligned(p,alignment));
+  mi_assert_internal(!commit || memid->initially_committed);
+  mi_assert_internal(!memid->initially_zero || memid->initially_committed);
   return p;
 }
 
+mi_decl_nodiscard static void* mi_os_ensure_zero(void* p, size_t size, mi_memid_t* memid) {
+  if (p==NULL || size==0 || memid->initially_zero) return p;
+  if (!memid->initially_committed) {
+    bool is_zero = false;
+    if (!_mi_os_commit(p, size, &is_zero)) {
+      _mi_os_free(p, size, *memid);
+      return NULL;
+    }
+    memid->initially_committed = true;
+  }
+  _mi_memzero_aligned(p,size);
+  memid->initially_zero = true;
+  return p;
+}
+
+void* _mi_os_zalloc(size_t size, mi_memid_t* memid) {
+  void* p = _mi_os_alloc(size,memid);
+  return mi_os_ensure_zero(p, size, memid);
+}
+
 /* -----------------------------------------------------------
   OS aligned allocation with an offset. This is used
   for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc
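In `_mi_os_alloc_aligned` the recorded size grows by the alignment padding, `(uint8_t*)p - (uint8_t*)os_base`, so `memid` describes the whole OS mapping and the new `mi_assert_internal(csize >= size)` in `_mi_os_free_ex` still holds. A small worked example (the numbers are illustrative only, not from the diff):

  uint8_t* os_base = (uint8_t*)0x10000;             // start of the OS mapping
  uint8_t* p       = (uint8_t*)0x14000;             // pointer after aligning up
  size_t   size    = 0x8000;                        // requested size
  size_t   os_size = size + (size_t)(p - os_base);  // 0x8000 + 0x4000 = 0xC000
  // _mi_os_free_ex later releases [os_base, os_base + os_size), padding included.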
@@ -61,7 +61,7 @@ static mi_segmap_part_t* mi_segment_map_index_of(const mi_segment_t* segment, bo
   if mi_unlikely(part == NULL) {
     if (!create_on_demand) return NULL;
     mi_memid_t memid;
-    part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid);
+    part = (mi_segmap_part_t*)_mi_os_zalloc(sizeof(mi_segmap_part_t), &memid);
     if (part == NULL) return NULL;
     part->memid = memid;
     mi_segmap_part_t* expected = NULL;