mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-04 22:49:32 +03:00)

commit 09e42aea4e (parent 4d976270eb)
clarify is_zero meaning

4 changed files with 9 additions and 12 deletions
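The rename makes the bit's meaning explicit: it says that the blocks currently sitting on the page's free list are known to be zero-filled (apart from their embedded `next` link, as the `_mi_page_malloc` hunk below shows), not that the page as a whole is zero. That lets a zeroing allocation skip the memset, and the bit must be dropped as soon as freed blocks with arbitrary contents rejoin the list. The following standalone sketch illustrates that idea; it is not mimalloc's code, and `toy_page_t`, `toy_page_malloc`, and `toy_page_free` are invented names used only for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

// Illustrative only: a toy page with an intrusive free list, mimicking the
// idea behind mimalloc's free_is_zero bit (names and layout are simplified).
typedef struct toy_block_s { struct toy_block_s* next; } toy_block_t;

typedef struct toy_page_s {
  toy_block_t* free;         // intrusive singly linked free list
  size_t       block_size;   // size of each block on this page
  bool         free_is_zero; // blocks on `free` are zero except for their `next` link
} toy_page_t;

// Pop a block; if the caller wants zeroed memory and the free list is known
// to be zero, only the embedded `next` pointer has to be cleared.
static void* toy_page_malloc(toy_page_t* page, bool zero) {
  toy_block_t* block = page->free;
  if (block == NULL) return NULL;
  page->free = block->next;
  if (zero) {
    if (page->free_is_zero) {
      block->next = NULL;                    // cheap path: the rest is already zero
    }
    else {
      memset(block, 0, page->block_size);    // slow path: clear the whole block
    }
  }
  return block;
}

// Returning a block puts arbitrary user data back on the free list,
// so the zero promise must be dropped.
static void toy_page_free(toy_page_t* page, void* p) {
  toy_block_t* block = (toy_block_t*)p;
  block->next = page->free;
  page->free = block;
  page->free_is_zero = false;
}

int main(void) {
  enum { NBLOCKS = 4, BSIZE = 64 };
  void* mem = calloc(NBLOCKS, BSIZE);        // freshly zeroed backing memory
  if (mem == NULL) return 1;
  toy_page_t page = { .free = NULL, .block_size = BSIZE, .free_is_zero = true };
  // Thread the blocks into the free list; writing only `next` keeps them
  // "zero except for next", which is exactly what the flag promises.
  for (int i = NBLOCKS - 1; i >= 0; i--) {
    toy_block_t* b = (toy_block_t*)((uint8_t*)mem + (size_t)i * BSIZE);
    b->next = page.free;
    page.free = b;
  }
  unsigned char* p = toy_page_malloc(&page, /*zero=*/true);
  for (int i = 0; i < BSIZE; i++) assert(p[i] == 0);   // zero without a memset
  toy_page_free(&page, p);                   // freeing clears the flag
  assert(!page.free_is_zero);
  free(mem);
  return 0;
}
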
@@ -291,7 +291,7 @@ typedef struct mi_page_s {
   uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
   uint16_t              reserved;          // number of blocks reserved in memory
   mi_page_flags_t       flags;             // `in_full` and `has_aligned` flags (8 bits)
-  uint8_t               is_zero:1;         // `true` if the blocks in the free list are zero initialized
+  uint8_t               free_is_zero:1;    // `true` if the blocks in the free list are zero initialized
   uint8_t               retire_expire:7;   // expiration count for retired blocks

   mi_block_t*           free;              // list of available free blocks (`malloc` allocates from this list)

@@ -222,7 +222,7 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   if (newp != NULL) {
     if (zero && newsize > size) {
       const mi_page_t* page = _mi_ptr_page(newp);
-      if (page->is_zero) {
+      if (page->free_is_zero) {
         // already zero initialized
         mi_assert_expensive(mi_mem_is_zero(newp,newsize));
       }

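In the aligned realloc path above, the renamed flag is what decides whether the bytes beyond the old size need clearing at all: if the block came off a zero free list, the grown tail is already zero and the code only asserts that in debug builds; otherwise (in the else branch, which this hunk does not show) the grown part would have to be cleared explicitly. A rough standalone sketch of that decision follows, with invented names (`ensure_tail_zero`, `mem_is_zero`) and calloc-backed memory standing in for a zero-initialized page.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Invented helper, shaped like mi_mem_is_zero: true if all n bytes are zero. */
static bool mem_is_zero(const void* p, size_t n) {
  const unsigned char* b = (const unsigned char*)p;
  for (size_t i = 0; i < n; i++) {
    if (b[i] != 0) return false;
  }
  return true;
}

/* Make sure bytes [size, newsize) of p are zero. If the caller knows the
   backing memory is zero beyond `size` (the role free_is_zero plays in the
   hunk above), a debug assertion suffices; otherwise clear it explicitly. */
static void ensure_tail_zero(void* p, size_t size, size_t newsize, bool tail_is_zero) {
  if (newsize <= size) return;
  if (tail_is_zero) {
    assert(mem_is_zero((unsigned char*)p + size, newsize - size));  /* already zero */
  }
  else {
    memset((unsigned char*)p + size, 0, newsize - size);            /* must clear */
  }
}

int main(void) {
  unsigned char* buf = calloc(1, 64);                     /* zeroed backing memory */
  if (buf == NULL) return 1;
  memset(buf, 0xAB, 16);                                  /* "old" contents: first 16 bytes */
  ensure_tail_zero(buf, 16, 64, /*tail_is_zero=*/true);   /* tail untouched since calloc */
  memset(buf + 16, 0xCD, 16);                             /* dirty part of the tail */
  ensure_tail_zero(buf, 16, 64, /*tail_is_zero=*/false);  /* now it has to be cleared */
  assert(mem_is_zero(buf + 16, 48));
  free(buf);
  return 0;
}
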
@@ -46,7 +46,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
     mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
-    if (page->is_zero) {
+    if (page->free_is_zero) {
       block->next = 0;
     }
     else {

src/page.c (15 changed lines)

@@ -84,7 +84,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(mi_page_list_is_valid(page,page->local_free));

   #if MI_DEBUG>3 // generally too expensive to check this
-  if (page->is_zero) {
+  if (page->free_is_zero) {
     const size_t ubsize = mi_page_usable_block_size(page);
     for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
       mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));

@@ -220,7 +220,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
     // usual case
     page->free = page->local_free;
     page->local_free = NULL;
-    page->is_zero = false;
+    page->free_is_zero = false;
   }
   else if (force) {
     // append -- only on shutdown (force) as this is a linear operation

@@ -232,7 +232,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
     mi_block_set_next(page, tail, page->free);
     page->free = page->local_free;
     page->local_free = NULL;
-    page->is_zero = false;
+    page->free_is_zero = false;
   }
 }

@@ -662,12 +662,9 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   page->keys[0] = _mi_heap_random_next(heap);
   page->keys[1] = _mi_heap_random_next(heap);
   #endif
-  page->is_zero = page->is_zero_init;
-  #if MI_DEBUG>1
-  if (page->is_zero_init) {
-    mi_mem_is_zero(page_start, page_size);
-  }
-  #endif
+  page->free_is_zero = page->is_zero_init;
+  mi_assert_expensive(!page->is_zero_init || mi_mem_is_zero(page_start, page_size));

   mi_assert_internal(page->capacity == 0);
   mi_assert_internal(page->free == NULL);
   mi_assert_internal(page->used == 0);
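The last hunk also folds the old `#if MI_DEBUG>1` block into one expensive assertion written as an implication, `!P || Q` ("if the page memory was zero-initialized, it really must read as zero"). Note that the removed block called `mi_mem_is_zero` without checking its result, so the new form actually asserts what the old one only computed. A small standalone example of that idiom follows; `mem_is_zero` is again an invented stand-in for the real helper.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Invented stand-in for mi_mem_is_zero: true if all n bytes are zero. */
static bool mem_is_zero(const void* p, size_t n) {
  const unsigned char* b = (const unsigned char*)p;
  for (size_t i = 0; i < n; i++) {
    if (b[i] != 0) return false;
  }
  return true;
}

int main(void) {
  unsigned char page_start[32] = {0};  /* pretend the OS handed back zeroed memory */
  bool is_zero_init = true;

  /* "is_zero_init implies the memory is zero", encoded as !P || Q.
     With assertions disabled the whole check compiles away, which is what
     the removed #if MI_DEBUG>1 block achieved more verbosely. */
  assert(!is_zero_init || mem_is_zero(page_start, sizeof(page_start)));

  /* A page that was not zero-initialized makes no promise, so the assertion
     still holds even when the memory is dirty. */
  is_zero_init = false;
  memset(page_start, 0xCD, sizeof(page_start));
  assert(!is_zero_init || mem_is_zero(page_start, sizeof(page_start)));
  return 0;
}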