mirror of https://github.com/microsoft/mimalloc.git
synced 2025-07-06 19:38:41 +03:00

commit 659a9dd51d (parent 5a5943ad33)
fix page info size and order; atomic page flags

10 changed files with 87 additions and 89 deletions
src/arena.c  (36 changed lines)
@@ -483,7 +483,7 @@ static bool mi_arena_try_claim_abandoned(size_t slice_index, void* arg1, void* a
   mi_page_t* const page = (mi_page_t*)mi_arena_slice_start(arena, slice_index);
   // can we claim ownership?
   if (!mi_page_try_claim_ownership(page)) {
-    // there was a concurrent free ..
+    // there was a concurrent free ..
     // we need to keep it in the abandoned map as the free will call `mi_arena_page_unabandon`,
     // and wait for readers (us!) to finish. This is why it is very important to set the abandoned
     // bit again (or otherwise the unabandon will never stop waiting).
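For context: the claim in this hunk races against a concurrent free, and exactly one side may win ownership. A minimal sketch of such a try-claim, assuming ownership is a single bit in an atomic flags word (all names here are illustrative, not mimalloc's actual definitions):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Illustrative sketch only: ownership as bit 0 of an atomic flags word.
#define DEMO_OWNED ((uintptr_t)1)

typedef struct demo_page_s {
  _Atomic(uintptr_t) xflags;   // bit 0: owned; remaining bits: other flags
} demo_page_t;

// Atomically set the owned bit; succeed only if it was previously clear,
// i.e. no concurrent free (or other claimer) got there first.
static bool demo_page_try_claim_ownership(demo_page_t* page) {
  uintptr_t old = atomic_fetch_or_explicit(&page->xflags, DEMO_OWNED,
                                           memory_order_acq_rel);
  return (old & DEMO_OWNED) == 0;
}
```

`atomic_fetch_or` both sets the bit and reports its previous value, so the loser learns that a concurrent free already owns the page, which is exactly the case the comments above describe.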
@@ -596,7 +596,9 @@ static mi_page_t* mi_arena_page_alloc_fresh(size_t slice_count, size_t block_siz
   }
   }
   #endif
-  mi_assert(MI_PAGE_INFO_SIZE >= _mi_align_up(sizeof(*page), MI_PAGE_MIN_BLOCK_ALIGN));
+  if (MI_PAGE_INFO_SIZE < _mi_align_up(sizeof(*page), MI_PAGE_MIN_BLOCK_ALIGN)) {
+    _mi_error_message(EFAULT, "fatal internal error: MI_PAGE_INFO_SIZE is too small\n");
+  };
   const size_t block_start = (os_align ? MI_PAGE_ALIGN : MI_PAGE_INFO_SIZE);
   const size_t reserved = (os_align ? 1 : (mi_size_of_slices(slice_count) - block_start) / block_size);
   mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
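The new guard compares `MI_PAGE_INFO_SIZE` against the page struct size rounded up to `MI_PAGE_MIN_BLOCK_ALIGN`, and it replaces a debug-only `mi_assert` with an unconditional error so release builds also report the misconfiguration. For reference, a common align-up idiom for power-of-two alignments (a hypothetical stand-in, not necessarily mimalloc's `_mi_align_up`):

```c
#include <stddef.h>

// Round n up to the next multiple of align; align must be a power of two.
static inline size_t demo_align_up(size_t n, size_t align) {
  return (n + align - 1) & ~(align - 1);
}
// e.g. demo_align_up(200, 32) == 224: a 200-byte page struct needs a page
// info area of at least 224 bytes under a 32-byte minimum block alignment.
```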
@@ -1126,28 +1128,22 @@ static size_t mi_debug_show_bfield(mi_bfield_t field, char* buf) {
   return bit_set_count;
 }

-static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t slice_count, mi_bitmap_t* bitmap, bool invert) {
-  _mi_output_message("%s%s:\n", prefix, header);
+static size_t mi_debug_show_bitmap(const char* header, size_t slice_count, mi_bitmap_t* bitmap, bool invert) {
+  _mi_output_message("%s:\n", header);
   size_t bit_count = 0;
   size_t bit_set_count = 0;
   for (size_t i = 0; i < mi_bitmap_chunk_count(bitmap) && bit_count < slice_count; i++) {
     char buf[MI_BCHUNK_BITS + 64]; _mi_memzero(buf, sizeof(buf));
     size_t k = 0;
     mi_bchunk_t* chunk = &bitmap->chunks[i];

-    if (i<10)  { buf[k++] = ' '; }
-    if (i<100) { itoa((int)i, buf+k, 10); k += (i < 10 ? 1 : 2); }
-    buf[k++] = ' ';
+    if (i<10)        { buf[k++] = ('0' + (char)i); buf[k++] = ' '; buf[k++] = ' '; }
+    else if (i<100)  { buf[k++] = ('0' + (char)(i/10)); buf[k++] = ('0' + (char)(i%10)); buf[k++] = ' '; }
+    else if (i<1000) { buf[k++] = ('0' + (char)(i/100)); buf[k++] = ('0' + (char)((i%100)/10)); buf[k++] = ('0' + (char)(i%10)); }

     for (size_t j = 0; j < MI_BCHUNK_FIELDS; j++) {
       if (j > 0 && (j % 4) == 0) {
-        buf[k++] = '\n';
-        _mi_memcpy(buf+k, prefix, strlen(prefix)); k += strlen(prefix);
-        buf[k++] = ' ';
-        buf[k++] = ' ';
-        buf[k++] = ' ';
-        buf[k++] = ' ';
-        buf[k++] = ' ';
+        buf[k++] = '\n'; _mi_memset(buf+k,' ',5); k += 5;
       }
       if (bit_count < slice_count) {
         mi_bfield_t bfield = chunk->bfields[j];
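Beyond dropping the `prefix` parameter, the rewrite also removes `itoa`, which is not part of standard C. A portable alternative for the fixed three-character index column would be `snprintf` (sketch only; the hand-rolled digit emission above avoids stdio on this path):

```c
#include <stdio.h>
#include <stddef.h>

// Format chunk index i (assumed < 1000) left-aligned in a 3-char column,
// as the hand-rolled branches above do. Returns the characters written.
static size_t demo_format_index(size_t i, char buf[5]) {
  return (size_t)snprintf(buf, 5, "%-3zu", i);
}
```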
@@ -1164,9 +1160,9 @@ static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_
       }
       bit_count += MI_BFIELD_BITS;
     }
-    _mi_output_message("%s %s\n", prefix, buf);
+    _mi_output_message(" %s\n", buf);
   }
-  _mi_output_message("%s total ('x'): %zu\n", prefix, bit_set_count);
+  _mi_output_message(" total ('x'): %zu\n", bit_set_count);
   return bit_set_count;
 }

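For context, `mi_debug_show_bfield` (named in the hunk header) renders one bitfield as set/clear characters and returns the set-bit count, which feeds the `total ('x')` line above. A self-contained sketch of that kind of rendering, with illustrative names:

```c
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

// Render a 64-bit field as 'x' (set) / '.' (clear) into buf and return the
// number of set bits; buf must hold at least 65 bytes.
static size_t demo_show_bfield(uint64_t field, char* buf) {
  size_t set_count = 0;
  for (size_t bit = 0; bit < 64; bit++) {
    const bool is_set = ((field >> bit) & 1) != 0;
    if (is_set) set_count++;
    buf[bit] = (is_set ? 'x' : '.');
  }
  buf[64] = '\0';
  return set_count;
}
```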
@@ -1183,12 +1179,12 @@ void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge)
     slice_total += arena->slice_count;
     _mi_output_message("arena %zu: %zu slices (%zu MiB)%s\n", i, arena->slice_count, mi_size_of_slices(arena->slice_count)/MI_MiB, (arena->memid.is_pinned ? ", pinned" : ""));
     if (show_inuse) {
-      free_total += mi_debug_show_bitmap(" ", "in-use slices", arena->slice_count, arena->slices_free, true);
+      free_total += mi_debug_show_bitmap("in-use slices", arena->slice_count, arena->slices_free, true);
     }
-    mi_debug_show_bitmap(" ", "committed slices", arena->slice_count, arena->slices_committed, false);
+    mi_debug_show_bitmap("committed slices", arena->slice_count, arena->slices_committed, false);
     // todo: abandoned slices
     if (show_purge) {
-      purge_total += mi_debug_show_bitmap(" ", "purgeable slices", arena->slice_count, arena->slices_purge, false);
+      purge_total += mi_debug_show_bitmap("purgeable slices", arena->slice_count, arena->slices_purge, false);
     }
   }
   if (show_inuse) _mi_output_message("total inuse slices : %zu\n", slice_total - free_total);
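A hypothetical use of the dump these call sites belong to, with the signature taken from the hunk header (requires linking against this mimalloc build):

```c
#include <stdbool.h>

// Declaration as it appears in the hunk header above.
void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge);

int main(void) {
  // Print the in-use and purgeable slice bitmaps for every arena.
  mi_debug_show_arenas(true /* show_inuse */, false /* show_abandoned */, true /* show_purge */);
  return 0;
}
```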
src/bitmap.c
@@ -805,10 +805,10 @@ static bool mi_bitmap_chunkmap_try_clear(mi_bitmap_t* bitmap, size_t chunk_idx)
     return false;
   }
   // record the max clear
-  size_t oldmax = mi_atomic_load_relaxed(&bitmap->chunk_max_clear);
+  /*size_t oldmax = mi_atomic_load_relaxed(&bitmap->chunk_max_clear);
   do {
     if mi_likely(chunk_idx <= oldmax) break;
-  } while (!mi_atomic_cas_weak_acq_rel(&bitmap->chunk_max_clear, &oldmax, chunk_idx));
+  } while (!mi_atomic_cas_weak_acq_rel(&bitmap->chunk_max_clear, &oldmax, chunk_idx));*/
   return true;
 }

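The block being commented out is a standard lock-free "monotonic maximum": raise `chunk_max_clear` to `chunk_idx` unless another thread already recorded an equal or larger value. The generic pattern, sketched with C11 atomics and illustrative names:

```c
#include <stdatomic.h>
#include <stddef.h>

// Raise *max to value; never lower it. Safe under concurrent callers.
static void demo_update_max(_Atomic(size_t)* max, size_t value) {
  size_t old = atomic_load_explicit(max, memory_order_relaxed);
  while (old < value &&
         !atomic_compare_exchange_weak_explicit(max, &old, value,
                                                memory_order_acq_rel,
                                                memory_order_relaxed)) {
    // A failed CAS refreshes `old`; keep retrying until this thread wins
    // or the stored maximum is already >= value.
  }
}
```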
@@ -1046,7 +1046,7 @@ bool mi_bitmap_is_xsetN(mi_xset_t set, mi_bitmap_t* bitmap, size_t idx, size_t n
 { \
   /* start chunk index -- todo: can depend on the tseq to decrease contention between threads */ \
   MI_UNUSED(tseq); \
-  const size_t chunk_max = mi_atomic_load_acquire(&bitmap->chunk_max_clear); /* mi_bitmap_chunk_count(bitmap) */ \
+  /* const size_t chunk_max = mi_atomic_load_acquire(&bitmap->chunk_max_clear); */ /* mi_bitmap_chunk_count(bitmap) */ \
   const size_t chunk_start = 0; /* (chunk_max <= 1 ? 0 : (tseq % chunk_max)); */ /* space out threads */ \
   const size_t chunkmap_max_bfield = _mi_divide_up( mi_bitmap_chunk_count(bitmap), MI_BCHUNK_BITS ); \
   const size_t chunkmap_start = chunk_start / MI_BFIELD_BITS; \
src/free.c
@@ -163,8 +163,9 @@ void mi_free(void* p) mi_attr_noexcept
   if mi_unlikely(page==NULL) return;

   const bool is_local = (_mi_prim_thread_id() == mi_page_thread_id(page));
+  const mi_page_flags_t flags = mi_page_flags(page);
   if mi_likely(is_local) {  // thread-local free?
-    if mi_likely(page->flags.full_aligned == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
+    if mi_likely(flags == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
       // thread-local, aligned, and not a full page
       mi_block_t* const block = (mi_block_t*)p;
       mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */);
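This is the "atomic page flags" half of the commit title: instead of reading the plain bitfield `page->flags.full_aligned`, the free path loads the flags once through `mi_page_flags(page)` and tests the snapshot. A sketch of the underlying idea, assuming the flags are packed into one atomic word (names illustrative, not mimalloc's definitions):

```c
#include <stdatomic.h>
#include <stdint.h>

typedef uintptr_t demo_page_flags_t;
#define DEMO_PAGE_IN_FULL      ((demo_page_flags_t)0x01)  // page sits in the full bin
#define DEMO_PAGE_HAS_ALIGNED  ((demo_page_flags_t)0x02)  // page has aligned blocks

typedef struct demo_page_s {
  _Atomic(demo_page_flags_t) xflags;
} demo_page_t;

// One relaxed load answers both questions on the fast path:
// flags == 0 means "not full and no aligned blocks".
static inline demo_page_flags_t demo_page_flags(demo_page_t* page) {
  return atomic_load_explicit(&page->xflags, memory_order_relaxed);
}
```

Reading into a local `flags` also means both branches of `mi_free` test the same snapshot rather than reloading a word that a concurrent thread may have changed in between.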
@@ -176,7 +177,7 @@ void mi_free(void* p) mi_attr_noexcept
   }
   else {
     // free-ing in a page owned by a heap in another thread, or on abandoned page (not belonging to a heap)
-    if mi_likely(page->flags.full_aligned == 0) {
+    if mi_likely(flags == 0) {
       // blocks are aligned (and not a full page)
       mi_block_t* const block = (mi_block_t*)p;
       mi_free_block_mt(page,block);
src/init.c  (10 changed lines)
@@ -20,21 +20,21 @@ const mi_page_t _mi_page_empty = {
   0,                       // capacity
   0,                       // reserved capacity
   0,                       // block size shift
-  0,                       // heap tag
-  { 0 },                   // flags
-  false,                   // is_zero
   0,                       // retire_expire
   NULL,                    // local_free
+  MI_ATOMIC_VAR_INIT(0),   // xthread_free
+  MI_ATOMIC_VAR_INIT(0),   // xflags
   0,                       // block_size
   NULL,                    // page_start
+  0,                       // heap tag
+  false,                   // is_zero
   #if (MI_PADDING || MI_ENCODE_FREELIST)
   { 0, 0 },
   #endif
-  MI_ATOMIC_VAR_INIT(0),   // xthread_free
   NULL,                    // xheap
   NULL, NULL,              // next, prev
   NULL,                    // subproc
-  { {{ NULL, 0, 0}}, false, false, false, MI_MEM_NONE } // memid
+  { {{ NULL, 0}}, false, false, false, MI_MEM_NONE } // memid
 };

 #define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
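Reordering struct fields forces a positional initializer like `_mi_page_empty` to be rewritten in lockstep, which is most of what this hunk does; the memid line also loses one member, matching the os.c hunks below that retire `mem.os.alignment`. For comparison, C99 designated initializers are order-independent (illustrative fields, not mimalloc's actual struct):

```c
#include <stdint.h>

typedef struct demo_page_s {
  uint16_t capacity;
  uint16_t reserved;
  _Atomic(uintptr_t) xthread_free;
  _Atomic(uintptr_t) xflags;
} demo_page_t;

// Field order in the struct can change without touching this initializer.
static const demo_page_t demo_page_empty = {
  .capacity     = 0,
  .reserved     = 0,
  .xthread_free = 0,   // plain 0 statically initializes an _Atomic since C17
  .xflags       = 0,
};
```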
src/os.c  (4 changed lines)
@@ -128,7 +128,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
   // different base? (due to alignment)
   if (memid.mem.os.base != base) {
     mi_assert(memid.mem.os.base <= addr);
-    mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr);
+    // mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr);
     base = memid.mem.os.base;
     if (memid.mem.os.size==0) { csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base); }
   }
@@ -305,7 +305,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
   if (p != NULL) {
     *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large);
     memid->mem.os.base = os_base;
-    memid->mem.os.alignment = alignment;
+    // memid->mem.os.alignment = alignment;
     memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned
   }
   return p;
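Both os.c hunks stop using the memid's `alignment` field: the aligned allocation is instead described by `base` plus a `size` that includes the slack in front of the aligned pointer, so the free path can release the whole region from `base`. A sketch of that bookkeeping with illustrative names:

```c
#include <stdint.h>
#include <stddef.h>

typedef struct demo_os_memid_s {
  void*  base;  // start of the raw OS allocation
  size_t size;  // full size measured from base, including alignment slack
} demo_os_memid_t;

// p is the aligned pointer handed to the caller; os_base is the raw
// allocation start. Record enough to free the whole region later.
static void demo_record_aligned(demo_os_memid_t* memid, void* os_base,
                                void* p, size_t user_size) {
  memid->base = os_base;
  memid->size = user_size + (size_t)((uint8_t*)p - (uint8_t*)os_base);
}
// Free path: release [base, base + size) rather than [p, p + user_size).
```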