diff --git a/src/arena-meta.c b/src/arena-meta.c
index ff50ea60..530e42cb 100644
--- a/src/arena-meta.c
+++ b/src/arena-meta.c
@@ -25,9 +25,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_META_PAGE_SIZE          MI_ARENA_SLICE_SIZE
 #define MI_META_PAGE_ALIGN         MI_ARENA_SLICE_ALIGN
 
-#define MI_META_BLOCK_SIZE         (128)                                        // large enough such that META_MAX_SIZE > 4k (even on 32-bit)
+#define MI_META_BLOCK_SIZE         (128)                                        // large enough such that META_MAX_SIZE >= 4k (even on 32-bit)
 #define MI_META_BLOCK_ALIGN        MI_META_BLOCK_SIZE
-#define MI_META_BLOCKS_PER_PAGE    (MI_ARENA_SLICE_SIZE / MI_META_BLOCK_SIZE)   // 1024
+#define MI_META_BLOCKS_PER_PAGE    (MI_META_PAGE_SIZE / MI_META_BLOCK_SIZE)     // 512
 #define MI_META_MAX_SIZE           (MI_BCHUNK_SIZE * MI_META_BLOCK_SIZE)
 
 typedef struct mi_meta_page_s {
@@ -150,7 +150,7 @@ mi_decl_noinline void _mi_meta_free(void* p, size_t size, mi_memid_t memid) {
   const size_t block_idx = memid.mem.meta.block_index;
   mi_meta_page_t* mpage = (mi_meta_page_t*)memid.mem.meta.meta_page;
   mi_assert_internal(mi_meta_page_of_ptr(p,NULL) == mpage);
-  mi_assert_internal(block_idx + block_count < MI_META_BLOCKS_PER_PAGE);
+  mi_assert_internal(block_idx + block_count <= MI_META_BLOCKS_PER_PAGE);
   mi_assert_internal(mi_bbitmap_is_clearN(&mpage->blocks_free, block_idx, block_count));
   // we zero on free (and on the initial page allocation) so we don't need a "dirty" map
   _mi_memzero_aligned(mi_meta_block_start(mpage, block_idx), block_count*MI_META_BLOCK_SIZE);
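
Note on the second hunk (commentary, not part of the patch): the assertion fix corrects an off-by-one bound. A run of blocks may legitimately end exactly at the page boundary, i.e. block_idx + block_count == MI_META_BLOCKS_PER_PAGE for a run that occupies the last block, so the exclusive comparison `<` would reject valid frees; the end of the run is an exclusive index and must be compared with `<=`. The sketch below is a minimal standalone illustration of that invariant, not mimalloc code; block_range_in_page and BLOCKS_PER_PAGE are hypothetical names, with 512 taken from the corrected comment in the first hunk.

// Minimal sketch of the inclusive upper-bound invariant the patch restores.
// Assumes a page of BLOCKS_PER_PAGE fixed-size blocks; names are illustrative.
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define BLOCKS_PER_PAGE 512   // assumed value, per the corrected "// 512" comment

static bool block_range_in_page(size_t block_idx, size_t block_count) {
  // a run is valid if it is non-empty and ends at or before the page boundary;
  // block_idx + block_count is the exclusive end index of the run
  return block_count > 0 && block_idx + block_count <= BLOCKS_PER_PAGE;
}

int main(void) {
  assert(block_range_in_page(0, BLOCKS_PER_PAGE));       // whole page: valid
  assert(block_range_in_page(BLOCKS_PER_PAGE - 1, 1));   // last block: valid, but `<` would reject it
  assert(!block_range_in_page(BLOCKS_PER_PAGE - 1, 2));  // runs past the end: invalid
  return 0;
}

The MI_META_BLOCKS_PER_PAGE change in the first hunk is behavior-neutral, since MI_META_PAGE_SIZE is defined as an alias for MI_ARENA_SLICE_SIZE just above: it expresses the count in terms of the meta-page size it actually describes, and it corrects the stale count comment from 1024 to 512.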