fix compilation warnings for new uint16_t size for used field

Daan Leijen 2024-03-24 08:10:35 -07:00
parent 9085596eab
commit 60c4a0fe56
5 changed files with 68 additions and 53 deletions
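Background for the change: the `used` counter in `mi_page_t` was recently narrowed to `uint16_t` (per the commit title), and C's integer promotions then make an expression like `page->used - 1` an `int`, so storing the result into a wider local and back into the 16-bit field raises implicit-conversion/truncation warnings on MSVC and on `-Wconversion` builds. The sketch below only illustrates that warning pattern and the in-place fix used here; the struct and function names are made up and are not mimalloc's:

  #include <stdint.h>

  typedef struct demo_page_s {
    uint16_t used;   // number of blocks in use; assumed 16-bit, mirroring the narrowed field
  } demo_page_t;

  static void demo_free_one(demo_page_t* page) {
    // warns: `page->used - 1` promotes to int; assigning it through a 32-bit local
    // and then back into the 16-bit field is flagged as a possible loss of data
    //   const uint32_t used = page->used - 1;
    //   page->used = used;

    // warning-free and equivalent: decrement the field in place and test the result
    if (--page->used == 0) {
      // the page could be retired here
    }
  }

  int main(void) {
    demo_page_t page = { .used = 1 };
    demo_free_one(&page);
    return (int)page.used;  // 0
  }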


@@ -30,7 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_noinline __declspec(noinline)
#define mi_decl_thread __declspec(thread)
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
#define mi_decl_weak
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
#define mi_decl_noinline __attribute__((noinline))
#define mi_decl_thread __thread
@@ -40,7 +40,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_noinline
#define mi_decl_thread __thread // hope for the best :-)
#define mi_decl_cache_align
#define mi_decl_weak
#endif
#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
@@ -91,7 +91,7 @@ void _mi_thread_data_collect(void);
// os.c
void _mi_os_init(void); // called from process init
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
@@ -132,8 +132,8 @@ void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
size_t _mi_arena_segment_abandoned_count(void);

typedef struct mi_arena_field_cursor_s { // abstract
  mi_arena_id_t start;
  int count;
  size_t bitmap_idx;
} mi_arena_field_cursor_t;
void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current);


@@ -300,14 +300,14 @@ typedef struct mi_page_s {
  uint8_t block_size_shift;  // if not zero, then `(1 << block_size_shift == block_size)` (used for fast path in `free.c:_mi_page_ptr_unalign`)
  uint8_t block_offset_adj;  // if not zero, then `(page_start - (uint8_t*)page - 8*(block_offset_adj-1)) % block_size == 0)` (used for fast path in `free.c:_mi_page_ptr_unalign`)
  uint32_t xblock_size;      // size available in each block (always `>0`)
  #if (MI_ENCODE_FREELIST || MI_PADDING)
  uintptr_t keys[2];         // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
  #endif
  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
  _Atomic(uintptr_t)        xheap;
  struct mi_page_s* next;    // next page owned by the heap with the same `block_size`
  struct mi_page_s* prev;    // previous page owned by the heap with the same `block_size`
} mi_page_t;
@@ -373,7 +373,7 @@ typedef struct mi_segment_s {
  bool allow_decommit;
  bool allow_purge;
  size_t segment_size;       // for huge pages this may be different from `MI_SEGMENT_SIZE`

  // segment fields
  struct mi_segment_s* next; // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
  struct mi_segment_s* prev;
@@ -450,7 +450,7 @@ struct mi_heap_s {
  mi_tld_t* tld;
  _Atomic(mi_block_t*) thread_delayed_free;
  mi_threadid_t thread_id;   // thread this heap belongs too
  mi_arena_id_t arena_id;    // arena id if the heap belongs to a specific arena (or 0)
  uintptr_t cookie;          // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t keys[2];         // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t random;    // random number context used for secure allocation
@@ -460,7 +460,7 @@ struct mi_heap_s {
  mi_heap_t* next;           // list of heaps per thread
  bool no_reclaim;           // `true` if this heap should not reclaim abandoned pages
  mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t pages[MI_BIN_FULL + 1];        // queue of pages for each size class (or "bin")
};


@@ -6,6 +6,11 @@ terms of the MIT license. A copy of the license can be found in the file
-----------------------------------------------------------------------------*/
#if !defined(MI_IN_ALLOC_C)
#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)"
+ // add includes help an IDE
+ #include "mimalloc.h"
+ #include "mimalloc/internal.h"
+ #include "mimalloc/atomic.h"
+ #include "mimalloc/prim.h"   // _mi_prim_thread_id()
#endif

// forward declarations
@@ -26,7 +31,7 @@ static mi_decl_noinline void mi_free_block_mt(mi_segment_t* segment, mi_page_t*
// fast path written carefully to prevent spilling on the stack
static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool check_full)
{
-   // owning thread can free a block directly
+   // checks
  if mi_unlikely(mi_check_is_double_free(page, block)) return;
  mi_check_padding(page, block);
  mi_stat_free(page, block);
@@ -34,47 +39,57 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
  #endif
  mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster then mi_usable_size as we already know the page and that p is unaligned
+   // actual free: push on the local free list
  mi_block_set_next(page, block, page->local_free);
  page->local_free = block;
-   const uint32_t used = page->used - 1;
-   page->used = used;
-   if mi_unlikely(used == 0) {   // generates better code than: --page->used == 0
+   if mi_unlikely(--page->used == 0) {
    _mi_page_retire(page);
  }
  else if mi_unlikely(check_full && mi_page_is_in_full(page)) {
    _mi_page_unfull(page);
  }
}

// Adjust a block that was allocated aligned, to the actual start of the block in the page.
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);
-   const size_t diff = (mi_likely(page->block_offset_adj != 0)
-     ? (uint8_t*)p - (uint8_t*)page - 8*(page->block_offset_adj-1)
-     : (uint8_t*)p - _mi_page_start(segment, page, NULL));
-   const size_t adjust = (mi_likely(page->block_size_shift != 0)
-     ? diff & (((size_t)1 << page->block_size_shift) - 1)
-     : diff % mi_page_block_size(page));
+   size_t diff;
+   if mi_likely(page->block_offset_adj != 0) {
+     diff = (uint8_t*)p - (uint8_t*)page - 8 * (page->block_offset_adj - 1);
+   }
+   else {
+     diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
+   }
+   size_t adjust;
+   if mi_likely(page->block_size_shift != 0) {
+     adjust = diff & (((size_t)1 << page->block_size_shift) - 1);
+   }
+   else {
+     adjust = diff % mi_page_block_size(page);
+   }
  return (mi_block_t*)((uintptr_t)p - adjust);
}

- // free a local pointer
- static void mi_decl_noinline mi_free_generic_local(mi_segment_t* segment, mi_page_t* page, void* p) mi_attr_noexcept {
+ // free a local pointer (page parameter comes first for better codegen)
+ static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
  mi_free_block_local(page, block, true);
}

- // free a pointer owned by another thread
- static void mi_decl_noinline mi_free_generic_mt(mi_segment_t* segment, mi_page_t* page, void* p) mi_attr_noexcept {
+ // free a pointer owned by another thread (page parameter comes first for better codegen)
+ static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  mi_block_t* const block = _mi_page_ptr_unalign(segment, page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
  mi_free_block_mt(segment, page, block);
}

// generic free (for runtime integration)
void mi_decl_noinline _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
-   if (is_local) mi_free_generic_local(segment,page,p);
-   else mi_free_generic_mt(segment,page,p);
+   if (is_local) mi_free_generic_local(page,segment,p);
+   else mi_free_generic_mt(page,segment,p);
}

// Get the segment data belonging to a pointer
@@ -127,16 +142,16 @@ void mi_free(void* p) mi_attr_noexcept
    if mi_likely(page->flags.full_aligned == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
      // thread-local, aligned, and not a full page
      mi_block_t* const block = (mi_block_t*)p;
-       mi_free_block_local(page,block,false /* no need to check if the page is full */);
+       mi_free_block_local(page, block, false /* no need to check if the page is full */);
    }
    else {
      // page is full or contains (inner) aligned blocks; use generic path
-       mi_free_generic_local(segment, page, p);
+       mi_free_generic_local(page, segment, p);
    }
  }
  else {
    // not thread-local; use generic path
-     mi_free_generic_mt(segment, page, p);
+     mi_free_generic_mt(page, segment, p);
  }
}
@@ -174,7 +189,7 @@ bool _mi_free_delayed_block(mi_block_t* block) {
// the owning thread in `_mi_free_delayed_block`.
static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
{
  // Try to put the block on either the page-local thread free list,
  // or the heap delayed free list (if this is the first non-local free in that page)
  mi_thread_free_t tfreex;
  bool use_delayed;
@@ -217,17 +232,17 @@ static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block
#if MI_HUGE_PAGE_ABANDON
static void mi_stat_huge_free(const mi_page_t* page);
#endif

// Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static void mi_decl_noinline mi_free_block_mt(mi_segment_t* segment, mi_page_t* page, mi_block_t* block)
{
  // first see if the segment was abandoned and if we can reclaim it into our thread
  if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
      #if MI_HUGE_PAGE_ABANDON
      segment->page_kind != MI_PAGE_HUGE &&
      #endif
      mi_atomic_load_relaxed(&segment->thread_id) == 0)
  {
    // the segment is abandoned, try to reclaim it into our heap
    if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) {
@@ -240,13 +255,13 @@ static void mi_decl_noinline mi_free_block_mt(mi_segment_t* segment, mi_page_t*
  // The padding check may access the non-thread-owned page for the key values.
  // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);

  // adjust stats (after padding check and potential recursive `mi_free` above)
  mi_stat_free(page, block);    // stat_free may access the padding
  mi_track_free_size(block, mi_page_usable_size_of(page,block));

  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
  _mi_padding_shrink(page, block, sizeof(mi_block_t));
  if (segment->page_kind == MI_PAGE_HUGE) {
    #if MI_HUGE_PAGE_ABANDON
@@ -266,7 +281,7 @@ static void mi_decl_noinline mi_free_block_mt(mi_segment_t* segment, mi_page_t*
    memset(block, MI_DEBUG_FREED, mi_usable_size(block));
    #endif
  }

  // and finally free the actual block by pushing it on the owning heap
  // thread_delayed free list (or heap delayed free list)
  mi_free_block_delayed_mt(page,block);
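The rewritten `_mi_page_ptr_unalign` above keeps the same fast path as before: when a page's block size is a power of two (`block_size_shift != 0`), the offset within the block can be taken with a mask instead of a modulo. The snippet below is only a standalone sketch of that equivalence; `demo_block_adjust` is a made-up helper, not a mimalloc function:

  #include <assert.h>
  #include <stddef.h>

  // Offset of `diff` within its block: a mask when the block size is 1 << shift,
  // the generic modulo otherwise. Both branches compute the same value for
  // power-of-two sizes, which is what the fast path in the diff relies on.
  static size_t demo_block_adjust(size_t diff, size_t block_size, unsigned shift) {
    if (shift != 0) {
      return diff & (((size_t)1 << shift) - 1);   // same as diff % block_size here
    }
    return diff % block_size;
  }

  int main(void) {
    assert(demo_block_adjust(100, 32, 5) == 100 % 32);  // power-of-two: mask path
    assert(demo_block_adjust(100, 48, 0) == 100 % 48);  // generic: modulo path
    return 0;
  }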


@@ -224,7 +224,7 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
      is_zero = memid.initially_zero;
    }
  }

  if (td != NULL && !is_zero) {
    _mi_memzero_aligned(td, offsetof(mi_thread_data_t,memid));
  }
@@ -399,23 +399,23 @@ void mi_thread_done(void) mi_attr_noexcept {
  _mi_thread_done(NULL);
}

void _mi_thread_done(mi_heap_t* heap)
{
  // calling with NULL implies using the default heap
  if (heap == NULL) {
    heap = mi_prim_get_default_heap();
    if (heap == NULL) return;
  }

  // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699)
  if (!mi_heap_is_initialized(heap)) {
    return;
  }

  // adjust stats
  mi_atomic_decrement_relaxed(&thread_count);
  _mi_stat_decrease(&_mi_stats_main.threads, 1);

  // check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
  if (heap->thread_id != _mi_thread_id()) return;
@@ -437,7 +437,7 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) {
  // ensure the default heap is passed to `_mi_thread_done`
  // setting to a non-NULL value also ensures `mi_thread_done` is called.
  _mi_prim_thread_associate_default_heap(heap);
}
@@ -597,7 +597,7 @@ static void mi_cdecl mi_process_done(void) {
  // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
  _mi_prim_thread_done_auto_done();

  #ifndef MI_SKIP_COLLECT_ON_EXIT
  #if (MI_DEBUG || !defined(MI_SHARED_LIB))
  // free all memory if possible on process exit. This is not needed for a stand-alone process


@@ -192,8 +192,8 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
  if (head == NULL) return;

  // find the tail -- also to get a proper count (without data races)
-   uint32_t max_count = page->capacity; // cannot collect more than capacity
-   uint32_t count = 1;
+   size_t max_count = page->capacity; // cannot collect more than capacity
+   size_t count = 1;
  mi_block_t* tail = head;
  mi_block_t* next;
  while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
@@ -211,7 +211,7 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
  page->local_free = head;

  // update counts now
-   page->used -= count;
+   page->used -= (uint16_t)count;
}

void _mi_page_free_collect(mi_page_t* page, bool force) {
@@ -677,7 +677,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
  }
  #endif
  if (_mi_is_power_of_two(block_size) && block_size > 0) {
-     page->block_size_shift = (uint32_t)(mi_ctz((uintptr_t)block_size));
+     page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
  }
  const ptrdiff_t start_offset = (uint8_t*)page_start - (uint8_t*)page;
  const ptrdiff_t start_adjust = start_offset % block_size;
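The `(uint16_t)` cast on `count` above is what the warning fix amounts to in this file: `count` is now a `size_t`, but the loop bounds it by the page capacity, so narrowing it before subtracting from the 16-bit `used` field does not lose data. Below is a hedged sketch of that pattern; the names are illustrative and the 16-bit `capacity` field is an assumption, not a statement about mimalloc's exact layout:

  #include <stdint.h>
  #include <stddef.h>

  typedef struct demo_page_s {
    uint16_t capacity;  // assumed 16-bit upper bound on blocks in the page
    uint16_t used;
  } demo_page_t;

  // Subtracting a size_t from a uint16_t field triggers a truncation warning;
  // an explicit cast is fine when the count is known to be bounded by capacity.
  static void demo_collect(demo_page_t* page, size_t count) {
    if (count > page->capacity) { count = page->capacity; }  // keep the cast in range
    page->used -= (uint16_t)count;
  }

  int main(void) {
    demo_page_t page = { .capacity = 8, .used = 8 };
    demo_collect(&page, 3);
    return (int)page.used;  // 5
  }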