Mirror of https://github.com/microsoft/mimalloc.git
fix compilation warnings for new uint16_t size for used field
This commit is contained in:
parent 9085596eab
commit 60c4a0fe56
5 changed files with 68 additions and 53 deletions
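Shrinking the page's `used` counter to `uint16_t` makes every assignment from a wider type (typically `size_t`) produce implicit-narrowing warnings (MSVC C4244/C4267, GCC/Clang -Wconversion); the usual fix is an explicit cast at sites where the value is known to fit. A minimal sketch of the pattern, with illustrative names rather than the exact mimalloc code:

#include <stdint.h>
#include <stddef.h>

// Illustrative page-like struct; `used` is now a 16-bit counter.
typedef struct demo_page_s {
  uint16_t capacity;   // total number of blocks in the page
  uint16_t used;       // number of blocks currently in use
} demo_page_t;

void demo_set_used(demo_page_t* page, size_t count) {
  // page->used = count;           // would warn: size_t narrowed to uint16_t
  page->used = (uint16_t)count;    // explicit cast; caller guarantees the count fits in 16 bits
}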
@@ -30,7 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_noinline     __declspec(noinline)
#define mi_decl_thread       __declspec(thread)
#define mi_decl_cache_align  __declspec(align(MI_CACHE_LINE))
#define mi_decl_weak
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)  // includes clang and icc
#define mi_decl_noinline     __attribute__((noinline))
#define mi_decl_thread       __thread

@@ -40,7 +40,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_noinline
#define mi_decl_thread       __thread   // hope for the best :-)
#define mi_decl_cache_align
#define mi_decl_weak
#endif

#if defined(__EMSCRIPTEN__) && !defined(__wasi__)

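These mi_decl_* macros keep the rest of the sources compiler-agnostic: MSVC gets the __declspec spellings, GCC/Clang the __attribute__ and __thread ones, and other compilers get the best-effort or empty fallbacks in the second hunk above. A hypothetical declaration using them might look like the following (the variable and function are made up for illustration, not taken from mimalloc):

// On MSVC this expands to __declspec(align(MI_CACHE_LINE)) __declspec(thread);
// other compilers substitute their own spellings or an empty fallback.
mi_decl_cache_align mi_decl_thread long demo_tls_counter;

// Marked noinline so a hot caller's fast path stays small.
mi_decl_noinline void demo_slow_path(void) { demo_tls_counter++; }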
@@ -91,7 +91,7 @@ void _mi_thread_data_collect(void);

// os.c
void _mi_os_init(void);  // called from process init
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);

@@ -132,8 +132,8 @@ void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
size_t _mi_arena_segment_abandoned_count(void);

typedef struct mi_arena_field_cursor_s { // abstract
  mi_arena_id_t start;
  int count;
  size_t bitmap_idx;
} mi_arena_field_cursor_t;
void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current);

@@ -300,14 +300,14 @@ typedef struct mi_page_s {
  uint8_t block_size_shift;   // if not zero, then `(1 << block_size_shift) == block_size` (used for fast path in `free.c:_mi_page_ptr_unalign`)
  uint8_t block_offset_adj;   // if not zero, then `(page_start - (uint8_t*)page - 8*(block_offset_adj-1)) % block_size == 0` (used for fast path in `free.c:_mi_page_ptr_unalign`)
  uint32_t xblock_size;       // size available in each block (always `>0`)

  #if (MI_ENCODE_FREELIST || MI_PADDING)
  uintptr_t keys[2];          // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
  #endif

  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
  _Atomic(uintptr_t) xheap;

  struct mi_page_s* next;     // next page owned by the heap with the same `block_size`
  struct mi_page_s* prev;     // previous page owned by the heap with the same `block_size`
} mi_page_t;

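The block_size_shift comment above encodes an invariant that makes interior-pointer adjustment cheap: when the block size is a power of two, the offset of a pointer within its block can be computed with a mask instead of a modulo. A rough sketch of that idea (not the actual free.c:_mi_page_ptr_unalign code; names are illustrative):

#include <stdint.h>
#include <stddef.h>

// Given the start of a page's block area and an interior pointer `p`, return the
// start of the block containing `p`. Assumes block_size == (1 << block_size_shift).
void* demo_block_start(uint8_t* page_start, uint8_t* p, uint8_t block_size_shift) {
  size_t diff   = (size_t)(p - page_start);
  size_t offset = diff & (((size_t)1 << block_size_shift) - 1);  // diff % block_size, without a division
  return p - offset;
}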
@@ -373,7 +373,7 @@ typedef struct mi_segment_s {
  bool allow_decommit;
  bool allow_purge;
  size_t segment_size;        // for huge pages this may be different from `MI_SEGMENT_SIZE`

  // segment fields
  struct mi_segment_s* next;  // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
  struct mi_segment_s* prev;

@@ -450,7 +450,7 @@ struct mi_heap_s {
  mi_tld_t* tld;
  _Atomic(mi_block_t*) thread_delayed_free;
  mi_threadid_t thread_id;  // thread this heap belongs to
  mi_arena_id_t arena_id;   // arena id if the heap belongs to a specific arena (or 0)
  uintptr_t cookie;         // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t keys[2];        // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t random;   // random number context used for secure allocation

@@ -460,7 +460,7 @@ struct mi_heap_s {
  mi_heap_t* next;  // list of heaps per thread
  bool no_reclaim;  // `true` if this heap should not reclaim abandoned pages
  mi_page_t* pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t pages[MI_BIN_FULL + 1];         // queue of pages for each size class (or "bin")
};
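The pages_free_direct comment describes the small-allocation fast path: a small request size maps directly to a candidate page, skipping any search through the bins. A simplified sketch of such a lookup, assuming the array is indexed by the request size rounded up to whole words and that the request is small enough to fall inside the array (the indexing and helper name are illustrative, not mimalloc's exact code):

// Hypothetical O(1) lookup: request size in bytes -> candidate page for that size class.
static inline mi_page_t* demo_find_small_page(mi_heap_t* heap, size_t size) {
  size_t wsize = (size + sizeof(void*) - 1) / sizeof(void*);  // round up to machine words
  return heap->pages_free_direct[wsize];                      // no bin scan needed for small sizes
}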