Mirror of https://github.com/microsoft/mimalloc.git, synced 2025-05-03 14:09:31 +03:00
parameter of clz/ctz is size_t
commit ece1defe5b, parent c62d276835

6 changed files with 49 additions and 49 deletions
@@ -213,8 +213,8 @@ void _mi_deferred_free(mi_heap_t* heap, bool force);
 void _mi_page_free_collect(mi_page_t* page,bool force);
 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
 
-size_t _mi_bin_size(uint8_t bin); // for stats
-uint8_t _mi_bin(size_t size); // for stats
+size_t _mi_bin_size(size_t bin); // for stats
+size_t _mi_bin(size_t size); // for stats
 
 // "heap.c"
 void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
@@ -839,21 +839,21 @@ static inline size_t _mi_os_numa_node_count(void) {
 
 #include <limits.h> // LONG_MAX
 #define MI_HAVE_FAST_BITSCAN
-static inline size_t mi_clz(uintptr_t x) {
-  if (x==0) return MI_INTPTR_BITS;
-#if (INTPTR_MAX == LONG_MAX)
-  return __builtin_clzl(x);
-#else
-  return __builtin_clzll(x);
-#endif
+static inline size_t mi_clz(size_t x) {
+  if (x==0) return MI_SIZE_BITS;
+#if (SIZE_MAX == ULONG_MAX)
+  return __builtin_clzl(x);
+#else
+  return __builtin_clzll(x);
+#endif
 }
-static inline size_t mi_ctz(uintptr_t x) {
-  if (x==0) return MI_INTPTR_BITS;
-#if (INTPTR_MAX == LONG_MAX)
-  return __builtin_ctzl(x);
-#else
-  return __builtin_ctzll(x);
-#endif
+static inline size_t mi_ctz(size_t x) {
+  if (x==0) return MI_SIZE_BITS;
+#if (SIZE_MAX == ULONG_MAX)
+  return __builtin_ctzl(x);
+#else
+  return __builtin_ctzll(x);
+#endif
 }
 
 #elif defined(_MSC_VER)
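
The GCC/Clang builtins used above are undefined for a zero argument, hence the explicit x==0 guard that returns MI_SIZE_BITS; the SIZE_MAX == ULONG_MAX test picks __builtin_clzl when size_t is as wide as unsigned long and __builtin_clzll otherwise. A minimal standalone sketch of the same pattern (my_clz and MY_SIZE_BITS are made-up names, not mimalloc code):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define MY_SIZE_BITS ((size_t)(sizeof(size_t) * 8))

static inline size_t my_clz(size_t x) {
  if (x == 0) return MY_SIZE_BITS;        // builtins are undefined for 0
#if (SIZE_MAX == ULONG_MAX)
  return (size_t)__builtin_clzl(x);       // size_t fits in unsigned long
#else
  return (size_t)__builtin_clzll(x);      // otherwise use the long long variant
#endif
}

int main(void) {
  printf("%zu\n", my_clz((size_t)1));     // 63 on a 64-bit target
  printf("%zu\n", my_clz(SIZE_MAX));      // 0
  printf("%zu\n", my_clz(0));             // MY_SIZE_BITS, by the guard
  return 0;
}
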
@@ -861,24 +861,24 @@ static inline size_t mi_ctz(uintptr_t x) {
 #include <limits.h> // LONG_MAX
 #include <intrin.h> // BitScanReverse64
 #define MI_HAVE_FAST_BITSCAN
-static inline size_t mi_clz(uintptr_t x) {
-  if (x==0) return MI_INTPTR_BITS;
+static inline size_t mi_clz(size_t x) {
+  if (x==0) return MI_SIZE_BITS;
   unsigned long idx;
-#if (INTPTR_MAX == LONG_MAX)
-  _BitScanReverse(&idx, x);
-#else
-  _BitScanReverse64(&idx, x);
-#endif
-  return ((MI_INTPTR_BITS - 1) - idx);
+#if (SIZE_MAX == ULONG_MAX)
+  _BitScanReverse(&idx, x);
+#else
+  _BitScanReverse64(&idx, x);
+#endif
+  return ((MI_SIZE_BITS - 1) - idx);
 }
-static inline size_t mi_ctz(uintptr_t x) {
-  if (x==0) return MI_INTPTR_BITS;
+static inline size_t mi_ctz(size_t x) {
+  if (x==0) return MI_SIZE_BITS;
   unsigned long idx;
-#if (INTPTR_MAX == LONG_MAX)
-  _BitScanForward(&idx, x);
-#else
-  _BitScanForward64(&idx, x);
-#endif
+#if (SIZE_MAX == ULONG_MAX)
+  _BitScanForward(&idx, x);
+#else
+  _BitScanForward64(&idx, x);
+#endif
   return idx;
 }
 
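
MSVC exposes bit scans rather than a count-leading-zeros builtin: _BitScanReverse64 stores the zero-based index of the highest set bit, so the code above derives clz as (MI_SIZE_BITS - 1) - idx. A small standalone cross-check of that identity (highest_bit_index is a made-up helper; the GCC/Clang fallback is only there to keep the sketch portable):

#include <stdint.h>
#include <stdio.h>
#if defined(_MSC_VER)
#include <intrin.h>
#endif

static unsigned highest_bit_index(uint64_t x) {  // precondition: x != 0
#if defined(_MSC_VER)
  unsigned long idx;
  _BitScanReverse64(&idx, x);                    // index of the highest set bit
  return (unsigned)idx;
#else
  return 63u - (unsigned)__builtin_clzll(x);     // same index via clz
#endif
}

int main(void) {
  const uint64_t x = 0x00F0;                     // highest set bit is bit 7
  const unsigned idx = highest_bit_index(x);
  printf("index=%u clz=%u\n", idx, 63u - idx);   // prints index=7 clz=56
  return 0;
}
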
@@ -941,9 +941,9 @@ static inline size_t mi_clz(size_t x) {
 
 #endif
 
-// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero)
-static inline size_t mi_bsr(uintptr_t x) {
-  return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x));
+// "bit scan reverse": Return index of the highest bit (or MI_SIZE_BITS if `x` is zero)
+static inline size_t mi_bsr(size_t x) {
+  return (x==0 ? MI_SIZE_BITS : MI_SIZE_BITS - 1 - mi_clz(x));
 }
 
 
src/bitmap.c (10 changed lines)
@@ -81,7 +81,7 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
       // on to the next bit range
 #ifdef MI_HAVE_FAST_BITSCAN
       mi_assert_internal(mapm != 0);
-      const size_t shift = (count == 1 ? 1 : (MI_INTPTR_BITS - mi_clz(mapm) - bitidx));
+      const size_t shift = (count == 1 ? 1 : (MI_SIZE_BITS - mi_clz(mapm) - bitidx));
       mi_assert_internal(shift > 0 && shift <= count);
 #else
       const size_t shift = 1;
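
In the fast-bitscan branch above, MI_SIZE_BITS - mi_clz(mapm) is one past the index of the highest set bit of mapm, so the scan jumps directly past the highest blocking bit instead of advancing one position at a time. A hedged standalone sketch of that arithmetic (assumes a 64-bit size_t and GCC/Clang builtins; the variable names mirror the hunk but the program is not mimalloc code):

#include <stddef.h>
#include <stdio.h>

int main(void) {
  const size_t mapm   = 0x90;                // blocking bits at indices 4 and 7
  const size_t bitidx = 3;                   // current scan position
  const size_t bits   = sizeof(size_t) * 8;  // plays the role of MI_SIZE_BITS
  const size_t shift  = bits - (size_t)__builtin_clzll(mapm) - bitidx;
  printf("shift = %zu\n", shift);            // 8 - 3 = 5: jump from bit 3 to bit 8
  return 0;
}
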
@@ -146,7 +146,7 @@ static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size
   return ((field & mask) == mask);
 }
 
-// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
 // Returns `true` if successful when all previous `count` bits were 0.
 bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
   const size_t idx = mi_bitmap_index_field(bitmap_idx);
@@ -154,9 +154,9 @@ bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count
   const size_t mask = mi_bitmap_mask_(count, bitidx);
   mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
   size_t expected = mi_atomic_load_relaxed(&bitmap[idx]);
-  do {
+  do {
     if ((expected & mask) != 0) return false;
-  }
+  }
   while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask));
   mi_assert_internal((expected & mask) == 0);
   return true;
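
The loop above is a standard compare-and-swap claim: load the field, fail fast if any bit in the mask is already set, otherwise try to publish expected | mask, retrying with the refreshed expected value on contention. A minimal standalone sketch using C11 atomics rather than mimalloc's mi_atomic wrappers (try_claim_bits is a made-up name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool try_claim_bits(_Atomic size_t* field, size_t mask) {
  size_t expected = atomic_load_explicit(field, memory_order_relaxed);
  do {
    if ((expected & mask) != 0) return false;   // some bit is already claimed
  } while (!atomic_compare_exchange_strong_explicit(
               field, &expected, expected | mask,
               memory_order_acq_rel, memory_order_acquire));
  return true;                                  // all bits were 0 and are now set
}

int main(void) {
  _Atomic size_t field = 0;
  printf("%d\n", try_claim_bits(&field, 0x0F)); // 1: bits 0..3 were free
  printf("%d\n", try_claim_bits(&field, 0x18)); // 0: bit 3 is already set
  return 0;
}
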
@@ -194,7 +194,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
   if (initial == 0) return false;
   if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us)
   if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries
-
+
   // scan ahead
   size_t found = initial;
   size_t mask = 0; // mask bits for the final field
@@ -552,7 +552,7 @@ void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
 
 static void mi_get_fast_divisor(size_t divisor, uint64_t* magic, size_t* shift) {
   mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
-  *shift = MI_INTPTR_BITS - mi_clz(divisor - 1);
+  *shift = MI_SIZE_BITS - mi_clz(divisor - 1);
   *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1);
 }
 
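
mi_get_fast_divisor above computes a shift of ceil(log2(divisor)) and the classic round-up magic multiplier floor(2^32 * (2^shift - divisor) / divisor) + 1. The companion divide helper is not part of this diff, so the following is only a sketch of how such a (magic, shift) pair is conventionally consumed for n <= UINT32_MAX (get_fast_divisor and fast_divide are made-up names; GCC/Clang builtins are used, and divisor >= 2 is assumed):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void get_fast_divisor(uint32_t divisor, uint64_t* magic, unsigned* shift) {
  // bit length of divisor-1, i.e. ceil(log2(divisor)); same value the 64-bit clz form yields
  *shift = 32u - (unsigned)__builtin_clz(divisor - 1);
  *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1);
}

static uint32_t fast_divide(uint32_t n, uint64_t magic, unsigned shift) {
  const uint64_t hi = ((uint64_t)n * magic) >> 32;   // high half of n * magic
  return (uint32_t)((hi + n) >> shift);              // equals n / divisor for n < 2^32
}

int main(void) {
  uint64_t magic; unsigned shift;
  get_fast_divisor(48, &magic, &shift);              // e.g. a 48-byte block size
  for (uint32_t n = 0; n < 1000000; n++) {
    assert(fast_divide(n, magic, shift) == n / 48);  // spot-check the identity
  }
  printf("ok: shift=%u magic=%llu\n", shift, (unsigned long long)magic);
  return 0;
}
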
@@ -57,7 +57,7 @@ static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
 // Returns MI_BIN_HUGE if the size is too large.
 // We use `wsize` for the size in "machine word sizes",
 // i.e. byte size == `wsize*sizeof(void*)`.
-static inline uint8_t mi_bin(size_t size) {
+static inline size_t mi_bin(size_t size) {
   size_t wsize = _mi_wsize_from_size(size);
 #if defined(MI_ALIGN4W)
   if mi_likely(wsize <= 4) {
@@ -81,7 +81,7 @@ static inline uint8_t mi_bin(size_t size) {
 #endif
     wsize--;
     // find the highest bit
-    const size_t b = mi_bsr(wsize); // note: wsize != 0
+    const size_t b = (MI_SIZE_BITS - 1 - mi_clz(wsize)); // note: wsize != 0
    // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
    // - adjust with 3 because we use do not round the first 8 sizes
    //   which each get an exact bin
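
The replacement keeps the same value as the old mi_bsr call for any nonzero wsize: both are the index of the highest set bit. The bits just below that highest bit then split each power-of-two range of word sizes into a few sub-bins; a hedged illustration of the resulting rounding (the exact bin formula is outside this hunk, and the sketch assumes a 64-bit size_t with GCC/Clang builtins):

#include <stdio.h>

int main(void) {
  for (size_t wsize = 9; wsize <= 16; wsize++) {
    size_t w = wsize - 1;                            // mirrors the wsize-- above
    size_t b = 63 - (size_t)__builtin_clzll(w);      // highest set bit, w != 0
    size_t step = (size_t)1 << (b - 2);              // quarter of the 2^b range
    size_t rounded = ((w / step) + 1) * step;        // round up to the next quarter step
    printf("wsize=%2zu -> block of %2zu words\n", wsize, rounded);  // 9,10 -> 10; 11,12 -> 12; ...
  }
  return 0;
}
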
@@ -97,11 +97,11 @@ static inline uint8_t mi_bin(size_t size) {
   Queue of pages with free blocks
 ----------------------------------------------------------- */
 
-uint8_t _mi_bin(size_t size) {
+size_t _mi_bin(size_t size) {
   return mi_bin(size);
 }
 
-size_t _mi_bin_size(uint8_t bin) {
+size_t _mi_bin_size(size_t bin) {
   return _mi_heap_empty.pages[bin].block_size;
 }
 
@@ -138,7 +138,7 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
 
 static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
   mi_assert_internal(heap!=NULL);
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
+  size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
   mi_assert_internal(bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
   mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
@@ -180,7 +180,7 @@ static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_que
   }
   else {
     // find previous size; due to minimal alignment upto 3 previous bins may need to be skipped
-    uint8_t bin = mi_bin(size);
+    size_t bin = mi_bin(size);
     const mi_page_queue_t* prev = pq - 1;
     while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
       prev--;
@@ -123,7 +123,7 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
   if (si.dwAllocationGranularity > 0) { config->alloc_granularity = si.dwAllocationGranularity; }
   // get virtual address bits
   if ((uintptr_t)si.lpMaximumApplicationAddress > 0) {
-    const size_t vbits = MI_INTPTR_BITS - mi_clz((uintptr_t)si.lpMaximumApplicationAddress);
+    const size_t vbits = MI_SIZE_BITS - mi_clz((uintptr_t)si.lpMaximumApplicationAddress);
     config->virtual_address_bits = vbits;
   }
 
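
The vbits computation above turns the highest usable user-space address into a bit width: MI_SIZE_BITS minus the leading zero count is one past the index of the address's highest set bit. A small sketch using a commonly reported 64-bit Windows value for lpMaximumApplicationAddress (the constant is an assumption, not taken from this diff; GCC/Clang builtin used for brevity):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint64_t max_addr = 0x00007FFFFFFEFFFFull;            // typical 64-bit user-space top
  const size_t vbits = 64 - (size_t)__builtin_clzll(max_addr);
  printf("virtual address bits: %zu\n", vbits);               // 47
  return 0;
}
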
@@ -33,7 +33,7 @@ int main() {
   mi_version();
   mi_stats_reset();
 
-  mi_bins();
+  // mi_bins();
 
   // test_manage_os_memory();
   // test_large_pages();
@@ -315,7 +315,7 @@ static void test_large_pages(void) {
 // bin size experiments
 // ------------------------------
 
-#if 1
+#if 0
 #include <stdint.h>
 #include <stdbool.h>
 