diff --git a/src/arena.c b/src/arena.c
index ac57ad8e..6b1e951f 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -62,7 +62,7 @@ typedef struct mi_arena_s {
   bool is_zero_init;                   // is the arena zero initialized?
   bool allow_decommit;                 // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
   bool is_large;                       // large- or huge OS pages (always committed)
-  _Atomic(uintptr_t) search_idx;       // optimization to start the search for free blocks
+  _Atomic(size_t) search_idx;          // optimization to start the search for free blocks
   mi_bitmap_field_t* blocks_dirty;     // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t blocks_inuse[1];   // in-place bitmap of in-use blocks (of size `field_count`)
@@ -71,7 +71,7 @@ typedef struct mi_arena_s {
 
 // The available arenas
 static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
-static mi_decl_cache_align _Atomic(uintptr_t) mi_arena_count; // = 0
+static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0
 
 
 /* -----------------------------------------------------------
@@ -286,7 +286,7 @@ static bool mi_arena_add(mi_arena_t* arena) {
   mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
   mi_assert_internal(arena->block_count > 0);
 
-  uintptr_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
+  size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
   if (i >= MI_MAX_ARENAS) {
     mi_atomic_decrement_acq_rel(&mi_arena_count);
     return false;
diff --git a/src/bitmap.c b/src/bitmap.c
index 6b0183b0..af6de0a1 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 /* ----------------------------------------------------------------------------
 Concurrent bitmap that can set/reset sequences of bits atomically,
-represeted as an array of fields where each field is a machine word (`uintptr_t`)
+represented as an array of fields where each field is a machine word (`size_t`)
 
 There are two api's; the standard one cannot have sequences that cross
 between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
@@ -26,11 +26,12 @@ between the fields. (This is used in arena allocation)
 ----------------------------------------------------------- */
 
 // The bit mask for a given number of blocks at a specified bit index.
-static inline uintptr_t mi_bitmap_mask_(size_t count, size_t bitidx) {
+static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
   mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS);
   mi_assert_internal(count > 0);
   if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL;
-  return ((((uintptr_t)1 << count) - 1) << bitidx);
+  if (count == 0) return 0;
+  return ((((size_t)1 << count) - 1) << bitidx);
 }
@@ -45,27 +46,27 @@ inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
   mi_assert_internal(bitmap_idx != NULL);
   mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
   mi_assert_internal(count > 0);
-  _Atomic(uintptr_t)* field = &bitmap[idx];
-  uintptr_t map = mi_atomic_load_relaxed(field);
+  mi_bitmap_field_t* field = &bitmap[idx];
+  size_t map = mi_atomic_load_relaxed(field);
   if (map==MI_BITMAP_FIELD_FULL) return false; // short cut
 
   // search for 0-bit sequence of length count
-  const uintptr_t mask = mi_bitmap_mask_(count, 0);
-  const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count;
+  const size_t mask = mi_bitmap_mask_(count, 0);
+  const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count;
 
 #ifdef MI_HAVE_FAST_BITSCAN
   size_t bitidx = mi_ctz(~map);    // quickly find the first zero bit if possible
 #else
   size_t bitidx = 0;               // otherwise start at 0
 #endif
-  uintptr_t m = (mask << bitidx);  // invariant: m == mask shifted by bitidx
+  size_t m = (mask << bitidx);     // invariant: m == mask shifted by bitidx
 
   // scan linearly for a free range of zero bits
   while (bitidx <= bitidx_max) {
-    const uintptr_t mapm = map & m;
+    const size_t mapm = map & m;
     if (mapm == 0) {  // are the mask bits free at bitidx?
       mi_assert_internal((m >> bitidx) == mask); // no overflow?
-      const uintptr_t newmap = map | m;
+      const size_t newmap = map | m;
       mi_assert_internal((newmap^map) >> bitidx == mask);
       if (!mi_atomic_cas_weak_acq_rel(field, &map, newmap)) {  // TODO: use strong cas here?
         // no success, another thread claimed concurrently.. keep going (with updated `map`)
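
As a quick illustration of what `mi_bitmap_mask_` above computes, here is a minimal standalone sketch (not mimalloc code; `FIELD_BITS` and `mask_of` are hypothetical names): `count` one-bits placed at `bitidx`, so on a 64-bit target `mask_of(4, 8)` prints `f00`.

#include <stddef.h>
#include <stdio.h>

#define FIELD_BITS (8 * sizeof(size_t))

// `count` one-bits starting at `bitidx`; a full field for count >= FIELD_BITS.
static size_t mask_of(size_t count, size_t bitidx) {
  if (count >= FIELD_BITS) return ~(size_t)0;
  if (count == 0) return 0;
  return (((size_t)1 << count) - 1) << bitidx;
}

int main(void) {
  printf("%zx\n", mask_of(4, 8));  // f00: bits 8..11 set
  return 0;
}
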
@@ -120,10 +121,10 @@ bool _mi_bitmap_try_find_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t count, mi_bitmap_index_t* bitmap_idx)
 bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
   const size_t idx = mi_bitmap_index_field(bitmap_idx);
   const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
-  const uintptr_t mask = mi_bitmap_mask_(count, bitidx);
+  const size_t mask = mi_bitmap_mask_(count, bitidx);
   mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
   // mi_assert_internal((bitmap[idx] & mask) == mask);
-  uintptr_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask);
+  size_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask);
   return ((prev & mask) == mask);
 }
@@ -133,10 +134,10 @@ bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx)
 bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) {
   const size_t idx = mi_bitmap_index_field(bitmap_idx);
   const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
-  const uintptr_t mask = mi_bitmap_mask_(count, bitidx);
+  const size_t mask = mi_bitmap_mask_(count, bitidx);
   mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
   //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0);
-  uintptr_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask);
+  size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask);
   if (any_zero != NULL) *any_zero = ((prev & mask) != mask);
   return ((prev & mask) == 0);
 }
@@ -145,9 +146,9 @@ bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero)
 static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) {
   const size_t idx = mi_bitmap_index_field(bitmap_idx);
   const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
-  const uintptr_t mask = mi_bitmap_mask_(count, bitidx);
+  const size_t mask = mi_bitmap_mask_(count, bitidx);
   mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
-  uintptr_t field = mi_atomic_load_relaxed(&bitmap[idx]);
+  size_t field = mi_atomic_load_relaxed(&bitmap[idx]);
   if (any_ones != NULL) *any_ones = ((field & mask) != 0);
   return ((field & mask) == mask);
 }
@@ -175,8 +176,8 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
   mi_assert_internal(bitmap_idx != NULL);
 
   // check initial trailing zeros
-  _Atomic(uintptr_t)* field = &bitmap[idx];
-  uintptr_t map = mi_atomic_load_relaxed(field);
+  mi_bitmap_field_t* field = &bitmap[idx];
+  size_t map = mi_atomic_load_relaxed(field);
   const size_t initial = mi_clz(map); // count of initial zeros starting at idx
   mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS);
   if (initial == 0) return false;
@@ -185,11 +186,11 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
 
   // scan ahead
   size_t found = initial;
-  uintptr_t mask = 0; // mask bits for the final field
+  size_t mask = 0;    // mask bits for the final field
   while(found < count) {
     field++;
     map = mi_atomic_load_relaxed(field);
-    const uintptr_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? MI_BITMAP_FIELD_BITS : (count - found));
+    const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? MI_BITMAP_FIELD_BITS : (count - found));
     mask = mi_bitmap_mask_(mask_bits, 0);
     if ((map & mask) != 0) return false;
     found += mask_bits;
@@ -198,13 +199,13 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
 
   // found range of zeros up to the final field; mask contains mask in the final field
   // now claim it atomically
-  _Atomic(uintptr_t)* const final_field = field;
-  const uintptr_t final_mask = mask;
-  _Atomic(uintptr_t)* const initial_field = &bitmap[idx];
-  const uintptr_t initial_mask = mi_bitmap_mask_(initial, MI_BITMAP_FIELD_BITS - initial);
+  mi_bitmap_field_t* const final_field = field;
+  const size_t final_mask = mask;
+  mi_bitmap_field_t* const initial_field = &bitmap[idx];
+  const size_t initial_mask = mi_bitmap_mask_(initial, MI_BITMAP_FIELD_BITS - initial);
 
   // initial field
-  uintptr_t newmap;
+  size_t newmap;
   field = initial_field;
   map = mi_atomic_load_relaxed(field);
   do {
@@ -279,7 +280,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
 }
 
 // Helper for masks across fields; returns the mid count, post_mask may be 0
-static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, uintptr_t* pre_mask, uintptr_t* mid_mask, uintptr_t* post_mask) {
+static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
   MI_UNUSED_RELEASE(bitmap_fields);
   const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
   if (mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS)) {
@@ -307,13 +308,13 @@ static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, uintptr_t* pre_mask, uintptr_t* mid_mask, uintptr_t* post_mask)
 // Returns `true` if all `count` bits were 1 previously.
 bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
   size_t idx = mi_bitmap_index_field(bitmap_idx);
-  uintptr_t pre_mask;
-  uintptr_t mid_mask;
-  uintptr_t post_mask;
+  size_t pre_mask;
+  size_t mid_mask;
+  size_t post_mask;
   size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
   bool all_one = true;
-  _Atomic(uintptr_t)*field = &bitmap[idx];
-  uintptr_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask);
+  mi_bitmap_field_t* field = &bitmap[idx];
+  size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask);
   if ((prev & pre_mask) != pre_mask) all_one = false;
   while(mid_count-- > 0) {
     prev = mi_atomic_and_acq_rel(field++, ~mid_mask);
@@ -330,14 +331,14 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx)
 
 // Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
 bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) {
   size_t idx = mi_bitmap_index_field(bitmap_idx);
-  uintptr_t pre_mask;
-  uintptr_t mid_mask;
-  uintptr_t post_mask;
+  size_t pre_mask;
+  size_t mid_mask;
+  size_t post_mask;
   size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
   bool all_zero = true;
   bool any_zero = false;
-  _Atomic(uintptr_t)*field = &bitmap[idx];
-  uintptr_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
+  _Atomic(size_t)*field = &bitmap[idx];
+  size_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
   if ((prev & pre_mask) != 0) all_zero = false;
   if ((prev & pre_mask) != pre_mask) any_zero = true;
   while (mid_count-- > 0) {
@@ -359,14 +360,14 @@ bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero)
 // `any_ones` is `true` if there was at least one bit set to one.
 static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) {
   size_t idx = mi_bitmap_index_field(bitmap_idx);
-  uintptr_t pre_mask;
-  uintptr_t mid_mask;
-  uintptr_t post_mask;
+  size_t pre_mask;
+  size_t mid_mask;
+  size_t post_mask;
   size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
   bool all_ones = true;
   bool any_ones = false;
-  _Atomic(uintptr_t)* field = &bitmap[idx];
-  uintptr_t prev = mi_atomic_load_relaxed(field++);
+  mi_bitmap_field_t* field = &bitmap[idx];
+  size_t prev = mi_atomic_load_relaxed(field++);
   if ((prev & pre_mask) != pre_mask) all_ones = false;
   if ((prev & pre_mask) != 0) any_ones = true;
   while (mid_count-- > 0) {
diff --git a/src/bitmap.h b/src/bitmap.h
index aae99279..7bd3106c 100644
--- a/src/bitmap.h
+++ b/src/bitmap.h
@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 /* ----------------------------------------------------------------------------
 Concurrent bitmap that can set/reset sequences of bits atomically,
-represeted as an array of fields where each field is a machine word (`uintptr_t`)
+represented as an array of fields where each field is a machine word (`size_t`)
 
 There are two api's; the standard one cannot have sequences that cross
 between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
@@ -24,11 +24,11 @@ between the fields. (This is used in arena allocation)
   Bitmap definition
 ----------------------------------------------------------- */
 
-#define MI_BITMAP_FIELD_BITS (8*MI_INTPTR_SIZE)
-#define MI_BITMAP_FIELD_FULL (~((uintptr_t)0)) // all bits set
+#define MI_BITMAP_FIELD_BITS (8*MI_SIZE_SIZE)
+#define MI_BITMAP_FIELD_FULL (~((size_t)0))    // all bits set
 
-// An atomic bitmap of `uintptr_t` fields
-typedef _Atomic(uintptr_t) mi_bitmap_field_t;
+// An atomic bitmap of `size_t` fields
+typedef _Atomic(size_t) mi_bitmap_field_t;
 typedef mi_bitmap_field_t* mi_bitmap_t;
 
 // A bitmap index is the index of the bit in a bitmap.
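
Between the bitmap changes above and the call-site changes below, it may help to see the claim operation in isolation. The following is a minimal standalone sketch of the single-field claim that `_mi_bitmap_try_find_claim_field` performs, written with plain C11 atomics over the same `size_t` field representation; `bitmap_field_t`, `mask_of`, and `try_claim` are hypothetical names, and the linear scan is simplified (the real code skips ahead with bit-scan intrinsics).

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FIELD_BITS (8 * sizeof(size_t))

typedef _Atomic(size_t) bitmap_field_t;

// `count` one-bits starting at `bitidx`.
static size_t mask_of(size_t count, size_t bitidx) {
  if (count >= FIELD_BITS) return ~(size_t)0;
  return (((size_t)1 << count) - 1) << bitidx;
}

// Try to claim `count` contiguous zero bits in one field; on success the
// bits are set atomically and their start index is returned in *bitidx_out.
static bool try_claim(bitmap_field_t* field, size_t count, size_t* bitidx_out) {
  const size_t mask = mask_of(count, 0);
  size_t map = atomic_load_explicit(field, memory_order_relaxed);
  size_t bitidx = 0;
  while (bitidx + count <= FIELD_BITS) {
    const size_t m = mask << bitidx;
    if ((map & m) == 0) {
      // the range is free in our snapshot: try to set all bits at once
      if (atomic_compare_exchange_weak_explicit(field, &map, map | m,
              memory_order_acq_rel, memory_order_acquire)) {
        *bitidx_out = bitidx;
        return true;
      }
      // CAS failed: another thread interfered; `map` was refreshed by the
      // CAS, so retry at the same index with the updated snapshot
    } else {
      bitidx++;  // occupied: advance one bit (mimalloc skips ahead faster)
    }
  }
  return false;
}

int main(void) {
  bitmap_field_t field = 0;
  size_t idx;
  if (try_claim(&field, 4, &idx)) printf("claimed 4 bits at index %zu\n", idx);
  return 0;
}
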
diff --git a/src/init.c b/src/init.c
index 71e7e3e6..20060232 100644
--- a/src/init.c
+++ b/src/init.c
@@ -359,7 +359,7 @@ bool _mi_is_main_thread(void) {
   return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id());
 }
 
-static _Atomic(uintptr_t) thread_count = ATOMIC_VAR_INIT(1);
+static _Atomic(size_t) thread_count = ATOMIC_VAR_INIT(1);
 
 size_t _mi_current_thread_count(void) {
   return mi_atomic_load_relaxed(&thread_count);
diff --git a/src/options.c b/src/options.c
index eb60cc90..3fd58018 100644
--- a/src/options.c
+++ b/src/options.c
@@ -19,8 +19,8 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 
-static uintptr_t mi_max_error_count   = 16; // stop outputting errors after this
-static uintptr_t mi_max_warning_count = 16; // stop outputting warnings after this
+static size_t mi_max_error_count   = 16; // stop outputting errors after this
+static size_t mi_max_warning_count = 16; // stop outputting warnings after this
 
 static void mi_add_stderr_output(void);
 
@@ -179,10 +179,10 @@ static void mi_out_stderr(const char* msg, void* arg) {
 // an output function is registered it is called immediately with
 // the output up to that point.
 #ifndef MI_MAX_DELAY_OUTPUT
-#define MI_MAX_DELAY_OUTPUT ((uintptr_t)(32*1024))
+#define MI_MAX_DELAY_OUTPUT ((size_t)(32*1024))
 #endif
 static char out_buf[MI_MAX_DELAY_OUTPUT+1];
-static _Atomic(uintptr_t) out_len;
+static _Atomic(size_t) out_len;
 
 static void mi_out_buf(const char* msg, void* arg) {
   MI_UNUSED(arg);
@@ -191,7 +191,7 @@ static void mi_out_buf(const char* msg, void* arg) {
   size_t n = strlen(msg);
   if (n==0) return;
   // claim space
-  uintptr_t start = mi_atomic_add_acq_rel(&out_len, n);
+  size_t start = mi_atomic_add_acq_rel(&out_len, n);
   if (start >= MI_MAX_DELAY_OUTPUT) return;
   // check bound
   if (start+n >= MI_MAX_DELAY_OUTPUT) {
@@ -254,8 +254,8 @@ static void mi_add_stderr_output() {
 // --------------------------------------------------------
 // Messages, all end up calling `_mi_fputs`.
 // --------------------------------------------------------
-static _Atomic(uintptr_t) error_count;   // = 0;  // when >= max_error_count stop emitting errors
-static _Atomic(uintptr_t) warning_count; // = 0;  // when >= max_warning_count stop emitting warnings
+static _Atomic(size_t) error_count;   // = 0;  // when >= max_error_count stop emitting errors
+static _Atomic(size_t) warning_count; // = 0;  // when >= max_warning_count stop emitting warnings
 
 // When overriding malloc, we may recurse into mi_vfprintf if an allocation
 // inside the C runtime causes another message.
diff --git a/src/os.c b/src/os.c
index 772f0b52..31ad4119 100644
--- a/src/os.c
+++ b/src/os.c
@@ -332,11 +332,11 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
 
 static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
   mi_assert_internal(!(large_only && !allow_large));
-  static _Atomic(uintptr_t) large_page_try_ok; // = 0;
+  static _Atomic(size_t) large_page_try_ok; // = 0;
   void* p = NULL;
   if ((large_only || use_large_os_page(size, try_alignment)) && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
-    uintptr_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
+    size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
     if (!large_only && try_ok > 0) {
       // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
       // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
@@ -477,8 +477,8 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
   #endif
   // huge page allocation
   if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) {
-    static _Atomic(uintptr_t) large_page_try_ok; // = 0;
-    uintptr_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
+    static _Atomic(size_t) large_page_try_ok; // = 0;
+    size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
     if (!large_only && try_ok > 0) {
       // If the OS is not configured for large OS pages, or the user does not have
       // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
@@ -524,7 +524,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
       #endif
       if (large_only) return p;
       if (p == NULL) {
-        mi_atomic_store_release(&large_page_try_ok, (uintptr_t)8); // on error, don't try again for the next N allocations
+        mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations
       }
     }
   }
@@ -930,13 +930,13 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) {
   if (p != start) return false;
 #else
 #if defined(MADV_FREE)
-  static _Atomic(uintptr_t) advice = ATOMIC_VAR_INIT(MADV_FREE);
+  static _Atomic(size_t) advice = ATOMIC_VAR_INIT(MADV_FREE);
   int oadvice = (int)mi_atomic_load_relaxed(&advice);
   int err;
   while ((err = madvise(start, csize, oadvice)) != 0 && errno == EAGAIN) { errno = 0; };
   if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
     // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
-    mi_atomic_store_release(&advice, (uintptr_t)MADV_DONTNEED);
+    mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED);
     err = madvise(start, csize, MADV_DONTNEED);
   }
 #elif defined(__wasi__)
@@ -1118,7 +1118,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
   void* p = mi_unix_mmap(addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large);
   if (p == NULL) return NULL;
   if (numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes
-    uintptr_t numa_mask = (1UL << numa_node);
+    unsigned long numa_mask = (1UL << numa_node);
     // TODO: does `mbind` work correctly for huge OS pages? should we
     // use `set_mempolicy` before calling mmap instead?
     // see:
diff --git a/src/region.c b/src/region.c
index d99b74af..f864f73b 100644
--- a/src/region.c
+++ b/src/region.c
@@ -74,7 +74,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
 
 // Region info
 typedef union mi_region_info_u {
-  uintptr_t value;
+  size_t value;
   struct {
     bool valid;       // initialized?
     bool is_large:1;  // allocated in fixed large/huge OS pages
@@ -87,21 +87,21 @@ typedef union mi_region_info_u {
 
 // A region owns a chunk of REGION_SIZE (256MiB) (virtual) memory with
 // a bit map with one bit per MI_SEGMENT_SIZE (4MiB) block.
 typedef struct mem_region_s {
-  _Atomic(uintptr_t) info;        // mi_region_info_t.value
+  _Atomic(size_t) info;           // mi_region_info_t.value
   _Atomic(void*) start;           // start of the memory area
   mi_bitmap_field_t in_use;       // bit per in-use block
   mi_bitmap_field_t dirty;        // track if non-zero per block
   mi_bitmap_field_t commit;       // track if committed per block
   mi_bitmap_field_t reset;        // track if reset per block
-  _Atomic(uintptr_t) arena_memid; // if allocated from a (huge page) arena
-  uintptr_t padding;              // round to 8 fields
+  _Atomic(size_t) arena_memid;    // if allocated from a (huge page) arena
+  size_t padding;                 // round to 8 fields
 } mem_region_t;
 
 // The region map
 static mem_region_t regions[MI_REGION_MAX];
 
 // Allocated regions
-static _Atomic(uintptr_t) regions_count; // = 0;
+static _Atomic(size_t) regions_count; // = 0;
 
 
 /* ----------------------------------------------------------------------------
@@ -186,7 +186,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld)
   mi_assert_internal(!region_large || region_commit);
 
   // claim a fresh slot
-  const uintptr_t idx = mi_atomic_increment_acq_rel(&regions_count);
+  const size_t idx = mi_atomic_increment_acq_rel(&regions_count);
   if (idx >= MI_REGION_MAX) {
     mi_atomic_decrement_acq_rel(&regions_count);
     _mi_arena_free(start, MI_REGION_SIZE, arena_memid, region_commit, tld->stats);
@@ -197,10 +197,10 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld)
   // allocated, initialize and claim the initial blocks
   mem_region_t* r = &regions[idx];
   r->arena_memid = arena_memid;
-  mi_atomic_store_release(&r->in_use, (uintptr_t)0);
+  mi_atomic_store_release(&r->in_use, (size_t)0);
   mi_atomic_store_release(&r->dirty, (is_zero ? 0 : MI_BITMAP_FIELD_FULL));
   mi_atomic_store_release(&r->commit, (region_commit ? MI_BITMAP_FIELD_FULL : 0));
-  mi_atomic_store_release(&r->reset, (uintptr_t)0);
+  mi_atomic_store_release(&r->reset, (size_t)0);
   *bit_idx = 0;
   _mi_bitmap_claim(&r->in_use, 1, blocks, *bit_idx, NULL);
   mi_atomic_store_ptr_release(void,&r->start, start);
@@ -451,21 +451,21 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_reset, mi_os_tld_t* tld)
 -----------------------------------------------------------------------------*/
 void _mi_mem_collect(mi_os_tld_t* tld) {
   // free every region that has no segments in use.
-  uintptr_t rcount = mi_atomic_load_relaxed(&regions_count);
+  size_t rcount = mi_atomic_load_relaxed(&regions_count);
   for (size_t i = 0; i < rcount; i++) {
     mem_region_t* region = &regions[i];
     if (mi_atomic_load_relaxed(&region->info) != 0) {
       // if no segments used, try to claim the whole region
-      uintptr_t m = mi_atomic_load_relaxed(&region->in_use);
+      size_t m = mi_atomic_load_relaxed(&region->in_use);
       while (m == 0 && !mi_atomic_cas_weak_release(&region->in_use, &m, MI_BITMAP_FIELD_FULL)) { /* nothing */ };
       if (m == 0) {
         // on success, free the whole region
         uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t,&regions[i].start);
         size_t arena_memid = mi_atomic_load_relaxed(&regions[i].arena_memid);
-        uintptr_t commit = mi_atomic_load_relaxed(&regions[i].commit);
+        size_t commit = mi_atomic_load_relaxed(&regions[i].commit);
         memset((void*)&regions[i], 0, sizeof(mem_region_t)); // cast to void* to avoid atomic warning
         // and release the whole region
-        mi_atomic_store_release(&region->info, (uintptr_t)0);
+        mi_atomic_store_release(&region->info, (size_t)0);
         if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) {
           _mi_abandoned_await_readers(); // ensure no pending reads
           _mi_arena_free(start, MI_REGION_SIZE, arena_memid, (~commit == 0), tld->stats);
diff --git a/src/segment.c b/src/segment.c
index 01fbe022..af72cdf5 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -922,13 +922,13 @@ static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // = NULL
 static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL
 
 // Maintain these for debug purposes (these counts may be a bit off)
-static mi_decl_cache_align _Atomic(uintptr_t) abandoned_count;
-static mi_decl_cache_align _Atomic(uintptr_t) abandoned_visited_count;
+static mi_decl_cache_align _Atomic(size_t) abandoned_count;
+static mi_decl_cache_align _Atomic(size_t) abandoned_visited_count;
 
 // We also maintain a count of current readers of the abandoned list
 // in order to prevent resetting/decommitting segment memory if it might
 // still be read.
-static mi_decl_cache_align _Atomic(uintptr_t) abandoned_readers; // = 0
+static mi_decl_cache_align _Atomic(size_t) abandoned_readers; // = 0
 
 // Push on the visited list
 static void mi_abandoned_visited_push(mi_segment_t* segment) {
@@ -957,7 +957,7 @@ static bool mi_abandoned_visited_revisit(void)
   mi_tagged_segment_t afirst;
   mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
   if (mi_tagged_segment_ptr(ts)==NULL) {
-    uintptr_t count = mi_atomic_load_relaxed(&abandoned_visited_count);
+    size_t count = mi_atomic_load_relaxed(&abandoned_visited_count);
     afirst = mi_tagged_segment(first, ts);
     if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) {
       mi_atomic_add_relaxed(&abandoned_count, count);
@@ -976,7 +976,7 @@ static bool mi_abandoned_visited_revisit(void)
   // and atomically prepend to the abandoned list
   // (no need to increase the readers as we don't access the abandoned segments)
   mi_tagged_segment_t anext = mi_atomic_load_relaxed(&abandoned);
-  uintptr_t count;
+  size_t count;
   do {
     count = mi_atomic_load_relaxed(&abandoned_visited_count);
     mi_atomic_store_ptr_release(mi_segment_t, &last->abandoned_next, mi_tagged_segment_ptr(anext));
@@ -1005,7 +1005,7 @@ static void mi_abandoned_push(mi_segment_t* segment) {
 // Wait until there are no more pending reads on segments that used to be in the abandoned list
 // called for example from `arena.c` before decommitting
 void _mi_abandoned_await_readers(void) {
-  uintptr_t n;
+  size_t n;
   do {
     n = mi_atomic_load_acquire(&abandoned_readers);
     if (n != 0) mi_atomic_yield();
@@ -1352,8 +1352,8 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block)
   // claim it and free
   mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
-  // paranoia: if this is the last reference, the cas should always succeed
-  uintptr_t expected_tid = 0;
+  // paranoia: if this is the last reference, the cas should always succeed
+  size_t expected_tid = 0;
   if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) {
     mi_block_set_next(page, block, page->free);
     page->free = block;
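
The mechanical `uintptr_t` to `size_t` substitution throughout this patch rests on one assumption: on every platform mimalloc targets, the two types have the same width, so no atomic counter or bitmap field loses range. A hypothetical compile-time guard (not part of this patch) that would state the assumption explicitly:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Assumption behind the uintptr_t -> size_t rewrite: both types are the
// same width on all supported (flat address space) targets.
static_assert(sizeof(size_t) == sizeof(uintptr_t),
              "size_t and uintptr_t must have the same width");
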