prefix UNUSED,KiB,MiB,GiB; add mi_threadid_t type; add mi_ssize_t

daan 2021-11-13 14:46:03 -08:00
parent 959845540d
commit 28896e5b19
14 changed files with 198 additions and 166 deletions

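Note: the definitions behind this rename are not shown in the hunks below. A sketch of what the newly prefixed names plausibly look like, going by the commit title (the exact bodies and underlying types are assumptions, not copied from this commit):

#include <stddef.h>

// Sketch (assumed): MI_-prefixed size constants, the unused-parameter
// macro, and the two new types named in the commit title.
#define MI_KiB   ((size_t)1024)
#define MI_MiB   (MI_KiB*MI_KiB)
#define MI_GiB   (MI_MiB*MI_KiB)

#define MI_UNUSED(x)  (void)(x)    // suppress unused-parameter warnings

typedef size_t    mi_threadid_t;   // abstract thread-id type
typedef ptrdiff_t mi_ssize_t;      // signed size type (underlying type assumed)
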
@@ -122,11 +122,11 @@ static bool use_large_os_page(size_t size, size_t alignment) {
 // round to a good OS allocation size (bounded by max 12.5% waste)
 size_t _mi_os_good_alloc_size(size_t size) {
   size_t align_size;
-  if (size < 512*KiB) align_size = _mi_os_page_size();
-  else if (size < 2*MiB) align_size = 64*KiB;
-  else if (size < 8*MiB) align_size = 256*KiB;
-  else if (size < 32*MiB) align_size = 1*MiB;
-  else align_size = 4*MiB;
+  if (size < 512*MI_KiB) align_size = _mi_os_page_size();
+  else if (size < 2*MI_MiB) align_size = 64*MI_KiB;
+  else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
+  else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
+  else align_size = 4*MI_MiB;
   if (mi_unlikely(size >= (SIZE_MAX - align_size))) return size; // possible overflow?
   return _mi_align_up(size, align_size);
 }
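
A quick check of the "max 12.5% waste" claim above: each branch picks an align_size that is at most 1/8 of the smallest size it handles (64 KiB / 512 KiB = 256 KiB / 2 MiB = 1 MiB / 8 MiB = 4 MiB / 32 MiB = 1/8), and rounding up wastes less than align_size bytes. A hypothetical spot-check, not part of this commit:

#include <assert.h>
#include <stddef.h>

// Worst case sits just past a branch threshold: a size slightly over 512 KiB
// rounds up by almost 64 KiB, i.e. just under 12.5% waste.
static void check_waste_bound(void) {
  size_t size    = 512*1024 + 1;                     // just past 512 KiB
  size_t align   = 64*1024;                          // that branch's align_size
  size_t rounded = (size + align - 1)/align*align;   // what _mi_align_up computes
  assert((rounded - size)*8 <= size);                // waste <= 12.5%
}
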
@@ -263,7 +263,7 @@ void _mi_os_init() {
     os_page_size = (size_t)result;
     os_alloc_granularity = os_page_size;
   }
-  large_os_page_size = 2*MiB; // TODO: can we query the OS for this?
+  large_os_page_size = 2*MI_MiB; // TODO: can we query the OS for this?
   os_detect_overcommit();
 }
 #endif
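
Regarding the TODO: on Linux the default huge page size can be read from /proc/meminfo instead of hard-coding 2 MiB. A hedged sketch (helper name hypothetical, Linux-only):

#include <stdio.h>
#include <stddef.h>

// Hypothetical: parse the kernel's default huge page size from the
// "Hugepagesize:    2048 kB" line of /proc/meminfo; fall back to 2 MiB.
static size_t os_large_page_size(void) {
  size_t kb = 0;
  FILE* f = fopen("/proc/meminfo", "r");
  if (f != NULL) {
    char line[128];
    while (fgets(line, sizeof(line), f) != NULL) {
      if (sscanf(line, "Hugepagesize: %zu kB", &kb) == 1) break;
    }
    fclose(f);
  }
  return (kb > 0 ? kb*1024 : 2*1024*1024);
}
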
@@ -414,7 +414,7 @@ static void* mi_wasm_heap_grow(size_t size, size_t try_alignment) {
 #else
 #define MI_OS_USE_MMAP
 static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
-  UNUSED(try_alignment);
+  MI_UNUSED(try_alignment);
   #if defined(MAP_ALIGNED) // BSD
   if (addr == NULL && try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0) {
     size_t n = mi_bsr(try_alignment);
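
Context for the MAP_ALIGNED branch: mi_bsr returns the index of the highest set bit, so for a power-of-two try_alignment it computes log2, which BSD's MAP_ALIGNED(n) encodes directly into the mmap flags. A minimal sketch of the idea (compiles only where MAP_ALIGNED is defined):

#include <sys/mman.h>
#include <stddef.h>

// Sketch: request a 2 MiB-aligned anonymous mapping from a BSD kernel by
// passing log2(alignment) via MAP_ALIGNED.
static void* bsd_mmap_aligned(size_t size) {
  const int n = 21;  // log2(2 MiB) -- the value mi_bsr would return
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(n), -1, 0);
  return (p == MAP_FAILED ? NULL : p);
}
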
@@ -494,7 +494,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
     #endif
     #ifdef MAP_HUGE_1GB
     static bool mi_huge_pages_available = true;
-    if ((size % GiB) == 0 && mi_huge_pages_available) {
+    if ((size % MI_GiB) == 0 && mi_huge_pages_available) {
       lflags |= MAP_HUGE_1GB;
     }
     else
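
The MAP_HUGE_1GB path only succeeds when the size is an exact multiple of 1 GiB and the system actually has 1 GiB pages available. A hedged sketch of the underlying Linux call (a boot-time reservation, e.g. hugepagesz=1G, is still required):

#include <sys/mman.h>
#include <stddef.h>

#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB (30 << 26)  // 30 = log2(1 GiB), shifted by MAP_HUGE_SHIFT
#endif

// Sketch: map anonymous memory backed by 1 GiB huge pages on Linux.
static void* linux_mmap_1gib(size_t size) {
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
                 -1, 0);
  return (p == MAP_FAILED ? NULL : p);
}
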
@@ -582,7 +582,7 @@ static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size)
 {
   if (try_alignment == 0 || try_alignment > MI_SEGMENT_SIZE) return NULL;
   if ((size%MI_SEGMENT_SIZE) != 0) return NULL;
-  if (size > 1*GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(KK_HINT_AREA / 1<<30) = 1/4096.
+  if (size > 1*MI_GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(KK_HINT_AREA / 1<<30) = 1/4096.
   #if (MI_SECURE>0)
   size += MI_SEGMENT_SIZE; // put in `MI_SEGMENT_SIZE` virtual gaps between hinted blocks; this splits VLA's but increases guarded areas.
   #endif
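
For the record, the 1/4096 in the comment follows from the hint-area size it implies: KK_HINT_AREA = 4096 * 2^30 bytes = 2^42 bytes (4 TiB), so a mapping capped at 1 GiB = 2^30 bytes overlaps any given fixed address with probability about 2^30 / 2^42 = 1/4096.
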
@@ -605,7 +605,7 @@ static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size)
 // no need for mi_os_get_aligned_hint
 #else
 static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
-  UNUSED(try_alignment); UNUSED(size);
+  MI_UNUSED(try_alignment); MI_UNUSED(size);
   return NULL;
 }
 #endif
@@ -732,7 +732,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
 ----------------------------------------------------------- */
 
 void* _mi_os_alloc(size_t size, mi_stats_t* tld_stats) {
-  UNUSED(tld_stats);
+  MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
@@ -741,7 +741,7 @@ void* _mi_os_alloc(size_t size, mi_stats_t* tld_stats) {
 }
 
 void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* tld_stats) {
-  UNUSED(tld_stats);
+  MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   if (size == 0 || p == NULL) return;
   size = _mi_os_good_alloc_size(size);
@@ -754,7 +754,7 @@ void _mi_os_free(void* p, size_t size, mi_stats_t* stats) {
 
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* tld_stats)
 {
-  UNUSED(tld_stats);
+  MI_UNUSED(tld_stats);
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
   alignment = _mi_align_up(alignment, _mi_os_page_size());
@@ -805,7 +805,7 @@ static void mi_mprotect_hint(int err) {
                       " > sudo sysctl -w vm.max_map_count=262144\n");
   }
   #else
-  UNUSED(err);
+  MI_UNUSED(err);
   #endif
 }
 
@@ -867,13 +867,13 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
 }
 
 bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
-  UNUSED(tld_stats);
+  MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   return mi_os_commitx(addr, size, true, false /* liberal */, is_zero, stats);
 }
 
 bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
-  UNUSED(tld_stats);
+  MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   bool is_zero;
   return mi_os_commitx(addr, size, false, true /* conservative */, &is_zero, stats);
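
The liberal/conservative flag governs how the range is rounded to OS page boundaries before the system call: a commit may liberally cover a superset of the range, but a decommit must stay conservatively inside it, so bytes outside the requested range are never decommitted. A minimal sketch of that rounding (helper name hypothetical; page_size assumed a power of two):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Sketch: align [addr, addr+size) to page boundaries, either inward
// (conservative: subset of the range) or outward (liberal: superset).
static void* page_align_area(bool conservative, void* addr, size_t size,
                             size_t* newsize, size_t page_size) {
  uintptr_t start = (uintptr_t)addr;
  uintptr_t end   = start + size;
  if (conservative) {
    start = (start + page_size - 1) & ~(page_size - 1);  // round start up
    end   = end & ~(page_size - 1);                      // round end down
  }
  else {
    start = start & ~(page_size - 1);                    // round start down
    end   = (end + page_size - 1) & ~(page_size - 1);    // round end up
  }
  *newsize = (end > start ? (size_t)(end - start) : 0);
  return (void*)start;
}
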
@@ -942,7 +942,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
 bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) {
-  UNUSED(tld_stats);
+  MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   if (mi_option_is_enabled(mi_option_reset_decommits)) {
     return _mi_os_decommit(addr, size, stats);
@@ -953,7 +953,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) {
 }
 
 bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
-  UNUSED(tld_stats);
+  MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   if (mi_option_is_enabled(mi_option_reset_decommits)) {
     return mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!)
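
On Unix, resetting typically maps to madvise: MADV_FREE (where available) lets the kernel reclaim the pages lazily while the memory stays committed, with MADV_DONTNEED as the portable fallback. A hedged sketch (helper name hypothetical):

#include <sys/mman.h>
#include <stddef.h>

// Sketch: advise the kernel that a committed range's contents are disposable.
static int os_reset_advise(void* addr, size_t size) {
  #if defined(MADV_FREE)
  if (madvise(addr, size, MADV_FREE) == 0) return 0;  // lazy reclaim (newer kernels)
  #endif
  return madvise(addr, size, MADV_DONTNEED);          // portable fallback
}
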
@@ -1029,12 +1029,12 @@ bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
 Support for allocating huge OS pages (1GiB) that are reserved up-front
 and possibly associated with a specific NUMA node. (use `numa_node>=0`)
 -----------------------------------------------------------------------------*/
-#define MI_HUGE_OS_PAGE_SIZE (GiB)
+#define MI_HUGE_OS_PAGE_SIZE (MI_GiB)
 
 #if defined(_WIN32) && (MI_INTPTR_SIZE >= 8)
 static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
 {
-  mi_assert_internal(size%GiB == 0);
+  mi_assert_internal(size%MI_GiB == 0);
   mi_assert_internal(addr != NULL);
   const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE;
@@ -1075,7 +1075,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
     return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, params, 1);
   }
   #else
-  UNUSED(numa_node);
+  MI_UNUSED(numa_node);
   #endif
   // otherwise use regular virtual alloc on older windows
   return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
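
The params argument above plausibly carries the NUMA preference as a MEM_EXTENDED_PARAMETER; a sketch of filling one in (Windows 10 / Server 2016+ API; not copied from this commit):

#include <windows.h>
#include <string.h>

// Sketch: bind a VirtualAlloc2 allocation to a preferred NUMA node via an
// extended parameter.
static void fill_numa_param(MEM_EXTENDED_PARAMETER* param, int numa_node) {
  memset(param, 0, sizeof(*param));
  param->Type  = MemExtendedParameterNumaNode;
  param->ULong = (DWORD)numa_node;
}
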
@@ -1092,12 +1092,12 @@ static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, cons
 }
 #else
 static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
-  UNUSED(start); UNUSED(len); UNUSED(mode); UNUSED(nmask); UNUSED(maxnode); UNUSED(flags);
+  MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags);
   return 0;
 }
 #endif
 
 static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
-  mi_assert_internal(size%GiB == 0);
+  mi_assert_internal(size%MI_GiB == 0);
   bool is_large = true;
   void* p = mi_unix_mmap(addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large);
   if (p == NULL) return NULL;
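
The non-stub mi_os_mbind above presumably wraps the raw Linux mbind system call, since glibc ships no wrapper for it (libnuma normally provides one). A sketch of such a wrapper:

#include <sys/syscall.h>
#include <unistd.h>

// Sketch: invoke the mbind system call directly to set the NUMA memory
// policy for a freshly mapped range.
static long linux_mbind(void* start, unsigned long len, unsigned long mode,
                        const unsigned long* nmask, unsigned long maxnode,
                        unsigned flags) {
  return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags);
}
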
@@ -1115,7 +1115,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
 }
 #else
 static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
-  UNUSED(addr); UNUSED(size); UNUSED(numa_node);
+  MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(numa_node);
   return NULL;
 }
 #endif
@@ -1151,7 +1151,7 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
 }
 #else
 static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
-  UNUSED(pages);
+  MI_UNUSED(pages);
   if (total_size != NULL) *total_size = 0;
   return NULL;
 }
@@ -1352,7 +1352,7 @@ size_t _mi_os_numa_node_count_get(void) {
 }
 
 int _mi_os_numa_node_get(mi_os_tld_t* tld) {
-  UNUSED(tld);
+  MI_UNUSED(tld);
   size_t numa_count = _mi_os_numa_node_count();
   if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
   // never more than the node count and >= 0
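
The hunk is cut off here; the function presumably finishes by detecting the current node and clamping it into [0, numa_count) as the comment announces. A sketch of such a clamp (the detection call itself is not shown in this excerpt):

#include <stddef.h>

// Sketch: clamp a detected NUMA node into the valid range [0, numa_count).
static int clamp_numa_node(int numa_node, size_t numa_count) {
  if (numa_node >= (int)numa_count) numa_node = numa_node % (int)numa_count;
  if (numa_node < 0) numa_node = 0;
  return numa_node;
}
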