small fixes for macOS

This commit is contained in:
Daan 2024-12-09 15:16:36 -08:00
parent d5ed0cc71e
commit 351cb0c740
3 changed files with 18 additions and 14 deletions

View file

@@ -330,20 +330,18 @@ endif()
 # Determine architecture
 set(MI_OPT_ARCH_FLAGS "")
-set(MI_ARCH "unknown")
-if(APPLE)
-  list(FIND CMAKE_OSX_ARCHITECTURES "x86_64" x64_index)
-  list(FIND CMAKE_OSX_ARCHITECTURES "arm64" arm64_index)
-  if(x64_index GREATER_EQUAL 0)
-    set(MI_ARCH "x64")
-  elseif(arm64_index GREATER_EQUAL 0)
-    set(MI_ARCH "arm64")
-  endif()
-elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM STREQUAL "x64")
+set(MI_ARCH "")
+if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
+   CMAKE_GENERATOR_PLATFORM STREQUAL "x64")        # msvc
   set(MI_ARCH "x64")
-elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" OR CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64")
+elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" OR
+       CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64" OR  # apple
+       CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64")  # msvc
   set(MI_ARCH "arm64")
 endif()
+if(MI_ARCH)
+  message(STATUS "Architecture: ${MI_ARCH}")
+endif()

 # Check /proc/cpuinfo for an SV39 MMU and limit the virtual address bits.
 # (this will skip the aligned hinting in that case. Issue #939, #949)

View file

@@ -459,7 +459,7 @@ static inline mi_page_t* _mi_checked_ptr_page(const void* p) {
 }

 static inline mi_page_t* _mi_ptr_page(const void* p) {
-  #if MI_DEBUG
+  #if MI_DEBUG || defined(__APPLE__)
   return _mi_checked_ptr_page(p);
   #else
   return _mi_ptr_page_ex(p,NULL);

View file

@@ -12,6 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
 mi_decl_cache_align uint8_t* _mi_page_map = NULL;
 static bool mi_page_map_all_committed = false;
 static size_t mi_page_map_entries_per_commit_bit = MI_ARENA_SLICE_SIZE;
+static void* mi_page_map_max_address = NULL;
 static mi_memid_t mi_page_map_memid;

 // (note: we need to initialize statically or otherwise C++ may run a default constructors after process initialization)
@@ -23,12 +24,13 @@ bool _mi_page_map_init(void) {
   if (vbits >= 48) vbits = 47;
   // 1 byte per block = 2 GiB for 128 TiB address space (48 bit = 256 TiB address space)
   // 64 KiB for 4 GiB address space (on 32-bit)
+  mi_page_map_max_address = (void*)(MI_PU(1) << vbits);
   const size_t page_map_size = (MI_ZU(1) << (vbits - MI_ARENA_SLICE_SHIFT));

   mi_page_map_entries_per_commit_bit = _mi_divide_up(page_map_size, MI_BITMAP_DEFAULT_BIT_COUNT);
   // mi_bitmap_init(&mi_page_map_commit, MI_BITMAP_MIN_BIT_COUNT, true);
-  mi_page_map_all_committed = (page_map_size <= 1*MI_MiB); // _mi_os_has_overcommit(); // commit on-access on Linux systems?
+  mi_page_map_all_committed = true; // (page_map_size <= 1*MI_MiB); // _mi_os_has_overcommit(); // commit on-access on Linux systems?
   _mi_page_map = (uint8_t*)_mi_os_alloc_aligned(page_map_size, 1, mi_page_map_all_committed, true, &mi_page_map_memid);
   if (_mi_page_map==NULL) {
     _mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB);
@@ -118,8 +120,12 @@ void _mi_page_map_unregister(mi_page_t* page) {

 mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
+  // if mi_unlikely(_mi_page_map==NULL) {  // happens on macOS during loading
+  //   _mi_page_map_init();
+  // }
+  if mi_unlikely(p >= mi_page_map_max_address) return false;
   uintptr_t idx = ((uintptr_t)p >> MI_ARENA_SLICE_SHIFT);
-  if (!mi_page_map_all_committed || mi_bitmap_is_setN(&mi_page_map_commit, idx/mi_page_map_entries_per_commit_bit, 1)) {
+  if (mi_page_map_all_committed || mi_bitmap_is_setN(&mi_page_map_commit, idx/mi_page_map_entries_per_commit_bit, 1)) {
     return (_mi_page_map[idx] != 0);
   }
   else {