diff --git a/.gitignore b/.gitignore
index df1d58eb..b2439f94 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,6 @@
ide/vs20??/*.db
ide/vs20??/*.opendb
ide/vs20??/*.user
-ide/vs20??/*.vcxproj.filters
ide/vs20??/.vs
ide/vs20??/VTune*
out/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7a730557..ca69fc6e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,18 +7,20 @@ set(CMAKE_CXX_STANDARD 17)
option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF)
option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF)
option(MI_PADDING "Enable padding to detect heap block overflow (always on in DEBUG or SECURE mode, or with Valgrind/ASAN)" OFF)
-option(MI_OVERRIDE "Override the standard malloc interface (e.g. define entry points for malloc() etc)" ON)
+option(MI_OVERRIDE "Override the standard malloc interface (i.e. define entry points for 'malloc', 'free', etc)" ON)
option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF)
option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF)
option(MI_TRACK_VALGRIND "Compile with Valgrind support (adds a small overhead)" OFF)
option(MI_TRACK_ASAN "Compile with address sanitizer support (adds a small overhead)" OFF)
option(MI_TRACK_ETW "Compile with Windows event tracing (ETW) support (adds a small overhead)" OFF)
option(MI_USE_CXX "Use the C++ compiler to compile the library (instead of the C compiler)" OFF)
+option(MI_OPT_ARCH "Only for optimized builds: turn on architecture specific optimizations (for arm64: '-march=armv8.1-a' (2016))" ON)
option(MI_SEE_ASM "Generate assembly files" OFF)
option(MI_OSX_INTERPOSE "Use interpose to override standard malloc on macOS" ON)
option(MI_OSX_ZONE "Use malloc zone to override standard malloc on macOS" ON)
option(MI_WIN_REDIRECT "Use redirection module ('mimalloc-redirect') on Windows if compiling mimalloc as a DLL" ON)
-option(MI_LOCAL_DYNAMIC_TLS "Use slightly slower, dlopen-compatible TLS mechanism (Unix)" OFF)
+option(MI_WIN_USE_FIXED_TLS "Use a fixed TLS slot on Windows to avoid extra tests in the malloc fast path" OFF)
+option(MI_LOCAL_DYNAMIC_TLS "Use local-dynamic-tls, a slightly slower but dlopen-compatible thread local storage mechanism (Unix)" OFF)
option(MI_LIBC_MUSL "Set this when linking with musl libc" OFF)
option(MI_BUILD_SHARED "Build shared library" ON)
option(MI_BUILD_STATIC "Build static library" ON)
@@ -26,12 +28,15 @@ option(MI_BUILD_OBJECT "Build object library" ON)
option(MI_BUILD_TESTS "Build test executables" ON)
option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF)
option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF)
+option(MI_GUARDED "Build with guard pages behind certain object allocations (implies MI_NO_PADDING=ON)" OFF)
option(MI_SKIP_COLLECT_ON_EXIT "Skip collecting memory on program exit" OFF)
option(MI_NO_PADDING "Force no use of padding even in DEBUG mode etc." OFF)
option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version" OFF)
option(MI_NO_THP "Disable transparent huge pages support on Linux/Android for the mimalloc process only" OFF)
+set(MI_EXTRA_CPPDEFS "" CACHE STRING "Extra pre-processor definitions (use as `-DMI_EXTRA_CPPDEFS=\"opt1=val1;opt2=val2\"`)")
# deprecated options
+option(MI_WIN_USE_FLS "Use Fiber local storage on Windows to detect thread termination (deprecated)" OFF)
option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF)
option(MI_USE_LIBATOMIC "Explicitly link with -latomic (on older systems) (deprecated and detected automatically)" OFF)
@@ -61,30 +66,39 @@ set(mi_sources
set(mi_cflags "")
set(mi_cflags_static "") # extra flags for a static library build
set(mi_cflags_dynamic "") # extra flags for a shared-object library build
-set(mi_defines "")
set(mi_libraries "")
+if(MI_EXTRA_CPPDEFS)
+ set(mi_defines ${MI_EXTRA_CPPDEFS})
+else()
+ set(mi_defines "")
+endif()
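+# For example (hypothetical invocation), extra definitions can be passed as a
+# semicolon-separated list:
+#   cmake .. -DMI_EXTRA_CPPDEFS="MI_DEBUG=3;MI_SHOW_ERRORS=1"
+# and each entry is appended verbatim to the compile definitions.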
+
# -----------------------------------------------------------------------------
-# Convenience: set default build type depending on the build directory
+# Convenience: set default build type and compiler depending on the build directory
# -----------------------------------------------------------------------------
message(STATUS "")
if (NOT CMAKE_BUILD_TYPE)
- if ("${CMAKE_BINARY_DIR}" MATCHES ".*(D|d)ebug$" OR MI_DEBUG_FULL)
- message(STATUS "No build type selected, default to: Debug")
+ if ("${CMAKE_BINARY_DIR}" MATCHES ".*((D|d)ebug|asan|tsan|ubsan|valgrind)$" OR MI_DEBUG_FULL)
+ message(STATUS "No build type selected, default to 'Debug'")
set(CMAKE_BUILD_TYPE "Debug")
else()
- message(STATUS "No build type selected, default to: Release")
+ message(STATUS "No build type selected, default to 'Release'")
set(CMAKE_BUILD_TYPE "Release")
endif()
endif()
+if (CMAKE_GENERATOR MATCHES "^Visual Studio.*$")
+ message(STATUS "Note: when building with Visual Studio the build type is specified when building.")
+ message(STATUS "For example: 'cmake --build . --config=Release")
+endif()
+
if("${CMAKE_BINARY_DIR}" MATCHES ".*(S|s)ecure$")
message(STATUS "Default to secure build")
set(MI_SECURE "ON")
endif()
-
# -----------------------------------------------------------------------------
# Process options
# -----------------------------------------------------------------------------
@@ -104,6 +118,14 @@ if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel")
set(MI_USE_CXX "ON")
endif()
+if(CMAKE_BUILD_TYPE MATCHES "Release|RelWithDebInfo")
+ if (NOT MI_OPT_ARCH)
+ message(STATUS "Architecture specific optimizations are disabled (MI_OPT_ARCH=OFF)")
+ endif()
+else()
+ set(MI_OPT_ARCH OFF)
+endif()
+
if(MI_OVERRIDE)
message(STATUS "Override standard malloc (MI_OVERRIDE=ON)")
if(APPLE)
@@ -131,12 +153,6 @@ if(MI_OVERRIDE)
endif()
if(WIN32)
- if (MI_WIN_REDIRECT)
- if (MSVC_C_ARCHITECTURE_ID MATCHES "ARM")
- message(STATUS "Cannot use redirection on Windows ARM (MI_WIN_REDIRECT=OFF)")
- set(MI_WIN_REDIRECT OFF)
- endif()
- endif()
if (NOT MI_WIN_REDIRECT)
# use a negative define for backward compatibility
list(APPEND mi_defines MI_WIN_NOREDIRECT=1)
@@ -152,8 +168,8 @@ if(MI_TRACK_VALGRIND)
CHECK_INCLUDE_FILES("valgrind/valgrind.h;valgrind/memcheck.h" MI_HAS_VALGRINDH)
if (NOT MI_HAS_VALGRINDH)
set(MI_TRACK_VALGRIND OFF)
- message(WARNING "Cannot find the 'valgrind/valgrind.h' and 'valgrind/memcheck.h' -- install valgrind first")
- message(STATUS "Compile **without** Valgrind support (MI_TRACK_VALGRIND=OFF)")
+ message(WARNING "Cannot find the 'valgrind/valgrind.h' and 'valgrind/memcheck.h' -- install valgrind first?")
+ message(STATUS "Disabling Valgrind support (MI_TRACK_VALGRIND=OFF)")
else()
message(STATUS "Compile with Valgrind support (MI_TRACK_VALGRIND=ON)")
list(APPEND mi_defines MI_TRACK_VALGRIND=1)
@@ -199,6 +215,15 @@ if(MI_TRACK_ETW)
endif()
endif()
+if(MI_GUARDED)
+ message(STATUS "Compile guard pages behind certain object allocations (MI_GUARDED=ON)")
+ list(APPEND mi_defines MI_GUARDED=1)
+ if(NOT MI_NO_PADDING)
+ message(STATUS " Disabling padding due to guard pages (MI_NO_PADDING=ON)")
+ set(MI_NO_PADDING ON)
+ endif()
+endif()
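+# Note: the CI pipeline in this repository additionally tunes the guarded-page
+# sampling at run-time by setting the MIMALLOC_GUARDED_SAMPLE_RATE environment
+# variable (see azure-pipelines.yml below).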
+
if(MI_SEE_ASM)
message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)")
list(APPEND mi_cflags -save-temps)
@@ -297,6 +322,48 @@ if(MI_LIBC_MUSL)
list(APPEND mi_defines MI_LIBC_MUSL=1)
endif()
+if(MI_WIN_USE_FLS)
+ message(STATUS "Use the Fiber API to detect thread termination (deprecated) (MI_WIN_USE_FLS=ON)")
+ list(APPEND mi_defines MI_WIN_USE_FLS=1)
+endif()
+
+if(MI_WIN_USE_FIXED_TLS)
+ message(STATUS "Use fixed TLS slot on Windows to avoid extra tests in the malloc fast path (MI_WIN_USE_FIXED_TLS=ON)")
+ list(APPEND mi_defines MI_WIN_USE_FIXED_TLS=1)
+endif()
+
+# Determine architecture
+set(MI_OPT_ARCH_FLAGS "")
+set(MI_ARCH "unknown")
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86|i[3456]86)$" OR CMAKE_GENERATOR_PLATFORM MATCHES "^(x86|Win32)$")
+ set(MI_ARCH "x86")
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|x64|amd64|AMD64)$" OR CMAKE_GENERATOR_PLATFORM STREQUAL "x64") # must be before arm64
+ set(MI_ARCH "x64")
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64|armv8.?|ARM64)$" OR CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64")
+ set(MI_ARCH "arm64")
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm|armv[34567]|ARM)$")
+ set(MI_ARCH "arm32")
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(riscv|riscv32|riscv64)$")
+  if(CMAKE_SIZEOF_VOID_P EQUAL 4)
+ set(MI_ARCH "riscv32")
+ else()
+ set(MI_ARCH "riscv64")
+ endif()
+else()
+ set(MI_ARCH ${CMAKE_SYSTEM_PROCESSOR})
+endif()
+message(STATUS "Architecture: ${MI_ARCH}") # (${CMAKE_SYSTEM_PROCESSOR}, ${CMAKE_GENERATOR_PLATFORM}, ${CMAKE_GENERATOR})")
+
+# Check /proc/cpuinfo for an SV39 MMU and limit the virtual address bits.
+# (this will skip the aligned hinting in that case. Issue #939, #949)
+if (EXISTS /proc/cpuinfo)
+ file(STRINGS /proc/cpuinfo mi_sv39_mmu REGEX "^mmu[ \t]+:[ \t]+sv39$")
+ if (mi_sv39_mmu)
+    message(STATUS "Set virtual address bits to 39 (SV39 MMU detected)")
+ list(APPEND mi_defines MI_DEFAULT_VIRTUAL_ADDRESS_BITS=39)
+ endif()
+endif()
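+# (a matching /proc/cpuinfo entry reads, e.g., "mmu : sv39" on an SV39 RISC-V core)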
+
# On Haiku use `-DCMAKE_INSTALL_PREFIX` instead, issue #788
# if(CMAKE_SYSTEM_NAME MATCHES "Haiku")
# SET(CMAKE_INSTALL_LIBDIR ~/config/non-packaged/lib)
@@ -334,14 +401,29 @@ if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM
if(MI_OVERRIDE)
list(APPEND mi_cflags -fno-builtin-malloc)
endif()
+ if(MI_OPT_ARCH)
+ if(MI_ARCH STREQUAL "arm64")
+ set(MI_OPT_ARCH_FLAGS "-march=armv8.1-a") # fast atomics
+ endif()
+ endif()
endif()
if (MSVC AND MSVC_VERSION GREATER_EQUAL 1914)
list(APPEND mi_cflags /Zc:__cplusplus)
+ if(MI_OPT_ARCH)
+ if(MI_ARCH STREQUAL "arm64")
+ set(MI_OPT_ARCH_FLAGS "/arch:armv8.1") # fast atomics
+ endif()
+ endif()
endif()
if(MINGW)
- add_definitions(-D_WIN32_WINNT=0x600)
+ add_definitions(-D_WIN32_WINNT=0x601) # issue #976
+endif()
+
+if(MI_OPT_ARCH_FLAGS)
+ list(APPEND mi_cflags ${MI_OPT_ARCH_FLAGS})
+ message(STATUS "Architecture specific optimization is enabled (with ${MI_OPT_ARCH_FLAGS}) (MI_OPT_ARCH=ON)")
endif()
# extra needed libraries
@@ -462,10 +544,18 @@ if(MI_BUILD_SHARED)
)
if(WIN32 AND MI_WIN_REDIRECT)
# On windows, link and copy the mimalloc redirection dll too.
- if(CMAKE_SIZEOF_VOID_P EQUAL 4)
+ if(CMAKE_GENERATOR_PLATFORM STREQUAL "arm64ec")
+ set(MIMALLOC_REDIRECT_SUFFIX "-arm64ec")
+ elseif(MI_ARCH STREQUAL "x64")
+ set(MIMALLOC_REDIRECT_SUFFIX "")
+ if(CMAKE_SYSTEM_PROCESSOR STREQUAL "ARM64")
+ message(STATUS "Note: x64 code emulated on Windows for arm64 should use an arm64ec build of 'mimalloc-override.dll'")
+ message(STATUS " with 'mimalloc-redirect-arm64ec.dll'. See the 'bin\\readme.md' for more information.")
+ endif()
+ elseif(MI_ARCH STREQUAL "x86")
set(MIMALLOC_REDIRECT_SUFFIX "32")
else()
- set(MIMALLOC_REDIRECT_SUFFIX "")
+ set(MIMALLOC_REDIRECT_SUFFIX "-${MI_ARCH}") # -arm64 etc.
endif()
target_link_libraries(mimalloc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bin/mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.lib)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index e3689407..bccf7a3f 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -15,7 +15,7 @@ trigger:
jobs:
- job:
- displayName: Windows
+ displayName: Windows 2022
pool:
vmImage:
windows-2022
@@ -43,7 +43,7 @@ jobs:
solution: $(BuildType)/libmimalloc.sln
configuration: '$(MSBuildConfiguration)'
msbuildArguments: -m
- - script: ctest --verbose --timeout 120 -C $(MSBuildConfiguration)
+ - script: ctest --verbose --timeout 240 -C $(MSBuildConfiguration)
workingDirectory: $(BuildType)
displayName: CTest
#- script: $(BuildType)\$(BuildType)\mimalloc-test-stress
@@ -52,7 +52,7 @@ jobs:
# artifact: mimalloc-windows-$(BuildType)
- job:
- displayName: Linux
+ displayName: Ubuntu 22.04
pool:
vmImage:
ubuntu-22.04
@@ -113,7 +113,12 @@ jobs:
CXX: clang++
BuildType: debug-tsan-clang-cxx
cmakeExtraArgs: -DCMAKE_BUILD_TYPE=RelWithDebInfo -DMI_USE_CXX=ON -DMI_DEBUG_TSAN=ON
-
+ Debug Guarded Clang:
+ CC: clang
+ CXX: clang
+ BuildType: debug-guarded-clang
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=RelWithDebInfo -DMI_DEBUG_FULL=ON -DMI_GUARDED=ON
+
steps:
- task: CMake@1
inputs:
@@ -121,17 +126,19 @@ jobs:
cmakeArgs: .. $(cmakeExtraArgs)
- script: make -j$(nproc) -C $(BuildType)
displayName: Make
- - script: ctest --verbose --timeout 180
+ - script: ctest --verbose --timeout 240
workingDirectory: $(BuildType)
displayName: CTest
+ env:
+ MIMALLOC_GUARDED_SAMPLE_RATE: 1000
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-ubuntu-$(BuildType)
- job:
- displayName: macOS
+ displayName: macOS 14 (Sonoma)
pool:
vmImage:
- macOS-latest
+ macOS-14
strategy:
matrix:
Debug:
@@ -150,41 +157,151 @@ jobs:
cmakeArgs: .. $(cmakeExtraArgs)
- script: make -j$(sysctl -n hw.ncpu) -C $(BuildType)
displayName: Make
- - script: ctest --verbose --timeout 120
+ - script: ctest --verbose --timeout 240
workingDirectory: $(BuildType)
displayName: CTest
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-macos-$(BuildType)
-# - job:
-# displayName: Windows-2017
-# pool:
-# vmImage:
-# vs2017-win2016
-# strategy:
-# matrix:
-# Debug:
-# BuildType: debug
-# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
-# MSBuildConfiguration: Debug
-# Release:
-# BuildType: release
-# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release
-# MSBuildConfiguration: Release
-# Secure:
-# BuildType: secure
-# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
-# MSBuildConfiguration: Release
-# steps:
-# - task: CMake@1
-# inputs:
-# workingDirectory: $(BuildType)
-# cmakeArgs: .. $(cmakeExtraArgs)
-# - task: MSBuild@1
-# inputs:
-# solution: $(BuildType)/libmimalloc.sln
-# configuration: '$(MSBuildConfiguration)'
-# - script: |
-# cd $(BuildType)
-# ctest --verbose --timeout 120
-# displayName: CTest
+# ----------------------------------------------------------
+# Other OS versions (just debug mode)
+# ----------------------------------------------------------
+
+- job:
+ displayName: Windows 2019
+ pool:
+ vmImage:
+ windows-2019
+ strategy:
+ matrix:
+ Debug:
+ BuildType: debug
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
+ MSBuildConfiguration: Debug
+ Release:
+ BuildType: release
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
+ MSBuildConfiguration: Release
+ steps:
+ - task: CMake@1
+ inputs:
+ workingDirectory: $(BuildType)
+ cmakeArgs: .. $(cmakeExtraArgs)
+ - task: MSBuild@1
+ inputs:
+ solution: $(BuildType)/libmimalloc.sln
+ configuration: '$(MSBuildConfiguration)'
+ msbuildArguments: -m
+ - script: ctest --verbose --timeout 240 -C $(MSBuildConfiguration)
+ workingDirectory: $(BuildType)
+ displayName: CTest
+
+- job:
+ displayName: Ubuntu 24.04
+ pool:
+ vmImage:
+ ubuntu-24.04
+ strategy:
+ matrix:
+ Debug:
+ CC: gcc
+ CXX: g++
+ BuildType: debug
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
+ Debug++:
+ CC: gcc
+ CXX: g++
+ BuildType: debug-cxx
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON
+ Debug Clang:
+ CC: clang
+ CXX: clang++
+ BuildType: debug-clang
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
+ Debug++ Clang:
+ CC: clang
+ CXX: clang++
+ BuildType: debug-clang-cxx
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON
+ Release Clang:
+ CC: clang
+ CXX: clang++
+ BuildType: release-clang
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
+ steps:
+ - task: CMake@1
+ inputs:
+ workingDirectory: $(BuildType)
+ cmakeArgs: .. $(cmakeExtraArgs)
+ - script: make -j$(nproc) -C $(BuildType)
+ displayName: Make
+ - script: ctest --verbose --timeout 240
+ workingDirectory: $(BuildType)
+ displayName: CTest
+
+- job:
+ displayName: Ubuntu 20.04
+ pool:
+ vmImage:
+ ubuntu-20.04
+ strategy:
+ matrix:
+ Debug:
+ CC: gcc
+ CXX: g++
+ BuildType: debug
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
+ Debug++:
+ CC: gcc
+ CXX: g++
+ BuildType: debug-cxx
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON
+ Debug Clang:
+ CC: clang
+ CXX: clang++
+ BuildType: debug-clang
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
+ Debug++ Clang:
+ CC: clang
+ CXX: clang++
+ BuildType: debug-clang-cxx
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON
+ Release Clang:
+ CC: clang
+ CXX: clang++
+ BuildType: release-clang
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
+ steps:
+ - task: CMake@1
+ inputs:
+ workingDirectory: $(BuildType)
+ cmakeArgs: .. $(cmakeExtraArgs)
+ - script: make -j$(nproc) -C $(BuildType)
+ displayName: Make
+ - script: ctest --verbose --timeout 240
+ workingDirectory: $(BuildType)
+ displayName: CTest
+
+- job:
+ displayName: macOS 15 (Sequoia)
+ pool:
+ vmImage:
+ macOS-15
+ strategy:
+ matrix:
+ Debug:
+ BuildType: debug
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
+ Release:
+ BuildType: release
+ cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
+ steps:
+ - task: CMake@1
+ inputs:
+ workingDirectory: $(BuildType)
+ cmakeArgs: .. $(cmakeExtraArgs)
+ - script: make -j$(sysctl -n hw.ncpu) -C $(BuildType)
+ displayName: Make
+ - script: ctest --verbose --timeout 240
+ workingDirectory: $(BuildType)
+ displayName: CTest
diff --git a/bin/mimalloc-redirect-arm64.dll b/bin/mimalloc-redirect-arm64.dll
new file mode 100644
index 00000000..455f8394
Binary files /dev/null and b/bin/mimalloc-redirect-arm64.dll differ
diff --git a/bin/mimalloc-redirect-arm64.lib b/bin/mimalloc-redirect-arm64.lib
new file mode 100644
index 00000000..0445ce83
Binary files /dev/null and b/bin/mimalloc-redirect-arm64.lib differ
diff --git a/bin/mimalloc-redirect-arm64ec.dll b/bin/mimalloc-redirect-arm64ec.dll
new file mode 100644
index 00000000..62569b57
Binary files /dev/null and b/bin/mimalloc-redirect-arm64ec.dll differ
diff --git a/bin/mimalloc-redirect-arm64ec.lib b/bin/mimalloc-redirect-arm64ec.lib
new file mode 100644
index 00000000..eb724d74
Binary files /dev/null and b/bin/mimalloc-redirect-arm64ec.lib differ
diff --git a/bin/mimalloc-redirect.dll b/bin/mimalloc-redirect.dll
index a3a3591f..7d0ec33b 100644
Binary files a/bin/mimalloc-redirect.dll and b/bin/mimalloc-redirect.dll differ
diff --git a/bin/mimalloc-redirect.lib b/bin/mimalloc-redirect.lib
index de128bb9..851455a5 100644
Binary files a/bin/mimalloc-redirect.lib and b/bin/mimalloc-redirect.lib differ
diff --git a/bin/mimalloc-redirect32.dll b/bin/mimalloc-redirect32.dll
index 522723e5..cc661036 100644
Binary files a/bin/mimalloc-redirect32.dll and b/bin/mimalloc-redirect32.dll differ
diff --git a/bin/mimalloc-redirect32.lib b/bin/mimalloc-redirect32.lib
index 87f19b8e..45d7297d 100644
Binary files a/bin/mimalloc-redirect32.lib and b/bin/mimalloc-redirect32.lib differ
diff --git a/bin/minject-arm64.exe b/bin/minject-arm64.exe
new file mode 100644
index 00000000..637c95d9
Binary files /dev/null and b/bin/minject-arm64.exe differ
diff --git a/bin/minject.exe b/bin/minject.exe
index dba8f80f..bb445706 100644
Binary files a/bin/minject.exe and b/bin/minject.exe differ
diff --git a/bin/minject32.exe b/bin/minject32.exe
index f837383b..6dcb8da9 100644
Binary files a/bin/minject32.exe and b/bin/minject32.exe differ
diff --git a/bin/readme.md b/bin/readme.md
index 9b121bda..bc115ce1 100644
--- a/bin/readme.md
+++ b/bin/readme.md
@@ -1,27 +1,30 @@
# Windows Override
Dynamically overriding with mimalloc on Windows
-is robust and has the particular advantage to be able to redirect all malloc/free calls that go through
-the (dynamic) C runtime allocator, including those from other DLL's or libraries.
-As it intercepts all allocation calls on a low level, it can be used reliably
+is robust and has the particular advantage of being able to redirect all malloc/free calls
+that go through the (dynamic) C runtime allocator, including those from other DLLs or
+libraries. As it intercepts all allocation calls on a low level, it can be used reliably
on large programs that include other 3rd party components.
-There are four requirements to make the overriding work robustly:
+There are four requirements to make the overriding work well:
1. Use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch).
-2. Link your program explicitly with `mimalloc-override.dll` library.
- To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some
- call to the mimalloc API in the `main` function, like `mi_version()`
- (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project
- for an example on how to use this.
+2. Link your program explicitly with the `mimalloc.lib` export library for
+ the `mimalloc.dll` -- which contains all mimalloc functionality.
+ To ensure the `mimalloc.dll` is actually loaded at run-time it is easiest
+ to insert some call to the mimalloc API in the `main` function, like `mi_version()`
+ (or use the `/include:mi_version` switch on the linker, or
+ similarly, `#pragma comment(linker, "/include:mi_version")` in some source file).
+   See the `mimalloc-test-override` project for an example on how to use this,
+   and the minimal sketch after this list.
-3. The `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be put
- in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL).
- The redirection DLL ensures that all calls to the C runtime malloc API get redirected to
- mimalloc functions (which reside in `mimalloc-override.dll`).
+3. The `mimalloc-redirect.dll` must be put in the same folder as the main
+ `mimalloc.dll` at runtime (as it is a dependency of that DLL).
+ The redirection DLL ensures that all calls to the C runtime malloc API get
+ redirected to mimalloc functions (which reside in `mimalloc.dll`).
-4. Ensure the `mimalloc-override.dll` comes as early as possible in the import
+4. Ensure the `mimalloc.dll` comes as early as possible in the import
list of the final executable (so it can intercept all potential allocations).
+   You can use `minject -l <exe>` to check this if needed.
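+
+As a minimal sketch of requirement 2 (a hypothetical program, assuming you link
+against the x64 `mimalloc.lib` export library):
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+#include <mimalloc.h>
+
+int main(void) {
+  // referencing any mimalloc API entry point ensures mimalloc.dll is loaded
+  printf("mimalloc version: %d\n", mi_version());
+  void* p = malloc(42);  // serviced by mimalloc once redirection is active
+  free(p);
+  return 0;
+}
+```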
For best performance on Windows with C++, it
is also recommended to also override the `new`/`delete` operations (by including
@@ -29,18 +32,43 @@ is also recommended to also override the `new`/`delete` operations (by including
a single(!) source file in your project).
The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic
-overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected.
+overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully
+redirected.
-## Minject
+### Other Platforms
-We cannot always re-link an executable with `mimalloc-override.dll`, and similarly, we cannot always
-ensure the the DLL comes first in the import table of the final executable.
+You always link with `mimalloc.dll`, but for different platforms you may
+need a specific redirection DLL:
+
+- __x64__: `mimalloc-redirect.dll`.
+- __x86__: `mimalloc-redirect32.dll`. Use for older 32-bit Windows programs.
+- __arm64__: `mimalloc-redirect-arm64.dll`. Use for native Windows arm64 programs.
+- __arm64ec__: `mimalloc-redirect-arm64ec.dll`. The [arm64ec] ABI is "emulation compatible"
+ mode on Windows arm64. Unfortunately we cannot run x64 code emulated on Windows arm64 with
+ the x64 mimalloc override directly (since the C runtime always uses `arm64ec`). Instead:
+ 1. Build the program as normal for x64 and link as normal with the x64
+ `mimalloc.lib` export library.
+ 2. Now separately build `mimalloc.dll` in `arm64ec` mode and _overwrite_ your
+ previous (x64) `mimalloc.dll` -- the loader can handle the mix of arm64ec
+ and x64 code. Now use `mimalloc-redirect-arm64ec.dll` to match your new
+ arm64ec `mimalloc.dll`. The main program stays as is and can be fully x64
+ or contain more arm64ec modules. At runtime, the arm64ec `mimalloc.dll` will
+   run with native arm64 instructions while the rest of the program runs emulated x64
+   (see the sketch below).
+
+[arm64ec]: https://learn.microsoft.com/en-us/windows/arm/arm64ec
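+
+A hypothetical build sequence for this mixed arm64ec setup (directory and file
+names assumed; requires the Visual Studio generator):
+
+```
+> cmake . -B out/x64 -A x64
+> cmake --build out/x64 --config Release          # x64 mimalloc.dll + mimalloc.lib
+> cmake . -B out/arm64ec -A arm64ec
+> cmake --build out/arm64ec --config Release      # arm64ec mimalloc.dll
+> copy /Y out\arm64ec\Release\mimalloc.dll out\x64\Release\
+> copy /Y bin\mimalloc-redirect-arm64ec.dll out\x64\Release\
+```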
+
+
+### Minject
+
+We cannot always re-link an executable with `mimalloc.dll`, and similarly, we
+cannot always ensure that the DLL comes first in the import table of the final executable.
In many cases though we can patch existing executables without any recompilation
-if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll`
-into the import table (and put `mimalloc-redirect.dll` in the same folder)
-Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388).
+if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the
+`mimalloc.dll` into the import table (and put `mimalloc-redirect.dll` in the same
+directory). Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388).
-The `minject` program can also do this from the command line, use `minject --help` for options:
+The `minject` program can also do this from the command line.
+Use `minject --help` for options:
```
> minject --help
@@ -58,8 +86,8 @@ options:
-l --list only list imported modules
-i --inplace update the exe in-place (make sure there is a backup!)
-f --force always overwrite without prompting
-   --postfix=<p>     use <p> as a postfix to the mimalloc dll (default is 'override')
-                     e.g. use --postfix=override-debug to link with mimalloc-override-debug.dll
+   --postfix=<p>     use <p> as a postfix to the mimalloc dll.
+                     e.g. use --postfix=debug to link with mimalloc-debug.dll
notes:
  Without '--inplace' an injected <exe> is generated with the same name ending in '-mi'.
@@ -69,3 +97,6 @@ examples:
> minject --list myprogram.exe
> minject --force --inplace myprogram.exe
```
+
+For x86 32-bit binaries, use `minject32`, and for arm64 binaries use `minject-arm64`.
+
diff --git a/ide/vs2017/mimalloc-override-test.vcxproj b/ide/vs2017/mimalloc-override-test.vcxproj
deleted file mode 100644
index 04c16a9f..00000000
--- a/ide/vs2017/mimalloc-override-test.vcxproj
+++ /dev/null
@@ -1,190 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2017 Application project "mimalloc-override-test", GUID {FEF7868F-750E-4C21-A04D-22707CC66879}, toolset v141, SDK 10.0.19041.0, Debug/Release × Win32/x64 configurations, references project {abb5eae7-b3e6-432e-b636-333449892ea7}.]
diff --git a/ide/vs2017/mimalloc-override-test.vcxproj.filters b/ide/vs2017/mimalloc-override-test.vcxproj.filters
deleted file mode 100644
index eb5e70b7..00000000
--- a/ide/vs2017/mimalloc-override-test.vcxproj.filters
+++ /dev/null
@@ -1,22 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Standard Source/Header/Resource Files filter definitions with a single source file under "Source Files".]
diff --git a/ide/vs2017/mimalloc-override.vcxproj b/ide/vs2017/mimalloc-override.vcxproj
deleted file mode 100644
index 6d20eb57..00000000
--- a/ide/vs2017/mimalloc-override.vcxproj
+++ /dev/null
@@ -1,260 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2017 DynamicLibrary project "mimalloc-override", GUID {ABB5EAE7-B3E6-432E-B636-333449892EA7}, toolset v141, SDK 10.0.19041.0, defines _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE, links mimalloc-redirect32.lib (Win32) or mimalloc-redirect.lib plus bcrypt.lib (x64), with a post-build step copying the matching mimalloc-redirect DLL to the output directory.]
diff --git a/ide/vs2017/mimalloc-override.vcxproj.filters b/ide/vs2017/mimalloc-override.vcxproj.filters
deleted file mode 100644
index 1adafcfa..00000000
--- a/ide/vs2017/mimalloc-override.vcxproj.filters
+++ /dev/null
@@ -1,98 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Source Files/Header Files filter assignments for the mimalloc-override project's sources and headers.]
diff --git a/ide/vs2017/mimalloc-test-stress.vcxproj b/ide/vs2017/mimalloc-test-stress.vcxproj
deleted file mode 100644
index 061b8605..00000000
--- a/ide/vs2017/mimalloc-test-stress.vcxproj
+++ /dev/null
@@ -1,159 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2017 Application project "mimalloc-test-stress", GUID {FEF7958F-750E-4C21-A04D-22707CC66878}, toolset v141, SDK 10.0.19041.0, Debug/Release × Win32/x64 configurations, references project {abb5eae7-b3e6-432e-b636-333449892ea6}.]
diff --git a/ide/vs2017/mimalloc-test-stress.vcxproj.filters b/ide/vs2017/mimalloc-test-stress.vcxproj.filters
deleted file mode 100644
index 7c5239e8..00000000
--- a/ide/vs2017/mimalloc-test-stress.vcxproj.filters
+++ /dev/null
@@ -1,22 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Standard Source/Header/Resource Files filter definitions with a single source file under "Source Files".]
diff --git a/ide/vs2017/mimalloc-test.vcxproj b/ide/vs2017/mimalloc-test.vcxproj
deleted file mode 100644
index 04bd6537..00000000
--- a/ide/vs2017/mimalloc-test.vcxproj
+++ /dev/null
@@ -1,158 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2017 Application project "mimalloc-test" (root namespace mimalloctest), GUID {FEF7858F-750E-4C21-A04D-22707CC66878}, toolset v141, SDK 10.0.19041.0, C++17/C++14 language standard, references project {abb5eae7-b3e6-432e-b636-333449892ea6}.]
diff --git a/ide/vs2017/mimalloc-test.vcxproj.filters b/ide/vs2017/mimalloc-test.vcxproj.filters
deleted file mode 100644
index fca75e1c..00000000
--- a/ide/vs2017/mimalloc-test.vcxproj.filters
+++ /dev/null
@@ -1,22 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Standard Source/Header/Resource Files filter definitions with a single source file under "Source Files".]
diff --git a/ide/vs2017/mimalloc.sln b/ide/vs2017/mimalloc.sln
deleted file mode 100644
index 515c03f2..00000000
--- a/ide/vs2017/mimalloc.sln
+++ /dev/null
@@ -1,71 +0,0 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio 15
-VisualStudioVersion = 15.0.26228.102
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|x64 = Debug|x64
- Debug|x86 = Debug|x86
- Release|x64 = Release|x64
- Release|x86 = Release|x86
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
- GlobalSection(ExtensibilityGlobals) = postSolution
- SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A}
- EndGlobalSection
-EndGlobal
diff --git a/ide/vs2017/mimalloc.vcxproj b/ide/vs2017/mimalloc.vcxproj
deleted file mode 100644
index ece9a14d..00000000
--- a/ide/vs2017/mimalloc.vcxproj
+++ /dev/null
@@ -1,260 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2017 StaticLibrary project "mimalloc", GUID {ABB5EAE7-B3E6-432E-B636-333449892EA6}, toolset v141, SDK 10.0.19041.0, target name mimalloc-static, defines _CRT_SECURE_NO_WARNINGS (plus MI_DEBUG=3 in Debug), compiled as C with one Debug configuration compiled as C++.]
diff --git a/ide/vs2017/mimalloc.vcxproj.filters b/ide/vs2017/mimalloc.vcxproj.filters
deleted file mode 100644
index 8359e0e4..00000000
--- a/ide/vs2017/mimalloc.vcxproj.filters
+++ /dev/null
@@ -1,98 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Source Files/Header Files filter assignments for the mimalloc project's sources and headers.]
diff --git a/ide/vs2019/mimalloc-override-test.vcxproj b/ide/vs2019/mimalloc-override-test.vcxproj
deleted file mode 100644
index 7a9202f1..00000000
--- a/ide/vs2019/mimalloc-override-test.vcxproj
+++ /dev/null
@@ -1,190 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2019 Application project "mimalloc-override-test", GUID {FEF7868F-750E-4C21-A04D-22707CC66879}, toolset v142, SDK 10.0, Debug/Release × Win32/x64 configurations, references project {abb5eae7-b3e6-432e-b636-333449892ea7}.]
diff --git a/ide/vs2019/mimalloc-override.vcxproj b/ide/vs2019/mimalloc-override.vcxproj
deleted file mode 100644
index a84a5178..00000000
--- a/ide/vs2019/mimalloc-override.vcxproj
+++ /dev/null
@@ -1,260 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2019 DynamicLibrary project "mimalloc-override", GUID {ABB5EAE7-B3E6-432E-B636-333449892EA7}, toolset v142, SDK 10.0, defines MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE (plus MI_DEBUG=3 in Debug x64), links mimalloc-redirect32.lib (Win32) or mimalloc-redirect.lib (x64), with a post-build step copying the matching mimalloc-redirect DLL to the output directory.]
diff --git a/ide/vs2019/mimalloc-override.vcxproj.filters b/ide/vs2019/mimalloc-override.vcxproj.filters
deleted file mode 100644
index 046e5603..00000000
--- a/ide/vs2019/mimalloc-override.vcxproj.filters
+++ /dev/null
@@ -1,96 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Source Files/Header Files filter assignments for the VS2019 mimalloc-override project; filter GUIDs {f1fccf27-17b9-42dd-ba51-6070baff85c6} and {39cb7e38-69d0-43fb-8406-6a0f7cefc3b4}.]
diff --git a/ide/vs2019/mimalloc-test-api.vcxproj b/ide/vs2019/mimalloc-test-api.vcxproj
deleted file mode 100644
index 812a9cb1..00000000
--- a/ide/vs2019/mimalloc-test-api.vcxproj
+++ /dev/null
@@ -1,155 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2019 Application project "mimalloc-test-api", GUID {FFF7958F-750E-4C21-A04D-22707CC66878}, toolset v142, SDK 10.0, Debug/Release × Win32/x64 configurations, references project {abb5eae7-b3e6-432e-b636-333449892ea6}.]
diff --git a/ide/vs2019/mimalloc-test-stress.vcxproj b/ide/vs2019/mimalloc-test-stress.vcxproj
deleted file mode 100644
index ef7ab357..00000000
--- a/ide/vs2019/mimalloc-test-stress.vcxproj
+++ /dev/null
@@ -1,159 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2019 Application project "mimalloc-test-stress", GUID {FEF7958F-750E-4C21-A04D-22707CC66878}, toolset v142, SDK 10.0, Debug/Release × Win32/x64 configurations, references project {abb5eae7-b3e6-432e-b636-333449892ea6}.]
diff --git a/ide/vs2019/mimalloc-test.vcxproj b/ide/vs2019/mimalloc-test.vcxproj
deleted file mode 100644
index 13af6ab4..00000000
--- a/ide/vs2019/mimalloc-test.vcxproj
+++ /dev/null
@@ -1,158 +0,0 @@
[deleted file contents elided — MSBuild XML markup lost in extraction. Recoverable details: VS2019 Application project "mimalloc-test" (root namespace mimalloctest), GUID {FEF7858F-750E-4C21-A04D-22707CC66878}, toolset v142, SDK 10.0, C++17 language standard, references project {abb5eae7-b3e6-432e-b636-333449892ea6}.]
diff --git a/ide/vs2019/mimalloc.sln b/ide/vs2019/mimalloc.sln
deleted file mode 100644
index 6ff01d3b..00000000
--- a/ide/vs2019/mimalloc.sln
+++ /dev/null
@@ -1,81 +0,0 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio Version 16
-VisualStudioVersion = 16.0.29709.97
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|x64 = Debug|x64
- Debug|x86 = Debug|x86
- Release|x64 = Release|x64
- Release|x86 = Release|x86
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
- GlobalSection(ExtensibilityGlobals) = postSolution
- SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A}
- EndGlobalSection
-EndGlobal
diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj
deleted file mode 100644
index 0076b1db..00000000
--- a/ide/vs2019/mimalloc.vcxproj
+++ /dev/null
@@ -1,258 +0,0 @@
-
-
-
-
- Debug
- Win32
-
-
- Release
- Win32
-
-
- Debug
- x64
-
-
- Release
- x64
-
-
-
- 15.0
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}
- mimalloc
- 10.0
- mimalloc
-
-
-
- StaticLibrary
- true
- v142
-
-
- StaticLibrary
- false
- v142
- true
-
-
- StaticLibrary
- true
- v142
-
-
- StaticLibrary
- false
- v142
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
-
- Level4
- Disabled
- true
- true
- ../../include
- MI_DEBUG=3;%(PreprocessorDefinitions);
- CompileAsCpp
- false
- Default
-
-
-
-
-
-
-
-
-
-
- Level4
- Disabled
- true
- Default
- ../../include
- MI_DEBUG=3;%(PreprocessorDefinitions);
- CompileAsCpp
- false
- Default
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Level4
- MaxSpeed
- true
- true
- ../../include
- %(PreprocessorDefinitions);NDEBUG
- AssemblyAndSourceCode
- $(IntDir)
- false
- false
- Default
- CompileAsCpp
- true
- Default
-
-
- true
- true
-
-
-
-
-
-
-
-
-
-
- Level4
- MaxSpeed
- true
- true
- ../../include
- %(PreprocessorDefinitions);NDEBUG
- AssemblyAndSourceCode
- $(IntDir)
- false
- false
- Default
- CompileAsCpp
- true
- Default
-
-
- true
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- false
- false
- false
- false
-
-
- true
- true
- true
- true
-
-
-
-
-
- false
-
-
-
-
-
-
- true
- true
- true
- true
-
-
-
- true
- true
- true
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/ide/vs2019/mimalloc.vcxproj.filters b/ide/vs2019/mimalloc.vcxproj.filters
deleted file mode 100644
index 98f29289..00000000
--- a/ide/vs2019/mimalloc.vcxproj.filters
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Source Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
-
-
- {2b556b10-f559-4b2d-896e-142652adbf0c}
-
-
- {852a14ae-6dde-4e95-8077-ca705e97e5af}
-
-
-
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc-lib.vcxproj
similarity index 52%
rename from ide/vs2022/mimalloc.vcxproj
rename to ide/vs2022/mimalloc-lib.vcxproj
index 5efc8fd0..6c652b8a 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc-lib.vcxproj
@@ -1,10 +1,26 @@
+
+ Debug
+ ARM64
+
+
+ Debug
+ ARM64EC
+
Debug
Win32
+
+ Release
+ ARM64
+
+
+ Release
+ ARM64EC
+
Release
Win32
@@ -21,9 +37,9 @@
15.0
{ABB5EAE7-B3E6-432E-B636-333449892EA6}
- mimalloc
+ mimalloc-lib
10.0
- mimalloc
+ mimalloc-lib
@@ -42,12 +58,34 @@
true
v143
+
+ StaticLibrary
+ true
+ v143
+
+
+ StaticLibrary
+ true
+ v143
+
StaticLibrary
false
v143
true
+
+ StaticLibrary
+ false
+ v143
+ true
+
+
+ StaticLibrary
+ false
+ v143
+ true
+
@@ -62,9 +100,21 @@
+
+
+
+
+
+
+
+
+
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -84,12 +134,36 @@
.lib
mimalloc-static
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
.lib
mimalloc-static
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+ Level4
@@ -116,7 +190,61 @@
true
Default
../../include
- MI_DEBUG=4;%(PreprocessorDefinitions);
+ MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);
+ CompileAsCpp
+ false
+ stdcpp20
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Level4
+ Disabled
+ true
+ Default
+ ../../include
+ MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);
+ CompileAsCpp
+ false
+ stdcpp20
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Level4
+ Disabled
+ true
+ Default
+ ../../include
+ MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);
CompileAsCpp
false
stdcpp20
@@ -198,18 +326,98 @@
+
+
+ Level4
+ MaxSpeed
+ true
+ Default
+ ../../include
+ %(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ false
+ Default
+ CompileAsCpp
+ true
+ stdcpp20
+ CPUExtensionRequirementsARMv81
+ Sync
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Level4
+ MaxSpeed
+ true
+ Default
+ ../../include
+ %(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ false
+ Default
+ CompileAsCpp
+ true
+ stdcpp20
+ CPUExtensionRequirementsARMv81
+ Sync
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+ falsefalse
+ false
+ falsefalsefalse
+ false
+ falsetruetruetrue
+ true
+ truetrue
+ true
+ true
@@ -217,17 +425,27 @@
truetruetrue
+ true
+ truetrue
+ true
+ truefalse
+ false
+ falsetrue
+ true
+ truetruetruetrue
+ true
+ true
@@ -237,14 +455,22 @@
truetruetrue
+ true
+ truetrue
+ true
+ truetruetruetrue
+ true
+ truetrue
+ true
+ true
diff --git a/ide/vs2022/mimalloc.vcxproj.filters b/ide/vs2022/mimalloc-lib.vcxproj.filters
similarity index 96%
rename from ide/vs2022/mimalloc.vcxproj.filters
rename to ide/vs2022/mimalloc-lib.vcxproj.filters
index 54ee0fcb..90703da8 100644
--- a/ide/vs2022/mimalloc.vcxproj.filters
+++ b/ide/vs2022/mimalloc-lib.vcxproj.filters
@@ -16,15 +16,24 @@
Sources
+
+ Sources
+ Sources
+
+ Sources
+ Sources
Sources
+
+ Sources
+ Sources
@@ -55,21 +64,12 @@
Sources
-
- Sources
-
-
- Sources
-
-
- Sources
-
-
+ Headers
-
+ Headers
@@ -96,10 +96,10 @@
- {dd2da697-c33c-4348-bf80-a802ebaa06fb}
+ {1430490c-e711-4ace-a1b8-36f4d5105873}
- {8027057b-4b93-4321-b93c-d51dd0c8077b}
+ {461c78ef-04b0-44d1-a0ca-7d488abaa592}
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc-override.vcxproj b/ide/vs2022/mimalloc-override-dll.vcxproj
similarity index 52%
rename from ide/vs2022/mimalloc-override.vcxproj
rename to ide/vs2022/mimalloc-override-dll.vcxproj
index e895fa3c..c1849bb2 100644
--- a/ide/vs2022/mimalloc-override.vcxproj
+++ b/ide/vs2022/mimalloc-override-dll.vcxproj
@@ -1,10 +1,26 @@
+
+ Debug
+ ARM64
+
+
+ Debug
+ ARM64EC
+
Debug
Win32
+
+ Release
+ ARM64
+
+
+ Release
+ ARM64EC
+
Release
Win32
@@ -21,9 +37,9 @@
15.0
{ABB5EAE7-B3E6-432E-B636-333449892EA7}
- mimalloc-override
+ mimalloc-override-dll
10.0
- mimalloc-override
+ mimalloc-override-dll
@@ -41,11 +57,31 @@
true
v143
+
+ DynamicLibrary
+ true
+ v143
+
+
+ DynamicLibrary
+ true
+ v143
+
DynamicLibrary
false
v143
+
+ DynamicLibrary
+ false
+ v143
+
+
+ DynamicLibrary
+ false
+ v143
+
@@ -60,33 +96,69 @@
+
+
+
+
+
+
+
+
+
+
+
+
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
.dll
- mimalloc-override
+ mimalloc
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
.dll
- mimalloc-override
+ mimalloc
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
.dll
- mimalloc-override
+ mimalloc
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
.dll
- mimalloc-override
+ mimalloc
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc
@@ -98,7 +170,7 @@
MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
MultiThreadedDebugDLL
false
- Default
+ CompileAsCpp
$(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies)
@@ -126,7 +198,7 @@
MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
MultiThreadedDebugDLL
false
- Default
+ CompileAsCpp
$(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies)
@@ -144,6 +216,62 @@
copy mimalloc-redirect.dll to the output directory
+
+
+ Level3
+ Disabled
+ true
+ true
+ ../../include
+ MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
+ MultiThreadedDebugDLL
+ false
+ CompileAsCpp
+
+
+ $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64.lib;%(AdditionalDependencies)
+
+
+
+
+ Default
+ false
+
+
+ COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64.dll" "$(OutputPath)"
+
+
+ copy mimalloc-redirect-arm64.dll to the output directory
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ../../include
+ MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
+ MultiThreadedDebugDLL
+ false
+ CompileAsCpp
+
+
+ $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64ec.lib;%(AdditionalDependencies)
+
+
+
+
+ Default
+ false
+
+
+ COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64ec.dll" "$(OutputPath)"
+
+
+ copy mimalloc-redirect-arm64ec.dll to the output directory
+
+ Level3
@@ -157,7 +285,7 @@
$(IntDir)
false
MultiThreadedDLL
- Default
+ CompileAsCpp
false
@@ -189,7 +317,7 @@
$(IntDir)
false
MultiThreadedDLL
- Default
+ CompileAsCpp
false
@@ -208,6 +336,72 @@
copy mimalloc-redirect.dll to the output directory
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ ../../include
+ MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ MultiThreadedDLL
+ CompileAsCpp
+ false
+ CPUExtensionRequirementsARMv81
+
+
+ true
+ true
+ $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64.lib;%(AdditionalDependencies)
+
+
+ Default
+ false
+
+
+ COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64.dll" "$(OutputPath)"
+
+
+ copy mimalloc-redirect-arm64.dll to the output directory
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ ../../include
+ MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ MultiThreadedDLL
+ CompileAsCpp
+ false
+ CPUExtensionRequirementsARMv81
+
+
+ true
+ true
+ $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64ec.lib;%(AdditionalDependencies)
+
+
+ Default
+ false
+
+
+ COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64ec.dll" "$(OutputPath)"
+
+
+ copy mimalloc-redirect-arm64ec.dll to the output directory
+
+
@@ -226,13 +420,21 @@
falsefalsefalse
+ false
+ falsefalse
+ false
+ falsetruetruetrue
+ true
+ truetrue
+ true
+ true
@@ -240,7 +442,11 @@
truetruetrue
+ true
+ truetrue
+ true
+ true
@@ -252,7 +458,11 @@
truetruetrue
+ true
+ truetrue
+ true
+ true
@@ -260,7 +470,11 @@
truetruetrue
+ true
+ truetrue
+ true
+ true
diff --git a/ide/vs2022/mimalloc-override.vcxproj.filters b/ide/vs2022/mimalloc-override-dll.vcxproj.filters
similarity index 95%
rename from ide/vs2022/mimalloc-override.vcxproj.filters
rename to ide/vs2022/mimalloc-override-dll.vcxproj.filters
index a9f66c35..91bdf95c 100644
--- a/ide/vs2022/mimalloc-override.vcxproj.filters
+++ b/ide/vs2022/mimalloc-override-dll.vcxproj.filters
@@ -16,6 +16,9 @@
Sources
+
+ Sources
+ Sources
@@ -25,6 +28,9 @@
Sources
+
+ Sources
+ Sources
@@ -55,12 +61,6 @@
Sources
-
- Sources
-
-
- Sources
-
@@ -99,15 +99,15 @@
- {9ef1cf48-7bb2-4af1-8cc1-603486e08a7a}
+ {262c6c21-e270-4ba6-bd63-4ac999307e4e}
- {cfcf1674-81e3-487a-a8dd-5f956ae4007d}
+ {94b40bdc-a741-45dd-81aa-c05fabcd2970}
- Headers
+ Sources
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc-override-test.vcxproj b/ide/vs2022/mimalloc-override-test.vcxproj
index a3c56f7b..427a75ae 100644
--- a/ide/vs2022/mimalloc-override-test.vcxproj
+++ b/ide/vs2022/mimalloc-override-test.vcxproj
@@ -1,10 +1,26 @@
+
+ Debug
+ ARM64
+
+
+ Debug
+ ARM64EC
+
Debug
Win32
+
+ Release
+ ARM64
+
+
+ Release
+ ARM64EC
+
Release
Win32
@@ -23,7 +39,7 @@
{FEF7868F-750E-4C21-A04D-22707CC66879}
mimalloc-override-test
10.0
- mimalloc-override-test
+ mimalloc-test-override
@@ -42,12 +58,34 @@
true
v143
+
+ Application
+ true
+ v143
+
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +100,21 @@
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +128,26 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ Level3
@@ -128,6 +194,54 @@
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ MultiThreadedDebugDLL
+ Sync
+ Default
+ false
+
+
+ Console
+
+
+ kernel32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ MultiThreadedDebugDLL
+ Sync
+ Default
+ false
+
+
+ Console
+
+
+ kernel32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+ Level3
@@ -176,11 +290,61 @@
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ MultiThreadedDLL
+
+
+ true
+ true
+ Console
+
+
+ kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ MultiThreadedDLL
+
+
+ true
+ true
+ Console
+
+
+ kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
-
+ {abb5eae7-b3e6-432e-b636-333449892ea7}
diff --git a/ide/vs2022/mimalloc-test-api.vcxproj b/ide/vs2022/mimalloc-test-api.vcxproj
index d9b9cae4..b7f97ad2 100644
--- a/ide/vs2022/mimalloc-test-api.vcxproj
+++ b/ide/vs2022/mimalloc-test-api.vcxproj
@@ -1,10 +1,26 @@
+
+ Debug
+ ARM64
+
+
+ Debug
+ ARM64EC
+
Debug
Win32
+
+ Release
+ ARM64
+
+
+ Release
+ ARM64EC
+
Release
Win32
@@ -42,12 +58,34 @@
true
v143
+
+ Application
+ true
+ v143
+
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +100,21 @@
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +128,26 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ Level3
@@ -106,6 +172,30 @@
Console
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+ Level3
@@ -140,19 +230,59 @@
Console
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+
+
+ true
+ true
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+
+
+ true
+ true
+ Console
+
+ truetruetrue
+ true
+ truetrue
+ true
+ truefalse
+ false
+ false
-
+ {abb5eae7-b3e6-432e-b636-333449892ea6}
diff --git a/ide/vs2022/mimalloc-test-stress.vcxproj b/ide/vs2022/mimalloc-test-stress.vcxproj
index c7e820df..cb761f94 100644
--- a/ide/vs2022/mimalloc-test-stress.vcxproj
+++ b/ide/vs2022/mimalloc-test-stress.vcxproj
@@ -1,10 +1,26 @@
+
+ Debug
+ ARM64
+
+
+ Debug
+ ARM64EC
+
Debug
Win32
+
+ Release
+ ARM64
+
+
+ Release
+ ARM64EC
+
Release
Win32
@@ -42,12 +58,34 @@
true
v143
+
+ Application
+ true
+ v143
+
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +100,21 @@
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +128,26 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ Level3
@@ -106,6 +172,30 @@
Console
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+ Level3
@@ -140,16 +230,56 @@
Console
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+ CPUExtensionRequirementsARMv81
+
+
+ true
+ true
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+ CPUExtensionRequirementsARMv81
+
+
+ true
+ true
+ Console
+
+ falsefalse
+ false
+ falsefalsefalse
+ false
+ false
-
+ {abb5eae7-b3e6-432e-b636-333449892ea6}
diff --git a/ide/vs2022/mimalloc-test.vcxproj b/ide/vs2022/mimalloc-test.vcxproj
index 506dd7d4..83202dbe 100644
--- a/ide/vs2022/mimalloc-test.vcxproj
+++ b/ide/vs2022/mimalloc-test.vcxproj
@@ -1,10 +1,26 @@
+
+ Debug
+ ARM64
+
+
+ Debug
+ ARM64EC
+
Debug
Win32
+
+ Release
+ ARM64
+
+
+ Release
+ ARM64EC
+
Release
Win32
@@ -23,7 +39,7 @@
{FEF7858F-750E-4C21-A04D-22707CC66878}
mimalloctest
10.0
- mimalloc-test
+ mimalloc-test-static
@@ -42,12 +58,34 @@
true
v143
+
+ Application
+ true
+ v143
+
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +100,21 @@
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +128,26 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ Level3
@@ -108,6 +174,32 @@
Console
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ stdcpp17
+
+
+ Console
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ stdcpp17
+
+
+ Console
+
+ Level3
@@ -144,14 +236,50 @@
Console
-
-
- {abb5eae7-b3e6-432e-b636-333449892ea6}
-
-
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ stdcpp17
+
+
+ true
+ true
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ stdcpp17
+
+
+ true
+ true
+ Console
+
+
+
+
+ {abb5eae7-b3e6-432e-b636-333449892ea6}
+
+
diff --git a/ide/vs2022/mimalloc.sln b/ide/vs2022/mimalloc.sln
index 6ff01d3b..040af3ac 100644
--- a/ide/vs2022/mimalloc.sln
+++ b/ide/vs2022/mimalloc.sln
@@ -1,81 +1,133 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio Version 16
-VisualStudioVersion = 16.0.29709.97
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|x64 = Debug|x64
- Debug|x86 = Debug|x86
- Release|x64 = Release|x64
- Release|x86 = Release|x86
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
- GlobalSection(ExtensibilityGlobals) = postSolution
- SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A}
- EndGlobalSection
-EndGlobal
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 17
+VisualStudioVersion = 17.12.35527.113
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-lib", "mimalloc-lib.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-dll", "mimalloc-override-dll.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|ARM64 = Debug|ARM64
+ Debug|ARM64EC = Debug|ARM64EC
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Release|ARM64 = Release|ARM64
+ Release|ARM64EC = Release|ARM64EC
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64.Build.0 = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64EC.Build.0 = Debug|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64.ActiveCfg = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64.Build.0 = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64EC.ActiveCfg = Release|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64EC.Build.0 = Release|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.Build.0 = Debug|ARM64EC
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.ActiveCfg = Release|ARM64EC
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.Build.0 = Release|ARM64EC
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64.Build.0 = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64EC.Build.0 = Debug|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64.ActiveCfg = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64.Build.0 = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64EC.ActiveCfg = Release|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64EC.Build.0 = Release|ARM64EC
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64EC.Build.0 = Debug|ARM64EC
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64.Build.0 = Release|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64EC.ActiveCfg = Release|ARM64EC
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64EC.Build.0 = Release|ARM64EC
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.Build.0 = Debug|ARM64EC
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.ActiveCfg = Release|ARM64EC
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.Build.0 = Release|ARM64EC
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.Build.0 = Debug|ARM64EC
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.ActiveCfg = Release|ARM64EC
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.Build.0 = Release|ARM64EC
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A}
+ EndGlobalSection
+EndGlobal
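
With ARM64 and ARM64EC added as solution platforms above, the solution can be built for those targets from a developer command prompt; a sketch using standard msbuild switches:

    msbuild ide\vs2022\mimalloc.sln /p:Configuration=Release /p:Platform=ARM64
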
diff --git a/include/mimalloc.h b/include/mimalloc.h
index bc743fd7..95a76c2d 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -148,6 +148,7 @@ typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);
mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
+mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
mi_decl_export int mi_version(void) mi_attr_noexcept;
mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
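
A hedged sketch of calling the new mi_collect_reduce entry point declared above from application code; the target value is illustrative only:

    #include <mimalloc.h>

    // After an allocation-heavy phase, ask mimalloc to collect and to
    // reduce the number of segments owned by this thread toward the
    // given target (its exact interpretation is internal to mimalloc).
    static void after_allocation_burst(void) {
      mi_collect_reduce(8);
    }
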
@@ -275,7 +276,7 @@ mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size
mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;
-mi_decl_export void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept;
+mi_decl_export void mi_debug_show_arenas(bool show_inuse) mi_attr_noexcept;
// Experimental: heaps associated with specific memory arena's
typedef int mi_arena_id_t;
@@ -290,7 +291,7 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t a
#endif
-// Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
+// Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
// Used for example for separate interpreter's in one process.
typedef void* mi_subproc_id_t;
mi_decl_export mi_subproc_id_t mi_subproc_main(void);
@@ -309,6 +310,12 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool al
// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
+// Experimental: objects followed by a guard page.
+// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object.
+// A seed of 0 uses a random start point. Only objects within the size bound are eligible for guard pages.
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed);
+mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max);
+
// ------------------------------------------------------
// Convenience
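
A short usage sketch of the guarded-object API declared above; the rate and bounds are illustrative, and the calls only take effect when mimalloc is built with MI_GUARDED:

    #include <mimalloc.h>

    static void enable_guarded_sampling(void) {
      mi_heap_t* heap = mi_heap_get_default();
      // guard roughly 1 out of 4000 eligible allocations; seed 0
      // uses a random start point as documented above
      mi_heap_guarded_set_sample_rate(heap, 4000, 0);
      // only objects between 16 bytes and 128 KiB get a guard page
      mi_heap_guarded_set_size_bound(heap, 16, 128 * 1024);
    }
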
@@ -349,7 +356,7 @@ typedef enum mi_option_e {
mi_option_deprecated_segment_cache,
mi_option_deprecated_page_reset,
mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
- mi_option_deprecated_segment_reset,
+ mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10)
mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
@@ -366,6 +373,12 @@ typedef enum mi_option_e {
mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's)
mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0)
+ mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0)
+ mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0)
+ mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+ mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
+ mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
+ mi_option_target_segments_per_thread, // experimental (=0)
_mi_option_last,
// legacy option names
mi_option_large_os_pages = mi_option_allow_large_os_pages,
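
For illustration, the new guarded-sampling options can also be set programmatically through the existing mi_option_set interface before allocation starts; the values below are examples only:

    #include <mimalloc.h>

    static void configure_guarded_options(void) {
      mi_option_set(mi_option_guarded_sample_rate, 1000); // guard 1 out of 1000 eligible allocations
      mi_option_set(mi_option_guarded_sample_seed, 42);   // fixed seed for a (more) deterministic re-run
    }
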
@@ -535,7 +548,7 @@ template struct _mi_heap_stl_allocator_common : publi
protected:
std::shared_ptr<mi_heap_t> heap;
template friend struct _mi_heap_stl_allocator_common;
-
+
_mi_heap_stl_allocator_common() {
mi_heap_t* hp = mi_heap_new();
this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
@@ -552,7 +565,7 @@ private:
template struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common {
using typename _mi_heap_stl_allocator_common::size_type;
mi_heap_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is deleted when the destructor is called
- mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
+ mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
template mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { }
mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
@@ -569,7 +582,7 @@ template bool operator!=(const mi_heap_stl_allocator& x,
template struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common {
using typename _mi_heap_stl_allocator_common::size_type;
mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is destroyed when the destructor is called
- mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
+ mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap
template mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { }
mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
diff --git a/include/mimalloc/atomic.h b/include/mimalloc/atomic.h
index 530cca01..0d7aaf78 100644
--- a/include/mimalloc/atomic.h
+++ b/include/mimalloc/atomic.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -72,6 +72,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_exchange_relaxed(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
@@ -110,6 +111,7 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des)
+#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,(tp*)x)
#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x)
#else
@@ -118,6 +120,7 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des)
+#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,x)
#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x)
#endif
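
A hedged sketch of the new relaxed pointer-exchange macro, which mirrors the existing release/acq_rel variants; the node type is hypothetical and the internal atomic.h header is assumed to be included:

    typedef struct node_s { struct node_s* next; } node_t;
    static _Atomic(node_t*) list_head;

    // Atomically detach the whole list with relaxed ordering, for
    // cases that need no happens-before relation with other writes.
    static node_t* detach_all(void) {
      return mi_atomic_exchange_ptr_relaxed(node_t, &list_head, NULL);
    }
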
@@ -402,19 +405,46 @@ static inline void mi_atomic_yield(void) {
// ----------------------------------------------------------------------
-// Locks are only used for abandoned segment visiting in `arena.c`
+// Locks
+// These do not have to be recursive and should be light-weight
+// in-process only locks. Only used for reserving arenas and to
+// maintain the abandoned list.
// ----------------------------------------------------------------------
+#if _MSC_VER
+#pragma warning(disable:26110) // unlock with holding lock
+#endif
+
+#define mi_lock(lock) for(bool _go = (mi_lock_acquire(lock),true); _go; (mi_lock_release(lock), _go=false) )
#if defined(_WIN32)
+#if 1
+#define mi_lock_t SRWLOCK // slim reader-writer lock
+
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+ return TryAcquireSRWLockExclusive(lock);
+}
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+ AcquireSRWLockExclusive(lock);
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
+ ReleaseSRWLockExclusive(lock);
+}
+static inline void mi_lock_init(mi_lock_t* lock) {
+ InitializeSRWLock(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+ (void)(lock);
+}
+
+#else
#define mi_lock_t CRITICAL_SECTION
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return TryEnterCriticalSection(lock);
}
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
+static inline void mi_lock_acquire(mi_lock_t* lock) {
EnterCriticalSection(lock);
- return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
LeaveCriticalSection(lock);
@@ -426,16 +456,22 @@ static inline void mi_lock_done(mi_lock_t* lock) {
DeleteCriticalSection(lock);
}
+#endif
#elif defined(MI_USE_PTHREADS)
+void _mi_error_message(int err, const char* fmt, ...);
+
#define mi_lock_t pthread_mutex_t
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
return (pthread_mutex_trylock(lock) == 0);
}
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
- return (pthread_mutex_lock(lock) == 0);
+static inline void mi_lock_acquire(mi_lock_t* lock) {
+ const int err = pthread_mutex_lock(lock);
+ if (err != 0) {
+ _mi_error_message(err, "internal error: lock cannot be acquired\n");
+ }
}
static inline void mi_lock_release(mi_lock_t* lock) {
pthread_mutex_unlock(lock);
@@ -447,18 +483,16 @@ static inline void mi_lock_done(mi_lock_t* lock) {
pthread_mutex_destroy(lock);
}
-/*
#elif defined(__cplusplus)
#include <mutex>
#define mi_lock_t std::mutex
static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
- return lock->lock_try_acquire();
+ return lock->try_lock();
}
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
+static inline void mi_lock_acquire(mi_lock_t* lock) {
lock->lock();
- return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
lock->unlock();
@@ -469,7 +503,6 @@ static inline void mi_lock_init(mi_lock_t* lock) {
static inline void mi_lock_done(mi_lock_t* lock) {
(void)(lock);
}
-*/
#else
@@ -482,12 +515,11 @@ static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
uintptr_t expected = 0;
return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
}
-static inline bool mi_lock_acquire(mi_lock_t* lock) {
+static inline void mi_lock_acquire(mi_lock_t* lock) {
for (int i = 0; i < 1000; i++) { // for at most 1000 tries?
- if (mi_lock_try_acquire(lock)) return true;
+ if (mi_lock_try_acquire(lock)) return;
mi_atomic_yield();
}
- return true;
}
static inline void mi_lock_release(mi_lock_t* lock) {
mi_atomic_store_release(lock, (uintptr_t)0);
@@ -502,6 +534,4 @@ static inline void mi_lock_done(mi_lock_t* lock) {
#endif
-
-
#endif // __MIMALLOC_ATOMIC_H
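
The mi_lock for-loop macro above acquires the lock on entry and releases it when the block exits normally; a usage sketch (the lock variable is hypothetical and assumed to be set up with mi_lock_init):

    static mi_lock_t reserve_lock;

    static void with_lock_example(void) {
      mi_lock(&reserve_lock) {
        // critical section: runs exactly once with the lock held
      }
      // note: an early return or break inside the block would skip the
      // release, so the block should always fall through normally
    }
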
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 94e394c2..df358e39 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -31,16 +31,19 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_thread __declspec(thread)
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
#define mi_decl_weak
+#define mi_decl_hidden
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
#define mi_decl_noinline __attribute__((noinline))
#define mi_decl_thread __thread
#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE)))
#define mi_decl_weak __attribute__((weak))
+#define mi_decl_hidden __attribute__((visibility("hidden")))
#else
#define mi_decl_noinline
#define mi_decl_thread __thread // hope for the best :-)
#define mi_decl_cache_align
#define mi_decl_weak
+#define mi_decl_hidden
#endif
#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
@@ -53,82 +56,100 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_externc
#endif
+// "libc.c"
+#include <stdarg.h>
+void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
+void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
+char _mi_toupper(char c);
+int _mi_strnicmp(const char* s, const char* t, size_t n);
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
+void _mi_strlcat(char* dest, const char* src, size_t dest_size);
+size_t _mi_strlen(const char* s);
+size_t _mi_strnlen(const char* s, size_t max_len);
+bool _mi_getenv(const char* name, char* result, size_t result_size);
// "options.c"
-void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
-void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
-void _mi_warning_message(const char* fmt, ...);
-void _mi_verbose_message(const char* fmt, ...);
-void _mi_trace_message(const char* fmt, ...);
-void _mi_options_init(void);
-void _mi_error_message(int err, const char* fmt, ...);
+void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
+void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
+void _mi_warning_message(const char* fmt, ...);
+void _mi_verbose_message(const char* fmt, ...);
+void _mi_trace_message(const char* fmt, ...);
+void _mi_options_init(void);
+long _mi_option_get_fast(mi_option_t option);
+void _mi_error_message(int err, const char* fmt, ...);
// random.c
-void _mi_random_init(mi_random_ctx_t* ctx);
-void _mi_random_init_weak(mi_random_ctx_t* ctx);
-void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
-void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
-uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
-uintptr_t _mi_heap_random_next(mi_heap_t* heap);
-uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
+void _mi_random_init(mi_random_ctx_t* ctx);
+void _mi_random_init_weak(mi_random_ctx_t* ctx);
+void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
+void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
+uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
+uintptr_t _mi_heap_random_next(mi_heap_t* heap);
+uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
// init.c
extern mi_decl_cache_align mi_stats_t _mi_stats_main;
-extern mi_decl_cache_align const mi_page_t _mi_page_empty;
-bool _mi_is_main_thread(void);
-size_t _mi_current_thread_count(void);
-bool _mi_preloading(void); // true while the C runtime is not initialized yet
-void _mi_thread_done(mi_heap_t* heap);
-void _mi_thread_data_collect(void);
-void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
+extern mi_decl_hidden mi_decl_cache_align const mi_page_t _mi_page_empty;
+void _mi_process_load(void);
+void mi_cdecl _mi_process_done(void);
+bool _mi_is_redirected(void);
+bool _mi_allocator_init(const char** message);
+void _mi_allocator_done(void);
+bool _mi_is_main_thread(void);
+size_t _mi_current_thread_count(void);
+bool _mi_preloading(void); // true while the C runtime is not initialized yet
+void _mi_thread_done(mi_heap_t* heap);
+void _mi_thread_data_collect(void);
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
+void _mi_heap_guarded_init(mi_heap_t* heap);
// os.c
-void _mi_os_init(void); // called from process init
-void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
-void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
-void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
+void _mi_os_init(void); // called from process init
+void* _mi_os_alloc(size_t size, mi_memid_t* memid);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
-size_t _mi_os_page_size(void);
-size_t _mi_os_good_alloc_size(size_t size);
-bool _mi_os_has_overcommit(void);
-bool _mi_os_has_virtual_reserve(void);
+size_t _mi_os_page_size(void);
+size_t _mi_os_good_alloc_size(size_t size);
+bool _mi_os_has_overcommit(void);
+bool _mi_os_has_virtual_reserve(void);
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
-bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
-bool _mi_os_protect(void* addr, size_t size);
-bool _mi_os_unprotect(void* addr, size_t size);
-bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
+bool _mi_os_reset(void* addr, size_t size);
+bool _mi_os_commit(void* p, size_t size, bool* is_zero);
+bool _mi_os_decommit(void* addr, size_t size);
+bool _mi_os_protect(void* addr, size_t size);
+bool _mi_os_unprotect(void* addr, size_t size);
+bool _mi_os_purge(void* p, size_t size);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size);
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
-void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);
-void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
-bool _mi_os_use_large_page(size_t size, size_t alignment);
-size_t _mi_os_large_page_size(void);
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
+bool _mi_os_use_large_page(size_t size, size_t alignment);
+size_t _mi_os_large_page_size(void);
-void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
// arena.c
mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
-bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
-bool _mi_arena_contains(const void* p);
-void _mi_arenas_collect(bool force_purge, mi_stats_t* stats);
-void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
+void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
+bool _mi_arena_contains(const void* p);
+void _mi_arenas_collect(bool force_purge);
+void _mi_arena_unsafe_destroy_all(void);
-bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment);
-void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
+bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment);
+void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
-void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
-void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);
+void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
+void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);
typedef struct mi_arena_field_cursor_s { // abstract struct
size_t os_list_count; // max entries to visit in the OS abandoned list
@@ -144,61 +165,64 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* pr
void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current);
// "segment-map.c"
-void _mi_segment_map_allocated_at(const mi_segment_t* segment);
-void _mi_segment_map_freed_at(const mi_segment_t* segment);
+void _mi_segment_map_allocated_at(const mi_segment_t* segment);
+void _mi_segment_map_freed_at(const mi_segment_t* segment);
+void _mi_segment_map_unsafe_destroy(void);
// "segment.c"
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
-void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
-void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size);
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
+void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
+void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size);
#if MI_HUGE_PAGE_ABANDON
-void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#else
-void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#endif
-void _mi_segments_collect(bool force, mi_segments_tld_t* tld);
-void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
-bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
-bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
+void _mi_segments_collect(bool force, mi_segments_tld_t* tld);
+void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
+bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
+bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
// "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
-void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
-void _mi_page_unfull(mi_page_t* page);
-void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
-void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
-void _mi_heap_delayed_free_all(mi_heap_t* heap);
-bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
-void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
+void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
+void _mi_page_unfull(mi_page_t* page);
+void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
+void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
+void _mi_page_force_abandon(mi_page_t* page);
-void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
-bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
-size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
-void _mi_deferred_free(mi_heap_t* heap, bool force);
+void _mi_heap_delayed_free_all(mi_heap_t* heap);
+bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
+void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
-void _mi_page_free_collect(mi_page_t* page,bool force);
-void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
+void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
+void _mi_deferred_free(mi_heap_t* heap, bool force);
-size_t _mi_bin_size(uint8_t bin); // for stats
-uint8_t _mi_bin(size_t size); // for stats
+void _mi_page_free_collect(mi_page_t* page,bool force);
+void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
+
+size_t _mi_bin_size(uint8_t bin); // for stats
+uint8_t _mi_bin(size_t size); // for stats
// "heap.c"
-void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
-void _mi_heap_destroy_pages(mi_heap_t* heap);
-void _mi_heap_collect_abandon(mi_heap_t* heap);
-void _mi_heap_set_default_direct(mi_heap_t* heap);
-bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
-void _mi_heap_unsafe_destroy_all(void);
-mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
-void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
-bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
+void _mi_heap_destroy_pages(mi_heap_t* heap);
+void _mi_heap_collect_abandon(mi_heap_t* heap);
+void _mi_heap_set_default_direct(mi_heap_t* heap);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
+void _mi_heap_unsafe_destroy_all(mi_heap_t* heap);
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
+void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
+bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
// "stats.c"
-void _mi_stats_done(mi_stats_t* stats);
+void _mi_stats_done(mi_stats_t* stats);
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
@@ -215,18 +239,6 @@ bool _mi_free_delayed_block(mi_block_t* block);
void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
-// "libc.c"
-#include <stdarg.h>
-void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
-void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
-char _mi_toupper(char c);
-int _mi_strnicmp(const char* s, const char* t, size_t n);
-void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
-void _mi_strlcat(char* dest, const char* src, size_t dest_size);
-size_t _mi_strlen(const char* s);
-size_t _mi_strnlen(const char* s, size_t max_len);
-bool _mi_getenv(const char* name, char* result, size_t result_size);
-
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
#endif
@@ -322,6 +334,7 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
}
}
+
// Align a pointer upwards
static inline void* mi_align_up_ptr(void* p, size_t alignment) {
return (void*)_mi_align_up((uintptr_t)p, alignment);
@@ -402,7 +415,7 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
Heap functions
------------------------------------------------------------------------------------------- */
-extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
+extern mi_decl_hidden const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
return (heap->tld->heap_backing == heap);
@@ -410,11 +423,11 @@ static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
mi_assert_internal(heap != NULL);
- return (heap != &_mi_heap_empty);
+ return (heap != NULL && heap != &_mi_heap_empty);
}
static inline uintptr_t _mi_ptr_cookie(const void* p) {
- extern mi_heap_t _mi_heap_main;
+ extern mi_decl_hidden mi_heap_t _mi_heap_main;
mi_assert_internal(_mi_heap_main.cookie != 0);
return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
@@ -562,7 +575,7 @@ static inline bool mi_page_immediate_available(const mi_page_t* page) {
}
// is more than 7/8th of a page in use?
-static inline bool mi_page_mostly_used(const mi_page_t* page) {
+static inline bool mi_page_is_mostly_used(const mi_page_t* page) {
if (page==NULL) return true;
uint16_t frac = page->reserved / 8U;
return (page->reserved - page->used <= frac);
@@ -593,6 +606,39 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
page->flags.x.has_aligned = has_aligned;
}
+/* -------------------------------------------------------------------
+ Guarded objects
+------------------------------------------------------------------- */
+#if MI_GUARDED
+static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void* p) {
+ const ptrdiff_t offset = (uint8_t*)p - (uint8_t*)block;
+ return (offset >= (ptrdiff_t)(sizeof(mi_block_t)) && block->next == MI_BLOCK_TAG_GUARDED);
+}
+
+static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) {
+ // this code is written to result in fast assembly as it is on the hot path for allocation
+ const size_t count = heap->guarded_sample_count - 1; // if the rate was 0, this will underflow and count for a long time..
+ if mi_likely(count != 0) {
+ // no sample
+ heap->guarded_sample_count = count;
+ return false;
+ }
+ else if (size >= heap->guarded_size_min && size <= heap->guarded_size_max) {
+ // use guarded allocation
+ heap->guarded_sample_count = heap->guarded_sample_rate; // reset
+ return (heap->guarded_sample_rate != 0);
+ }
+ else {
+ // failed size criteria, rewind count (but don't write to an empty heap)
+ if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; }
+ return false;
+ }
+}
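+// (roughly one in every `guarded_sample_rate` allocations is sampled; if a sampled size falls outside the min/max range, the sample carries over to the next allocation)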
+
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+
+#endif
+
/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers
@@ -651,6 +697,16 @@ static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const
return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
+static inline uint32_t mi_ptr_encode_canary(const void* null, const void* p, const uintptr_t* keys) {
+ const uint32_t x = (uint32_t)(mi_ptr_encode(null,p,keys));
+ // make the lowest byte 0 to prevent spurious read overflows which could be a security issue (issue #951)
+ #ifdef MI_BIG_ENDIAN
+ return (x & 0x00FFFFFF);
+ #else
+ return (x & 0xFFFFFF00);
+ #endif
+}
+
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
mi_track_mem_defined(block,sizeof(mi_block_t));
mi_block_t* next;
@@ -731,7 +787,7 @@ static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool
static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros
-#if (MI_INTPTR_SIZE==8)
+#if (MI_INTPTR_SIZE>=8)
// by Sebastiano Vigna, see:
x ^= x >> 30;
x *= 0xbf58476d1ce4e5b9UL;
@@ -753,13 +809,13 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------
-int _mi_os_numa_node_get(mi_os_tld_t* tld);
+int _mi_os_numa_node_get(void);
size_t _mi_os_numa_node_count_get(void);
-extern _Atomic(size_t) _mi_numa_node_count;
-static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
+extern mi_decl_hidden _Atomic(size_t) _mi_numa_node_count;
+static inline int _mi_os_numa_node(void) {
if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
- else return _mi_os_numa_node_get(tld);
+ else return _mi_os_numa_node_get();
}
static inline size_t _mi_os_numa_node_count(void) {
const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
@@ -821,16 +877,18 @@ static inline size_t mi_ctz(uintptr_t x) {
}
#else
-static inline size_t mi_ctz32(uint32_t x) {
+
+static inline size_t mi_ctz_generic32(uint32_t x) {
// de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
- static const unsigned char debruijn[32] = {
+ static const uint8_t debruijn[32] = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
if (x==0) return 32;
- return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
+ return debruijn[(uint32_t)((x & -(int32_t)x) * (uint32_t)(0x077CB531U)) >> 27];
}
-static inline size_t mi_clz32(uint32_t x) {
+
+static inline size_t mi_clz_generic32(uint32_t x) {
// de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
static const uint8_t debruijn[32] = {
31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
@@ -842,28 +900,37 @@ static inline size_t mi_clz32(uint32_t x) {
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
- return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
+ return debruijn[(uint32_t)(x * (uint32_t)(0x07C4ACDDU)) >> 27];
}
-static inline size_t mi_clz(uintptr_t x) {
- if (x==0) return MI_INTPTR_BITS;
-#if (MI_INTPTR_BITS <= 32)
- return mi_clz32((uint32_t)x);
-#else
- size_t count = mi_clz32((uint32_t)(x >> 32));
- if (count < 32) return count;
- return (32 + mi_clz32((uint32_t)x));
-#endif
+static inline size_t mi_ctz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (MI_SIZE_BITS <= 32)
+ return mi_ctz_generic32((uint32_t)x);
+ #else
+ const uint32_t lo = (uint32_t)x;
+ if (lo != 0) {
+ return mi_ctz_generic32(lo);
+ }
+ else {
+ return (32 + mi_ctz_generic32((uint32_t)(x>>32)));
+ }
+ #endif
}
-static inline size_t mi_ctz(uintptr_t x) {
- if (x==0) return MI_INTPTR_BITS;
-#if (MI_INTPTR_BITS <= 32)
- return mi_ctz32((uint32_t)x);
-#else
- size_t count = mi_ctz32((uint32_t)x);
- if (count < 32) return count;
- return (32 + mi_ctz32((uint32_t)(x>>32)));
-#endif
+
+static inline size_t mi_clz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (MI_SIZE_BITS <= 32)
+ return mi_clz_generic32((uint32_t)x);
+ #else
+ const uint32_t hi = (uint32_t)(x>>32);
+ if (hi != 0) {
+ return mi_clz_generic32(hi);
+ }
+ else {
+ return 32 + mi_clz_generic32((uint32_t)x);
+ }
+ #endif
}
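+// (for example, mi_ctz(8) == 3 and mi_clz(1) == MI_SIZE_BITS-1)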
#endif
@@ -885,8 +952,9 @@ static inline size_t mi_bsr(uintptr_t x) {
#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
extern bool _mi_cpu_has_fsrm;
+extern bool _mi_cpu_has_erms;
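+// (use "rep movsb/stosb" for short copies when FSRM is supported, and for longer copies when ERMS is supported)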
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
- if (_mi_cpu_has_fsrm) {
+ if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) {
__movsb((unsigned char*)dst, (const unsigned char*)src, n);
}
else {
@@ -894,7 +962,7 @@ static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
}
}
static inline void _mi_memzero(void* dst, size_t n) {
- if (_mi_cpu_has_fsrm) {
+ if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) {
__stosb((unsigned char*)dst, 0, n);
}
else {
diff --git a/include/mimalloc/prim.h b/include/mimalloc/prim.h
index 640c966f..9a1ab4f9 100644
--- a/include/mimalloc/prim.h
+++ b/include/mimalloc/prim.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -25,6 +25,8 @@ typedef struct mi_os_mem_config_s {
size_t page_size; // default to 4KiB
size_t large_page_size; // 0 if not supported, usually 2MiB (4MiB on Windows)
size_t alloc_granularity; // smallest allocation size (usually 4KiB, on Windows 64KiB)
+ size_t physical_memory; // physical memory size
+ size_t virtual_address_bits; // usually 48 or 56 bits on 64-bit systems (used to determine secure randomization)
bool has_overcommit; // can we reserve more memory than can be actually committed?
bool has_partial_free; // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc)
bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
@@ -41,9 +43,10 @@ int _mi_prim_free(void* addr, size_t size );
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
// which will later be committed explicitly using `_mi_prim_commit`.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
+// The `hint_addr` address is either `NULL` or a preferred allocation address but can be ignored.
// pre: !commit => !allow_large
// try_alignment >= _mi_os_page_size() and a power of 2
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
// Commit memory. Returns error code or 0 on success.
// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
@@ -116,14 +119,13 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
+
+
//-------------------------------------------------------------------
-// Thread id: `_mi_prim_thread_id()`
-//
-// Getting the thread id should be performant as it is called in the
-// fast path of `_mi_free` and we specialize for various platforms as
-// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
-// We only require _mi_prim_thread_id() to return a unique id
-// for each thread (unequal to zero).
+// Access to TLS (thread local storage) slots.
+// We need fast access to both a unique thread id (in `free.c:mi_free`) and
+// to a thread-local heap pointer (in `alloc.c:mi_malloc`).
+// To achieve this we use specialized code for various platforms.
//-------------------------------------------------------------------
// On some libc + platform combinations we can directly access a thread-local storage (TLS) slot.
@@ -135,14 +137,14 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
// but unfortunately we can not detect support reliably (see issue #883)
// We also use it on Apple OS as we use a TLS slot for the default heap there.
#if defined(__GNUC__) && ( \
- (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
+ (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|| (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__) || defined(__POWERPC__))) \
- || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
+ || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|| (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
|| (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
)
-#define MI_HAS_TLS_SLOT
+#define MI_HAS_TLS_SLOT 1
static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
void* res;
@@ -203,8 +205,52 @@ static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexce
#endif
}
+#elif _WIN32 && MI_WIN_USE_FIXED_TLS && !defined(MI_WIN_USE_FLS)
+
+// On Windows we can store the thread-local heap at a fixed TLS slot to avoid
+// thread-local initialization checks in the fast path. This uses a fixed location
+// in the TCB though (the last user-reserved slot by default), which may clash with other applications.
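+// (a custom slot offset can be selected by defining MI_WIN_USE_FIXED_TLS to a value greater than 1; see below)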
+
+#define MI_HAS_TLS_SLOT 2 // 2 = we can reliably initialize the slot (saving a test on each malloc)
+
+#if MI_WIN_USE_FIXED_TLS > 1
+#define MI_TLS_SLOT (MI_WIN_USE_FIXED_TLS)
+#elif MI_SIZE_SIZE == 4
+#define MI_TLS_SLOT (0x710) // Last user-reserved slot
+// #define MI_TLS_SLOT (0xF0C) // Last TlsSlot (might clash with other app reserved slot)
+#else
+#define MI_TLS_SLOT (0x888) // Last user-reserved slot
+// #define MI_TLS_SLOT (0x1678) // Last TlsSlot (might clash with other app reserved slot)
#endif
+static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
+ #if (_M_X64 || _M_AMD64) && !defined(_M_ARM64EC)
+ return (void*)__readgsqword((unsigned long)slot); // direct load at offset from gs
+ #elif _M_IX86 && !defined(_M_ARM64EC)
+ return (void*)__readfsdword((unsigned long)slot); // direct load at offset from fs
+ #else
+ return ((void**)NtCurrentTeb())[slot / sizeof(void*)];
+ #endif
+}
+static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
+ ((void**)NtCurrentTeb())[slot / sizeof(void*)] = value;
+}
+
+#endif
+
+
+
+//-------------------------------------------------------------------
+// Get a fast unique thread id.
+//
+// Getting the thread id should be performant as it is called in the
+// fast path of `_mi_free` and we specialize for various platforms as
+// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
+// We only require _mi_prim_thread_id() to return a unique id
+// for each thread (unequal to zero).
+//-------------------------------------------------------------------
+
+
// Do we have __builtin_thread_pointer? This would be the preferred way to get a unique thread id
// but unfortunately, it seems we cannot test for this reliably at this time (see issue #883)
// Nevertheless, it seems needed on older graviton platforms (see issue #851).
@@ -248,7 +294,7 @@ static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
return (uintptr_t)__builtin_thread_pointer();
}
-#elif defined(MI_HAS_TLS_SLOT)
+#elif MI_HAS_TLS_SLOT
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
#if defined(__BIONIC__)
@@ -275,7 +321,8 @@ static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
/* ----------------------------------------------------------------------------------------
-The thread local default heap: `_mi_prim_get_default_heap()`
+Get the thread local default heap: `_mi_prim_get_default_heap()`
+
This is inlined here as it is on the fast path for allocation functions.
On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
@@ -312,19 +359,21 @@ static inline mi_heap_t* mi_prim_get_default_heap(void);
#endif
-#if defined(MI_TLS_SLOT)
+#if MI_TLS_SLOT
# if !defined(MI_HAS_TLS_SLOT)
# error "trying to use a TLS slot for the default heap, but the mi_prim_tls_slot primitives are not defined"
# endif
static inline mi_heap_t* mi_prim_get_default_heap(void) {
mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
+ #if MI_HAS_TLS_SLOT == 1 // check if the TLS slot is initialized
if mi_unlikely(heap == NULL) {
#ifdef __GNUC__
__asm(""); // prevent conditional load of the address of _mi_heap_empty
#endif
heap = (mi_heap_t*)&_mi_heap_empty;
}
+ #endif
return heap;
}
@@ -366,7 +415,4 @@ static inline mi_heap_t* mi_prim_get_default_heap(void) {
#endif // mi_prim_get_default_heap()
-
-
-
#endif // MIMALLOC_PRIM_H
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index 2545c6d2..788d1731 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -72,6 +72,13 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
#endif
+// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
+// Padding should be disabled when using guard pages
+// #define MI_GUARDED 1
+#if defined(MI_GUARDED)
+#define MI_PADDING 0
+#endif
+
// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect buffer overflow on free.
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
@@ -225,6 +232,13 @@ typedef struct mi_block_s {
mi_encoded_t next;
} mi_block_t;
+#if MI_GUARDED
+// we always align guarded pointers in a block at an offset
+// the block `next` field is then used as a tag to distinguish regular offset aligned blocks from guarded ones
+#define MI_BLOCK_TAG_ALIGNED ((mi_encoded_t)(0))
+#define MI_BLOCK_TAG_GUARDED (~MI_BLOCK_TAG_ALIGNED)
+#endif
+
// The delayed flags are used for efficient multi-threaded free-ing
typedef enum mi_delayed_e {
@@ -248,7 +262,7 @@ typedef union mi_page_flags_s {
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
- uint16_t full_aligned;
+ uint32_t full_aligned;
struct {
uint8_t in_full;
uint8_t has_aligned;
@@ -363,7 +377,7 @@ static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
typedef struct mi_memid_os_info {
void* base; // actual base address of the block (used for offset aligned allocations)
- size_t alignment; // alignment at allocation
+ size_t size; // full allocation size
} mi_memid_os_info_t;
typedef struct mi_memid_arena_info {
@@ -402,7 +416,8 @@ typedef struct mi_segment_s {
// segment fields
struct mi_segment_s* next; // must be the first (non-constant) segment field -- see `segment.c:segment_init`
struct mi_segment_s* prev;
- bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation)
+ bool was_reclaimed; // true if it was reclaimed (used to limit reclaim-on-free reclamation)
+ bool dont_free; // can be temporarily true to ensure the segment is not freed
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
size_t abandoned_visits; // count how often this segment is visited for reclaiming (to force reclaim if it is too long)
@@ -488,6 +503,13 @@ struct mi_heap_s {
mi_heap_t* next; // list of heaps per thread
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
uint8_t tag; // custom tag, can be used for separating heaps based on the object types
+ #if MI_GUARDED
+ size_t guarded_size_min; // minimal size for guarded objects
+ size_t guarded_size_max; // maximal size for guarded objects
+ size_t guarded_sample_rate; // sample rate (set to 0 to disable guarded pages)
+ size_t guarded_sample_seed; // starting sample count
+ size_t guarded_sample_count; // current sample count (counting down to 0)
+ #endif
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
};
@@ -580,24 +602,34 @@ typedef struct mi_stats_s {
mi_stat_counter_t arena_count;
mi_stat_counter_t arena_crossover_count;
mi_stat_counter_t arena_rollback_count;
+ mi_stat_counter_t guarded_alloc_count;
#if MI_STAT>1
mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
#endif
} mi_stats_t;
+// add to stat keeping track of the peak
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
+// adjust stat in special cases to compensate for double counting
+void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount);
+void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount);
+// counters can just be increased
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
#if (MI_STAT)
#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
+#define mi_stat_adjust_increase(stat,amount) _mi_stat_adjust_increase( &(stat), amount)
+#define mi_stat_adjust_decrease(stat,amount) _mi_stat_adjust_decrease( &(stat), amount)
#else
-#define mi_stat_increase(stat,amount) (void)0
-#define mi_stat_decrease(stat,amount) (void)0
-#define mi_stat_counter_increase(stat,amount) (void)0
+#define mi_stat_increase(stat,amount) ((void)0)
+#define mi_stat_decrease(stat,amount) ((void)0)
+#define mi_stat_counter_increase(stat,amount) ((void)0)
+#define mi_stat_adjust_increase(stat,amount) ((void)0)
+#define mi_stat_adjust_decrease(stat,amount) ((void)0)
#endif
#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
@@ -633,12 +665,6 @@ typedef struct mi_segment_queue_s {
mi_segment_t* last;
} mi_segment_queue_t;
-// OS thread local data
-typedef struct mi_os_tld_s {
- size_t region_idx; // start point for next allocation
- mi_stats_t* stats; // points to tld stats
-} mi_os_tld_t;
-
// Segments thread local data
typedef struct mi_segments_tld_s {
mi_segment_queue_t small_free; // queue of segments with free small pages
@@ -651,7 +677,6 @@ typedef struct mi_segments_tld_s {
size_t reclaim_count;// number of reclaimed (abandoned) segments
mi_subproc_t* subproc; // sub-process this thread belongs to.
mi_stats_t* stats; // points to tld stats
- mi_os_tld_t* os; // points to os tld
} mi_segments_tld_t;
// Thread local data
@@ -661,7 +686,6 @@ struct mi_tld_s {
mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
mi_segments_tld_t segments; // segment tld
- mi_os_tld_t os; // os tld
mi_stats_t stats; // statistics
};
diff --git a/readme.md b/readme.md
index 44e4c261..9f5178d0 100644
--- a/readme.md
+++ b/readme.md
@@ -164,7 +164,7 @@ The `mimalloc` project builds a static library (in `out/msvc-x64`), while the
`mimalloc-override` project builds a DLL for overriding malloc
in the entire program.
-## macOS, Linux, BSD, etc.
+## Linux, macOS, BSD, etc.
We use [`cmake`](https://cmake.org)<sup>1</sup> as the build system:
@@ -200,13 +200,26 @@ free lists, etc., as:
> make
```
This will name the shared library as `libmimalloc-secure.so`.
-Use `ccmake`<sup>2</sup> instead of `cmake`
-to see and customize all the available build options.
+Use `cmake ../.. -LH` to see all the available build options.
-Notes:
-1. Install CMake: `sudo apt-get install cmake`
-2. Install CCMake: `sudo apt-get install cmake-curses-gui`
+The examples use the default compiler. To use another compiler, use:
+```
+> CC=clang CXX=clang++ cmake ../..
+```
+## CMake with Visual Studio
+
+You can also use cmake on Windows. Open a Visual Studio development prompt
+and invoke `cmake` with the right [generator](https://cmake.org/cmake/help/latest/generator/Visual%20Studio%2017%202022.html)
+and architecture, like:
+```
+> cmake ..\.. -G "Visual Studio 17 2022" -A x64 -DMI_OVERRIDE=ON
+```
+
+The cmake build type is specified when actually building, for example:
+```
+> cmake --build . --config=Release
+```
## Single source
@@ -240,7 +253,7 @@ to link with the static library. See `test\CMakeLists.txt` for an example.
For best performance in C++ programs, it is also recommended to override the
global `new` and `delete` operators. For convenience, mimalloc provides
-[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project.
+[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project.
In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator`
interface.
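+
+A minimal usage sketch (assuming mimalloc is linked and `mimalloc.h` is on the include path):
+
+```
+#include <mimalloc.h>
+#include <vector>
+
+int main() {
+  // the vector allocates its storage through mimalloc
+  std::vector<int, mi_stl_allocator<int>> v;
+  v.push_back(42);
+  return 0;
+}
+```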
@@ -415,27 +428,33 @@ Note that certain security restrictions may apply when doing this from
the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash).
-### Dynamic Override on Windows
+# Windows Override
-Dynamically overriding on mimalloc on Windows
-is robust and has the particular advantage to be able to redirect all malloc/free calls that go through
-the (dynamic) C runtime allocator, including those from other DLL's or libraries.
-As it intercepts all allocation calls on a low level, it can be used reliably
+Dynamically overriding malloc with mimalloc on Windows
+is robust and has the particular advantage of being able to redirect all malloc/free calls
+that go through the (dynamic) C runtime allocator, including those from other DLLs or
+libraries. As it intercepts all allocation calls at a low level, it can be used reliably
on large programs that include other 3rd party components.
-There are four requirements to make the overriding work robustly:
+There are four requirements to make the overriding work well:
1. Use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch).
-2. Link your program explicitly with `mimalloc-override.dll` library.
- To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some
- call to the mimalloc API in the `main` function, like `mi_version()`
- (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project
- for an example on how to use this.
-3. The [`mimalloc-redirect.dll`](bin) (or `mimalloc-redirect32.dll`) must be put
- in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL).
- The redirection DLL ensures that all calls to the C runtime malloc API get redirected to
- mimalloc functions (which reside in `mimalloc-override.dll`).
-4. Ensure the `mimalloc-override.dll` comes as early as possible in the import
+
+2. Link your program explicitly with the `mimalloc.lib` export library for the `mimalloc.dll`
+ (which must be compiled with `-DMI_OVERRIDE=ON`; this is the default).
+ To ensure the `mimalloc.dll` is actually loaded at run-time it is easiest
+ to insert some call to the mimalloc API in the `main` function, like `mi_version()`
+ (or use the `/include:mi_version` switch on the linker command, or
+ similarly, `#pragma comment(linker, "/include:mi_version")` in some source file).
+ See the `mimalloc-test-override` project for an example on how to use this.
+
+3. The `mimalloc-redirect.dll` must be put in the same folder as the main
+ `mimalloc.dll` at runtime (as it is a dependency of that DLL).
+ The redirection DLL ensures that all calls to the C runtime malloc API get
+ redirected to mimalloc functions (which reside in `mimalloc.dll`).
+
+4. Ensure the `mimalloc.dll` comes as early as possible in the import
list of the final executable (so it can intercept all potential allocations).
+ You can use `minject -l <exe>` to check this if needed.
For best performance on Windows with C++, it
is also recommended to also override the `new`/`delete` operations (by including
@@ -443,15 +462,14 @@ is also recommended to also override the `new`/`delete` operations (by including
a single(!) source file in your project).
The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic
-overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected.
+overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully
+redirected.
+
+For platforms other than x64, you may need a specific [redirection DLL](bin).
+Furthermore, we cannot always re-link an executable or ensure `mimalloc.dll` comes
+first in the import table. In such cases the [`minject`](bin) tool can be used
+to patch the executable's import tables.
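+
+For example, to list the import table of an executable (`myapp.exe` is a placeholder name):
+
+```
+> minject -l myapp.exe
+```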
-We cannot always re-link an executable with `mimalloc-override.dll`, and similarly, we cannot always
-ensure the the DLL comes first in the import table of the final executable.
-In many cases though we can patch existing executables without any recompilation
-if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll`
-into the import table (and put `mimalloc-redirect.dll` in the same folder)
-Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388) or
-the [`minject`](bin) program.
## Static override
@@ -469,7 +487,7 @@ object file. For example:
Another way to override statically that works on all platforms, is to
link statically to mimalloc (as shown in the introduction) and include a
header file in each source file that re-defines `malloc` etc. to `mi_malloc`.
-This is provided by [`mimalloc-override.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-override.h). This only works reliably though if all sources are
+This is provided by [`mimalloc-override.h`](include/mimalloc-override.h). This only works reliably though if all sources are
under your control; otherwise, mixing of pointers from different heaps may occur!
@@ -505,9 +523,13 @@ you also need to tell `valgrind` to not intercept those calls itself, and use:
By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed
used and not the standard allocator. Even though the [Valgrind option][valgrind-soname]
-is called `--soname-synonyms`, this also
-works when overriding with a static library or object file. Unfortunately, it is not possible to
-dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`.
+is called `--soname-synonyms`, this also works when overriding with a static library or object file.
+To dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`, use:
+
+```
+> valgrind --trace-children=yes --soname-synonyms=somalloc=*mimalloc* /usr/bin/env LD_PRELOAD=/usr/lib/libmimalloc.so -- <myprogram>
+```
+
See also the `test/test-wrong.c` file to test with `valgrind`.
Valgrind support is in its initial development -- please report any issues.
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 20c36044..6b0a33c1 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -24,6 +24,33 @@ static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
}
+#if MI_GUARDED
+static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, size_t size, size_t alignment, bool zero) mi_attr_noexcept {
+ // use over-allocation for guarded blocks
+ mi_assert_internal(alignment > 0 && alignment < MI_BLOCK_ALIGNMENT_MAX);
+ const size_t oversize = size + alignment - 1;
+ void* base = _mi_heap_malloc_guarded(heap, oversize, zero);
+ void* p = mi_align_up_ptr(base, alignment);
+ mi_track_align(base, p, (uint8_t*)p - (uint8_t*)base, size);
+ mi_assert_internal(mi_usable_size(p) >= size);
+ mi_assert_internal(_mi_is_aligned(p, alignment));
+ return p;
+}
+
+static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) {
+ const size_t rate = heap->guarded_sample_rate;
+ // only write if `rate!=0` so we don't write to the constant `_mi_heap_empty`
+ if (rate != 0) { heap->guarded_sample_rate = 0; }
+ void* p = _mi_heap_malloc_zero(heap, size, zero);
+ if (rate != 0) { heap->guarded_sample_rate = rate; }
+ return p;
+}
+#else
+static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) {
+ return _mi_heap_malloc_zero(heap, size, zero);
+}
+#endif
+
// Fallback aligned allocation that over-allocates -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
@@ -38,12 +65,13 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
// first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
if mi_unlikely(offset != 0) {
// todo: cannot support offset alignment for very large alignments yet
- #if MI_DEBUG > 0
+#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
- #endif
+#endif
return NULL;
}
oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
+ // note: no guarded as alignment > 0
p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
// zero afterwards as only the area from the aligned_p may be committed!
if (p == NULL) return NULL;
@@ -51,9 +79,10 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
else {
// otherwise over-allocate
oversize = size + alignment - 1;
- p = _mi_heap_malloc_zero(heap, oversize, zero);
+ p = mi_heap_malloc_zero_no_guarded(heap, oversize, zero);
if (p == NULL) return NULL;
}
+ mi_page_t* page = _mi_ptr_page(p);
// .. and align within the allocation
const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)`
@@ -62,17 +91,27 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
mi_assert_internal(adjust < alignment);
void* aligned_p = (void*)((uintptr_t)p + adjust);
if (aligned_p != p) {
- mi_page_t* page = _mi_ptr_page(p);
mi_page_set_has_aligned(page, true);
+ #if MI_GUARDED
+ // set tag to aligned so mi_usable_size works with guard pages
+ if (adjust >= sizeof(mi_block_t)) {
+ mi_block_t* const block = (mi_block_t*)p;
+ block->next = MI_BLOCK_TAG_ALIGNED;
+ }
+ #endif
_mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
}
// todo: expand padding if overallocated ?
- mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
- mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
+ mi_assert_internal(mi_page_usable_block_size(page) >= adjust + size);
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
mi_assert_internal(mi_usable_size(aligned_p)>=size);
mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
+ #if MI_DEBUG > 1
+ mi_page_t* const apage = _mi_ptr_page(aligned_p);
+ void* unalign_p = _mi_page_ptr_unalign(apage, aligned_p);
+ mi_assert_internal(p == unalign_p);
+ #endif
// now zero the block if needed
if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
@@ -85,6 +124,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
if (p != aligned_p) {
mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
+ #if MI_GUARDED
+ mi_track_mem_defined(p, sizeof(mi_block_t));
+ #endif
}
return aligned_p;
}
@@ -94,27 +136,27 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t*
{
mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
// we don't allocate more than MI_MAX_ALLOC_SIZE (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
- if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) {
+ if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) {
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
#endif
return NULL;
}
-
+
// use regular allocation if it is guaranteed to fit the alignment constraints.
// this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exist
// a page with the right block size, and if we always use the over-alloc fallback that would never happen.
if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) {
- void* p = _mi_heap_malloc_zero(heap, size, zero);
+ void* p = mi_heap_malloc_zero_no_guarded(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
- const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0;
+ const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0;
if mi_likely(is_aligned_or_null) {
return p;
}
else {
// this should never happen if the `mi_malloc_is_naturally_aligned` check is correct..
mi_assert(false);
- mi_free(p);
+ mi_free(p);
}
}
@@ -122,6 +164,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t*
return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero);
}
+
// Primitive aligned allocation
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
@@ -132,11 +175,17 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
#endif
return NULL;
}
-
+
+ #if MI_GUARDED
+ if (offset==0 && alignment < MI_BLOCK_ALIGNMENT_MAX && mi_heap_malloc_use_guarded(heap,size)) {
+ return mi_heap_malloc_guarded_aligned(heap, size, alignment, zero);
+ }
+ #endif
+
// try first if there happens to be a small block available with just the right alignment
if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
- const size_t padsize = size + MI_PADDING_SIZE;
+ const size_t padsize = size + MI_PADDING_SIZE;
mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
if mi_likely(page->free != NULL) {
const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
@@ -305,3 +354,5 @@ mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t
mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
}
+
+
diff --git a/src/alloc-override.c b/src/alloc-override.c
index 12837cdd..b5109ded 100644
--- a/src/alloc-override.c
+++ b/src/alloc-override.c
@@ -248,7 +248,7 @@ extern "C" {
// Forward Posix/Unix calls as well
void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize)
size_t malloc_size(const void* p) MI_FORWARD1(mi_usable_size,p)
- #if !defined(__ANDROID__) && !defined(__FreeBSD__)
+ #if !defined(__ANDROID__) && !defined(__FreeBSD__) && !defined(__DragonFly__)
size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p)
#else
size_t malloc_usable_size(const void *p) MI_FORWARD1(mi_usable_size,p)
@@ -289,8 +289,8 @@ mi_decl_weak int reallocarr(void* p, size_t count, size_t size) { return mi_r
void __libc_free(void* p) MI_FORWARD0(mi_free, p)
void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
-#elif defined(__GLIBC__) && defined(__linux__)
- // forward __libc interface (needed for glibc-based Linux distributions)
+#elif defined(__linux__)
+ // forward __libc interface (needed for glibc-based and musl-based Linux distributions)
void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc,size)
void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc,count,size)
void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc,p,size)
diff --git a/src/alloc.c b/src/alloc.c
index 1eee1f2f..a093f108 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -31,17 +31,22 @@ terms of the MIT license. A copy of the license can be found in the file
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
+
+ // check the free list
mi_block_t* const block = page->free;
if mi_unlikely(block == NULL) {
return _mi_malloc_generic(heap, size, zero, 0);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
+
// pop from the free list
page->free = mi_block_next(page, block);
page->used++;
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
+ mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE));
+
#if MI_DEBUG>3
- if (page->free_is_zero) {
+ if (page->free_is_zero && size > sizeof(*block)) {
mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
}
#endif
@@ -54,7 +59,10 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
// zero the block? note: we need to zero the full block size (issue #63)
if mi_unlikely(zero) {
mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+ mi_assert_internal(!mi_page_is_huge(page));
+ #if MI_PADDING
mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
+ #endif
if (page->free_is_zero) {
block->next = 0;
mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
@@ -91,7 +99,7 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
#endif
mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
- padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
+ padding->canary = mi_ptr_encode_canary(page,block,page->keys);
padding->delta = (uint32_t)(delta);
#if MI_PADDING_CHECK
if (!mi_page_is_huge(page)) {
@@ -113,17 +121,27 @@ extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t siz
return _mi_page_malloc_zero(heap,page,size,true);
}
+#if MI_GUARDED
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+#endif
+
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
mi_assert(heap != NULL);
+ mi_assert(size <= MI_SMALL_SIZE_MAX);
#if MI_DEBUG
const uintptr_t tid = _mi_thread_id();
mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
#endif
- mi_assert(size <= MI_SMALL_SIZE_MAX);
- #if (MI_PADDING)
+ #if (MI_PADDING || MI_GUARDED)
if (size == 0) { size = sizeof(void*); }
#endif
+ #if MI_GUARDED
+ if (mi_heap_malloc_use_guarded(heap,size)) {
+ return _mi_heap_malloc_guarded(heap, size, zero);
+ }
+ #endif
+ // get page in constant time, and allocate from it
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
mi_track_malloc(p,size,zero);
@@ -153,15 +171,23 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si
// The main allocation function
extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
+ // fast path for small objects
if mi_likely(size <= MI_SMALL_SIZE_MAX) {
mi_assert_internal(huge_alignment == 0);
return mi_heap_malloc_small_zero(heap, size, zero);
}
+ #if MI_GUARDED
+ else if (huge_alignment==0 && mi_heap_malloc_use_guarded(heap,size)) {
+ return _mi_heap_malloc_guarded(heap, size, zero);
+ }
+ #endif
else {
+ // regular allocation
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
mi_track_malloc(p,size,zero);
+
#if MI_STAT>1
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
@@ -577,6 +603,82 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
}
}
+#if MI_GUARDED
+// A guarded allocation is always allocated at an offset (`mi_page_has_aligned` will be true).
+// We then set the first word of the block to `0` for regular offset-aligned allocations (in `alloc-aligned.c`)
+// and to `~0` for guarded allocations, so that `mi_usable_size` stays correct.
+
+static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) {
+ // TODO: we can still make padding work by moving it out of the guard page area
+ mi_page_t* const page = _mi_ptr_page(block);
+ mi_page_set_has_aligned(page, true);
+ block->next = MI_BLOCK_TAG_GUARDED;
+
+ // set guard page at the end of the block
+ mi_segment_t* const segment = _mi_page_segment(page);
+ const size_t block_size = mi_page_block_size(page); // must use `block_size` to match `mi_free_local`
+ const size_t os_page_size = _mi_os_page_size();
+ mi_assert_internal(block_size >= obj_size + os_page_size + sizeof(mi_block_t));
+ if (block_size < obj_size + os_page_size + sizeof(mi_block_t)) {
+ // should never happen
+ mi_free(block);
+ return NULL;
+ }
+ uint8_t* guard_page = (uint8_t*)block + block_size - os_page_size;
+ mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));
+ if (segment->allow_decommit && _mi_is_aligned(guard_page, os_page_size)) {
+ _mi_os_protect(guard_page, os_page_size);
+ }
+ else {
+ _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) (object %p of size %zu)\n", block, block_size);
+ }
+
+ // align pointer just in front of the guard page
+ size_t offset = block_size - os_page_size - obj_size;
+ mi_assert_internal(offset > sizeof(mi_block_t));
+ if (offset > MI_BLOCK_ALIGNMENT_MAX) {
+    // if the offset is too large for unalignment, give up and place it right in front of the guard page
+ offset = MI_BLOCK_ALIGNMENT_MAX;
+ }
+ void* p = (uint8_t*)block + offset;
+ mi_track_align(block, p, offset, obj_size);
+ mi_track_mem_defined(block, sizeof(mi_block_t));
+ return p;
+}
+
+mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
+{
+ #if defined(MI_PADDING_SIZE)
+ mi_assert(MI_PADDING_SIZE==0);
+ #endif
+  // allocate a multiple of the OS page size, ending in a guard page
+  // (should we ensure a minimal alignment requirement here?)
+ const size_t os_page_size = _mi_os_page_size();
+ const size_t obj_size = (mi_option_is_enabled(mi_option_guarded_precise) ? size : _mi_align_up(size, MI_MAX_ALIGN_SIZE));
+ const size_t bsize = _mi_align_up(_mi_align_up(obj_size, MI_MAX_ALIGN_SIZE) + sizeof(mi_block_t), MI_MAX_ALIGN_SIZE);
+ const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size);
+ mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
+ if (block==NULL) return NULL;
+ void* const p = mi_block_ptr_set_guarded(block, obj_size);
+
+ // stats
+ mi_track_malloc(p, size, zero);
+ if (p != NULL) {
+ if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
+ #if MI_STAT>1
+ mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
+ #endif
+ _mi_stat_counter_increase(&heap->tld->stats.guarded_alloc_count, 1);
+ }
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
+ return p;
+}
+#endif
+
// ------------------------------------------------------
// ensure explicit external inline definitions are emitted!
// ------------------------------------------------------
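
To make the size computation in `_mi_heap_malloc_guarded` concrete, here is the same
arithmetic as a standalone sketch, assuming 4 KiB OS pages, a `MI_MAX_ALIGN_SIZE` of 16,
and an 8-byte `mi_block_t` (typical values, not guaranteed ones):

    #include <stdio.h>
    #include <stddef.h>

    static size_t align_up(size_t n, size_t a) { return (n + a - 1) & ~(a - 1); }

    int main(void) {
      const size_t os_page_size = 4096;   // assumed OS page size
      const size_t max_align    = 16;     // assumed MI_MAX_ALIGN_SIZE
      const size_t header       = 8;      // assumed sizeof(mi_block_t)
      const size_t size         = 100;    // user request

      const size_t obj_size = align_up(size, max_align);                    // 112
      const size_t bsize    = align_up(obj_size + header, max_align);       // 128
      const size_t req_size = align_up(bsize + os_page_size, os_page_size); // 8192

      // the returned pointer is placed so the object ends exactly at the
      // guard page (the real code uses the page's block size, which is at
      // least req_size, and caps the offset at MI_BLOCK_ALIGNMENT_MAX)
      const size_t offset = req_size - os_page_size - obj_size;             // 3984
      printf("req=%zu p=block+%zu object-end=%zu guard-page-start=%zu\n",
             req_size, offset, offset + obj_size, req_size - os_page_size);
      return 0;
    }
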
diff --git a/src/arena-abandon.c b/src/arena-abandon.c
index eaa8c7c9..460c80fc 100644
--- a/src/arena-abandon.c
+++ b/src/arena-abandon.c
@@ -120,11 +120,7 @@ static void mi_arena_segment_os_mark_abandoned(mi_segment_t* segment) {
mi_assert(segment->memid.memkind != MI_MEM_ARENA);
// not in an arena; we use a list of abandoned segments
mi_subproc_t* const subproc = segment->subproc;
- if (!mi_lock_acquire(&subproc->abandoned_os_lock)) {
- _mi_error_message(EFAULT, "internal error: failed to acquire the abandoned (os) segment lock to mark abandonment");
- // we can continue but cannot visit/reclaim such blocks..
- }
- else {
+ mi_lock(&subproc->abandoned_os_lock) {
// push on the tail of the list (important for the visitor)
mi_segment_t* prev = subproc->abandoned_os_list_tail;
mi_assert_internal(prev == NULL || prev->abandoned_os_next == NULL);
@@ -138,7 +134,6 @@ static void mi_arena_segment_os_mark_abandoned(mi_segment_t* segment) {
mi_atomic_increment_relaxed(&subproc->abandoned_os_list_count);
mi_atomic_increment_relaxed(&subproc->abandoned_count);
// and release the lock
- mi_lock_release(&subproc->abandoned_os_lock);
}
return;
}
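
The new `mi_lock(...) { ... }` statement form replaces the explicit acquire/release pairs
(and the old failure branch). One common way to build such a scoped-lock macro in C is a
single-iteration `for` loop; a minimal sketch of the idiom with pthreads (this is not
mimalloc's actual definition):

    #include <pthread.h>

    // run the following block while holding the lock; the loop body runs
    // exactly once and the increment clause releases the lock on exit
    // (caveat: a `break` or `return` inside the block would skip the unlock)
    #define with_lock(lk) \
      for (int once_ = (pthread_mutex_lock(lk), 1); once_; \
           once_ = (pthread_mutex_unlock(lk), 0))

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static int list_len = 0;

    void push_one(void) {
      with_lock(&list_lock) {
        list_len++;   // critical section
      }
    }
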
@@ -148,7 +143,7 @@ static void mi_arena_segment_os_mark_abandoned(mi_segment_t* segment) {
void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
{
mi_assert_internal(segment->used == segment->abandoned);
- mi_atomic_store_release(&segment->thread_id, 0); // mark as abandoned for multi-thread free's
+ mi_atomic_store_release(&segment->thread_id, (uintptr_t)0); // mark as abandoned for multi-thread free's
if mi_unlikely(segment->memid.memkind != MI_MEM_ARENA) {
mi_arena_segment_os_mark_abandoned(segment);
return;
@@ -237,7 +232,7 @@ static mi_segment_t* mi_arena_segment_clear_abandoned_at(mi_arena_t* arena, mi_s
static mi_segment_t* mi_arena_segment_clear_abandoned_next_field(mi_arena_field_cursor_t* previous) {
const size_t max_arena = mi_arena_get_count();
size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx);
- size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx) + 1;
+ size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx);
// visit arena's (from the previous cursor)
for (; previous->start < previous->end; previous->start++, field_idx = 0, bit_idx = 0) {
// index wraps around
@@ -251,7 +246,7 @@ static mi_segment_t* mi_arena_segment_clear_abandoned_next_field(mi_arena_field_
if mi_unlikely(field != 0) { // skip zero fields quickly
// we only take the arena lock if there are actually abandoned segments present
if (!has_lock && mi_option_is_enabled(mi_option_visit_abandoned)) {
- has_lock = (previous->visit_all ? mi_lock_acquire(&arena->abandoned_visit_lock) : mi_lock_try_acquire(&arena->abandoned_visit_lock));
+ has_lock = (previous->visit_all ? (mi_lock_acquire(&arena->abandoned_visit_lock),true) : mi_lock_try_acquire(&arena->abandoned_visit_lock));
if (!has_lock) {
if (previous->visit_all) {
_mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the visitor lock");
@@ -266,11 +261,12 @@ static mi_segment_t* mi_arena_segment_clear_abandoned_next_field(mi_arena_field_
// pre-check if the bit is set
size_t mask = ((size_t)1 << bit_idx);
if mi_unlikely((field & mask) == mask) {
- previous->bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
- mi_segment_t* const segment = mi_arena_segment_clear_abandoned_at(arena, previous->subproc, previous->bitmap_idx);
+ mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
+ mi_segment_t* const segment = mi_arena_segment_clear_abandoned_at(arena, previous->subproc, bitmap_idx);
if (segment != NULL) {
//mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); }
+ previous->bitmap_idx = mi_bitmap_index_create_ex(field_idx, bit_idx + 1); // start at next one for the next iteration
return segment;
}
}
@@ -288,8 +284,8 @@ static mi_segment_t* mi_arena_segment_clear_abandoned_next_list(mi_arena_field_c
  // we only allow one thread per sub-process to do the visit, guarded by the `abandoned_os_visit_lock`.
// The lock is released when the cursor is released.
if (!previous->hold_visit_lock) {
- previous->hold_visit_lock = (previous->visit_all ? mi_lock_acquire(&previous->subproc->abandoned_os_visit_lock)
- : mi_lock_try_acquire(&previous->subproc->abandoned_os_visit_lock));
+ previous->hold_visit_lock = (previous->visit_all ? (mi_lock_acquire(&previous->subproc->abandoned_os_visit_lock),true)
+ : mi_lock_try_acquire(&previous->subproc->abandoned_os_visit_lock));
if (!previous->hold_visit_lock) {
if (previous->visit_all) {
_mi_error_message(EFAULT, "internal error: failed to visit all abandoned segments due to failure to acquire the OS visitor lock");
@@ -300,21 +296,15 @@ static mi_segment_t* mi_arena_segment_clear_abandoned_next_list(mi_arena_field_c
// One list entry at a time
while (previous->os_list_count > 0) {
previous->os_list_count--;
- const bool has_lock = mi_lock_acquire(&previous->subproc->abandoned_os_lock); // this could contend with concurrent OS block abandonment and reclaim from `free`
- if (has_lock) {
- mi_segment_t* segment = previous->subproc->abandoned_os_list;
- // pop from head of the list, a subsequent mark will push at the end (and thus we iterate through os_list_count entries)
- if (segment == NULL || mi_arena_segment_os_clear_abandoned(segment, false /* we already have the lock */)) {
- mi_lock_release(&previous->subproc->abandoned_os_lock);
- return segment;
- }
- // already abandoned, try again
+ mi_lock_acquire(&previous->subproc->abandoned_os_lock); // this could contend with concurrent OS block abandonment and reclaim from `free`
+ mi_segment_t* segment = previous->subproc->abandoned_os_list;
+ // pop from head of the list, a subsequent mark will push at the end (and thus we iterate through os_list_count entries)
+ if (segment == NULL || mi_arena_segment_os_clear_abandoned(segment, false /* we already have the lock */)) {
mi_lock_release(&previous->subproc->abandoned_os_lock);
+ return segment;
}
- else {
- _mi_error_message(EFAULT, "failed to acquire abandoned OS list lock during abandoned block visit\n");
- return NULL;
- }
+ // already abandoned, try again
+ mi_lock_release(&previous->subproc->abandoned_os_lock);
}
// done
mi_assert_internal(previous->os_list_count == 0);
diff --git a/src/arena.c b/src/arena.c
index e0223e7f..f430aa27 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -33,7 +33,7 @@ The arena allocation needs to be thread safe and we use an atomic bitmap to allo
typedef struct mi_arena_s {
mi_arena_id_t id; // arena id; 0 for non-specific
mi_memid_t memid; // memid of the memory area
- _Atomic(uint8_t*)start; // the start of the memory area
+ _Atomic(uint8_t*) start; // the start of the memory area
size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
size_t meta_size; // size of the arena structure itself (including its bitmaps)
@@ -42,12 +42,13 @@ typedef struct mi_arena_s {
bool exclusive; // only allow allocations if specifically for this arena
bool is_large; // memory area consists of large- or huge OS pages (always committed)
mi_lock_t abandoned_visit_lock; // lock is only used when abandoned segments are being visited
- _Atomic(size_t)search_idx; // optimization to start the search for free blocks
- _Atomic(mi_msecs_t)purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`.
- mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
- mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
- mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
- mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here)
+ _Atomic(size_t) search_idx; // optimization to start the search for free blocks
+ _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be purged from `blocks_purge`.
+
+ mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
+ mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
+ mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
+ mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here)
mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
// do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields.
} mi_arena_t;
@@ -60,6 +61,7 @@ typedef struct mi_arena_s {
// The available arenas
static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0
+static mi_decl_cache_align _Atomic(int64_t) mi_arenas_purge_expire; // set if there exist purgeable arenas
#define MI_IN_ARENA_C
#include "arena-abandon.c"
@@ -186,7 +188,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
if (p != NULL) return p;
// or fall back to the OS
- p = _mi_os_alloc(size, memid, &_mi_stats_main);
+ p = _mi_os_alloc(size, memid);
if (p == NULL) return NULL;
// zero the OS memory if needed
@@ -199,7 +201,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
if (mi_memkind_is_os(memid.memkind)) {
- _mi_os_free(p, size, memid, &_mi_stats_main);
+ _mi_os_free(p, size, memid);
}
else {
mi_assert(memid.memkind == MI_MEM_STATIC);
@@ -216,10 +218,10 @@ void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
----------------------------------------------------------- */
// claim the `blocks_inuse` bits
-static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
+static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
{
size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
- if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) {
+ if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
return true;
};
@@ -232,13 +234,13 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
----------------------------------------------------------- */
static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
- bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
+ bool commit, mi_memid_t* memid)
{
MI_UNUSED(arena_index);
mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
mi_bitmap_index_t bitmap_index;
- if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL;
+ if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
// claimed it!
void* p = mi_arena_block_start(arena, bitmap_index);
@@ -268,7 +270,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
_mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
if (any_uncommitted) {
bool commit_zero = false;
- if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
+ if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero)) {
memid->initially_committed = false;
}
else {
@@ -286,10 +288,10 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
- bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+ bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid )
{
MI_UNUSED_RELEASE(alignment);
- mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
+ mi_assert(alignment <= MI_SEGMENT_ALIGN);
const size_t bcount = mi_block_count_of_size(size);
const size_t arena_index = mi_arena_id_index(arena_id);
mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
@@ -307,7 +309,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
}
// try to allocate
- void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
+ void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid);
mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
return p;
}
@@ -316,7 +318,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
bool commit, bool allow_large,
- mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+ mi_arena_id_t req_arena_id, mi_memid_t* memid )
{
MI_UNUSED(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -326,21 +328,21 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
if (req_arena_id != _mi_arena_id_none()) {
// try a specific arena if requested
if (mi_arena_id_index(req_arena_id) < max_arena) {
- void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
else {
// try numa affine allocation
for (size_t i = 0; i < max_arena; i++) {
- void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
// try from another numa node instead..
if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arena's have been tried already
for (size_t i = 0; i < max_arena; i++) {
- void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
@@ -349,11 +351,10 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
}
// try to reserve a fresh arena space
-static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
+static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t *arena_id)
{
if (_mi_preloading()) return false; // use OS only while pre loading
- if (req_arena_id != _mi_arena_id_none()) return false;
-
+
const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
if (arena_count > (MI_MAX_ARENAS - 4)) return false;
@@ -385,27 +386,28 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
- mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+ mi_arena_id_t req_arena_id, mi_memid_t* memid)
{
- mi_assert_internal(memid != NULL && tld != NULL);
+ mi_assert_internal(memid != NULL);
mi_assert_internal(size > 0);
*memid = _mi_memid_none();
- const int numa_node = _mi_os_numa_node(tld); // current numa node
+ const int numa_node = _mi_os_numa_node(); // current numa node
// try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
- if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed?
- if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
- void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) { // is arena allocation allowed?
+ if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
+ {
+ void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
// otherwise, try to first eagerly reserve a new arena
if (req_arena_id == _mi_arena_id_none()) {
mi_arena_id_t arena_id = 0;
- if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
+ if (mi_arena_reserve(size, allow_large, &arena_id)) {
// and try allocate in there
mi_assert_internal(req_arena_id == _mi_arena_id_none());
- p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
@@ -420,16 +422,16 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
// finally, fall back to the OS
if (align_offset > 0) {
- return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
+ return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid);
}
else {
- return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
}
}
-void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid)
{
- return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
+ return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid);
}
@@ -455,7 +457,7 @@ static long mi_arena_purge_delay(void) {
// reset or decommit in an arena and update the committed/decommit bitmaps
// assumes we own the area (i.e. blocks_in_use is claimed by us)
-static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
mi_assert_internal(arena->blocks_committed != NULL);
mi_assert_internal(arena->blocks_purge != NULL);
mi_assert_internal(!arena->memid.is_pinned);
@@ -464,7 +466,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
bool needs_recommit;
if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
// all blocks are committed, we can purge freely
- needs_recommit = _mi_os_purge(p, size, stats);
+ needs_recommit = _mi_os_purge(p, size);
}
else {
// some blocks are not committed -- this can happen when a partially committed block is freed
@@ -472,8 +474,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
// and also undo the decommit stats (as it was already adjusted)
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
- needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
- if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); }
+ needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, 0);
}
// clear the purged blocks
@@ -486,23 +487,26 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
// Note: assumes we (still) own the area as we may purge immediately
-static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
mi_assert_internal(arena->blocks_purge != NULL);
const long delay = mi_arena_purge_delay();
if (delay < 0) return; // is purging allowed at all?
if (_mi_preloading() || delay == 0) {
// decommit directly
- mi_arena_purge(arena, bitmap_idx, blocks, stats);
+ mi_arena_purge(arena, bitmap_idx, blocks);
}
else {
- // schedule decommit
- mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
- if (expire != 0) {
- mi_atomic_addi64_acq_rel(&arena->purge_expire, (mi_msecs_t)(delay/10)); // add smallish extra delay
+ // schedule purge
+ const mi_msecs_t expire = _mi_clock_now() + delay;
+ mi_msecs_t expire0 = 0;
+ if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) {
+ // expiration was not yet set
+ // maybe set the global arenas expire as well (if it wasn't set already)
+ mi_atomic_casi64_strong_acq_rel(&mi_arenas_purge_expire, &expire0, expire);
}
else {
- mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
+      // an expiration was already set
}
_mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
}
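
The rescheduling above arms the purge deadline at most once per delay cycle: the strong
CAS from 0 to `expire` succeeds for exactly one thread, and every other scheduler leaves
the existing deadline in place. The same idiom in isolation, with C11 atomics and
illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic int64_t purge_expire;   // 0 means nothing is scheduled

    // returns true if this call armed the deadline
    bool schedule_purge(int64_t now, int64_t delay) {
      int64_t expected = 0;
      // only the 0 -> now+delay transition succeeds; if a deadline is
      // already set, the CAS fails and we keep the existing one
      return atomic_compare_exchange_strong(&purge_expire, &expected, now + delay);
    }
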
@@ -511,7 +515,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
// purge a range of blocks
// return true if the full range was purged.
// assumes we own the area (i.e. blocks_in_use is claimed by us)
-static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
+static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge) {
const size_t endidx = startidx + bitlen;
size_t bitidx = startidx;
bool all_purged = false;
@@ -524,7 +528,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
if (count > 0) {
// found range to be purged
const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
- mi_arena_purge(arena, range_idx, count, stats);
+ mi_arena_purge(arena, range_idx, count);
if (count == bitlen) {
all_purged = true;
}
@@ -535,16 +539,18 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
}
// returns true if anything was purged
-static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
+static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
{
- if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
+ // check pre-conditions
+ if (arena->memid.is_pinned) return false;
+
+ // expired yet?
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
- if (expire == 0) return false;
- if (!force && expire > now) return false;
+ if (!force && (expire == 0 || expire > now)) return false;
// reset expire (if not already set concurrently)
mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0);
-
+
// potential purges scheduled, walk through the bitmap
bool any_purged = false;
bool full_purge = true;
@@ -571,7 +577,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
if (bitlen > 0) {
// read purge again now that we have the in_use bits
purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
- if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
+ if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge)) {
full_purge = false;
}
any_purged = true;
@@ -591,9 +597,15 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
return any_purged;
}
-static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
+static void mi_arenas_try_purge( bool force, bool visit_all )
+{
if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
+ // check if any arena needs purging?
+ const mi_msecs_t now = _mi_clock_now();
+ mi_msecs_t arenas_expire = mi_atomic_load_acquire(&mi_arenas_purge_expire);
+ if (!force && (arenas_expire == 0 || arenas_expire < now)) return;
+
const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
if (max_arena == 0) return;
@@ -601,17 +613,26 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
static mi_atomic_guard_t purge_guard;
mi_atomic_guard(&purge_guard)
{
- mi_msecs_t now = _mi_clock_now();
- size_t max_purge_count = (visit_all ? max_arena : 1);
+ // increase global expire: at most one purge per delay cycle
+ mi_atomic_store_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
+ size_t max_purge_count = (visit_all ? max_arena : 2);
+ bool all_visited = true;
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL) {
- if (mi_arena_try_purge(arena, now, force, stats)) {
- if (max_purge_count <= 1) break;
+ if (mi_arena_try_purge(arena, now, force)) {
+ if (max_purge_count <= 1) {
+ all_visited = false;
+ break;
+ }
max_purge_count--;
}
}
}
+ if (all_visited) {
+      // all arenas were visited and purged: reset the global expire
+ mi_atomic_store_release(&mi_arenas_purge_expire, 0);
+ }
}
}
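
`mi_atomic_guard` makes the arena walk single-threaded without blocking: a thread that
finds the guard taken simply skips the purge instead of waiting. A sketch of such a
try-lock guard using a C11 atomic flag (illustrative, not the mimalloc macro itself):

    #include <stdatomic.h>

    static atomic_flag purge_guard = ATOMIC_FLAG_INIT;

    void purge_arenas(void) {
      if (atomic_flag_test_and_set_explicit(&purge_guard, memory_order_acquire)) {
        return;  // another thread is already purging; skip rather than wait
      }
      // ... walk and purge the arenas ...
      atomic_flag_clear_explicit(&purge_guard, memory_order_release);
    }
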
@@ -620,8 +641,8 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
Arena free
----------------------------------------------------------- */
-void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
- mi_assert_internal(size > 0 && stats != NULL);
+void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid) {
+ mi_assert_internal(size > 0);
mi_assert_internal(committed_size <= size);
if (p==NULL) return;
if (size==0) return;
@@ -636,7 +657,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
// if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
_mi_stat_decrease(&_mi_stats_main.committed, committed_size);
}
- _mi_os_free(p, size, memid, stats);
+ _mi_os_free(p, size, memid);
}
else if (memid.memkind == MI_MEM_ARENA) {
// allocated in an arena
@@ -681,7 +702,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
// works (as we should never reset decommitted parts).
}
// (delay) purge the entire range
- mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
+ mi_arena_schedule_purge(arena, bitmap_idx, blocks);
}
// and make it available to others again
@@ -697,7 +718,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
}
// purge expired decommits
- mi_arenas_try_purge(false, false, stats);
+ mi_arenas_try_purge(false, false);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
@@ -711,7 +732,7 @@ static void mi_arenas_unsafe_destroy(void) {
mi_lock_done(&arena->abandoned_visit_lock);
if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
- _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
+ _mi_os_free(arena->start, mi_arena_size(arena), arena->memid);
}
else {
new_max_arena = i;
@@ -726,15 +747,15 @@ static void mi_arenas_unsafe_destroy(void) {
}
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
-void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) {
- mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats);
+void _mi_arenas_collect(bool force_purge) {
+ mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
-void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
+void _mi_arena_unsafe_destroy_all(void) {
mi_arenas_unsafe_destroy();
- _mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas
+ _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
}
// Is a pointer inside any of our arenas?
@@ -838,11 +859,11 @@ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exc
if (arena_id != NULL) *arena_id = _mi_arena_id_none();
size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
mi_memid_t memid;
- void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
+ void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid);
if (start == NULL) return ENOMEM;
const bool is_large = memid.is_pinned; // todo: use separate is_large field?
if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
- _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
+ _mi_os_free_ex(start, size, commit, memid);
_mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
return ENOMEM;
}
@@ -890,11 +911,11 @@ static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_
return inuse_count;
}
-void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept {
+void mi_debug_show_arenas(bool show_inuse) mi_attr_noexcept {
size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
size_t inuse_total = 0;
- size_t abandoned_total = 0;
- size_t purge_total = 0;
+ //size_t abandoned_total = 0;
+ //size_t purge_total = 0;
for (size_t i = 0; i < max_arenas; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
if (arena == NULL) break;
@@ -905,16 +926,16 @@ void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge)
if (arena->blocks_committed != NULL) {
mi_debug_show_bitmap(" ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count);
}
- if (show_abandoned) {
- abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count);
- }
- if (show_purge && arena->blocks_purge != NULL) {
- purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count);
- }
+ //if (show_abandoned) {
+ // abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count);
+ //}
+ //if (show_purge && arena->blocks_purge != NULL) {
+ // purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count);
+ //}
}
if (show_inuse) _mi_verbose_message("total inuse blocks : %zu\n", inuse_total);
- if (show_abandoned) _mi_verbose_message("total abandoned blocks: %zu\n", abandoned_total);
- if (show_purge) _mi_verbose_message("total purgeable blocks: %zu\n", purge_total);
+ //if (show_abandoned) _mi_verbose_message("total abandoned blocks: %zu\n", abandoned_total);
+ //if (show_purge) _mi_verbose_message("total purgeable blocks: %zu\n", purge_total);
}
@@ -938,7 +959,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_m
_mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
- _mi_os_free(p, hsize, memid, &_mi_stats_main);
+ _mi_os_free(p, hsize, memid);
return ENOMEM;
}
return 0;
diff --git a/src/bitmap.c b/src/bitmap.c
index 976ba72c..98f6ab7b 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -182,7 +182,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Try to atomically claim a sequence of `count` bits starting from the field
// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`)
-static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
+static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
{
mi_assert_internal(bitmap_idx != NULL);
@@ -242,7 +242,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
// claimed!
- mi_stat_counter_increase(stats->arena_crossover_count,1);
+ mi_stat_counter_increase(_mi_stats_main.arena_crossover_count,1);
*bitmap_idx = mi_bitmap_index_create(idx, initial_idx);
return true;
@@ -262,10 +262,10 @@ rollback:
newmap = (map & ~initial_mask);
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
}
- mi_stat_counter_increase(stats->arena_rollback_count,1);
+ mi_stat_counter_increase(_mi_stats_main.arena_rollback_count,1);
// retry? (we make a recursive call instead of goto to be able to use const declarations)
if (retries <= 2) {
- return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx, stats);
+ return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx);
}
else {
return false;
@@ -275,7 +275,7 @@ rollback:
// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
-bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) {
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
mi_assert_internal(count > 0);
if (count <= 2) {
// we don't bother with crossover fields for small counts
@@ -295,7 +295,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
}
*/
// if that fails, then try to claim across fields
- if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx, stats)) {
+ if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx)) {
return true;
}
}
diff --git a/src/bitmap.h b/src/bitmap.h
index a1e7686a..d60668cb 100644
--- a/src/bitmap.h
+++ b/src/bitmap.h
@@ -35,9 +35,13 @@ typedef mi_bitmap_field_t* mi_bitmap_t;
typedef size_t mi_bitmap_index_t;
// Create a bit index.
+static inline mi_bitmap_index_t mi_bitmap_index_create_ex(size_t idx, size_t bitidx) {
+ mi_assert_internal(bitidx <= MI_BITMAP_FIELD_BITS);
+ return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
+}
static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) {
mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS);
- return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
+ return mi_bitmap_index_create_ex(idx,bitidx);
}
// Get the field index from a bit index.
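
`mi_bitmap_index_create_ex` relaxes the assertion to `bitidx <= MI_BITMAP_FIELD_BITS` so
that a cursor may hold a one-past-the-end position, as the abandoned-segment cursor above
now does when it resumes at `bit_idx + 1`. The encoding itself is plain arithmetic; a
sketch assuming 64-bit fields:

    #include <assert.h>
    #include <stddef.h>

    #define FIELD_BITS 64   // stand-in for MI_BITMAP_FIELD_BITS

    // a bitmap index packs (field index, bit-in-field) into one integer
    static size_t index_create(size_t field_idx, size_t bit_idx) {
      assert(bit_idx <= FIELD_BITS);  // '==' allowed for one-past-the-end cursors
      return field_idx * FIELD_BITS + bit_idx;
    }
    static size_t index_field(size_t idx) { return idx / FIELD_BITS; }
    static size_t index_bit(size_t idx)   { return idx % FIELD_BITS; }

    // note: one-past-the-end of field 1 (bit index 64) encodes to the same
    // number as bit 0 of field 2 -- exactly where a resuming cursor should start
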
@@ -90,7 +94,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
-bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats);
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
diff --git a/src/free.c b/src/free.c
index c6221fe7..f2e30b65 100644
--- a/src/free.c
+++ b/src/free.c
@@ -34,11 +34,11 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
if (track_stats) { mi_stat_free(page, block); }
- #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned
-
+
// actual free: push on the local free list
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
@@ -51,8 +51,8 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
}
// Adjust a block that was allocated aligned, to the actual start of the block in the page.
-// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
-// `page_start` and `block_size` fields; however these are constant and the page won't be
+// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
+// `page_start` and `block_size` fields; however these are constant and the page won't be
// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
mi_assert_internal(page!=NULL && p!=NULL);
@@ -69,16 +69,30 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
return (mi_block_t*)((uintptr_t)p - adjust);
}
+// forward declaration for a MI_GUARDED build
+#if MI_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p); // forward declaration
+static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+ if (mi_block_ptr_is_guarded(block, p)) { mi_block_unguard(page, block, p); }
+}
+#else
+static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+ MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(p);
+}
+#endif
+
// free a local pointer (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
MI_UNUSED(segment);
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
+ mi_block_check_unguard(page, block, p);
mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
}
// free a pointer owned by another thread (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
+ mi_block_check_unguard(page, block, p);
mi_free_block_mt(page, segment, block);
}
@@ -95,17 +109,17 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
{
MI_UNUSED(msg);
-#if (MI_DEBUG>0)
- if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
+ #if (MI_DEBUG>0)
+ if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0 && !mi_option_is_enabled(mi_option_guarded_precise)) {
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
return NULL;
}
-#endif
+ #endif
mi_segment_t* const segment = _mi_ptr_segment(p);
if mi_unlikely(segment==NULL) return segment;
-#if (MI_DEBUG>0)
+ #if (MI_DEBUG>0)
if mi_unlikely(!mi_is_in_heap_region(p)) {
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
@@ -113,13 +127,13 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
-#endif
-#if (MI_DEBUG>0 || MI_SECURE>=4)
+ #endif
+ #if (MI_DEBUG>0 || MI_SECURE>=4)
if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
_mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
return NULL;
}
-#endif
+ #endif
return segment;
}
@@ -231,11 +245,12 @@ static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block
static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block)
{
// first see if the segment was abandoned and if we can reclaim it into our thread
- if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
+ if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 &&
#if MI_HUGE_PAGE_ABANDON
segment->page_kind != MI_PAGE_HUGE &&
#endif
- mi_atomic_load_relaxed(&segment->thread_id) == 0)
+ mi_atomic_load_relaxed(&segment->thread_id) == 0 && // segment is abandoned?
+      mi_prim_get_default_heap() != (mi_heap_t*)&_mi_heap_empty) // and we did not already exit this thread (without this check, a fresh heap will be initialized (issue #944))
{
// the segment is abandoned, try to reclaim it into our heap
if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) {
@@ -291,7 +306,13 @@ static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* p
const size_t size = mi_page_usable_size_of(page, block);
const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
- return (size - adjust);
+ const size_t aligned_size = (size - adjust);
+ #if MI_GUARDED
+ if (mi_block_ptr_is_guarded(block, p)) {
+ return aligned_size - _mi_os_page_size();
+ }
+ #endif
+ return aligned_size;
}
static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
@@ -401,7 +422,7 @@ static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* bloc
uintptr_t keys[2];
keys[0] = page->keys[0];
keys[1] = page->keys[1];
- bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
+ bool ok = (mi_ptr_encode_canary(page,block,keys) == canary && *delta <= *bsize);
mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
return ok;
}
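
The padding check compares a stored 32-bit canary against a keyed re-encoding of the
block pointer; any overflow of the object into its padding changes the canary and fails
the comparison. A toy version of the idea (the xor/fold mix below is illustrative only;
mimalloc's `mi_ptr_encode_canary` uses the per-page keys):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct padding_s {
      uint32_t canary;  // keyed encoding of the block address
      uint32_t delta;   // bytes between requested and usable size
    } padding_t;

    static uint32_t encode_canary(const void* block, uint64_t key) {
      uint64_t x = (uint64_t)(uintptr_t)block ^ key;
      return (uint32_t)(x ^ (x >> 32));   // fold to 32 bits
    }

    // called on free: detect a heap-block overflow into the padding area
    static bool padding_ok(const padding_t* pad, const void* block,
                           uint64_t key, uint32_t bsize) {
      return pad->canary == encode_canary(block, key) && pad->delta <= bsize;
    }
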
@@ -518,3 +539,23 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
MI_UNUSED(page); MI_UNUSED(block);
}
#endif
+
+
+// Remove guard page when building with MI_GUARDED
+#if MI_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) {
+ MI_UNUSED(p);
+ mi_assert_internal(mi_block_ptr_is_guarded(block, p));
+ mi_assert_internal(mi_page_has_aligned(page));
+ mi_assert_internal((uint8_t*)p - (uint8_t*)block >= (ptrdiff_t)sizeof(mi_block_t));
+ mi_assert_internal(block->next == MI_BLOCK_TAG_GUARDED);
+
+ const size_t bsize = mi_page_block_size(page);
+ const size_t psize = _mi_os_page_size();
+ mi_assert_internal(bsize > psize);
+ mi_assert_internal(_mi_page_segment(page)->allow_decommit);
+ void* gpage = (uint8_t*)block + bsize - psize;
+ mi_assert_internal(_mi_is_aligned(gpage, psize));
+ _mi_os_unprotect(gpage, psize);
+}
+#endif
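
`_mi_os_protect` and `_mi_os_unprotect` reduce to page-protection syscalls. A minimal
POSIX sketch of placing and removing a trailing guard page (assumes `base` is page-aligned
and `size` is a multiple of the page size; error handling elided):

    #include <stddef.h>
    #include <sys/mman.h>

    // protect the last page of a block so any overflow into it faults
    static void set_guard(void* base, size_t size, size_t page_size) {
      unsigned char* guard = (unsigned char*)base + size - page_size;
      mprotect(guard, page_size, PROT_NONE);               // reads/writes now trap
    }

    // make the page usable again before the block goes back on a free list
    static void clear_guard(void* base, size_t size, size_t page_size) {
      unsigned char* guard = (unsigned char*)base + size - page_size;
      mprotect(guard, page_size, PROT_READ | PROT_WRITE);
    }
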
diff --git a/src/heap.c b/src/heap.c
index 0d716f91..f856a426 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -32,7 +32,7 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
#if MI_DEBUG>1
size_t total = heap->page_count;
size_t count = 0;
- #endif
+ #endif
for (size_t i = 0; i <= MI_BIN_FULL; i++) {
mi_page_queue_t* pq = &heap->pages[i];
@@ -59,7 +59,7 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
MI_UNUSED(pq);
mi_assert_internal(mi_page_heap(page) == heap);
mi_segment_t* segment = _mi_page_segment(page);
- mi_assert_internal(segment->thread_id == heap->thread_id);
+ mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == heap->thread_id);
mi_assert_expensive(_mi_page_is_valid(page));
return true;
}
@@ -164,9 +164,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
if (force && is_main_thread && mi_heap_is_backing(heap)) {
_mi_thread_data_collect(); // collect thread data cache
}
-
+
// collect arenas (this is program wide so don't force purges on abandonment of threads)
- _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats);
+ _mi_arenas_collect(collect == MI_FORCE /* force purge? */);
}
void _mi_heap_collect_abandon(mi_heap_t* heap) {
@@ -221,6 +221,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->keys[0] = _mi_heap_random_next(heap);
heap->keys[1] = _mi_heap_random_next(heap);
+ _mi_heap_guarded_init(heap);
// push on the thread local heaps list
heap->next = heap->tld->heaps;
heap->tld->heaps = heap;
@@ -240,7 +241,7 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
}
mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
- // don't reclaim abandoned memory or otherwise destroy is unsafe
+  // don't reclaim abandoned memory, as otherwise `mi_heap_destroy` is unsafe
return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none());
}
@@ -369,7 +370,13 @@ void mi_heap_destroy(mi_heap_t* heap) {
mi_assert(heap->no_reclaim);
mi_assert_expensive(mi_heap_is_valid(heap));
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+ #if MI_GUARDED
+ // _mi_warning_message("'mi_heap_destroy' called but MI_GUARDED is enabled -- using `mi_heap_delete` instead (heap at %p)\n", heap);
+ mi_heap_delete(heap);
+ return;
+ #else
if (!heap->no_reclaim) {
+ _mi_warning_message("'mi_heap_destroy' called but ignored as the heap was not created with 'allow_destroy' (heap at %p)\n", heap);
// don't free in case it may contain reclaimed pages
mi_heap_delete(heap);
}
@@ -382,12 +389,14 @@ void mi_heap_destroy(mi_heap_t* heap) {
_mi_heap_destroy_pages(heap);
mi_heap_free(heap);
}
+ #endif
}
// forcefully destroy all heaps in the current thread
-void _mi_heap_unsafe_destroy_all(void) {
- mi_heap_t* bheap = mi_heap_get_backing();
- mi_heap_t* curr = bheap->tld->heaps;
+void _mi_heap_unsafe_destroy_all(mi_heap_t* heap) {
+ mi_assert_internal(heap != NULL);
+ if (heap == NULL) return;
+ mi_heap_t* curr = heap->tld->heaps;
while (curr != NULL) {
mi_heap_t* next = curr->next;
if (curr->no_reclaim) {
@@ -438,6 +447,12 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
mi_heap_reset_pages(from);
}
+// are two heaps compatible with respect to heap-tag, exclusive arena, etc.?
+static bool mi_heaps_are_compatible(mi_heap_t* heap1, mi_heap_t* heap2) {
+ return (heap1->tag == heap2->tag && // store same kind of objects
+ heap1->arena_id == heap2->arena_id); // same arena preference
+}
+
// Safe delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
@@ -446,9 +461,10 @@ void mi_heap_delete(mi_heap_t* heap)
mi_assert_expensive(mi_heap_is_valid(heap));
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
- if (!mi_heap_is_backing(heap)) {
+ mi_heap_t* bheap = heap->tld->heap_backing;
+ if (bheap != heap && mi_heaps_are_compatible(bheap,heap)) {
// transfer still used pages to the backing heap
- mi_heap_absorb(heap->tld->heap_backing, heap);
+ mi_heap_absorb(bheap, heap);
}
else {
// the backing heap abandons its pages
@@ -536,13 +552,14 @@ void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
static void mi_get_fast_divisor(size_t divisor, uint64_t* magic, size_t* shift) {
mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
- *shift = 64 - mi_clz(divisor - 1);
- *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1);
+ *shift = MI_INTPTR_BITS - mi_clz(divisor - 1);
+ *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1);
}
static size_t mi_fast_divide(size_t n, uint64_t magic, size_t shift) {
mi_assert_internal(n <= UINT32_MAX);
- return ((((uint64_t)n * magic) >> 32) + n) >> shift;
+ const uint64_t hi = ((uint64_t)n * magic) >> 32;
+ return (size_t)((hi + n) >> shift);
}
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg) {
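
The magic/shift pair computed above replaces repeated division by the block size with a
multiply and a shift (valid for n <= UINT32_MAX, as asserted). A quick standalone check of
the scheme, assuming GCC/Clang for `__builtin_clzll` and a 64-bit `MI_INTPTR_BITS`:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static void get_fast_divisor(size_t d, uint64_t* magic, size_t* shift) {
      // 64 stands in for MI_INTPTR_BITS on a 64-bit build; requires d >= 2
      *shift = 64 - __builtin_clzll((unsigned long long)(d - 1));
      *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - d)) / d + 1);
    }

    static size_t fast_divide(size_t n, uint64_t magic, size_t shift) {
      const uint64_t hi = ((uint64_t)n * magic) >> 32;
      return (size_t)((hi + n) >> shift);
    }

    int main(void) {
      uint64_t magic; size_t shift;
      get_fast_divisor(48, &magic, &shift);   // e.g. a 48-byte block size
      for (size_t n = 0; n < 100000; n++) {
        assert(fast_divide(n, magic, shift) == n / 48);
      }
      return 0;
    }
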
@@ -581,7 +598,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_
// create a bitmap of free blocks.
#define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
- const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS);
+ const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS);
memset(free_map, 0, bmapsize * sizeof(intptr_t));
if (page->capacity % MI_INTPTR_BITS != 0) {
// mark left-over bits at the end as free
@@ -591,7 +608,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_
}
// fast repeated division by the block size
- uint64_t magic;
+ uint64_t magic;
size_t shift;
mi_get_fast_divisor(bsize, &magic, &shift);
@@ -665,7 +682,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
mi_heap_area_ex_t xarea;
xarea.page = page;
- _mi_heap_area_init(&xarea.area, page);
+ _mi_heap_area_init(&xarea.area, page);
return fun(heap, &xarea, arg);
}
diff --git a/src/init.c b/src/init.c
index ead5a147..734bf5de 100644
--- a/src/init.c
+++ b/src/init.c
@@ -86,7 +86,8 @@ const mi_page_t _mi_page_empty = {
MI_STAT_COUNT_NULL(), \
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
- { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 } \
MI_STAT_COUNT_END_NULL()
// --------------------------------------------------------
@@ -111,6 +112,9 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
NULL, // next
false, // can reclaim
0, // tag
+ #if MI_GUARDED
+ 0, 0, 0, 0, 1, // count is 1 so we never write to it (see `internal.h:mi_heap_malloc_use_guarded`)
+ #endif
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY
};
@@ -132,9 +136,8 @@ static mi_decl_cache_align mi_tld_t tld_main = {
&_mi_heap_main, &_mi_heap_main,
{ { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
0, 0, 0, 0, 0, &mi_subproc_default,
- &tld_main.stats, &tld_main.os
+ &tld_main.stats
}, // segments
- { 0, &tld_main.stats }, // os
{ MI_STATS_NULL } // stats
};
@@ -151,6 +154,9 @@ mi_decl_cache_align mi_heap_t _mi_heap_main = {
NULL, // next heap
false, // can reclaim
0, // tag
+ #if MI_GUARDED
+ 0, 0, 0, 0, 0,
+ #endif
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY
};
@@ -159,6 +165,45 @@ bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.
mi_stats_t _mi_stats_main = { MI_STATS_NULL };
+#if MI_GUARDED
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
+ heap->guarded_sample_seed = seed;
+ if (heap->guarded_sample_seed == 0) {
+ heap->guarded_sample_seed = _mi_heap_random_next(heap);
+ }
+ heap->guarded_sample_rate = sample_rate;
+ if (heap->guarded_sample_rate >= 1) {
+ heap->guarded_sample_seed = heap->guarded_sample_seed % heap->guarded_sample_rate;
+ }
+ heap->guarded_sample_count = heap->guarded_sample_seed; // count down samples
+}
+
+mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
+ heap->guarded_size_min = min;
+ heap->guarded_size_max = (min > max ? min : max);
+}
+
+void _mi_heap_guarded_init(mi_heap_t* heap) {
+ mi_heap_guarded_set_sample_rate(heap,
+ (size_t)mi_option_get_clamp(mi_option_guarded_sample_rate, 0, LONG_MAX),
+ (size_t)mi_option_get(mi_option_guarded_sample_seed));
+ mi_heap_guarded_set_size_bound(heap,
+ (size_t)mi_option_get_clamp(mi_option_guarded_min, 0, LONG_MAX),
+ (size_t)mi_option_get_clamp(mi_option_guarded_max, 0, LONG_MAX) );
+}
+#else
+mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
+ MI_UNUSED(heap); MI_UNUSED(sample_rate); MI_UNUSED(seed);
+}
+
+mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
+ MI_UNUSED(heap); MI_UNUSED(min); MI_UNUSED(max);
+}
+void _mi_heap_guarded_init(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+}
+#endif
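// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] The fields set above suggest a simple
// count-down sampling scheme: roughly 1 out of `guarded_sample_rate` allocations whose
// size lies in [guarded_size_min, guarded_size_max] gets a guard page, with the seed
// randomizing the phase. The real fast-path test lives in
// `internal.h:mi_heap_malloc_use_guarded` (not in this diff); the names below are
// illustrative only.
#include <stdbool.h>
#include <stddef.h>

typedef struct guard_sampler_s {
  size_t rate;      // 0 = guarding disabled
  size_t count;     // countdown to the next guarded allocation (starts at seed % rate)
  size_t size_min;
  size_t size_max;
} guard_sampler_t;

static bool use_guarded_sketch(guard_sampler_t* g, size_t size) {
  if (g->rate == 0 || size < g->size_min || size > g->size_max) return false;
  if (g->count > 0) { g->count--; return false; }
  g->count = g->rate - 1;   // reset: every rate-th eligible allocation is guarded
  return true;
}
// ----------------------------------------------------------------------------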
+
static void mi_heap_main_init(void) {
if (_mi_heap_main.cookie == 0) {
@@ -174,6 +219,7 @@ static void mi_heap_main_init(void) {
_mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
mi_lock_init(&mi_subproc_default.abandoned_os_lock);
mi_lock_init(&mi_subproc_default.abandoned_os_visit_lock);
+ _mi_heap_guarded_init(&_mi_heap_main);
}
}
@@ -211,11 +257,10 @@ void mi_subproc_delete(mi_subproc_id_t subproc_id) {
mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id);
// check if there are no abandoned segments still..
bool safe_to_delete = false;
- if (mi_lock_acquire(&subproc->abandoned_os_lock)) {
+ mi_lock(&subproc->abandoned_os_lock) {
if (subproc->abandoned_os_list == NULL) {
safe_to_delete = true;
}
- mi_lock_release(&subproc->abandoned_os_lock);
}
if (!safe_to_delete) return;
// safe to release
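// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] The new `mi_lock(l) { ... }` form above
// is a statement-shaped macro whose definition is outside this diff. A common way to
// build one (assuming acquire/release primitives; hypothetical names) is a
// single-iteration for-loop that releases the lock when the block exits normally:
#include <stdio.h>

typedef int my_lock_t;
static void my_lock_acquire(my_lock_t* l) { (void)l; /* e.g. pthread_mutex_lock */ }
static void my_lock_release(my_lock_t* l) { (void)l; /* e.g. pthread_mutex_unlock */ }

#define my_locked(lock) \
  for (int once_ = (my_lock_acquire(lock), 1); once_; my_lock_release(lock), once_ = 0)

int main(void) {
  my_lock_t l = 0;
  my_locked(&l) {                     // acquired on entry
    printf("in critical section\n");
  }                                   // released here; don't `break`/`return` out of it
  return 0;
}
// ----------------------------------------------------------------------------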
@@ -273,10 +318,10 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
// if that fails, allocate as meta data
if (td == NULL) {
mi_memid_t memid;
- td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+ td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
if (td == NULL) {
// if this fails, try once more. (issue #257)
- td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+ td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
if (td == NULL) {
// really out of memory
_mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
@@ -306,7 +351,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
}
}
// if that fails, just free it directly
- _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
+ _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid);
}
void _mi_thread_data_collect(void) {
@@ -316,7 +361,7 @@ void _mi_thread_data_collect(void) {
if (td != NULL) {
td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
if (td != NULL) {
- _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main);
+ _mi_os_free(td, sizeof(mi_thread_data_t), td->memid);
}
}
}
@@ -353,8 +398,6 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
tld->heaps = NULL;
tld->segments.subproc = &mi_subproc_default;
tld->segments.stats = &tld->stats;
- tld->segments.os = &tld->os;
- tld->os.stats = &tld->stats;
}
// Free the thread local default heap (called from `mi_thread_done`)
@@ -508,54 +551,15 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) {
// --------------------------------------------------------
// Run functions on process init/done, and thread init/done
// --------------------------------------------------------
-static void mi_cdecl mi_process_done(void);
-
static bool os_preloading = true; // true until this module is initialized
-static bool mi_redirected = false; // true if malloc redirects to mi_malloc
// Returns true if this module has not been initialized; Don't use C runtime routines until it returns false.
bool mi_decl_noinline _mi_preloading(void) {
return os_preloading;
}
-mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept {
- return mi_redirected;
-}
-
-// Communicate with the redirection module on Windows
-#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT)
-#ifdef __cplusplus
-extern "C" {
-#endif
-mi_decl_export void _mi_redirect_entry(DWORD reason) {
- // called on redirection; careful as this may be called before DllMain
- if (reason == DLL_PROCESS_ATTACH) {
- mi_redirected = true;
- }
- else if (reason == DLL_PROCESS_DETACH) {
- mi_redirected = false;
- }
- else if (reason == DLL_THREAD_DETACH) {
- mi_thread_done();
- }
-}
-__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message);
-__declspec(dllimport) void mi_cdecl mi_allocator_done(void);
-#ifdef __cplusplus
-}
-#endif
-#else
-static bool mi_allocator_init(const char** message) {
- if (message != NULL) *message = NULL;
- return true;
-}
-static void mi_allocator_done(void) {
- // nothing to do
-}
-#endif
-
-// Called once by the process loader
-static void mi_process_load(void) {
+// Called once by the process loader from `src/prim/prim.c`
+void _mi_process_load(void) {
mi_heap_main_init();
#if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
@@ -563,17 +567,14 @@ static void mi_process_load(void) {
#endif
os_preloading = false;
mi_assert_internal(_mi_is_main_thread());
- #if !(defined(_WIN32) && defined(MI_SHARED_LIB)) // use Dll process detach (see below) instead of atexit (issue #521)
- atexit(&mi_process_done);
- #endif
_mi_options_init();
mi_process_setup_auto_thread_done();
mi_process_init();
- if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");
+ if (_mi_is_redirected()) _mi_verbose_message("malloc is redirected.\n");
// show message from the redirector (if present)
const char* msg = NULL;
- mi_allocator_init(&msg);
+ _mi_allocator_init(&msg);
if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) {
_mi_fputs(NULL,NULL,NULL,msg);
}
@@ -585,12 +586,15 @@ static void mi_process_load(void) {
#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
mi_decl_cache_align bool _mi_cpu_has_fsrm = false;
+mi_decl_cache_align bool _mi_cpu_has_erms = false;
static void mi_detect_cpu_features(void) {
- // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017))
+ // FSRM for fast short rep movsb/stosb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017))
+ // ERMS for fast enhanced rep movsb/stosb support
int32_t cpu_info[4];
__cpuid(cpu_info, 7);
_mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see <https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
+ _mi_cpu_has_erms = ((cpu_info[1] & (1 << 9)) != 0); // bit 9 of EBX : see <https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
}
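// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] The hunk above is the MSVC path; for
// reference, the same test with GCC/clang's <cpuid.h>. Per the Intel SDM, leaf 7
// subleaf 0 reports ERMS in EBX bit 9 and FSRM in EDX bit 4.
#include <cpuid.h>
#include <stdbool.h>

static void detect_cpu_features_sketch(bool* has_fsrm, bool* has_erms) {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  *has_fsrm = false; *has_erms = false;
  if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
    *has_erms = ((ebx & (1u << 9)) != 0);   // enhanced rep movsb/stosb
    *has_fsrm = ((edx & (1u << 4)) != 0);   // fast short rep movsb/stosb
  }
}
// ----------------------------------------------------------------------------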
#else
static void mi_detect_cpu_features(void) {
@@ -651,7 +655,7 @@ void mi_process_init(void) mi_attr_noexcept {
}
// Called when the process is done (through `at_exit`)
-static void mi_cdecl mi_process_done(void) {
+void mi_cdecl _mi_process_done(void) {
// only shutdown if we were initialized
if (!_mi_process_is_initialized) return;
// ensure we are called once
@@ -659,15 +663,20 @@ static void mi_cdecl mi_process_done(void) {
if (process_done) return;
process_done = true;
+ // get the default heap so we don't need to access thread locals anymore
+ mi_heap_t* heap = mi_prim_get_default_heap(); // use prim to not initialize any heap
+ mi_assert_internal(heap != NULL);
+
// release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
_mi_prim_thread_done_auto_done();
+
#ifndef MI_SKIP_COLLECT_ON_EXIT
#if (MI_DEBUG || !defined(MI_SHARED_LIB))
// free all memory if possible on process exit. This is not needed for a stand-alone process
// but should be done if mimalloc is statically linked into another shared library which
// is repeatedly loaded/unloaded, see issue #281.
- mi_collect(true /* force */ );
+ mi_heap_collect(heap, true /* force */ );
#endif
#endif
@@ -675,72 +684,17 @@ static void mi_cdecl mi_process_done(void) {
// since after process_done there might still be other code running that calls `free` (like at_exit routines,
// or C-runtime termination code.
if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
- mi_collect(true /* force */);
- _mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!)
- _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats);
+ mi_heap_collect(heap, true /* force */);
+ _mi_heap_unsafe_destroy_all(heap); // forcefully release all memory held by all heaps (of this thread only!)
+ _mi_arena_unsafe_destroy_all();
+ _mi_segment_map_unsafe_destroy();
}
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
mi_stats_print(NULL);
}
- mi_allocator_done();
+ _mi_allocator_done();
_mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
os_preloading = true; // don't call the C runtime anymore
}
-
-
-#if defined(_WIN32) && defined(MI_SHARED_LIB)
- // Windows DLL: easy to hook into process_init and thread_done
- __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
- MI_UNUSED(reserved);
- MI_UNUSED(inst);
- if (reason==DLL_PROCESS_ATTACH) {
- mi_process_load();
- }
- else if (reason==DLL_PROCESS_DETACH) {
- mi_process_done();
- }
- else if (reason==DLL_THREAD_DETACH) {
- if (!mi_is_redirected()) {
- mi_thread_done();
- }
- }
- return TRUE;
- }
-
-#elif defined(_MSC_VER)
- // MSVC: use data section magic for static libraries
- // See
- static int _mi_process_init(void) {
- mi_process_load();
- return 0;
- }
- typedef int(*_mi_crt_callback_t)(void);
- #if defined(_M_X64) || defined(_M_ARM64)
- __pragma(comment(linker, "/include:" "_mi_msvc_initu"))
- #pragma section(".CRT$XIU", long, read)
- #else
- __pragma(comment(linker, "/include:" "__mi_msvc_initu"))
- #endif
- #pragma data_seg(".CRT$XIU")
- mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init };
- #pragma data_seg()
-
-#elif defined(__cplusplus)
- // C++: use static initialization to detect process start
- static bool _mi_process_init(void) {
- mi_process_load();
- return (_mi_heap_main.thread_id != 0);
- }
- static bool mi_initialized = _mi_process_init();
-
-#elif defined(__GNUC__) || defined(__clang__)
- // GCC,Clang: use the constructor attribute
- static void __attribute__((constructor)) _mi_process_init(void) {
- mi_process_load();
- }
-
-#else
-#pragma message("define a way to call mi_process_load on your platform")
-#endif
diff --git a/src/libc.c b/src/libc.c
index dd6b4007..ce541f1b 100644
--- a/src/libc.c
+++ b/src/libc.c
@@ -130,7 +130,7 @@ static void mi_out_alignright(char fill, char* start, size_t len, size_t extra,
}
-static void mi_out_num(uintptr_t x, size_t base, char prefix, char** out, char* end)
+static void mi_out_num(uintmax_t x, size_t base, char prefix, char** out, char* end)
{
if (x == 0 || base == 0 || base > 16) {
if (prefix != 0) { mi_outc(prefix, out, end); }
@@ -206,12 +206,13 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) {
}
else if (c == 'p' || c == 'x' || c == 'u') {
// unsigned
- uintptr_t x = 0;
+ uintmax_t x = 0;
if (c == 'x' || c == 'u') {
if (numtype == 'z') x = va_arg(args, size_t);
else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t
- else if (numtype == 'L') x = (uintptr_t)va_arg(args, unsigned long long);
- else x = va_arg(args, unsigned long);
+ else if (numtype == 'L') x = va_arg(args, unsigned long long);
+ else if (numtype == 'l') x = va_arg(args, unsigned long);
+ else x = va_arg(args, unsigned int);
}
else if (c == 'p') {
x = va_arg(args, uintptr_t);
@@ -228,20 +229,21 @@ void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) {
}
else if (c == 'i' || c == 'd') {
// signed
- intptr_t x = 0;
+ intmax_t x = 0;
if (numtype == 'z') x = va_arg(args, intptr_t );
else if (numtype == 't') x = va_arg(args, ptrdiff_t);
- else if (numtype == 'L') x = (intptr_t)va_arg(args, long long);
- else x = va_arg(args, long);
+ else if (numtype == 'L') x = va_arg(args, long long);
+ else if (numtype == 'l') x = va_arg(args, long);
+ else x = va_arg(args, int);
char pre = 0;
if (x < 0) {
pre = '-';
- if (x > INTPTR_MIN) { x = -x; }
+ if (x > INTMAX_MIN) { x = -x; }
}
else if (numplus != 0) {
pre = numplus;
}
- mi_out_num((uintptr_t)x, 10, pre, &out, end);
+ mi_out_num((uintmax_t)x, 10, pre, &out, end);
}
else if (c >= ' ' && c <= '~') {
// unknown format
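// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] Why the new plain-`int` fallbacks above
// matter: variadic arguments undergo default argument promotions, so a bare %u/%d must
// be read back as (unsigned) int; fetching `unsigned long` for an `int` argument is
// undefined behavior on LP64 targets where the sizes differ. A minimal illustration:
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

static void print_unsigned(const char* modifiers, ...) {
  va_list args;
  va_start(args, modifiers);
  for (const char* m = modifiers; *m != 0; m++) {
    uintmax_t x;
    if (*m == 'z')      x = va_arg(args, size_t);
    else if (*m == 'L') x = va_arg(args, unsigned long long);
    else if (*m == 'l') x = va_arg(args, unsigned long);
    else                x = va_arg(args, unsigned int);  // after default promotion
    printf("%ju\n", x);
  }
  va_end(args);
}

int main(void) {
  print_unsigned("ulL", 42u, 43ul, 44ull);  // 'u' takes the unsigned-int branch
  return 0;
}
// ----------------------------------------------------------------------------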
diff --git a/src/options.c b/src/options.c
index 71c43e9c..5d50d091 100644
--- a/src/options.c
+++ b/src/options.c
@@ -47,6 +47,58 @@ typedef struct mi_option_desc_s {
#define MI_OPTION(opt) mi_option_##opt, #opt, NULL
#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy
+// Some options can be set at build time for statically linked libraries
+// (use `-DMI_EXTRA_CPPDEFS="opt1=val1;opt2=val2"`)
+//
+// This is useful if we cannot pass them as environment variables
+// (and setting them programmatically would be too late)
+
+#ifndef MI_DEFAULT_VERBOSE
+#define MI_DEFAULT_VERBOSE 0
+#endif
+
+#ifndef MI_DEFAULT_EAGER_COMMIT
+#define MI_DEFAULT_EAGER_COMMIT 1
+#endif
+
+#ifndef MI_DEFAULT_ARENA_EAGER_COMMIT
+#define MI_DEFAULT_ARENA_EAGER_COMMIT 2
+#endif
+
+// in KiB
+#ifndef MI_DEFAULT_ARENA_RESERVE
+ #if (MI_INTPTR_SIZE>4)
+ #define MI_DEFAULT_ARENA_RESERVE 1024L*1024L
+ #else
+ #define MI_DEFAULT_ARENA_RESERVE 128L*1024L
+ #endif
+#endif
+
+#ifndef MI_DEFAULT_DISALLOW_ARENA_ALLOC
+#define MI_DEFAULT_DISALLOW_ARENA_ALLOC 0
+#endif
+
+#ifndef MI_DEFAULT_ALLOW_LARGE_OS_PAGES
+#define MI_DEFAULT_ALLOW_LARGE_OS_PAGES 0
+#endif
+
+#ifndef MI_DEFAULT_RESERVE_HUGE_OS_PAGES
+#define MI_DEFAULT_RESERVE_HUGE_OS_PAGES 0
+#endif
+
+#ifndef MI_DEFAULT_RESERVE_OS_MEMORY
+#define MI_DEFAULT_RESERVE_OS_MEMORY 0
+#endif
+
+#ifndef MI_DEFAULT_GUARDED_SAMPLE_RATE
+#if MI_GUARDED
+#define MI_DEFAULT_GUARDED_SAMPLE_RATE 4000
+#else
+#define MI_DEFAULT_GUARDED_SAMPLE_RATE 0
+#endif
+#endif
+
+
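// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] How these defaults are meant to compose
// (an assumption based on the comment above): each MI_DEFAULT_* macro only takes effect
// when the build pre-defines it, e.g.
//   cc -DMI_DEFAULT_VERBOSE=1 -DMI_DEFAULT_EAGER_COMMIT=0 ...
// or, via the CMake option, -DMI_EXTRA_CPPDEFS="MI_DEFAULT_VERBOSE=1;MI_DEFAULT_EAGER_COMMIT=0".
// The underlying pattern, reduced to its core:
#include <stdio.h>

#ifndef MY_DEFAULT_VERBOSE
#define MY_DEFAULT_VERBOSE 0        // fallback when the build does not override it
#endif

int main(void) {
  printf("verbose default: %d\n", MY_DEFAULT_VERBOSE);  // baked in at compile time;
  return 0;                         // environment variables can still override at runtime
}
// ----------------------------------------------------------------------------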
static mi_option_desc_t options[_mi_option_last] =
{
// stable options
@@ -56,16 +108,21 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(show_errors) },
#endif
{ 0, UNINIT, MI_OPTION(show_stats) },
- { 0, UNINIT, MI_OPTION(verbose) },
+ { MI_DEFAULT_VERBOSE, UNINIT, MI_OPTION(verbose) },
- // the following options are experimental and not all combinations make sense.
- { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
- { 2, UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. linux)
+ // some of the following options are experimental and not all combinations are allowed.
+ { MI_DEFAULT_EAGER_COMMIT,
+ UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
+ { MI_DEFAULT_ARENA_EAGER_COMMIT,
+ UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arenas? 2 is used to enable this only on an OS that has overcommit (i.e. Linux)
{ 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit)
- { 0, UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
- { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
+ { MI_DEFAULT_ALLOW_LARGE_OS_PAGES,
+ UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMAs
+ { MI_DEFAULT_RESERVE_HUGE_OS_PAGES,
+ UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
{-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
- { 0, UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`)
+ { MI_DEFAULT_RESERVE_OS_MEMORY,
+ UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`)
{ 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
{ 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_purge) }, // purge free page memory when a thread terminates
@@ -83,22 +140,24 @@ static mi_option_desc_t options[_mi_option_last] =
{ 32, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
{ 10, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try.
{ 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
- #if (MI_INTPTR_SIZE>4)
- { 1024L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
- #else
- { 128L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // =128MiB on 32-bit
- #endif
-
+ { MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
{ 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's
{ 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
{ 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
- { 0, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's)
+ { MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arenas for allocation (except when using specific arena ids)
{ 400, UNINIT, MI_OPTION(retry_on_oom) }, // Windows only: retry on out-of-memory for N milliseconds (=400), set to 0 to disable retries.
#if defined(MI_VISIT_ABANDONED)
{ 1, INITIALIZED, MI_OPTION(visit_abandoned) }, // allow visiting heap blocks in abandoned segments; requires taking locks during reclaim.
#else
{ 0, UNINIT, MI_OPTION(visit_abandoned) },
#endif
+ { 0, UNINIT, MI_OPTION(guarded_min) }, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects
+ { MI_GiB, UNINIT, MI_OPTION(guarded_max) }, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects
+ { 0, UNINIT, MI_OPTION(guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0)
+ { MI_DEFAULT_GUARDED_SAMPLE_RATE,
+ UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
+ { 0, UNINIT, MI_OPTION(guarded_sample_seed)},
+ { 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable.
};
static void mi_option_init(mi_option_desc_t* desc);
@@ -108,8 +167,7 @@ static bool mi_option_has_size_in_kib(mi_option_t option) {
}
void _mi_options_init(void) {
- // called on process load; should not be called before the CRT is initialized!
- // (e.g. do not call this from process_init as that may run before CRT initialization)
+ // called on process load
mi_add_stderr_output(); // now it safe to use stderr for output
for(int i = 0; i < _mi_option_last; i++ ) {
mi_option_t option = (mi_option_t)i;
@@ -122,8 +180,26 @@ void _mi_options_init(void) {
}
mi_max_error_count = mi_option_get(mi_option_max_errors);
mi_max_warning_count = mi_option_get(mi_option_max_warnings);
+ #if MI_GUARDED
+ if (mi_option_get(mi_option_guarded_sample_rate) > 0) {
+ if (mi_option_is_enabled(mi_option_allow_large_os_pages)) {
+ mi_option_disable(mi_option_allow_large_os_pages);
+ _mi_warning_message("option 'allow_large_os_pages' is disabled to allow for guarded objects\n");
+ }
+ }
+ _mi_verbose_message("guarded build: %s\n", mi_option_get(mi_option_guarded_sample_rate) != 0 ? "enabled" : "disabled");
+ #endif
}
+long _mi_option_get_fast(mi_option_t option) {
+ mi_assert(option >= 0 && option < _mi_option_last);
+ mi_option_desc_t* desc = &options[option];
+ mi_assert(desc->option == option); // index should match the option
+ //mi_assert(desc->init != UNINIT);
+ return desc->value;
+}
+
+
mi_decl_nodiscard long mi_option_get(mi_option_t option) {
mi_assert(option >= 0 && option < _mi_option_last);
if (option < 0 || option >= _mi_option_last) return 0;
@@ -141,7 +217,6 @@ mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long ma
}
mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) {
- mi_assert_internal(mi_option_has_size_in_kib(option));
const long x = mi_option_get(option);
size_t size = (x < 0 ? 0 : (size_t)x);
if (mi_option_has_size_in_kib(option)) {
@@ -157,6 +232,13 @@ void mi_option_set(mi_option_t option, long value) {
mi_assert(desc->option == option); // index should match the option
desc->value = value;
desc->init = INITIALIZED;
+ // ensure min/max range; be careful to not recurse.
+ if (desc->option == mi_option_guarded_min && _mi_option_get_fast(mi_option_guarded_max) < value) {
+ mi_option_set(mi_option_guarded_max, value);
+ }
+ else if (desc->option == mi_option_guarded_max && _mi_option_get_fast(mi_option_guarded_min) > value) {
+ mi_option_set(mi_option_guarded_min, value);
+ }
}
void mi_option_set_default(mi_option_t option, long value) {
@@ -506,8 +588,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
value = (size > LONG_MAX ? LONG_MAX : (long)size);
}
if (*end == 0) {
- desc->value = value;
- desc->init = INITIALIZED;
+ mi_option_set(desc->option, value);
}
else {
// set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose.
diff --git a/src/os.c b/src/os.c
index 4babd8da..77469775 100644
--- a/src/os.c
+++ b/src/os.c
@@ -9,18 +9,38 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"
+#define mi_os_stat_increase(stat,amount) _mi_stat_increase(&_mi_stats_main.stat, amount)
+#define mi_os_stat_decrease(stat,amount) _mi_stat_decrease(&_mi_stats_main.stat, amount)
+#define mi_os_stat_counter_increase(stat,inc) _mi_stat_counter_increase(&_mi_stats_main.stat, inc)
/* -----------------------------------------------------------
- Initialization.
+ Initialization.
----------------------------------------------------------- */
+#ifndef MI_DEFAULT_VIRTUAL_ADDRESS_BITS
+#if MI_INTPTR_SIZE < 8
+#define MI_DEFAULT_VIRTUAL_ADDRESS_BITS 32
+#else
+#define MI_DEFAULT_VIRTUAL_ADDRESS_BITS 48
+#endif
+#endif
+
+#ifndef MI_DEFAULT_PHYSICAL_MEMORY
+#if MI_INTPTR_SIZE < 8
+#define MI_DEFAULT_PHYSICAL_MEMORY 4*MI_GiB
+#else
+#define MI_DEFAULT_PHYSICAL_MEMORY 32*MI_GiB
+#endif
+#endif
static mi_os_mem_config_t mi_os_mem_config = {
- 4096, // page size
- 0, // large page size (usually 2MiB)
- 4096, // allocation granularity
- true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
- false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
- true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
+ 4096, // page size
+ 0, // large page size (usually 2MiB)
+ 4096, // allocation granularity
+ MI_DEFAULT_PHYSICAL_MEMORY,
+ MI_DEFAULT_VIRTUAL_ADDRESS_BITS,
+ true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
+ false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
+ true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
};
bool _mi_os_has_overcommit(void) {
@@ -68,8 +88,8 @@ void _mi_os_init(void) {
/* -----------------------------------------------------------
Util
-------------------------------------------------------------- */
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
+bool _mi_os_decommit(void* addr, size_t size);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero);
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
@@ -91,9 +111,10 @@ static void* mi_align_down_ptr(void* p, size_t alignment) {
aligned hinting
-------------------------------------------------------------- */
-// On 64-bit systems, we can do efficient aligned allocation by using
-// the 2TiB to 30TiB area to allocate those.
-#if (MI_INTPTR_SIZE >= 8)
+// On systems with enough virtual address bits, we can do efficient aligned allocation by using
+// the 2TiB to 30TiB area to allocate those. If we have at least 46 bits of virtual address
+// space (64TiB) we use this technique. (but see issue #939)
+#if (MI_INTPTR_SIZE >= 8) && !defined(MI_NO_ALIGNED_HINT)
static mi_decl_cache_align _Atomic(uintptr_t)aligned_base;
// Return a MI_SEGMENT_SIZE aligned address that is probably available.
@@ -110,6 +131,7 @@ static mi_decl_cache_align _Atomic(uintptr_t)aligned_base;
void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size)
{
if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL;
+ if (mi_os_mem_config.virtual_address_bits < 46) return NULL; // < 64TiB virtual address space
size = _mi_align_up(size, MI_SEGMENT_SIZE);
if (size > 1*MI_GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096.
#if (MI_SECURE>0)
@@ -137,45 +159,50 @@ void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
}
#endif
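// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] The hint scheme described above, reduced
// to its essence (the real `_mi_os_get_aligned_hint` adds checks and secure-mode
// randomization): an atomic bump pointer in the 2 TiB..30 TiB window hands out aligned
// addresses so that mmap/VirtualAlloc will usually return an already-aligned block.
#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

#define HINT_BASE ((uintptr_t)2 << 40)    // 2 TiB
#define HINT_MAX  ((uintptr_t)30 << 40)   // 30 TiB (stay well inside a 47-bit user space)

static _Atomic(uintptr_t) hint_base = HINT_BASE;

static void* aligned_hint_sketch(size_t size, size_t alignment) {
  uintptr_t hint = atomic_fetch_add(&hint_base, (uintptr_t)size);
  if (hint > HINT_MAX) {                  // wrap when exhausted (benign race: just a hint)
    atomic_store(&hint_base, HINT_BASE);
    hint = HINT_BASE;
  }
  hint = (hint + alignment - 1) & ~((uintptr_t)alignment - 1);  // alignment: power of 2
  return (void*)hint;
}
// ----------------------------------------------------------------------------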
-
/* -----------------------------------------------------------
Free memory
-------------------------------------------------------------- */
-static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats);
+static void mi_os_free_huge_os_pages(void* p, size_t size);
-static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) {
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
+static void mi_os_prim_free(void* addr, size_t size, size_t commit_size) {
mi_assert_internal((size % _mi_os_page_size()) == 0);
if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
int err = _mi_prim_free(addr, size);
if (err != 0) {
_mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
}
- if (still_committed) { _mi_stat_decrease(&stats->committed, size); }
- _mi_stat_decrease(&stats->reserved, size);
+ if (commit_size > 0) {
+ mi_os_stat_decrease(committed, commit_size);
+ }
+ mi_os_stat_decrease(reserved, size);
}
-void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats) {
- if (stats == NULL) stats = &_mi_stats_main;
+void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
if (mi_memkind_is_os(memid.memkind)) {
- size_t csize = _mi_os_good_alloc_size(size);
+ size_t csize = memid.mem.os.size;
+ if (csize==0) { csize = _mi_os_good_alloc_size(size); }
+ size_t commit_size = (still_committed ? csize : 0);
void* base = addr;
// different base? (due to alignment)
- if (memid.mem.os.base != NULL) {
- mi_assert(memid.mem.os.base <= addr);
- mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr);
+ if (memid.mem.os.base != base) {
+ mi_assert(memid.mem.os.base <= addr);
base = memid.mem.os.base;
- csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base);
+ const size_t diff = (uint8_t*)addr - (uint8_t*)memid.mem.os.base;
+ if (memid.mem.os.size==0) {
+ csize += diff;
+ }
+ if (still_committed) {
+ commit_size -= diff; // the (addr-base) part was already un-committed
+ }
}
// free it
if (memid.memkind == MI_MEM_OS_HUGE) {
mi_assert(memid.is_pinned);
- mi_os_free_huge_os_pages(base, csize, stats);
+ mi_os_free_huge_os_pages(base, csize);
}
else {
- mi_os_prim_free(base, csize, still_committed, stats);
+ mi_os_prim_free(base, csize, (still_committed ? commit_size : 0));
}
}
else {
@@ -184,9 +211,8 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
}
}
-void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
- if (stats == NULL) stats = &_mi_stats_main;
- _mi_os_free_ex(p, size, true, memid, stats);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid) {
+ _mi_os_free_ex(p, size, true, memid);
}
@@ -195,7 +221,8 @@ void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
-------------------------------------------------------------- */
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
-static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) {
+// Also `hint_addr` is a hint and may be ignored.
+static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(is_zero != NULL);
mi_assert_internal(is_large != NULL);
@@ -204,18 +231,18 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning
*is_zero = false;
void* p = NULL;
- int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
+ int err = _mi_prim_alloc(hint_addr, size, try_alignment, commit, allow_large, is_large, is_zero, &p);
if (err != 0) {
- _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
+ _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), addr: %p, size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, hint_addr, size, try_alignment, commit, allow_large);
}
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- mi_stat_counter_increase(stats->mmap_calls, 1);
+
+
+ mi_os_stat_counter_increase(mmap_calls, 1);
if (p != NULL) {
- _mi_stat_increase(&stats->reserved, size);
+ mi_os_stat_increase(reserved, size);
if (commit) {
- _mi_stat_increase(&stats->committed, size);
+ mi_os_stat_increase(committed, size);
// seems needed for asan (or `mimalloc-test-api` fails)
#ifdef MI_TRACK_ASAN
if (*is_zero) { mi_track_mem_defined(p,size); }
@@ -226,10 +253,14 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
return p;
}
+static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
+ return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero);
+}
+
// Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned.
-static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) {
+static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base) {
mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(is_large != NULL);
@@ -239,8 +270,8 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
size = _mi_align_up(size, _mi_os_page_size());
- // try first with a hint (this will be aligned directly on Win 10+ or BSD)
- void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
+ // try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
+ void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero);
if (p == NULL) return NULL;
// aligned already?
@@ -249,14 +280,16 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
}
else {
// if not aligned, free it, overallocate, and unmap around it
+ #if !MI_TRACK_ASAN
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
- mi_os_prim_free(p, size, commit, stats);
+ #endif
+ if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0)); }
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
const size_t over_size = size + alignment;
if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block
// over-allocate uncommitted (virtual) memory
- p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
+ p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero);
if (p == NULL) return NULL;
// set p to the aligned part in the full region
@@ -267,22 +300,22 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
// explicitly commit only the aligned part
if (commit) {
- _mi_os_commit(p, size, NULL, stats);
+ _mi_os_commit(p, size, NULL);
}
}
else { // mmap can free inside an allocation
// overallocate...
- p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
+ p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero);
if (p == NULL) return NULL;
- // and selectively unmap parts around the over-allocated area.
+ // and selectively unmap parts around the over-allocated area.
void* aligned_p = mi_align_up_ptr(p, alignment);
size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
size_t mid_size = _mi_align_up(size, _mi_os_page_size());
size_t post_size = over_size - pre_size - mid_size;
mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size);
- if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit, stats); }
- if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
+ if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0)); }
+ if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0)); }
// we can return the aligned pointer on `mmap` systems
p = aligned_p;
*base = aligned_p; // since we freed the pre part, `*base == p`.
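// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] The mmap branch above, as a standalone
// fragment: over-allocate by `alignment`, then unmap the misaligned head and the tail
// so exactly the aligned middle remains. Assumes `size` and `alignment` are multiples
// of the OS page size and `alignment` is a power of two (POSIX only).
#include <sys/mman.h>
#include <stdint.h>
#include <stddef.h>

static void* alloc_aligned_sketch(size_t size, size_t alignment) {
  const size_t over_size = size + alignment;
  uint8_t* p = (uint8_t*)mmap(NULL, over_size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return NULL;
  uint8_t* aligned = (uint8_t*)(((uintptr_t)p + (alignment - 1)) & ~((uintptr_t)alignment - 1));
  const size_t pre_size  = (size_t)(aligned - p);
  const size_t post_size = over_size - pre_size - size;
  if (pre_size  > 0) { munmap(p, pre_size); }                 // trim the head
  if (post_size > 0) { munmap(aligned + size, post_size); }   // trim the tail
  return aligned;   // mmap systems can free parts of a mapping independently
}
// ----------------------------------------------------------------------------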
@@ -298,37 +331,36 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
OS API: alloc and alloc_aligned
----------------------------------------------------------- */
-void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
+void* _mi_os_alloc(size_t size, mi_memid_t* memid) {
*memid = _mi_memid_none();
if (size == 0) return NULL;
- if (stats == NULL) stats = &_mi_stats_main;
size = _mi_os_good_alloc_size(size);
bool os_is_large = false;
bool os_is_zero = false;
- void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
+ void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero);
if (p != NULL) {
*memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
}
return p;
}
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats)
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid)
{
MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
*memid = _mi_memid_none();
if (size == 0) return NULL;
- if (stats == NULL) stats = &_mi_stats_main;
size = _mi_os_good_alloc_size(size);
alignment = _mi_align_up(alignment, _mi_os_page_size());
bool os_is_large = false;
bool os_is_zero = false;
void* os_base = NULL;
- void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, stats );
+ void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base );
if (p != NULL) {
*memid = _mi_memid_create_os(commit, os_is_zero, os_is_large);
memid->mem.os.base = os_base;
- memid->mem.os.alignment = alignment;
+ // memid->mem.os.alignment = alignment;
+ memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned
}
return p;
}
@@ -341,29 +373,28 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
to use the actual start of the memory region.
----------------------------------------------------------- */
-void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) {
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid) {
mi_assert(offset <= MI_SEGMENT_SIZE);
mi_assert(offset <= size);
mi_assert((alignment % _mi_os_page_size()) == 0);
*memid = _mi_memid_none();
- if (stats == NULL) stats = &_mi_stats_main;
if (offset > MI_SEGMENT_SIZE) return NULL;
if (offset == 0) {
// regular aligned allocation
- return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, stats);
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
}
else {
// overallocate to align at an offset
const size_t extra = _mi_align_up(offset, alignment) - offset;
const size_t oversize = size + extra;
- void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, stats);
+ void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid);
if (start == NULL) return NULL;
void* const p = (uint8_t*)start + extra;
mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
// decommit the overallocation at the start
if (commit && extra > _mi_os_page_size()) {
- _mi_os_decommit(start, extra, stats);
+ _mi_os_decommit(start, extra);
}
return p;
}
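// ----------------------------------------------------------------------------
// [editorial note -- not part of the patch] The offset arithmetic above, worked through:
// to make (p + offset) aligned, over-allocate by extra = align_up(offset, alignment) - offset
// and return p = start + extra; then p + offset == start + align_up(offset, alignment).
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t align_up(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

int main(void) {
  const size_t alignment = 4096, offset = 24;
  const size_t extra = align_up(offset, alignment) - offset;  // 4096 - 24 = 4072
  const uintptr_t start = (uintptr_t)1 << 20;   // pretend this came back suitably aligned
  const uintptr_t p = start + extra;
  assert((p + offset) % alignment == 0);        // p itself is not aligned; p + offset is
  return 0;
}
// ----------------------------------------------------------------------------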
@@ -397,12 +428,10 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
return mi_os_page_align_areax(true, addr, size, newsize);
}
-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
+bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size) {
if (is_zero != NULL) { *is_zero = false; }
- _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit
- _mi_stat_counter_increase(&stats->commit_calls, 1);
+ mi_os_stat_increase(committed, stat_size); // use size for precise commit vs. decommit
+ mi_os_stat_counter_increase(commit_calls, 1);
// page align range
size_t csize;
@@ -428,11 +457,13 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
return true;
}
-static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) {
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
+ return _mi_os_commit_ex(addr, size, is_zero, size);
+}
+
+static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, size_t stat_size) {
mi_assert_internal(needs_recommit!=NULL);
- _mi_stat_decrease(&stats->committed, size);
+ mi_os_stat_decrease(committed, stat_size);
// page align
size_t csize;
@@ -449,9 +480,9 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
return (err == 0);
}
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
+bool _mi_os_decommit(void* addr, size_t size) {
bool needs_recommit;
- return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats);
+ return mi_os_decommit_ex(addr, size, &needs_recommit, size);
}
@@ -459,13 +490,13 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
+bool _mi_os_reset(void* addr, size_t size) {
// page align conservatively within the range
size_t csize;
void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr)
- _mi_stat_increase(&stats->reset, csize);
- _mi_stat_counter_increase(&stats->reset_calls, 1);
+ mi_os_stat_increase(reset, csize);
+ mi_os_stat_counter_increase(reset_calls, 1);
#if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN
memset(start, 0, csize); // pretend it is eagerly reset
@@ -481,22 +512,22 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size)
{
if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed?
- _mi_stat_counter_increase(&stats->purge_calls, 1);
- _mi_stat_increase(&stats->purged, size);
+ mi_os_stat_counter_increase(purge_calls, 1);
+ mi_os_stat_increase(purged, size);
if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
!_mi_preloading()) // don't decommit during preloading (unsafe)
{
bool needs_recommit = true;
- mi_os_decommit_ex(p, size, &needs_recommit, stats);
+ mi_os_decommit_ex(p, size, &needs_recommit, stat_size);
return needs_recommit;
}
else {
if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed
- _mi_os_reset(p, size, stats);
+ _mi_os_reset(p, size);
}
return false; // needs no recommit
}
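// ----------------------------------------------------------------------------
// [editorial sketch -- not part of the patch] The caller-side contract of the reworked
// purge API (hypothetical `span_t` wrapper): a purge either decommits -- the range must
// be re-committed before reuse -- or merely resets, leaving it committed with undefined
// contents. The extern declarations match the signatures introduced in this diff.
#include <stdbool.h>
#include <stddef.h>

extern bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size);
extern bool _mi_os_commit(void* addr, size_t size, bool* is_zero);

typedef struct span_s { void* p; size_t size; bool committed; } span_t;

static void span_purge(span_t* s) {
  if (_mi_os_purge_ex(s->p, s->size, true, s->size)) {
    s->committed = false;   // decommitted: must recommit before touching the memory
  }
}

static void* span_reuse(span_t* s) {
  if (!s->committed) {
    bool is_zero = false;
    if (!_mi_os_commit(s->p, s->size, &is_zero)) return NULL;
    s->committed = true;
  }
  return s->p;              // after a reset the contents are undefined but addressable
}
// ----------------------------------------------------------------------------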
@@ -504,8 +535,8 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
-bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) {
- return _mi_os_purge_ex(p, size, true, stats);
+bool _mi_os_purge(void* p, size_t size) {
+ return _mi_os_purge_ex(p, size, true, size);
}
@@ -613,15 +644,15 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// no success, issue a warning and break
if (p != NULL) {
_mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
- mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main);
+ mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
}
break;
}
// success, record it
page++; // increase before timeout check (see issue #711)
- _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
- _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
+ mi_os_stat_increase(committed, MI_HUGE_OS_PAGE_SIZE);
+ mi_os_stat_increase(reserved, MI_HUGE_OS_PAGE_SIZE);
// check for timeout
if (max_msecs > 0) {
@@ -655,11 +686,11 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// free every huge page in a range individually (as we allocated per page)
// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
-static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) {
+static void mi_os_free_huge_os_pages(void* p, size_t size) {
if (p==NULL || size==0) return;
uint8_t* base = (uint8_t*)p;
while (size >= MI_HUGE_OS_PAGE_SIZE) {
- mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats);
+ mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE);
size -= MI_HUGE_OS_PAGE_SIZE;
base += MI_HUGE_OS_PAGE_SIZE;
}
@@ -688,8 +719,7 @@ size_t _mi_os_numa_node_count_get(void) {
return count;
}
-int _mi_os_numa_node_get(mi_os_tld_t* tld) {
- MI_UNUSED(tld);
+int _mi_os_numa_node_get(void) {
size_t numa_count = _mi_os_numa_node_count();
if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
// never more than the node count and >= 0
diff --git a/src/page-queue.c b/src/page-queue.c
index 02a8008d..67b54650 100644
--- a/src/page-queue.c
+++ b/src/page-queue.c
@@ -259,8 +259,16 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
heap->page_count++;
}
+static void mi_page_queue_move_to_front(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
+ mi_assert_internal(mi_page_heap(page) == heap);
+ mi_assert_internal(mi_page_queue_contains(queue, page));
+ if (queue->first == page) return;
+ mi_page_queue_remove(queue, page);
+ mi_page_queue_push(heap, queue, page);
+ mi_assert_internal(queue->first == page);
+}
-static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(from, page));
mi_assert_expensive(!mi_page_queue_contains(to, page));
@@ -273,6 +281,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
(mi_page_is_huge(page) && mi_page_queue_is_full(to)));
mi_heap_t* heap = mi_page_heap(page);
+
+ // delete from `from`
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == from->last) from->last = page->prev;
@@ -283,22 +293,59 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_heap_queue_first_update(heap, from);
}
- page->prev = to->last;
- page->next = NULL;
- if (to->last != NULL) {
- mi_assert_internal(heap == mi_page_heap(to->last));
- to->last->next = page;
- to->last = page;
+ // insert into `to`
+ if (enqueue_at_end) {
+ // enqueue at the end
+ page->prev = to->last;
+ page->next = NULL;
+ if (to->last != NULL) {
+ mi_assert_internal(heap == mi_page_heap(to->last));
+ to->last->next = page;
+ to->last = page;
+ }
+ else {
+ to->first = page;
+ to->last = page;
+ mi_heap_queue_first_update(heap, to);
+ }
}
else {
- to->first = page;
- to->last = page;
- mi_heap_queue_first_update(heap, to);
+ if (to->first != NULL) {
+ // enqueue at 2nd place
+ mi_assert_internal(heap == mi_page_heap(to->first));
+ mi_page_t* next = to->first->next;
+ page->prev = to->first;
+ page->next = next;
+ to->first->next = page;
+ if (next != NULL) {
+ next->prev = page;
+ }
+ else {
+ to->last = page;
+ }
+ }
+ else {
+ // enqueue at the head (singleton list)
+ page->prev = NULL;
+ page->next = NULL;
+ to->first = page;
+ to->last = page;
+ mi_heap_queue_first_update(heap, to);
+ }
}
mi_page_set_in_full(page, mi_page_queue_is_full(to));
}
+static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+ mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end */, page);
+}
+
+static void mi_page_queue_enqueue_from_full(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
+ // note: we could insert at the front to increase reuse, but it slows down certain benchmarks (like `alloc-test`)
+ mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end of the `to` queue? */, page);
+}
+
// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
mi_assert_internal(mi_heap_contains_queue(heap,pq));
diff --git a/src/page.c b/src/page.c
index 96d1b24c..e1c07a93 100644
--- a/src/page.c
+++ b/src/page.c
@@ -276,7 +276,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
mi_assert_internal(mi_heap_contains_queue(heap, pq));
mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_OBJ_SIZE_MAX || block_size == pq->block_size);
#endif
- mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
+ mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments);
if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
return NULL;
@@ -357,7 +357,7 @@ void _mi_page_unfull(mi_page_t* page) {
mi_page_set_in_full(page, false); // to get the right queue
mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
mi_page_set_in_full(page, true);
- mi_page_queue_enqueue_from(pq, pqfull, page);
+ mi_page_queue_enqueue_from_full(pq, pqfull, page);
}
static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
@@ -403,6 +403,27 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
_mi_segment_page_abandon(page,segments_tld);
}
+// force abandon a page
+void _mi_page_force_abandon(mi_page_t* page) {
+ mi_heap_t* heap = mi_page_heap(page);
+ // mark page as not using delayed free
+ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
+
+ // ensure this page is no longer in the heap delayed free list
+ _mi_heap_delayed_free_all(heap);
+ // We can still access the page meta-info even if it is freed as we ensure
+ // in `mi_segment_force_abandon` that the segment is not freed (yet)
+ if (page->capacity == 0) return; // it may have been freed now
+
+ // and now unlink it from the page queue and abandon (or free)
+ mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+ if (mi_page_all_free(page)) {
+ _mi_page_free(page, pq, false);
+ }
+ else {
+ _mi_page_abandon(page, pq);
+ }
+}
// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
@@ -448,6 +469,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
+ #if MI_RETIRE_CYCLES > 0
const size_t bsize = mi_page_block_size(page);
if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
if (pq->last==page && pq->first==page) { // the only page in the queue?
@@ -463,7 +485,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
return; // don't free after all
}
}
-
+ #endif
_mi_page_free(page, pq, false);
}
@@ -709,6 +731,17 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
Find pages with free blocks
-------------------------------------------------------------*/
+// search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
+#define MI_MAX_CANDIDATE_SEARCH (4)
+
+// is the page not yet used up to its reserved space?
+static bool mi_page_is_expandable(const mi_page_t* page) {
+ mi_assert_internal(page != NULL);
+ mi_assert_internal(page->capacity <= page->reserved);
+ return (page->capacity < page->reserved);
+}
+
+
// Find a page with free blocks of `page->block_size`.
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
@@ -716,39 +749,77 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
#if MI_STAT
size_t count = 0;
#endif
+ size_t candidate_count = 0; // we reset this on the first candidate to limit the search
+ mi_page_t* page_candidate = NULL; // a page with free space
mi_page_t* page = pq->first;
+
while (page != NULL)
{
mi_page_t* next = page->next; // remember next
#if MI_STAT
count++;
#endif
+ candidate_count++;
- // 0. collect freed blocks by us and other threads
+ // collect freed blocks by us and other threads
_mi_page_free_collect(page, false);
- // 1. if the page contains free blocks, we are done
- if (mi_page_immediate_available(page)) {
+ #if MI_MAX_CANDIDATE_SEARCH > 1
+ // search up to N pages for a best candidate
+
+ // is the local free list non-empty?
+ const bool immediate_available = mi_page_immediate_available(page);
+
+ // if the page is completely full, move it to the `mi_pages_full`
+ // queue so we don't visit long-lived pages too often.
+ if (!immediate_available && !mi_page_is_expandable(page)) {
+ mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
+ mi_page_to_full(page, pq);
+ }
+ else {
+ // the page has free space, make it a candidate
+ // we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages)
+ if (page_candidate == NULL) {
+ page_candidate = page;
+ candidate_count = 0;
+ }
+ // prefer to reuse fuller pages (in the hope the less used page gets freed)
+ else if (page->used >= page_candidate->used && !mi_page_is_mostly_used(page) && !mi_page_is_expandable(page)) {
+ page_candidate = page;
+ }
+ // if this page has immediately available blocks, or we have searched enough pages, stop with the best candidate
+ if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) {
+ mi_assert_internal(page_candidate!=NULL);
+ break;
+ }
+ }
+ #else
+ // first-fit algorithm
+ // If the page contains free blocks, we are done
+ if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
break; // pick this one
}
- // 2. Try to extend
- if (page->capacity < page->reserved) {
- mi_page_extend_free(heap, page, heap->tld);
- mi_assert_internal(mi_page_immediate_available(page));
- break;
- }
-
- // 3. If the page is completely full, move it to the `mi_pages_full`
+ // If the page is completely full, move it to the `mi_pages_full`
// queue so we don't visit long-lived pages too often.
mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
mi_page_to_full(page, pq);
+ #endif
page = next;
} // for each page
mi_heap_stat_counter_increase(heap, searches, count);
+ // set the page to the best candidate
+ if (page_candidate != NULL) {
+ page = page_candidate;
+ }
+ if (page != NULL && !mi_page_immediate_available(page)) {
+ mi_assert_internal(mi_page_is_expandable(page));
+ mi_page_extend_free(heap, page, heap->tld);
+ }
+
if (page == NULL) {
_mi_heap_collect_retired(heap, false); // perhaps make a page available
page = mi_page_fresh(heap, pq);
@@ -758,10 +829,14 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
}
}
else {
- mi_assert(pq->first == page);
+ // move the page to the front of the queue
+ mi_page_queue_move_to_front(heap, pq, page);
page->retire_expire = 0;
+ // _mi_heap_collect_retired(heap, false); // update retire counts; note: increases rss on MemoryLoad bench so don't do this
}
mi_assert_internal(page == NULL || mi_page_immediate_available(page));
+
+
return page;
}
@@ -769,7 +844,9 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
// Find a page with free blocks of `size`.
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
- mi_page_queue_t* pq = mi_page_queue(heap,size);
+ mi_page_queue_t* pq = mi_page_queue(heap, size);
+
+ // check the first page: we do this even with candidate search, otherwise we would re-search every time
mi_page_t* page = pq->first;
if (page != NULL) {
#if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
@@ -788,6 +865,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
return page; // fast path
}
}
+
return mi_page_queue_find_free_ex(heap, pq, true);
}
@@ -912,7 +990,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
mi_assert_internal(mi_page_block_size(page) >= size);
// and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
- if mi_unlikely(zero && page->block_size == 0) {
+ if mi_unlikely(zero && mi_page_is_huge(page)) {
// note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
void* p = _mi_page_malloc(heap, page, size);
mi_assert_internal(p != NULL);
diff --git a/src/prim/emscripten/prim.c b/src/prim/emscripten/prim.c
index 944c0cb4..82147de7 100644
--- a/src/prim/emscripten/prim.c
+++ b/src/prim/emscripten/prim.c
@@ -71,8 +71,8 @@ int _mi_prim_free(void* addr, size_t size) {
extern void* emmalloc_memalign(size_t alignment, size_t size);
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
- MI_UNUSED(try_alignment); MI_UNUSED(allow_large); MI_UNUSED(commit);
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
+ MI_UNUSED(try_alignment); MI_UNUSED(allow_large); MI_UNUSED(commit); MI_UNUSED(hint_addr);
*is_large = false;
// TODO: Track the highest address ever seen; first uses of it are zeroes.
// That assumes no one else uses sbrk but us (they could go up,
diff --git a/src/prim/osx/alloc-override-zone.c b/src/prim/osx/alloc-override-zone.c
index 1515b886..d3af170d 100644
--- a/src/prim/osx/alloc-override-zone.c
+++ b/src/prim/osx/alloc-override-zone.c
@@ -418,9 +418,9 @@ static inline malloc_zone_t* mi_get_default_zone(void)
}
#if defined(__clang__)
-__attribute__((constructor(0)))
+__attribute__((constructor(101))) // highest priority
#else
-__attribute__((constructor)) // seems not supported by g++-11 on the M1
+__attribute__((constructor)) // priority level is not supported by gcc
#endif
__attribute__((used))
static void _mi_macos_override_malloc(void) {
diff --git a/src/prim/prim.c b/src/prim/prim.c
index 3b7d3736..2002853f 100644
--- a/src/prim/prim.c
+++ b/src/prim/prim.c
@@ -25,3 +25,52 @@ terms of the MIT license. A copy of the license can be found in the file
#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.)
#endif
+
+// Generic process initialization
+#ifndef MI_PRIM_HAS_PROCESS_ATTACH
+#if defined(__GNUC__) || defined(__clang__)
+ // gcc,clang: use the constructor/destructor attribute
+ // which for both seem to run before regular constructors/destructors
+ #if defined(__clang__)
+ #define mi_attr_constructor __attribute__((constructor(101)))
+ #define mi_attr_destructor __attribute__((destructor(101)))
+ #else
+ #define mi_attr_constructor __attribute__((constructor))
+ #define mi_attr_destructor __attribute__((destructor))
+ #endif
+ static void mi_attr_constructor mi_process_attach(void) {
+ _mi_process_load();
+ }
+ static void mi_attr_destructor mi_process_detach(void) {
+ _mi_process_done();
+ }
+#elif defined(__cplusplus)
+ // C++: use static initialization to detect process start/end
+ // This is not guaranteed to run first/last, but it is the best we can do generically.
+ struct mi_init_done_t {
+ mi_init_done_t() {
+ _mi_process_load();
+ }
+ ~mi_init_done_t() {
+ _mi_process_done();
+ }
+ };
+ static mi_init_done_t mi_init_done;
+ #else
+ #pragma message("define a way to call _mi_process_load/done on your platform")
+#endif
+#endif
+
+// Generic allocator init/done callback
+#ifndef MI_PRIM_HAS_ALLOCATOR_INIT
+bool _mi_is_redirected(void) {
+ return false;
+}
+bool _mi_allocator_init(const char** message) {
+ if (message != NULL) { *message = NULL; }
+ return true;
+}
+void _mi_allocator_done(void) {
+ // nothing to do
+}
+#endif
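The `MI_PRIM_HAS_PROCESS_ATTACH` guard lets a platform layer provide its own process hooks and suppress the generic fallback above; the Windows primitives later in this patch do exactly that. A minimal sketch for a hypothetical platform layer (all names here are invented for illustration):

#define MI_PRIM_HAS_PROCESS_ATTACH 1  // defined by the platform prim.c before this generic part

static void my_platform_process_start(void) {  // hypothetical platform hook
  _mi_process_load();
}
static void my_platform_process_exit(void) {   // hypothetical platform hook
  _mi_process_done();
}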
diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c
index 78080e88..e61904d8 100644
--- a/src/prim/unix/prim.c
+++ b/src/prim/unix/prim.c
@@ -27,6 +27,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include <sys/mman.h> // mmap
#include <unistd.h> // sysconf
#include <fcntl.h> // open, close, read, access
+#include <stdlib.h> // getenv, arc4random_buf
#if defined(__linux__)
#include <features.h>
@@ -139,6 +140,12 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
if (psize > 0) {
config->page_size = (size_t)psize;
config->alloc_granularity = (size_t)psize;
+ #if defined(_SC_PHYS_PAGES)
+ long pphys = sysconf(_SC_PHYS_PAGES);
+ if (pphys > 0 && (size_t)pphys < (SIZE_MAX/(size_t)psize)) {
+ config->physical_memory = (size_t)pphys * (size_t)psize;
+ }
+ #endif
}
config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this?
config->has_overcommit = unix_detect_overcommit();
@@ -181,10 +188,11 @@ int _mi_prim_free(void* addr, size_t size ) {
static int unix_madvise(void* addr, size_t size, int advice) {
#if defined(__sun)
- return madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520)
+ int res = madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520)
#else
- return madvise(addr, size, advice);
+ int res = madvise(addr, size, advice);
#endif
+ return (res==0 ? 0 : errno);
}
static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
@@ -331,7 +339,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
// when large OS pages are enabled for mimalloc, we call `madvise` anyways.
if (allow_large && _mi_os_use_large_page(size, try_alignment)) {
if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) {
- *is_large = true; // possibly
+ // *is_large = true; // possibly
};
}
#elif defined(__sun)
@@ -340,7 +348,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
cmd.mha_pagesize = _mi_os_large_page_size();
cmd.mha_cmd = MHA_MAPSIZE_VA;
if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
- *is_large = true;
+ // *is_large = true; // possibly
}
}
#endif
@@ -350,14 +358,14 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
}
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(commit || !allow_large);
mi_assert_internal(try_alignment > 0);
*is_zero = true;
int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
- *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
+ *addr = unix_mmap(hint_addr, size, try_alignment, protect_flags, false, allow_large, is_large);
return (*addr != NULL ? 0 : errno);
}
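Since `try_alignment` is only a hint, a caller that truly requires alignment must verify the returned pointer itself. A minimal sketch of such a check on top of `_mi_prim_alloc` (the wrapper and its fallback policy are assumptions for illustration, not mimalloc's actual caller):

static void* os_alloc_aligned_sketch(size_t size, size_t alignment) {
  bool is_large = false;
  bool is_zero = false;
  void* p = NULL;
  if (_mi_prim_alloc(NULL, size, alignment, true /* commit */, false /* allow_large */, &is_large, &is_zero, &p) != 0) {
    return NULL; // allocation failed
  }
  if (((uintptr_t)p % alignment) != 0) {
    // the hint was not honored: release and let the caller fall back to
    // over-allocating `size + alignment` and aligning within that block
    _mi_prim_free(p, size);
    return NULL;
  }
  return p;
}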
@@ -773,7 +781,6 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
defined(__sun) || \
(defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7))
-#include <stdlib.h>
bool _mi_prim_random_buf(void* buf, size_t buf_len) {
arc4random_buf(buf, buf_len);
return true;
diff --git a/src/prim/wasi/prim.c b/src/prim/wasi/prim.c
index 5d7a8132..e1e7de5e 100644
--- a/src/prim/wasi/prim.c
+++ b/src/prim/wasi/prim.c
@@ -119,8 +119,8 @@ static void* mi_prim_mem_grow(size_t size, size_t try_alignment) {
}
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
- MI_UNUSED(allow_large); MI_UNUSED(commit);
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
+ MI_UNUSED(allow_large); MI_UNUSED(commit); MI_UNUSED(hint_addr);
*is_large = false;
*is_zero = false;
*addr = mi_prim_mem_grow(size, try_alignment);
diff --git a/src/prim/windows/prim.c b/src/prim/windows/prim.c
index 22f787de..745224c2 100644
--- a/src/prim/windows/prim.c
+++ b/src/prim/windows/prim.c
@@ -118,6 +118,18 @@ void _mi_prim_mem_init( mi_os_mem_config_t* config )
GetSystemInfo(&si);
if (si.dwPageSize > 0) { config->page_size = si.dwPageSize; }
if (si.dwAllocationGranularity > 0) { config->alloc_granularity = si.dwAllocationGranularity; }
+ // get virtual address bits
+ if ((uintptr_t)si.lpMaximumApplicationAddress > 0) {
+ const size_t vbits = MI_INTPTR_BITS - mi_clz((uintptr_t)si.lpMaximumApplicationAddress);
+ config->virtual_address_bits = vbits;
+ }
+ // get physical memory
+ ULONGLONG memInKiB = 0;
+ if (GetPhysicallyInstalledSystemMemory(&memInKiB)) {
+ if (memInKiB > 0 && memInKiB < (SIZE_MAX / MI_KiB)) {
+ config->physical_memory = memInKiB * MI_KiB;
+ }
+ }
// get the VirtualAlloc2 function
HINSTANCE hDll;
hDll = LoadLibrary(TEXT("kernelbase.dll"));
@@ -191,7 +203,7 @@ static void* win_virtual_alloc_prim_once(void* addr, size_t size, size_t try_ali
}
#endif
// on modern Windows try use VirtualAlloc2 for aligned allocation
- if (try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
reqs.Alignment = try_alignment;
MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
@@ -279,14 +291,14 @@ static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DW
return p;
}
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
+int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(commit || !allow_large);
mi_assert_internal(try_alignment > 0);
*is_zero = true;
int flags = MEM_RESERVE;
if (commit) { flags |= MEM_COMMIT; }
- *addr = win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
+ *addr = win_virtual_alloc(hint_addr, size, try_alignment, flags, false, allow_large, is_large);
return (*addr != NULL ? 0 : (int)GetLastError());
}
@@ -499,8 +511,7 @@ void _mi_prim_process_info(mi_process_info_t* pinfo)
}
// get process info
- PROCESS_MEMORY_COUNTERS info;
- memset(&info, 0, sizeof(info));
+ PROCESS_MEMORY_COUNTERS info; _mi_memzero_var(info);
if (pGetProcessMemoryInfo != NULL) {
pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info));
}
@@ -602,59 +613,205 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
#endif // MI_USE_RTLGENRANDOM
+
+
//----------------------------------------------------------------
-// Thread init/done
+// Process & Thread Init/Done
//----------------------------------------------------------------
-#if !defined(MI_SHARED_LIB)
-
-// use thread local storage keys to detect thread ending
-// note: another design could be to use special linker sections (see issue #869)
-#include <fibersapi.h>
-#if (_WIN32_WINNT < 0x600) // before Windows Vista
-WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
-WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex );
-WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
-WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex);
-#endif
-
-static DWORD mi_fls_key = (DWORD)(-1);
-
-static void NTAPI mi_fls_done(PVOID value) {
- mi_heap_t* heap = (mi_heap_t*)value;
- if (heap != NULL) {
- _mi_thread_done(heap);
- FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672
+static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) {
+ MI_UNUSED(reserved);
+ MI_UNUSED(module);
+ #if MI_TLS_SLOT >= 2
+ if ((reason==DLL_PROCESS_ATTACH || reason==DLL_THREAD_ATTACH) && mi_prim_get_default_heap() == NULL) {
+ _mi_heap_set_default_direct((mi_heap_t*)&_mi_heap_empty);
}
+ #endif
+ if (reason==DLL_PROCESS_ATTACH) {
+ _mi_process_load();
+ }
+ else if (reason==DLL_PROCESS_DETACH) {
+ _mi_process_done();
+ }
+ else if (reason==DLL_THREAD_DETACH && !_mi_is_redirected()) {
+ _mi_thread_done(NULL);
+ }
}
-void _mi_prim_thread_init_auto_done(void) {
- mi_fls_key = FlsAlloc(&mi_fls_done);
-}
-void _mi_prim_thread_done_auto_done(void) {
- // call thread-done on all threads (except the main thread) to prevent
- // dangling callback pointer if statically linked with a DLL; Issue #208
- FlsFree(mi_fls_key);
-}
+#if defined(MI_SHARED_LIB)
+ #define MI_PRIM_HAS_PROCESS_ATTACH 1
-void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
- mi_assert_internal(mi_fls_key != (DWORD)(-1));
- FlsSetValue(mi_fls_key, heap);
-}
+ // Windows DLL: easy to hook into process_init and thread_done
+ __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
+ mi_win_main((PVOID)inst,reason,reserved);
+ return TRUE;
+ }
-#else
+ // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event.
+ void _mi_prim_thread_init_auto_done(void) { }
+ void _mi_prim_thread_done_auto_done(void) { }
+ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+ }
-// Dll; nothing to do as in that case thread_done is handled through the DLL_THREAD_DETACH event.
+#elif !defined(MI_WIN_USE_FLS)
+ #define MI_PRIM_HAS_PROCESS_ATTACH 1
-void _mi_prim_thread_init_auto_done(void) {
-}
+ static void NTAPI mi_win_main_attach(PVOID module, DWORD reason, LPVOID reserved) {
+ if (reason == DLL_PROCESS_ATTACH || reason == DLL_THREAD_ATTACH) {
+ mi_win_main(module, reason, reserved);
+ }
+ }
+ static void NTAPI mi_win_main_detach(PVOID module, DWORD reason, LPVOID reserved) {
+ if (reason == DLL_PROCESS_DETACH || reason == DLL_THREAD_DETACH) {
+ mi_win_main(module, reason, reserved);
+ }
+ }
-void _mi_prim_thread_done_auto_done(void) {
-}
+ // Set up TLS callbacks in a statically linked library by using special data sections.
+ // See
+ // We use 2 entries to ensure we call attach events before constructors
+ // are called, and detach events after destructors are called.
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
-void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
- MI_UNUSED(heap);
-}
+ #if defined(_WIN64)
+ #pragma comment(linker, "/INCLUDE:_tls_used")
+ #pragma comment(linker, "/INCLUDE:_mi_tls_callback_pre")
+ #pragma comment(linker, "/INCLUDE:_mi_tls_callback_post")
+ #pragma const_seg(".CRT$XLB")
+ extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[];
+ const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach };
+ #pragma const_seg()
+ #pragma const_seg(".CRT$XLY")
+ extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[];
+ const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach };
+ #pragma const_seg()
+ #else
+ #pragma comment(linker, "/INCLUDE:__tls_used")
+ #pragma comment(linker, "/INCLUDE:__mi_tls_callback_pre")
+ #pragma comment(linker, "/INCLUDE:__mi_tls_callback_post")
+ #pragma data_seg(".CRT$XLB")
+ PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach };
+ #pragma data_seg()
+ #pragma data_seg(".CRT$XLY")
+ PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach };
+ #pragma data_seg()
+ #endif
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event.
+ void _mi_prim_thread_init_auto_done(void) { }
+ void _mi_prim_thread_done_auto_done(void) { }
+ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+ }
+
+#else // deprecated: statically linked, use fiber api
+
+ #if defined(_MSC_VER) // on clang/gcc use the constructor attribute (in `src/prim/prim.c`)
+ // MSVC: use data section magic for static libraries
+ // See
+ #define MI_PRIM_HAS_PROCESS_ATTACH 1
+
+ static int mi_process_attach(void) {
+ mi_win_main(NULL,DLL_PROCESS_ATTACH,NULL);
+ atexit(&_mi_process_done);
+ return 0;
+ }
+ typedef int(*mi_crt_callback_t)(void);
+ #if defined(_WIN64)
+ #pragma comment(linker, "/INCLUDE:_mi_tls_callback")
+ #pragma section(".CRT$XIU", long, read)
+ #else
+ #pragma comment(linker, "/INCLUDE:__mi_tls_callback")
+ #endif
+ #pragma data_seg(".CRT$XIU")
+ mi_decl_externc mi_crt_callback_t _mi_tls_callback[] = { &mi_process_attach };
+ #pragma data_seg()
+ #endif
+
+ // use the fiber api for calling `_mi_thread_done`.
+ #include <fibersapi.h>
+ #if (_WIN32_WINNT < 0x600) // before Windows Vista
+ WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
+ WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex );
+ WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
+ WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex);
+ #endif
+
+ static DWORD mi_fls_key = (DWORD)(-1);
+
+ static void NTAPI mi_fls_done(PVOID value) {
+ mi_heap_t* heap = (mi_heap_t*)value;
+ if (heap != NULL) {
+ _mi_thread_done(heap);
+ FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672
+ }
+ }
+
+ void _mi_prim_thread_init_auto_done(void) {
+ mi_fls_key = FlsAlloc(&mi_fls_done);
+ }
+
+ void _mi_prim_thread_done_auto_done(void) {
+ // call thread-done on all threads (except the main thread) to prevent
+ // dangling callback pointer if statically linked with a DLL; Issue #208
+ FlsFree(mi_fls_key);
+ }
+
+ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ mi_assert_internal(mi_fls_key != (DWORD)(-1));
+ FlsSetValue(mi_fls_key, heap);
+ }
+#endif
+
+// ----------------------------------------------------
+// Communicate with the redirection module on Windows
+// ----------------------------------------------------
+#if defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT)
+ #define MI_PRIM_HAS_ALLOCATOR_INIT 1
+
+ static bool mi_redirected = false; // true if malloc redirects to mi_malloc
+
+ bool _mi_is_redirected(void) {
+ return mi_redirected;
+ }
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+ mi_decl_export void _mi_redirect_entry(DWORD reason) {
+ // called on redirection; careful as this may be called before DllMain
+ #if MI_TLS_SLOT >= 2
+ if ((reason==DLL_PROCESS_ATTACH || reason==DLL_THREAD_ATTACH) && mi_prim_get_default_heap() == NULL) {
+ _mi_heap_set_default_direct((mi_heap_t*)&_mi_heap_empty);
+ }
+ #endif
+ if (reason == DLL_PROCESS_ATTACH) {
+ mi_redirected = true;
+ }
+ else if (reason == DLL_PROCESS_DETACH) {
+ mi_redirected = false;
+ }
+ else if (reason == DLL_THREAD_DETACH) {
+ _mi_thread_done(NULL);
+ }
+ }
+ __declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message);
+ __declspec(dllimport) void mi_cdecl mi_allocator_done(void);
+ #ifdef __cplusplus
+ }
+ #endif
+ bool _mi_allocator_init(const char** message) {
+ return mi_allocator_init(message);
+ }
+ void _mi_allocator_done(void) {
+ mi_allocator_done();
+ }
#endif
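For context, a sketch of how these callbacks could be consumed at process load; the caller shape here is an assumption (it is not part of this patch), while `_mi_error_message` and `_mi_verbose_message` are mimalloc's existing logging helpers:

#include <errno.h> // EFAULT

static void process_load_sketch(void) {
  const char* msg = NULL;
  if (!_mi_allocator_init(&msg)) {
    // the redirection handshake failed; report the module's message if any
    _mi_error_message(EFAULT, "allocator init failed: %s\n", (msg != NULL ? msg : "unknown"));
  }
  else if (msg != NULL) {
    _mi_verbose_message("%s\n", msg); // e.g. report that malloc is redirected
  }
}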
diff --git a/src/segment-map.c b/src/segment-map.c
index 8927a8bd..ce754e98 100644
--- a/src/segment-map.c
+++ b/src/segment-map.c
@@ -22,7 +22,7 @@ terms of the MIT license. A copy of the license can be found in the file
#elif (MI_INTPTR_SIZE > 4)
#define MI_SEGMENT_MAP_MAX_ADDRESS (48*1024ULL*MI_GiB) // 48 TiB
#else
-#define MI_SEGMENT_MAP_MAX_ADDRESS (MAX_UINT32)
+#define MI_SEGMENT_MAP_MAX_ADDRESS (UINT32_MAX)
#endif
#define MI_SEGMENT_MAP_PART_SIZE (MI_INTPTR_SIZE*MI_KiB - 128) // 128 > sizeof(mi_memid_t) !
@@ -55,11 +55,12 @@ static mi_segmap_part_t* mi_segment_map_index_of(const mi_segment_t* segment, bo
if (part == NULL) {
if (!create_on_demand) return NULL;
mi_memid_t memid;
- part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid, NULL);
+ part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid);
if (part == NULL) return NULL;
+ part->memid = memid;
mi_segmap_part_t* expected = NULL;
if (!mi_atomic_cas_ptr_strong_release(mi_segmap_part_t, &mi_segment_map[segindex], &expected, part)) {
- _mi_os_free(part, sizeof(mi_segmap_part_t), memid, NULL);
+ _mi_os_free(part, sizeof(mi_segmap_part_t), memid);
part = expected;
if (part == NULL) return NULL;
}
@@ -124,3 +125,12 @@ static bool mi_is_valid_pointer(const void* p) {
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
return mi_is_valid_pointer(p);
}
+
+void _mi_segment_map_unsafe_destroy(void) {
+ for (size_t i = 0; i < MI_SEGMENT_MAP_MAX_PARTS; i++) {
+ mi_segmap_part_t* part = mi_atomic_exchange_ptr_relaxed(mi_segmap_part_t, &mi_segment_map[i], NULL);
+ if (part != NULL) {
+ _mi_os_free(part, sizeof(mi_segmap_part_t), part->memid);
+ }
+ }
+}
\ No newline at end of file
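The on-demand part allocation in `mi_segment_map_index_of` above follows a publish-or-free pattern: every racing thread allocates a candidate block, the CAS winner publishes its block, and each loser frees its own and adopts the winner's. A self-contained sketch of the same idiom in portable C11 atomics (not mimalloc's atomic wrappers):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct part_s { int data; } part_t;

static part_t* get_or_create(_Atomic(part_t*)* slot) {
  part_t* part = atomic_load_explicit(slot, memory_order_acquire);
  if (part == NULL) {
    part_t* fresh = (part_t*)calloc(1, sizeof(part_t));
    if (fresh == NULL) return NULL;
    part_t* expected = NULL;
    if (atomic_compare_exchange_strong(slot, &expected, fresh)) {
      part = fresh;  // we won the race: our block is now published
    }
    else {
      free(fresh);   // another thread won: discard ours and use theirs
      part = expected;
    }
  }
  return part;
}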
diff --git a/src/segment.c b/src/segment.c
index b9bdb9b7..3f23374b 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -189,7 +189,7 @@ static void mi_segment_protect_range(void* p, size_t size, bool protect) {
}
}
-static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t* tld) {
+static void mi_segment_protect(mi_segment_t* segment, bool protect) {
// add/remove guard pages
if (MI_SECURE != 0) {
// in secure mode, we set up a protected page in between the segment info and the page data
@@ -207,7 +207,7 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
if (protect && !segment->memid.initially_committed) {
if (protect) {
// ensure secure page is committed
- if (_mi_os_commit(start, os_psize, NULL, tld->stats)) { // if this fails that is ok (as it is an unaccessible page)
+ if (_mi_os_commit(start, os_psize, NULL)) { // if this fails that is ok (as it is an inaccessible page)
mi_segment_protect_range(start, os_psize, protect);
}
}
@@ -241,23 +241,23 @@ static void mi_page_purge(mi_segment_t* segment, mi_page_t* page, mi_segments_tl
if (!segment->allow_purge) return;
mi_assert_internal(page->used == 0);
mi_assert_internal(page->free == NULL);
- mi_assert_expensive(!mi_pages_purge_contains(page, tld));
+ mi_assert_expensive(!mi_pages_purge_contains(page, tld)); MI_UNUSED(tld);
size_t psize;
void* start = mi_segment_raw_page_start(segment, page, &psize);
- const bool needs_recommit = _mi_os_purge(start, psize, tld->stats);
+ const bool needs_recommit = _mi_os_purge(start, psize);
if (needs_recommit) { page->is_committed = false; }
}
static bool mi_page_ensure_committed(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
if (page->is_committed) return true;
mi_assert_internal(segment->allow_decommit);
- mi_assert_expensive(!mi_pages_purge_contains(page, tld));
+ mi_assert_expensive(!mi_pages_purge_contains(page, tld)); MI_UNUSED(tld);
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false;
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
- bool ok = _mi_os_commit(start, psize + gsize, &is_zero, tld->stats);
+ bool ok = _mi_os_commit(start, psize + gsize, &is_zero);
if (!ok) return false; // failed to commit!
page->is_committed = true;
page->used = 0;
@@ -436,6 +436,8 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
mi_assert_internal((uintptr_t)p % block_size == 0);
}
}
+ mi_assert_internal(_mi_is_aligned(p, MI_MAX_ALIGN_SIZE));
+ mi_assert_internal(block_size == 0 || block_size > MI_MAX_ALIGN_GUARANTEE || _mi_is_aligned(p,block_size));
if (page_size != NULL) *page_size = psize;
mi_assert_internal(_mi_ptr_page(p) == page);
@@ -446,13 +448,18 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_t* pre_size, size_t* info_size)
{
- const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
+ const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
size_t guardsize = 0;
size_t isize = 0;
+
if (MI_SECURE == 0) {
// normally no guard pages
+ #if MI_GUARDED
+ isize = _mi_align_up(minsize, _mi_os_page_size());
+ #else
isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
+ #endif
}
else {
// in secure mode, we set up a protected page in between the segment info
@@ -460,7 +467,7 @@ static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_
const size_t page_size = _mi_os_page_size();
isize = _mi_align_up(minsize, page_size);
guardsize = page_size;
- required = _mi_align_up(required, page_size);
+ //required = _mi_align_up(required, isize + guardsize);
}
if (info_size != NULL) *info_size = isize;
@@ -495,7 +502,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
if (MI_SECURE != 0) {
mi_assert_internal(!segment->memid.is_pinned);
- mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set
+ mi_segment_protect(segment, false); // ensure no more guard pages are set
}
bool fully_committed = true;
@@ -509,7 +516,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
MI_UNUSED(fully_committed);
mi_assert_internal((fully_committed && committed_size == segment_size) || (!fully_committed && committed_size < segment_size));
- _mi_arena_free(segment, segment_size, committed_size, segment->memid, tld->stats);
+ _mi_arena_free(segment, segment_size, committed_size, segment->memid);
}
// called from `heap_collect`.
@@ -530,7 +537,7 @@ void _mi_segments_collect(bool force, mi_segments_tld_t* tld) {
static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignment, mi_arena_id_t req_arena_id,
size_t pre_size, size_t info_size, bool commit, size_t segment_size,
- mi_segments_tld_t* tld, mi_os_tld_t* tld_os)
+ mi_segments_tld_t* tld)
{
mi_memid_t memid;
bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
@@ -542,7 +549,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
segment_size = segment_size + (align_offset - pre_size); // adjust the segment size
}
- mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, tld_os);
+ mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid);
if (segment == NULL) {
return NULL; // failed to allocate
}
@@ -550,10 +557,10 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
if (!memid.initially_committed) {
// ensure the initial info is committed
mi_assert_internal(!memid.is_pinned);
- bool ok = _mi_os_commit(segment, pre_size, NULL, tld_os->stats);
+ bool ok = _mi_os_commit(segment, pre_size, NULL);
if (!ok) {
// commit failed; we cannot touch the memory: free the segment directly and return `NULL`
- _mi_arena_free(segment, segment_size, 0, memid, tld_os->stats);
+ _mi_arena_free(segment, segment_size, 0, memid);
return NULL;
}
}
@@ -571,7 +578,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, size_t page_alignment,
- mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+ mi_arena_id_t req_arena_id, mi_segments_tld_t* tld)
{
// required is only > 0 for huge page allocations
mi_assert_internal((required > 0 && page_kind > MI_PAGE_LARGE)|| (required==0 && page_kind <= MI_PAGE_LARGE));
@@ -603,7 +610,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
const bool init_commit = eager; // || (page_kind >= MI_PAGE_LARGE);
// Allocate the segment from the OS (segment_size can change due to alignment)
- mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, req_arena_id, pre_size, info_size, init_commit, init_segment_size, tld, os_tld);
+ mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, req_arena_id, pre_size, info_size, init_commit, init_segment_size, tld);
if (segment == NULL) return NULL;
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
mi_assert_internal(segment->memid.is_pinned ? segment->memid.initially_committed : true);
@@ -631,7 +638,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
segment->cookie = _mi_ptr_cookie(segment);
// set protection
- mi_segment_protect(segment, true, tld->os);
+ mi_segment_protect(segment, true);
// insert in free lists for small and medium pages
if (page_kind <= MI_PAGE_MEDIUM) {
@@ -645,6 +652,10 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
MI_UNUSED(force);
mi_assert(segment != NULL);
+
+ // in `mi_segment_force_abandon` we set this to true to ensure the segment's memory stays valid
+ if (segment->dont_free) return;
+
// don't purge as we are freeing now
mi_segment_remove_all_purges(segment, false /* don't force as we are about to free */, tld);
mi_segment_remove_from_free_queue(segment, tld);
@@ -945,6 +956,9 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned
if (segment->subproc != heap->tld->segments.subproc) return false; // only reclaim within the same subprocess
if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false; // don't reclaim between exclusive and non-exclusive arena's
+ const long target = _mi_option_get_fast(mi_option_target_segments_per_thread);
+ if (target > 0 && (size_t)target <= heap->tld->segments.count) return false; // don't reclaim if going above the target count
+
// don't reclaim more from a `free` call than half the current segments
// this is to prevent a pure free-ing thread to start owning too many segments
// (but not for out-of-arena segments as that is the main way to be reclaimed for those)
@@ -969,6 +983,13 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
_mi_arena_field_cursor_done(¤t);
}
+
+static bool segment_count_is_within_target(mi_segments_tld_t* tld, size_t* ptarget) {
+ const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 0, 1024);
+ if (ptarget != NULL) { *ptarget = target; }
+ return (target == 0 || tld->count < target);
+}
+
static long mi_segment_get_reclaim_tries(mi_segments_tld_t* tld) {
// limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
@@ -991,7 +1012,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
mi_segment_t* segment = NULL;
mi_arena_field_cursor_t current;
_mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, ¤t);
- while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL))
+ while (segment_count_is_within_target(tld,NULL) && (max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(¤t)) != NULL))
{
mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process
segment->abandoned_visits++;
@@ -1016,8 +1037,8 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
result = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
break;
}
- else if (segment->abandoned_visits >= 3 && is_suitable) {
- // always reclaim on 3rd visit to limit the list length.
+ else if (segment->abandoned_visits > 3 && is_suitable) {
+ // always reclaim after more than 3 visits to limit the abandoned segment count.
mi_segment_reclaim(segment, heap, 0, NULL, tld);
}
else {
@@ -1031,15 +1052,104 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
}
+/* -----------------------------------------------------------
+ Force abandon a segment that is in use by our thread
+----------------------------------------------------------- */
+
+// force abandon a segment
+static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* tld)
+{
+ mi_assert_internal(segment->abandoned < segment->used);
+ mi_assert_internal(!segment->dont_free);
+
+ // ensure the segment does not get free'd underneath us (so we can check if a page has been freed in `mi_page_force_abandon`)
+ segment->dont_free = true;
+
+ // for all pages
+ for (size_t i = 0; i < segment->capacity; i++) {
+ mi_page_t* page = &segment->pages[i];
+ if (page->segment_in_use) {
+ // abandon the page if it is still in-use (this will free the page if possible as well (but not our segment))
+ mi_assert_internal(segment->used > 0);
+ if (segment->used == segment->abandoned+1) {
+ // the last page.. abandon and return as the segment will be abandoned after this
+ // and we should no longer access it.
+ segment->dont_free = false;
+ _mi_page_force_abandon(page);
+ return;
+ }
+ else {
+ // abandon and continue
+ _mi_page_force_abandon(page);
+ }
+ }
+ }
+ segment->dont_free = false;
+ mi_assert(segment->used == segment->abandoned);
+ mi_assert(segment->used == 0);
+ if (segment->used == 0) { // paranoia
+ // all free now
+ mi_segment_free(segment, false, tld);
+ }
+ else {
+ // perform delayed purges
+ mi_pages_try_purge(false /* force? */, tld);
+ }
+}
+
+
+// try abandon segments.
+// this should be called from `reclaim_or_alloc` so we know all segments are (almost) fully in use.
+static void mi_segments_try_abandon_to_target(mi_heap_t* heap, size_t target, mi_segments_tld_t* tld) {
+ if (target <= 1) return;
+ const size_t min_target = (target > 4 ? (target*3)/4 : target); // 75%
+ // todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages
+ for (int i = 0; i < 64 && tld->count >= min_target; i++) {
+ mi_page_t* page = heap->pages[MI_BIN_FULL].first;
+ while (page != NULL && mi_page_is_huge(page)) {
+ page = page->next;
+ }
+ if (page==NULL) {
+ break;
+ }
+ mi_segment_t* segment = _mi_page_segment(page);
+ mi_segment_force_abandon(segment, tld);
+ mi_assert_internal(page != heap->pages[MI_BIN_FULL].first); // as it is just abandoned
+ }
+}
+
+// try abandon segments.
+// this should be called from `reclaim_or_alloc` so we know all segments are (almost) fully in use.
+static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
+ // we call this when we are about to add a fresh segment so we should be under our target segment count.
+ size_t target = 0;
+ if (segment_count_is_within_target(tld, &target)) return;
+ mi_segments_try_abandon_to_target(heap, target, tld);
+}
+
+void mi_collect_reduce(size_t target_size) mi_attr_noexcept {
+ mi_collect(true);
+ mi_heap_t* heap = mi_heap_get_default();
+ mi_segments_tld_t* tld = &heap->tld->segments;
+ size_t target = target_size / MI_SEGMENT_SIZE;
+ if (target == 0) {
+ target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 1, 1024);
+ }
+ mi_segments_try_abandon_to_target(heap, target, tld);
+}
+
/* -----------------------------------------------------------
Reclaim or allocate
----------------------------------------------------------- */
-static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld)
{
mi_assert_internal(page_kind <= MI_PAGE_LARGE);
mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
+ // try to abandon some segments to increase reuse between threads
+ mi_segments_try_abandon(heap,tld);
+
// 1. try to reclaim an abandoned segment
bool reclaimed;
mi_segment_t* segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
@@ -1054,7 +1164,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
return segment;
}
// 2. otherwise allocate a fresh segment
- return mi_segment_alloc(0, page_kind, page_shift, 0, heap->arena_id, tld, os_tld);
+ return mi_segment_alloc(0, page_kind, page_shift, 0, heap->arena_id, tld);
}
@@ -1093,11 +1203,11 @@ static mi_page_t* mi_segment_page_try_alloc_in_queue(mi_heap_t* heap, mi_page_ki
return NULL;
}
-static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
+static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld) {
mi_page_t* page = mi_segment_page_try_alloc_in_queue(heap, kind, tld);
if (page == NULL) {
// possibly allocate or reclaim a fresh segment
- mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld);
+ mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld);
if (segment == NULL) return NULL; // return NULL if out-of-memory (or reclaimed)
mi_assert_internal(segment->page_kind==kind);
mi_assert_internal(segment->used < segment->capacity);
@@ -1112,20 +1222,20 @@ static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_p
return page;
}
-static mi_page_t* mi_segment_small_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- return mi_segment_page_alloc(heap, block_size, MI_PAGE_SMALL,MI_SMALL_PAGE_SHIFT,tld,os_tld);
+static mi_page_t* mi_segment_small_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld) {
+ return mi_segment_page_alloc(heap, block_size, MI_PAGE_SMALL,MI_SMALL_PAGE_SHIFT,tld);
}
-static mi_page_t* mi_segment_medium_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- return mi_segment_page_alloc(heap, block_size, MI_PAGE_MEDIUM, MI_MEDIUM_PAGE_SHIFT, tld, os_tld);
+static mi_page_t* mi_segment_medium_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld) {
+ return mi_segment_page_alloc(heap, block_size, MI_PAGE_MEDIUM, MI_MEDIUM_PAGE_SHIFT, tld);
}
/* -----------------------------------------------------------
large page allocation
----------------------------------------------------------- */
-static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- mi_segment_t* segment = mi_segment_reclaim_or_alloc(heap,block_size,MI_PAGE_LARGE,MI_LARGE_PAGE_SHIFT,tld,os_tld);
+static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld) {
+ mi_segment_t* segment = mi_segment_reclaim_or_alloc(heap,block_size,MI_PAGE_LARGE,MI_LARGE_PAGE_SHIFT,tld);
if (segment == NULL) return NULL;
mi_page_t* page = mi_segment_find_free(segment, tld);
mi_assert_internal(page != NULL);
@@ -1135,9 +1245,9 @@ static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size
return page;
}
-static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld)
{
- mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT + 1, page_alignment, req_arena_id, tld, os_tld);
+ mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT + 1, page_alignment, req_arena_id, tld);
if (segment == NULL) return NULL;
mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size);
#if MI_HUGE_PAGE_ABANDON
@@ -1161,7 +1271,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
mi_assert_internal(psize - (aligned_p - start) >= size);
uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
ptrdiff_t decommit_size = aligned_p - decommit_start;
- _mi_os_reset(decommit_start, decommit_size, os_tld->stats); // do not decommit as it may be in a region
+ _mi_os_reset(decommit_start, decommit_size); // do not decommit as it may be in a region
}
return page;
@@ -1208,7 +1318,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
if (usize > sizeof(mi_block_t)) {
usize = usize - sizeof(mi_block_t);
uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
- _mi_os_reset(p, usize, &_mi_stats_main);
+ _mi_os_reset(p, usize);
}
}
}
@@ -1218,26 +1328,26 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
Page allocation
----------------------------------------------------------- */
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld) {
mi_page_t* page;
if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
mi_assert_internal(_mi_is_power_of_two(page_alignment));
mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
//mi_assert_internal((MI_SEGMENT_SIZE % page_alignment) == 0);
if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
- page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld, os_tld);
+ page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld);
}
else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
- page = mi_segment_small_page_alloc(heap, block_size, tld, os_tld);
+ page = mi_segment_small_page_alloc(heap, block_size, tld);
}
else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
- page = mi_segment_medium_page_alloc(heap, block_size, tld, os_tld);
+ page = mi_segment_medium_page_alloc(heap, block_size, tld);
}
else if (block_size <= MI_LARGE_OBJ_SIZE_MAX /* || mi_is_good_fit(block_size, MI_LARGE_PAGE_SIZE - sizeof(mi_segment_t)) */ ) {
- page = mi_segment_large_page_alloc(heap, block_size, tld, os_tld);
+ page = mi_segment_large_page_alloc(heap, block_size, tld);
}
else {
- page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld, os_tld);
+ page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld);
}
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
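The new `mi_collect_reduce` entry point makes this abandonment policy available to applications: `target_size` is given in bytes and converted to a segment count. A small usage sketch (the 256 MiB target is only an example value):

#include <mimalloc.h>

static void after_allocation_burst(void) {
  // collect free memory first, then abandon this thread's segments until
  // roughly 256 MiB worth of segments remain owned by it
  mi_collect_reduce(256 * 1024 * 1024);
}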
diff --git a/src/stats.c b/src/stats.c
index 99cf89c5..8566e8f2 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -26,7 +26,7 @@ static bool mi_is_in_main(void* stat) {
static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
if (amount == 0) return;
- if (mi_is_in_main(stat))
+ if mi_unlikely(mi_is_in_main(stat))
{
// add atomically (for abandoned pages)
int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount);
@@ -51,6 +51,27 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
}
}
+// Adjust stats to compensate; for example before committing a range,
+// first adjust downwards with parts that were already committed so
+// we avoid double counting.
+static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) {
+ if (amount == 0) return;
+ if mi_unlikely(mi_is_in_main(stat))
+ {
+ // adjust atomically
+ mi_atomic_addi64_relaxed(&stat->current, amount);
+ mi_atomic_addi64_relaxed(&stat->allocated, amount);
+ mi_atomic_addi64_relaxed(&stat->freed, amount);
+ }
+ else {
+ // don't affect the peak
+ stat->current += amount;
+ // add to both
+ stat->allocated += amount;
+ stat->freed += amount;
+ }
+}
+
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
if (mi_is_in_main(stat)) {
mi_atomic_addi64_relaxed( &stat->count, 1 );
@@ -70,6 +91,14 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
mi_stat_update(stat, -((int64_t)amount));
}
+void _mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount) {
+ mi_stat_adjust(stat, (int64_t)amount);
+}
+
+void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) {
+ mi_stat_adjust(stat, -((int64_t)amount));
+}
+
// must be thread safe as it is called from stats_merge
static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) {
if (stat==src) return;
@@ -118,6 +147,7 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
mi_stat_counter_add(&stats->searches, &src->searches, 1);
mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1);
mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1);
+ mi_stat_counter_add(&stats->guarded_alloc_count, &src->guarded_alloc_count, 1);
#if MI_STAT>1
for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) {
@@ -342,6 +372,7 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
mi_stat_counter_print(&stats->commit_calls, "commits", out, arg);
mi_stat_counter_print(&stats->reset_calls, "resets", out, arg);
mi_stat_counter_print(&stats->purge_calls, "purges", out, arg);
+ mi_stat_counter_print(&stats->guarded_alloc_count, "guarded", out, arg);
mi_stat_print(&stats->threads, "threads", -1, out, arg);
mi_stat_counter_print_avg(&stats->searches, "searches", out, arg);
_mi_fprintf(out, arg, "%10s: %5zu\n", "numa nodes", _mi_os_numa_node_count());
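The adjust helpers above cancel double counting before a regular stat increase. A sketch of the intended call pattern when committing a range of which `already_committed` bytes were counted earlier (the wrapper is illustrative; `_mi_stat_increase` is mimalloc's existing helper):

static void commit_stat_sketch(mi_stats_t* stats, size_t size, size_t already_committed) {
  // subtract what was counted before so the increase below counts every
  // byte of the range exactly once; the adjustment does not affect the peak
  _mi_stat_adjust_decrease(&stats->committed, already_committed);
  _mi_stat_increase(&stats->committed, size);
}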
diff --git a/test/main-override-static.c b/test/main-override-static.c
index bf1cc416..34860717 100644
--- a/test/main-override-static.c
+++ b/test/main-override-static.c
@@ -7,10 +7,13 @@
#include <mimalloc.h>
#include <mimalloc-override.h> // redefines malloc etc.
+static void mi_bins(void);
+
static void double_free1();
static void double_free2();
static void corrupt_free();
static void block_overflow1();
+static void block_overflow2();
static void invalid_free();
static void test_aslr(void);
static void test_process_info(void);
@@ -18,16 +21,21 @@ static void test_reserved(void);
static void negative_stat(void);
static void alloc_huge(void);
static void test_heap_walk(void);
+static void test_canary_leak(void);
+// static void test_large_pages(void);
int main() {
mi_version();
mi_stats_reset();
+ // test_large_pages();
// detect double frees and heap corruption
// double_free1();
// double_free2();
// corrupt_free();
// block_overflow1();
+ // block_overflow2();
+ test_canary_leak();
// test_aslr();
// invalid_free();
// test_reserved();
@@ -35,6 +43,9 @@ int main() {
// test_heap_walk();
// alloc_huge();
+ // mi_bins();
+
+
void* p1 = malloc(78);
void* p2 = malloc(24);
free(p1);
@@ -61,7 +72,7 @@ int main() {
//mi_stats_print(NULL);
// test_process_info();
-
+
return 0;
}
@@ -76,6 +87,12 @@ static void block_overflow1() {
free(p);
}
+static void block_overflow2() {
+ uint8_t* p = (uint8_t*)mi_malloc(16);
+ p[17] = 0;
+ free(p);
+}
+
// The double free samples come from ArcHeap [1] by Insu Yun (issue #161)
// [1]: https://arxiv.org/pdf/1903.00503.pdf
@@ -216,6 +233,50 @@ static void test_heap_walk(void) {
mi_heap_visit_blocks(heap, true, &test_visit, NULL);
}
+static void test_canary_leak(void) {
+ char* p = mi_mallocn_tp(char,23);
+ for(int i = 0; i < 23; i++) {
+ p[i] = '0'+i;
+ }
+ puts(p);
+ free(p);
+}
+
+// Experiment with huge OS pages
+#if 0
+
+#include <mimalloc/types.h>
+#include <mimalloc/internal.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+static void test_large_pages(void) {
+ mi_memid_t memid;
+
+ #if 0
+ size_t pages_reserved;
+ size_t page_size;
+ uint8_t* p = (uint8_t*)_mi_os_alloc_huge_os_pages(1, -1, 30000, &pages_reserved, &page_size, &memid);
+ const size_t req_size = pages_reserved * page_size;
+ #else
+ const size_t req_size = 64*MI_MiB;
+ uint8_t* p = (uint8_t*)_mi_os_alloc(req_size, &memid);
+ #endif
+
+ p[0] = 1;
+
+ //_mi_os_protect(p, _mi_os_page_size());
+ //_mi_os_unprotect(p, _mi_os_page_size());
+ //_mi_os_decommit(p, _mi_os_page_size(), NULL);
+ if (madvise(p, req_size, MADV_HUGEPAGE) == 0) {
+ printf("advised huge pages\n");
+ _mi_os_decommit(p, _mi_os_page_size());
+ };
+ _mi_os_free(p, req_size, memid);
+}
+
+#endif
+
// ----------------------------
// bin size experiments
// ------------------------------
@@ -234,11 +295,11 @@ static void test_heap_walk(void) {
static inline uint8_t mi_bsr32(uint32_t x);
#if defined(_MSC_VER)
-#include <Windows.h>
+//#include <Windows.h>
#include <intrin.h>
static inline uint8_t mi_bsr32(uint32_t x) {
uint32_t idx;
- _BitScanReverse((DWORD*)&idx, x);
+ _BitScanReverse(&idx, x);
return idx;
}
#elif defined(__GNUC__) || defined(__clang__)
@@ -262,7 +323,7 @@ static inline uint8_t mi_bsr32(uint32_t x) {
}
#endif
-/*
+
// Bit scan reverse: return the index of the highest bit.
uint8_t _mi_bsr(uintptr_t x) {
if (x == 0) return 0;
@@ -275,7 +336,7 @@ uint8_t _mi_bsr(uintptr_t x) {
# error "define bsr for non-32 or 64-bit platforms"
#endif
}
-*/
+
static inline size_t _mi_wsize_from_size(size_t size) {
@@ -352,11 +413,20 @@ static inline uint8_t _mi_bin4(size_t size) {
return bin;
}
-static size_t _mi_binx4(size_t bsize) {
- if (bsize==0) return 0;
- uint8_t b = mi_bsr32((uint32_t)bsize);
- if (b <= 1) return bsize;
- size_t bin = ((b << 1) | (bsize >> (b - 1))&0x01);
+static size_t _mi_binx4(size_t wsize) {
+ size_t bin;
+ if (wsize <= 1) {
+ bin = 1;
+ }
+ else if (wsize <= 8) {
+ // bin = (wsize+1)&~1; // round to double word sizes
+ bin = (uint8_t)wsize;
+ }
+ else {
+ uint8_t b = mi_bsr32((uint32_t)wsize);
+ if (b <= 1) return wsize;
+ bin = ((b << 1) | (wsize >> (b - 1))&0x01) + 3;
+ }
return bin;
}
@@ -368,22 +438,40 @@ static size_t _mi_binx8(size_t bsize) {
return bin;
}
+
+static inline size_t mi_bin(size_t wsize) {
+ uint8_t bin;
+ if (wsize <= 1) {
+ bin = 1;
+ }
+ else if (wsize <= 8) {
+ // bin = (wsize+1)&~1; // round to double word sizes
+ bin = (uint8_t)wsize;
+ }
+ else {
+ wsize--;
+ // find the highest bit
+ uint8_t b = (uint8_t)mi_bsr32((uint32_t)wsize); // note: wsize != 0
+ // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
+ // - adjust with 3 because we do not round the first 8 sizes
+ // which each get an exact bin
+ bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
+ }
+ return bin;
+}
+
+
static void mi_bins(void) {
//printf(" QNULL(1), /* 0 */ \\\n ");
size_t last_bin = 0;
- size_t min_bsize = 0;
- size_t last_bsize = 0;
- for (size_t bsize = 1; bsize < 2*1024; bsize++) {
- size_t size = bsize * 64 * 1024;
- size_t bin = _mi_binx8(bsize);
+ for (size_t wsize = 1; wsize <= (4*1024*1024) / 8 + 1024; wsize++) {
+ size_t bin = mi_bin(wsize);
if (bin != last_bin) {
- printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_bsize, last_bsize, last_bin);
- //printf("QNULL(%6zd), ", wsize);
- //if (last_bin%8 == 0) printf("/* %i */ \\\n ", last_bin);
+ //printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_wsize, last_wsize, last_bin);
+ printf("QNULL(%6zd), ", wsize-1);
+ if (last_bin%8 == 0) printf("/* %zu */ \\\n ", last_bin);
last_bin = bin;
- min_bsize = bsize;
}
- last_bsize = bsize;
}
}
#endif
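As a concrete check of the `mi_bin` computation above, a worked example (not from the source) for `wsize = 100`:

// wsize = 100: after wsize-- we have 99 = 0b1100011
// b   = mi_bsr32(99) = 6                        (index of the highest set bit)
// bin = ((6 << 2) + ((99 >> (6 - 2)) & 0x03)) - 3
//     = (24 + 2) - 3 = 23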
diff --git a/test/main-override.cpp b/test/main-override.cpp
index fc7f70f0..c4300420 100644
--- a/test/main-override.cpp
+++ b/test/main-override.cpp
@@ -11,7 +11,7 @@
#include
#include
-#include
+//#include
#include
#ifdef _WIN32
@@ -37,28 +37,35 @@ static void tsan_numa_test(); // issue #414
static void strdup_test(); // issue #445
static void heap_thread_free_huge();
static void test_std_string(); // issue #697
-
+static void test_thread_local(); // issue #944
+// static void test_mixed0(); // issue #942
+static void test_mixed1(); // issue #942
static void test_stl_allocators();
int main() {
- // mi_stats_reset(); // ignore earlier allocations
-
- test_std_string();
+ mi_stats_reset(); // ignore earlier allocations
+ various_tests();
+ test_mixed1();
+
+ //test_std_string();
+ //test_thread_local();
// heap_thread_free_huge();
/*
heap_thread_free_large();
heap_no_delete();
heap_late_free();
padding_shrink();
- various_tests();
+
tsan_numa_test();
+ */
+ /*
strdup_test();
test_stl_allocators();
test_mt_shutdown();
*/
//fail_aslr();
- // mi_stats_print(NULL);
+ mi_stats_print(NULL);
return 0;
}
@@ -101,6 +108,9 @@ static void various_tests() {
t = new (tbuf) Test(42);
t->~Test();
delete[] tbuf;
+
+ const char* ptr = ::_Getdays(); // test _base override
+ free((void*)ptr);
}
class Static {
@@ -177,6 +187,89 @@ static void test_stl_allocators() {
#endif
}
+#if 0
+#include <iostream>
+#include <thread>
+#include <vector>
+#include <atomic>
+#include <memory>
+#include <chrono>
+
+static void test_mixed0() {
+ std::vector<std::unique_ptr<int>> numbers(1024 * 1024 * 100);
+ std::vector<std::thread> threads(1);
+
+ std::atomic<size_t> index{};
+
+ auto start = std::chrono::system_clock::now();
+
+ for (auto& thread : threads) {
+ thread = std::thread{[&index, &numbers]() {
+ while (true) {
+ auto i = index.fetch_add(1, std::memory_order_relaxed);
+ if (i >= numbers.size()) return;
+
+ numbers[i] = std::make_unique<int>(i);
+ }
+ }};
+ }
+
+ for (auto& thread : threads) thread.join();
+
+ auto end = std::chrono::system_clock::now();
+
+ auto duration =
+ std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
+ std::cout << "Running on " << threads.size() << " threads took " << duration
+ << std::endl;
+}
+#endif
+
+void asd() {
+ void* p = malloc(128);
+ free(p);
+}
+static void test_mixed1() {
+ std::thread thread(asd);
+ thread.join();
+}
+
+#if 0
+// issue #691
+static char* cptr;
+
+static void* thread1_allocate()
+{
+ cptr = mi_calloc_tp(char,22085632);
+ return NULL;
+}
+
+static void* thread2_free()
+{
+ assert(cptr);
+ mi_free(cptr);
+ cptr = NULL;
+ return NULL;
+}
+
+static void test_large_migrate(void) {
+ auto t1 = std::thread(thread1_allocate);
+ t1.join();
+ auto t2 = std::thread(thread2_free);
+ t2.join();
+ /*
+ pthread_t thread1, thread2;
+
+ pthread_create(&thread1, NULL, &thread1_allocate, NULL);
+ pthread_join(thread1, NULL);
+
+ pthread_create(&thread2, NULL, &thread2_free, NULL);
+ pthread_join(thread2, NULL);
+ */
+ return;
+}
+#endif
+
// issue 445
static void strdup_test() {
#ifdef _MSC_VER
@@ -312,3 +405,31 @@ static void tsan_numa_test() {
dummy_worker();
t1.join();
}
+
+
+class MTest
+{
+ char *data;
+public:
+ MTest() { data = (char*)malloc(1024); }
+ ~MTest() { free(data); };
+};
+
+thread_local MTest tlVariable;
+
+void threadFun( int i )
+{
+ printf( "Thread %d\n", i );
+ std::this_thread::sleep_for( std::chrono::milliseconds(100) );
+}
+
+void test_thread_local()
+{
+ for( int i=1; i < 100; ++i )
+ {
+ std::thread t( threadFun, i );
+ t.join();
+ mi_stats_print(NULL);
+ }
+ return;
+}
\ No newline at end of file
diff --git a/test/test-api-fill.c b/test/test-api-fill.c
index 3fca3b9d..eebbd394 100644
--- a/test/test-api-fill.c
+++ b/test/test-api-fill.c
@@ -271,7 +271,7 @@ int main(void) {
mi_free(p);
};
- #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN)
+ #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_GUARDED)
CHECK_BODY("fill-freed-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
diff --git a/test/test-api.c b/test/test-api.c
index 76101980..15484544 100644
--- a/test/test-api.c
+++ b/test/test-api.c
@@ -65,6 +65,15 @@ bool mem_is_zero(uint8_t* p, size_t size) {
int main(void) {
mi_option_disable(mi_option_verbose);
+ CHECK_BODY("malloc-aligned9a") { // test large alignments
+ void* p = mi_zalloc_aligned(1024 * 1024, 2);
+ mi_free(p);
+ p = mi_zalloc_aligned(1024 * 1024, 2);
+ mi_free(p);
+ result = true;
+ };
+
+
// ---------------------------------------------------
// Malloc
// ---------------------------------------------------
@@ -157,6 +166,7 @@ int main(void) {
printf("malloc_aligned5: usable size: %zi\n", usable);
mi_free(p);
};
+ /*
CHECK_BODY("malloc-aligned6") {
bool ok = true;
for (size_t align = 1; align <= MI_BLOCK_ALIGNMENT_MAX && ok; align *= 2) {
@@ -174,6 +184,7 @@ int main(void) {
}
result = ok;
};
+ */
CHECK_BODY("malloc-aligned7") {
void* p = mi_malloc_aligned(1024,MI_BLOCK_ALIGNMENT_MAX);
mi_free(p);
@@ -189,7 +200,7 @@ int main(void) {
}
result = ok;
};
- CHECK_BODY("malloc-aligned9") {
+ CHECK_BODY("malloc-aligned9") { // test large alignments
bool ok = true;
void* p[8];
size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };
diff --git a/test/test-stress.c b/test/test-stress.c
index 58d6a6a1..8c5fca9c 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -22,20 +22,27 @@ terms of the MIT license.
#include
#include
+// #define MI_GUARDED
+// #define USE_STD_MALLOC
+
// > mimalloc-test-stress [THREADS] [SCALE] [ITER]
//
// argument defaults
#if defined(MI_TSAN) // with thread-sanitizer reduce the threads to test within the azure pipeline limits
static int THREADS = 8;
static int SCALE = 25;
-static int ITER = 200;
-#elif defined(MI_UBSAN) // with undefined behaviours sanitizer reduce parameters to stay within the azure pipeline limits
+static int ITER = 400;
+#elif defined(MI_UBSAN) // with undefined behaviour sanitizer reduce parameters to stay within the azure pipeline limits
static int THREADS = 8;
static int SCALE = 25;
static int ITER = 20;
+#elif defined(MI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
+static int THREADS = 8;
+static int SCALE = 10;
+static int ITER = 10;
#else
static int THREADS = 32; // more repeatable if THREADS <= #processors
-static int SCALE = 25; // scaling factor
+static int SCALE = 50; // scaling factor
static int ITER = 50; // N full iterations destructing and re-creating all threads
#endif
@@ -43,16 +50,11 @@ static int ITER = 50; // N full iterations destructing and re-creating a
#define STRESS // undefine for leak test
-#ifndef NDEBUG
-#define HEAP_WALK // walk the heap objects?
-#endif
-
-static bool allow_large_objects = true; // allow very large objects? (set to `true` if SCALE>100)
+static bool allow_large_objects = false; // allow very large objects? (set to `true` if SCALE>100)
static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`?
static bool main_participates = false; // main thread participates as a worker too
-// #define USE_STD_MALLOC
#ifdef USE_STD_MALLOC
#define custom_calloc(n,s) calloc(n,s)
#define custom_realloc(p,s) realloc(p,s)
@@ -62,6 +64,10 @@ static bool main_participates = false; // main thread participates as a
#define custom_calloc(n,s) mi_calloc(n,s)
#define custom_realloc(p,s) mi_realloc(p,s)
#define custom_free(p) mi_free(p)
+
+#ifndef NDEBUG
+#define HEAP_WALK // walk the heap objects?
+#endif
#endif
// transfer pointer between threads
@@ -216,9 +222,9 @@ static void test_stress(void) {
uintptr_t r = rand();
for (int n = 0; n < ITER; n++) {
run_os_threads(THREADS, &stress);
- #ifndef NDEBUG
+ #if !defined(NDEBUG) && !defined(USE_STD_MALLOC)
// switch between arena and OS allocation for testing
- mi_option_set_enabled(mi_option_disallow_arena_alloc, (n%2)==1);
+ // mi_option_set_enabled(mi_option_disallow_arena_alloc, (n%2)==1);
#endif
#ifdef HEAP_WALK
size_t total = 0;
@@ -232,7 +238,7 @@ static void test_stress(void) {
}
#ifndef NDEBUG
//mi_collect(false);
- //mi_debug_show_arenas();
+ //mi_debug_show_arenas(true);
#endif
#if !defined(NDEBUG) || defined(MI_TSAN)
if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); }
@@ -266,7 +272,7 @@ int main(int argc, char** argv) {
#ifdef HEAP_WALK
mi_option_enable(mi_option_visit_abandoned);
#endif
- #ifndef NDEBUG
+ #if !defined(NDEBUG) && !defined(USE_STD_MALLOC)
mi_option_set(mi_option_arena_reserve, 32 * 1024 /* in kib = 32MiB */);
#endif
#ifndef USE_STD_MALLOC
@@ -310,11 +316,11 @@ int main(int argc, char** argv) {
#ifndef USE_STD_MALLOC
#ifndef NDEBUG
- mi_debug_show_arenas(true,true,true);
+ mi_debug_show_arenas(true);
mi_collect(true);
- #endif
- mi_stats_print(NULL);
+ #endif
#endif
+ mi_stats_print(NULL);
//bench_end_program();
return 0;
}