diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5fc1808e..490cb483 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -14,6 +14,7 @@ option(MI_TRACK_VALGRIND "Compile with Valgrind support (adds a small overhea
option(MI_TRACK_ASAN "Compile with address sanitizer support (adds a small overhead)" OFF)
option(MI_TRACK_ETW "Compile with Windows event tracing (ETW) support (adds a small overhead)" OFF)
option(MI_USE_CXX "Use the C++ compiler to compile the library (instead of the C compiler)" OFF)
+option(MI_OPT_ARCH "Only for optimized builds: turn on architecture specific optimizations (for arm64: '-march=armv8.1-a' (2016))" ON)
option(MI_SEE_ASM "Generate assembly files" OFF)
option(MI_OSX_INTERPOSE "Use interpose to override standard malloc on macOS" ON)
option(MI_OSX_ZONE "Use malloc zone to override standard malloc on macOS" ON)
@@ -73,16 +74,16 @@ else()
endif()
# -----------------------------------------------------------------------------
-# Convenience: set default build type depending on the build directory
+# Convenience: set default build type and compiler depending on the build directory
# -----------------------------------------------------------------------------
message(STATUS "")
if (NOT CMAKE_BUILD_TYPE)
- if ("${CMAKE_BINARY_DIR}" MATCHES ".*(D|d)ebug$" OR MI_DEBUG_FULL)
- message(STATUS "No build type selected, default to: Debug")
+ if ("${CMAKE_BINARY_DIR}" MATCHES ".*((D|d)ebug|asan|tsan|ubsan|valgrind)$" OR MI_DEBUG_FULL)
+ message(STATUS "No build type selected, default to 'Debug'")
set(CMAKE_BUILD_TYPE "Debug")
else()
- message(STATUS "No build type selected, default to: Release")
+ message(STATUS "No build type selected, default to 'Release'")
set(CMAKE_BUILD_TYPE "Release")
endif()
endif()
@@ -92,7 +93,6 @@ if("${CMAKE_BINARY_DIR}" MATCHES ".*(S|s)ecure$")
set(MI_SECURE "ON")
endif()
-
# -----------------------------------------------------------------------------
# Process options
# -----------------------------------------------------------------------------
@@ -112,6 +112,14 @@ if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel")
set(MI_USE_CXX "ON")
endif()
+if(CMAKE_BUILD_TYPE MATCHES "Release|RelWithDebInfo")
+ if (NOT MI_OPT_ARCH)
+ message(STATUS "Architecture specific optimizations are disabled (MI_OPT_ARCH=OFF)")
+ endif()
+else()
+ set(MI_OPT_ARCH OFF)
+endif()
+
if(MI_OVERRIDE)
message(STATUS "Override standard malloc (MI_OVERRIDE=ON)")
if(APPLE)
@@ -138,13 +146,7 @@ if(MI_OVERRIDE)
endif()
endif()
-if(WIN32)
- if (MI_WIN_REDIRECT)
- if (MSVC_C_ARCHITECTURE_ID MATCHES "ARM")
- message(STATUS "Cannot use redirection on Windows ARM (MI_WIN_REDIRECT=OFF)")
- set(MI_WIN_REDIRECT OFF)
- endif()
- endif()
+if(WIN32)
if (NOT MI_WIN_REDIRECT)
# use a negative define for backward compatibility
list(APPEND mi_defines MI_WIN_NOREDIRECT=1)
@@ -319,16 +321,32 @@ if(MI_WIN_USE_FLS)
list(APPEND mi_defines MI_WIN_USE_FLS=1)
endif()
+# Determine architecture
+set(MI_OPT_ARCH_FLAGS "")
+set(MI_ARCH "unknown")
+if(APPLE)
+ list(FIND CMAKE_OSX_ARCHITECTURES "x86_64" x64_index)
+ list(FIND CMAKE_OSX_ARCHITECTURES "arm64" arm64_index)
+ if(x64_index GREATER_EQUAL 0)
+ set(MI_ARCH "x64")
+ elseif(arm64_index GREATER_EQUAL 0)
+ set(MI_ARCH "arm64")
+ endif()
+elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM STREQUAL "x64")
+ set(MI_ARCH "x64")
+elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ARM64" OR CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64")
+ set(MI_ARCH "arm64")
+endif()
- # Check /proc/cpuinfo for an SV39 MMU and limit the virtual address bits.
- # (this will skip the aligned hinting in that case. Issue #939, #949)
- if (EXISTS /proc/cpuinfo)
- file(STRINGS /proc/cpuinfo mi_sv39_mmu REGEX "^mmu[ \t]+:[ \t]+sv39$")
- if (mi_sv39_mmu)
- MESSAGE( STATUS "Set virtual address bits to 39 (SV39 MMU detected)" )
- list(APPEND mi_defines MI_DEFAULT_VIRTUAL_ADDRESS_BITS=39)
- endif()
- endif()
+# Check /proc/cpuinfo for an SV39 MMU and limit the virtual address bits.
+# (this will skip the aligned hinting in that case. Issue #939, #949)
+if (EXISTS /proc/cpuinfo)
+ file(STRINGS /proc/cpuinfo mi_sv39_mmu REGEX "^mmu[ \t]+:[ \t]+sv39$")
+ if (mi_sv39_mmu)
+ MESSAGE( STATUS "Set virtual address bits to 39 (SV39 MMU detected)" )
+ list(APPEND mi_defines MI_DEFAULT_VIRTUAL_ADDRESS_BITS=39)
+ endif()
+endif()
# On Haiku use `-DCMAKE_INSTALL_PREFIX` instead, issue #788
# if(CMAKE_SYSTEM_NAME MATCHES "Haiku")
@@ -367,16 +385,31 @@ if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM
if(MI_OVERRIDE)
list(APPEND mi_cflags -fno-builtin-malloc)
endif()
+ if(MI_OPT_ARCH)
+ if(MI_ARCH STREQUAL "arm64")
+ set(MI_OPT_ARCH_FLAGS "-march=armv8.1-a") # fast atomics
+ endif()
+ endif()
endif()
if (MSVC AND MSVC_VERSION GREATER_EQUAL 1914)
list(APPEND mi_cflags /Zc:__cplusplus)
+ if(MI_OPT_ARCH)
+ if(MI_ARCH STREQUAL "arm64")
+ set(MI_OPT_ARCH_FLAGS "/arch:armv8.1") # fast atomics
+ endif()
+ endif()
endif()
if(MINGW)
add_definitions(-D_WIN32_WINNT=0x600)
endif()
+if(MI_OPT_ARCH_FLAGS)
+ list(APPEND mi_cflags ${MI_OPT_ARCH_FLAGS})
+ message(STATUS "Architecture specific optimization is enabled (with ${MI_OPT_ARCH_FLAGS}) (MI_OPT_ARCH=ON)")
+endif()
+
# extra needed libraries
# we prefer -l test over `find_library` as sometimes core libraries
@@ -495,7 +528,9 @@ if(MI_BUILD_SHARED)
)
if(WIN32 AND MI_WIN_REDIRECT)
# On windows, link and copy the mimalloc redirection dll too.
- if(CMAKE_SIZEOF_VOID_P EQUAL 4)
+ if(MI_ARCH STREQUAL "arm64")
+ set(MIMALLOC_REDIRECT_SUFFIX "-arm64")
+ elseif(CMAKE_SIZEOF_VOID_P EQUAL 4)
set(MIMALLOC_REDIRECT_SUFFIX "32")
else()
set(MIMALLOC_REDIRECT_SUFFIX "")
@@ -617,6 +652,11 @@ endif()
if (MI_OVERRIDE)
if (MI_BUILD_SHARED)
target_compile_definitions(mimalloc PRIVATE MI_MALLOC_OVERRIDE)
+ if (WIN32)
+ # on windows we should generate mimalloc-override.dll.
+ string(REPLACE "mimalloc" "mimalloc-override" mi_override_output_name ${mi_basename})
+ set_target_properties(mimalloc PROPERTIES OUTPUT_NAME ${mi_override_output_name})
+ endif()
endif()
if(NOT WIN32)
# It is only possible to override malloc on Windows when building as a DLL.
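
Aside: the `# fast atomics` comments in the CMake hunks above refer to the LSE (Large System Extensions) atomic instructions introduced with Armv8.1-A. As a hedged illustration (this snippet is not part of the patch), a plain C11 fetch-add shows what the flag buys: with `-march=armv8.1-a` the compiler can emit a single `ldaddal`, while baseline armv8-a falls back to an `ldaxr`/`stlxr` retry loop.

```c
// Illustrative only: compile with and without -march=armv8.1-a on an
// arm64 target and compare the generated code for bump().
#include <stdatomic.h>
#include <stdio.h>

static _Atomic long counter;

long bump(void) {
  // one LSE instruction (ldaddal) under armv8.1-a; a
  // load-linked/store-conditional loop under plain armv8-a
  return atomic_fetch_add_explicit(&counter, 1, memory_order_acq_rel);
}

int main(void) {
  printf("%ld\n", bump());
  return 0;
}
```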
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index a481ac48..442919f6 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -284,7 +284,7 @@ jobs:
displayName: CTest
- job:
- displayName: macOS 15 (Sequia)
+ displayName: macOS 15 (Sequoia)
pool:
vmImage:
macOS-15
diff --git a/bin/mimalloc-redirect-arm64.dll b/bin/mimalloc-redirect-arm64.dll
new file mode 100644
index 00000000..0cae1d51
Binary files /dev/null and b/bin/mimalloc-redirect-arm64.dll differ
diff --git a/bin/mimalloc-redirect-arm64.lib b/bin/mimalloc-redirect-arm64.lib
new file mode 100644
index 00000000..0445ce83
Binary files /dev/null and b/bin/mimalloc-redirect-arm64.lib differ
diff --git a/bin/minject-arm64.exe b/bin/minject-arm64.exe
new file mode 100644
index 00000000..63987afc
Binary files /dev/null and b/bin/minject-arm64.exe differ
diff --git a/bin/readme.md b/bin/readme.md
index 9b121bda..d133eea2 100644
--- a/bin/readme.md
+++ b/bin/readme.md
@@ -11,11 +11,12 @@ There are four requirements to make the overriding work robustly:
2. Link your program explicitly with `mimalloc-override.dll` library.
To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some
- call to the mimalloc API in the `main` function, like `mi_version()`
- (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project
- for an example on how to use this.
+ call to the mimalloc API in the `main` function, like `mi_version()`
+ (or use the `/INCLUDE:mi_version` switch on the linker, or
+ use `#pragma comment(linker, "/include:mi_version")` in some source file).
+ See the `mimalloc-override-test` project for an example of how to use this (a minimal sketch also follows below).
-3. The `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be put
+3. The `mimalloc-redirect.dll` (x64), `mimalloc-redirect32.dll` (x86), or `mimalloc-redirect-arm64.dll` (arm64) must be put
in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL).
The redirection DLL ensures that all calls to the C runtime malloc API get redirected to
mimalloc functions (which reside in `mimalloc-override.dll`).
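
As a minimal sketch of requirement 2 above (illustrative, not part of the patch): any reference to the mimalloc API keeps `mimalloc-override.dll` in the import table, either by calling it or via the linker pragma mentioned there.

```c
// Hypothetical test program; mi_version() is the mimalloc API call named
// above, the rest is scaffolding.
#include <mimalloc.h>
#ifdef _MSC_VER
#pragma comment(linker, "/include:mi_version")  // alternative to the call below
#endif

int main(void) {
  mi_version();  // ensures the override DLL is loaded at start-up
  return 0;
}
```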
@@ -40,7 +41,9 @@ if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `
into the import table (and put `mimalloc-redirect.dll` in the same folder)
Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388).
-The `minject` program can also do this from the command line, use `minject --help` for options:
+The `minject` program can also do this from the command line
+(or `minject32` for 32-bit PE files, or `minject-arm64` on arm64 Windows).
+Use `minject --help` for options:
```
> minject --help
diff --git a/ide/vs2022/mimalloc-override-test.vcxproj b/ide/vs2022/mimalloc-override-test.vcxproj
index a3c56f7b..ff5d53d0 100644
--- a/ide/vs2022/mimalloc-override-test.vcxproj
+++ b/ide/vs2022/mimalloc-override-test.vcxproj
@@ -1,10 +1,18 @@
+    <ProjectConfiguration Include="Debug|ARM64">
+      <Configuration>Debug</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Debug|Win32">
       <Configuration>Debug</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|ARM64">
+      <Configuration>Release</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Release|Win32">
       <Configuration>Release</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
@@ -42,12 +50,23 @@
true
v143
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +81,15 @@
+
+
+
+
+
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +103,18 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
Level3
@@ -128,6 +161,30 @@
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ MultiThreadedDebugDLL
+ Sync
+ Default
+ false
+
+
+ Console
+
+
+ kernel32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+
Level3
@@ -176,6 +233,31 @@
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ MultiThreadedDLL
+
+
+ true
+ true
+ Console
+
+
+ kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
diff --git a/ide/vs2022/mimalloc-override.vcxproj b/ide/vs2022/mimalloc-override.vcxproj
index 4383d886..16a48740 100644
--- a/ide/vs2022/mimalloc-override.vcxproj
+++ b/ide/vs2022/mimalloc-override.vcxproj
@@ -1,10 +1,18 @@
+    <ProjectConfiguration Include="Debug|ARM64">
+      <Configuration>Debug</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Debug|Win32">
       <Configuration>Debug</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|ARM64">
+      <Configuration>Release</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Release|Win32">
       <Configuration>Release</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
@@ -41,11 +49,21 @@
true
v143
+
+ DynamicLibrary
+ true
+ v143
+
DynamicLibrary
false
v143
+
+ DynamicLibrary
+ false
+ v143
+
@@ -60,9 +78,15 @@
+
+
+
+
+
+
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -82,12 +106,24 @@
.dll
mimalloc-override
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc-override
+
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
.dll
mimalloc-override
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc-override
+
Level3
@@ -144,6 +180,34 @@
copy mimalloc-redirect.dll to the output directory
+
+
+ Level3
+ Disabled
+ true
+ true
+ ../../include
+ MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
+ MultiThreadedDebugDLL
+ false
+ CompileAsCpp
+
+
+ $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64.lib;%(AdditionalDependencies)
+
+
+
+
+ Default
+ false
+
+
+ COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64.dll" "$(OutputPath)"
+
+
+ copy mimalloc-redirect-arm64.dll to the output directory
+
+
Level3
@@ -208,6 +272,39 @@
copy mimalloc-redirect.dll to the output directory
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ ../../include
+ MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ MultiThreadedDLL
+ CompileAsCpp
+ false
+ CPUExtensionRequirementsARMv81
+
+
+ true
+ true
+ $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64.lib;%(AdditionalDependencies)
+
+
+ Default
+ false
+
+
+ COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64.dll" "$(OutputPath)"
+
+
+ copy mimalloc-redirect-arm64.dll to the output directory
+
+
@@ -226,13 +323,17 @@
false
false
false
+ false
false
+ false
true
true
true
+ true
true
+ true
@@ -240,7 +341,9 @@
true
true
true
+ true
true
+ true
@@ -252,7 +355,9 @@
true
true
true
+ true
true
+ true
@@ -260,7 +365,9 @@
true
true
true
+ true
true
+ true
diff --git a/ide/vs2022/mimalloc-test-api.vcxproj b/ide/vs2022/mimalloc-test-api.vcxproj
index d9b9cae4..babe7f96 100644
--- a/ide/vs2022/mimalloc-test-api.vcxproj
+++ b/ide/vs2022/mimalloc-test-api.vcxproj
@@ -1,10 +1,18 @@
+    <ProjectConfiguration Include="Debug|ARM64">
+      <Configuration>Debug</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Debug|Win32">
       <Configuration>Debug</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|ARM64">
+      <Configuration>Release</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Release|Win32">
       <Configuration>Release</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
@@ -42,12 +50,23 @@
true
v143
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +81,15 @@
+
+
+
+
+
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +103,18 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
Level3
@@ -106,6 +139,18 @@
Console
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
Level3
@@ -140,15 +185,35 @@
Console
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+
+
+ true
+ true
+ Console
+
+
true
true
true
+ true
true
+ true
false
+ false
diff --git a/ide/vs2022/mimalloc-test-stress.vcxproj b/ide/vs2022/mimalloc-test-stress.vcxproj
index 14bd3e69..c033aaeb 100644
--- a/ide/vs2022/mimalloc-test-stress.vcxproj
+++ b/ide/vs2022/mimalloc-test-stress.vcxproj
@@ -1,10 +1,18 @@
+    <ProjectConfiguration Include="Debug|ARM64">
+      <Configuration>Debug</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Debug|Win32">
       <Configuration>Debug</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|ARM64">
+      <Configuration>Release</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Release|Win32">
       <Configuration>Release</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
@@ -42,12 +50,23 @@
true
v143
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +81,15 @@
+
+
+
+
+
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +103,18 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
Level3
@@ -106,6 +139,18 @@
Console
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
Level3
@@ -140,17 +185,37 @@
Console
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+ CPUExtensionRequirementsARMv81
+
+
+ true
+ true
+ Console
+
+
false
false
+ false
false
false
+ false
-
- {abb5eae7-b3e6-432e-b636-333449892ea7}
+
+ {abb5eae7-b3e6-432e-b636-333449892ea6}
diff --git a/ide/vs2022/mimalloc-test.vcxproj b/ide/vs2022/mimalloc-test.vcxproj
index 506dd7d4..bfd72287 100644
--- a/ide/vs2022/mimalloc-test.vcxproj
+++ b/ide/vs2022/mimalloc-test.vcxproj
@@ -1,10 +1,18 @@
+    <ProjectConfiguration Include="Debug|ARM64">
+      <Configuration>Debug</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Debug|Win32">
       <Configuration>Debug</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|ARM64">
+      <Configuration>Release</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Release|Win32">
       <Configuration>Release</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
@@ -42,12 +50,23 @@
true
v143
+
+ Application
+ true
+ v143
+
Application
false
v143
true
+
+ Application
+ false
+ v143
+ true
+
@@ -62,9 +81,15 @@
+
+
+
+
+
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -78,10 +103,18 @@
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
Level3
@@ -108,6 +141,19 @@
Console
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ stdcpp17
+
+
+ Console
+
+
Level3
@@ -144,6 +190,24 @@
Console
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ stdcpp17
+
+
+ true
+ true
+ Console
+
+
{abb5eae7-b3e6-432e-b636-333449892ea6}
diff --git a/ide/vs2022/mimalloc.sln b/ide/vs2022/mimalloc.sln
index 6ff01d3b..e4a6538b 100644
--- a/ide/vs2022/mimalloc.sln
+++ b/ide/vs2022/mimalloc.sln
@@ -1,81 +1,107 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio Version 16
-VisualStudioVersion = 16.0.29709.97
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|x64 = Debug|x64
- Debug|x86 = Debug|x86
- Release|x64 = Release|x64
- Release|x86 = Release|x86
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32
- {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
- {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
- GlobalSection(ExtensibilityGlobals) = postSolution
- SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A}
- EndGlobalSection
-EndGlobal
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 17
+VisualStudioVersion = 17.12.35527.113
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|ARM64 = Debug|ARM64
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Release|ARM64 = Release|ARM64
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64.Build.0 = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64.ActiveCfg = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64.Build.0 = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64.Build.0 = Debug|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64.ActiveCfg = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64.Build.0 = Release|ARM64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64.Build.0 = Release|ARM64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A}
+ EndGlobalSection
+EndGlobal
diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index 3dc436e3..9b67148f 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -1,10 +1,18 @@
+    <ProjectConfiguration Include="Debug|ARM64">
+      <Configuration>Debug</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Debug|Win32">
       <Configuration>Debug</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|ARM64">
+      <Configuration>Release</Configuration>
+      <Platform>ARM64</Platform>
+    </ProjectConfiguration>
     <ProjectConfiguration Include="Release|Win32">
       <Configuration>Release</Configuration>
       <Platform>Win32</Platform>
     </ProjectConfiguration>
@@ -42,12 +50,23 @@
true
v143
+
+ StaticLibrary
+ true
+ v143
+
StaticLibrary
false
v143
true
+
+ StaticLibrary
+ false
+ v143
+ true
+
@@ -62,9 +81,15 @@
+
+
+
+
+
+
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
@@ -84,12 +109,24 @@
.lib
mimalloc-static
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
.lib
mimalloc-static
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
Level4
@@ -136,6 +173,33 @@
+
+
+ Level4
+ Disabled
+ true
+ Default
+ ../../include
+ MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);
+ CompileAsCpp
+ false
+ stdcpp20
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Level4
@@ -198,34 +262,79 @@
+
+
+ Level4
+ MaxSpeed
+ true
+ Default
+ ../../include
+ %(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ false
+ Default
+ CompileAsCpp
+ true
+ stdcpp20
+ CPUExtensionRequirementsARMv81
+ Sync
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
false
false
+ false
false
false
+ false
true
true
true
+ true
true
+ true
true
+ true
true
+ true
false
+ false
true
+ true
true
true
true
+ true
@@ -235,14 +344,18 @@
true
true
true
+ true
true
+ true
true
true
true
+ true
true
+ true
diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h
index 57ca79fc..a5968651 100644
--- a/include/mimalloc/internal.h
+++ b/include/mimalloc/internal.h
@@ -53,90 +53,100 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_externc
#endif
+// "libc.c"
+#include <stdarg.h>
+void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
+void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
+char _mi_toupper(char c);
+int _mi_strnicmp(const char* s, const char* t, size_t n);
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
+void _mi_strlcat(char* dest, const char* src, size_t dest_size);
+size_t _mi_strlen(const char* s);
+size_t _mi_strnlen(const char* s, size_t max_len);
+bool _mi_getenv(const char* name, char* result, size_t result_size);
// "options.c"
-void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
-void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
-void _mi_warning_message(const char* fmt, ...);
-void _mi_verbose_message(const char* fmt, ...);
-void _mi_trace_message(const char* fmt, ...);
-void _mi_options_init(void);
-long _mi_option_get_fast(mi_option_t option);
-void _mi_error_message(int err, const char* fmt, ...);
+void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
+void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
+void _mi_warning_message(const char* fmt, ...);
+void _mi_verbose_message(const char* fmt, ...);
+void _mi_trace_message(const char* fmt, ...);
+void _mi_options_init(void);
+long _mi_option_get_fast(mi_option_t option);
+void _mi_error_message(int err, const char* fmt, ...);
// random.c
-void _mi_random_init(mi_random_ctx_t* ctx);
-void _mi_random_init_weak(mi_random_ctx_t* ctx);
-void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
-void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
-uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
-uintptr_t _mi_heap_random_next(mi_heap_t* heap);
-uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
+void _mi_random_init(mi_random_ctx_t* ctx);
+void _mi_random_init_weak(mi_random_ctx_t* ctx);
+void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
+void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
+uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
+uintptr_t _mi_heap_random_next(mi_heap_t* heap);
+uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
// init.c
extern mi_decl_cache_align mi_stats_t _mi_stats_main;
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
-void _mi_process_load(void);
+void _mi_process_load(void);
void mi_cdecl _mi_process_done(void);
-bool _mi_is_redirected(void);
-bool _mi_allocator_init(const char** message);
-void _mi_allocator_done(void);
-bool _mi_is_main_thread(void);
-size_t _mi_current_thread_count(void);
-bool _mi_preloading(void); // true while the C runtime is not initialized yet
-void _mi_thread_done(mi_heap_t* heap);
-void _mi_thread_data_collect(void);
-void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
+bool _mi_is_redirected(void);
+bool _mi_allocator_init(const char** message);
+void _mi_allocator_done(void);
+bool _mi_is_main_thread(void);
+size_t _mi_current_thread_count(void);
+bool _mi_preloading(void); // true while the C runtime is not initialized yet
+void _mi_thread_done(mi_heap_t* heap);
+void _mi_thread_data_collect(void);
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
-void _mi_heap_guarded_init(mi_heap_t* heap);
+void _mi_heap_guarded_init(mi_heap_t* heap);
// os.c
-void _mi_os_init(void); // called from process init
-void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
-void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
-void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
+void _mi_os_init(void); // called from process init
+void* _mi_os_alloc(size_t size, mi_memid_t* memid);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid);
-size_t _mi_os_page_size(void);
-size_t _mi_os_good_alloc_size(size_t size);
-bool _mi_os_has_overcommit(void);
-bool _mi_os_has_virtual_reserve(void);
+size_t _mi_os_page_size(void);
+size_t _mi_os_good_alloc_size(size_t size);
+bool _mi_os_has_overcommit(void);
+bool _mi_os_has_virtual_reserve(void);
-bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
-bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
-bool _mi_os_protect(void* addr, size_t size);
-bool _mi_os_unprotect(void* addr, size_t size);
-bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
+bool _mi_os_reset(void* addr, size_t size);
+bool _mi_os_commit(void* p, size_t size, bool* is_zero);
+bool _mi_os_decommit(void* addr, size_t size);
+bool _mi_os_protect(void* addr, size_t size);
+bool _mi_os_unprotect(void* addr, size_t size);
+bool _mi_os_purge(void* p, size_t size);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset);
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
-void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid);
-void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
-bool _mi_os_use_large_page(size_t size, size_t alignment);
-size_t _mi_os_large_page_size(void);
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
+bool _mi_os_use_large_page(size_t size, size_t alignment);
+size_t _mi_os_large_page_size(void);
-void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
// arena.c
mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
-bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
-bool _mi_arena_contains(const void* p);
-void _mi_arenas_collect(bool force_purge, mi_stats_t* stats);
-void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
+void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid);
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
+bool _mi_arena_contains(const void* p);
+void _mi_arenas_collect(bool force_purge);
+void _mi_arena_unsafe_destroy_all(void);
-bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment);
-void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
+bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment);
+void _mi_arena_segment_mark_abandoned(mi_segment_t* segment);
-void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
-void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);
+void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
+void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);
typedef struct mi_arena_field_cursor_s { // abstract struct
size_t os_list_count; // max entries to visit in the OS abandoned list
@@ -152,20 +162,20 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* pr
void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current);
// "segment-map.c"
-void _mi_segment_map_allocated_at(const mi_segment_t* segment);
-void _mi_segment_map_freed_at(const mi_segment_t* segment);
+void _mi_segment_map_allocated_at(const mi_segment_t* segment);
+void _mi_segment_map_freed_at(const mi_segment_t* segment);
// "segment.c"
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
-void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld);
+void _mi_segment_collect(mi_segment_t* segment, bool force);
#if MI_HUGE_PAGE_ABANDON
-void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#else
-void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#endif
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
@@ -175,42 +185,42 @@ bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
// "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
-void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
-void _mi_page_unfull(mi_page_t* page);
-void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
-void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
-void _mi_page_force_abandon(mi_page_t* page);
+void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
+void _mi_page_unfull(mi_page_t* page);
+void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
+void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
+void _mi_page_force_abandon(mi_page_t* page);
-void _mi_heap_delayed_free_all(mi_heap_t* heap);
-bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
-void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
+void _mi_heap_delayed_free_all(mi_heap_t* heap);
+bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
+void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
-void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
-bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
-size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
-void _mi_deferred_free(mi_heap_t* heap, bool force);
+void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
+void _mi_deferred_free(mi_heap_t* heap, bool force);
-void _mi_page_free_collect(mi_page_t* page,bool force);
-void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
+void _mi_page_free_collect(mi_page_t* page,bool force);
+void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
-size_t _mi_bin_size(uint8_t bin); // for stats
-uint8_t _mi_bin(size_t size); // for stats
+size_t _mi_bin_size(uint8_t bin); // for stats
+uint8_t _mi_bin(size_t size); // for stats
// "heap.c"
-void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
-void _mi_heap_destroy_pages(mi_heap_t* heap);
-void _mi_heap_collect_abandon(mi_heap_t* heap);
-void _mi_heap_set_default_direct(mi_heap_t* heap);
-bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
-void _mi_heap_unsafe_destroy_all(void);
-mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
-void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
-bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag);
+void _mi_heap_destroy_pages(mi_heap_t* heap);
+void _mi_heap_collect_abandon(mi_heap_t* heap);
+void _mi_heap_set_default_direct(mi_heap_t* heap);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
+void _mi_heap_unsafe_destroy_all(void);
+mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
+void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
+bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
// "stats.c"
-void _mi_stats_done(mi_stats_t* stats);
+void _mi_stats_done(mi_stats_t* stats);
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
@@ -227,18 +237,6 @@ bool _mi_free_delayed_block(mi_block_t* block);
void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
-// "libc.c"
-#include <stdarg.h>
-void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args);
-void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...);
-char _mi_toupper(char c);
-int _mi_strnicmp(const char* s, const char* t, size_t n);
-void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
-void _mi_strlcat(char* dest, const char* src, size_t dest_size);
-size_t _mi_strlen(const char* s);
-size_t _mi_strnlen(const char* s, size_t max_len);
-bool _mi_getenv(const char* name, char* result, size_t result_size);
-
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
#endif
@@ -890,13 +888,13 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------
-int _mi_os_numa_node_get(mi_os_tld_t* tld);
+int _mi_os_numa_node_get(void);
size_t _mi_os_numa_node_count_get(void);
extern _Atomic(size_t) _mi_numa_node_count;
-static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
+static inline int _mi_os_numa_node(void) {
if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
- else return _mi_os_numa_node_get(tld);
+ else return _mi_os_numa_node_get();
}
static inline size_t _mi_os_numa_node_count(void) {
const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
@@ -958,16 +956,18 @@ static inline size_t mi_ctz(uintptr_t x) {
}
#else
-static inline size_t mi_ctz32(uint32_t x) {
+
+static inline size_t mi_ctz_generic32(uint32_t x) {
// de Bruijn multiplication, see
- static const unsigned char debruijn[32] = {
+ static const uint8_t debruijn[32] = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
if (x==0) return 32;
- return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
+ return debruijn[(uint32_t)((x & -(int32_t)x) * (uint32_t)(0x077CB531U)) >> 27];
}
-static inline size_t mi_clz32(uint32_t x) {
+
+static inline size_t mi_clz_generic32(uint32_t x) {
// de Bruijn multiplication, see
static const uint8_t debruijn[32] = {
31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
@@ -979,28 +979,37 @@ static inline size_t mi_clz32(uint32_t x) {
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
- return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
+ return debruijn[(uint32_t)(x * (uint32_t)(0x07C4ACDDU)) >> 27];
}
-static inline size_t mi_clz(uintptr_t x) {
- if (x==0) return MI_INTPTR_BITS;
-#if (MI_INTPTR_BITS <= 32)
- return mi_clz32((uint32_t)x);
-#else
- size_t count = mi_clz32((uint32_t)(x >> 32));
- if (count < 32) return count;
- return (32 + mi_clz32((uint32_t)x));
-#endif
+static inline size_t mi_ctz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (MI_SIZE_BITS <= 32)
+ return mi_ctz_generic32((uint32_t)x);
+ #else
+ const uint32_t lo = (uint32_t)x;
+ if (lo != 0) {
+ return mi_ctz_generic32(lo);
+ }
+ else {
+ return (32 + mi_ctz_generic32((uint32_t)(x>>32)));
+ }
+ #endif
}
-static inline size_t mi_ctz(uintptr_t x) {
- if (x==0) return MI_INTPTR_BITS;
-#if (MI_INTPTR_BITS <= 32)
- return mi_ctz32((uint32_t)x);
-#else
- size_t count = mi_ctz32((uint32_t)x);
- if (count < 32) return count;
- return (32 + mi_ctz32((uint32_t)(x>>32)));
-#endif
+
+static inline size_t mi_clz(size_t x) {
+ if (x==0) return MI_SIZE_BITS;
+ #if (MI_SIZE_BITS <= 32)
+ return mi_clz_generic32((uint32_t)x);
+ #else
+ const uint32_t hi = (uint32_t)(x>>32);
+ if (hi != 0) {
+ return mi_clz_generic32(hi);
+ }
+ else {
+ return 32 + mi_clz_generic32((uint32_t)x);
+ }
+ #endif
}
#endif
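
The generic `mi_ctz`/`mi_clz` fallbacks above use de Bruijn multiplication: isolating the lowest set bit yields a power of two, so multiplying the de Bruijn constant by it is a left shift by `ctz(x)`, and the top 5 bits of the product index a 32-entry lookup table. A self-contained sketch (not part of the patch) that checks this property:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Mirrors mi_ctz_generic32 from the hunk above, but isolates the lowest
// set bit with well-defined unsigned arithmetic: ~x + 1 == -x (mod 2^32),
// which the hunk writes as `x & -(int32_t)x`.
static size_t ctz32(uint32_t x) {
  static const uint8_t debruijn[32] = {
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
  };
  if (x == 0) return 32;
  // (x & (~x + 1)) == 1u << ctz(x), so the multiply shifts the constant
  // left by ctz(x); the top 5 bits select the table entry.
  return debruijn[(uint32_t)((x & (~x + 1u)) * UINT32_C(0x077CB531)) >> 27];
}

int main(void) {
  for (unsigned i = 0; i < 32; i++) {
    assert(ctz32(UINT32_C(1) << i) == i);  // exact on single-bit values
  }
  assert(ctz32(0) == 32);      // sentinel for zero, as in the hunk
  assert(ctz32(0x0Cu) == 2);   // 0b1100 -> lowest set bit at index 2
  return 0;
}
```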
diff --git a/include/mimalloc/types.h b/include/mimalloc/types.h
index a1c49262..4448bc35 100644
--- a/include/mimalloc/types.h
+++ b/include/mimalloc/types.h
@@ -717,13 +717,6 @@ typedef struct mi_span_queue_s {
#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
-// OS thread local data
-typedef struct mi_os_tld_s {
- size_t region_idx; // start point for next allocation
- mi_stats_t* stats; // points to tld stats
-} mi_os_tld_t;
-
-
// Segments thread local data
typedef struct mi_segments_tld_s {
mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments
@@ -734,7 +727,6 @@ typedef struct mi_segments_tld_s {
size_t reclaim_count;// number of reclaimed (abandoned) segments
mi_subproc_t* subproc; // sub-process this thread belongs to.
mi_stats_t* stats; // points to tld stats
- mi_os_tld_t* os; // points to os tld
} mi_segments_tld_t;
// Thread local data
@@ -744,7 +736,6 @@ struct mi_tld_s {
mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
mi_segments_tld_t segments; // segment tld
- mi_os_tld_t os; // os tld
mi_stats_t stats; // statistics
};
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 9b5a6bd1..f6fcead3 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -39,9 +39,10 @@ static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, si
static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) {
const size_t rate = heap->guarded_sample_rate;
- heap->guarded_sample_rate = 0;
+ // only write if `rate!=0` so we don't write to the constant `_mi_heap_empty`
+ if (rate != 0) { heap->guarded_sample_rate = 0; }
void* p = _mi_heap_malloc_zero(heap, size, zero);
- heap->guarded_sample_rate = rate;
+ if (rate != 0) { heap->guarded_sample_rate = rate; }
return p;
}
#else
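
The conditional writes in this hunk exist because a thread's default heap can point at the statically allocated, read-only `_mi_heap_empty` (per the comment above); unconditionally restoring `guarded_sample_rate` would store to constant memory. A hedged sketch of the save/restore pattern, with hypothetical names (`cfg_t`, `cfg_empty`; this is not mimalloc's API):

```c
#include <stddef.h>

typedef struct cfg_s { size_t sample_rate; } cfg_t;
static const cfg_t cfg_empty = { 0 };  // read-only default, like _mi_heap_empty

static void run_unsampled(cfg_t* cfg, void (*fn)(cfg_t*)) {
  const size_t rate = cfg->sample_rate;
  if (rate != 0) { cfg->sample_rate = 0; }    // never writes when already 0
  fn(cfg);
  if (rate != 0) { cfg->sample_rate = rate; } // restore only what was changed
}

static void work(cfg_t* c) { (void)c; }

int main(void) {
  cfg_t live = { 4 };
  run_unsampled(&live, work);               // rate restored to 4 afterwards
  run_unsampled((cfg_t*)&cfg_empty, work);  // safe: no store to const data
  return 0;
}
```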
diff --git a/src/arena.c b/src/arena.c
index 686500b4..c575cef2 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -190,7 +190,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
if (p != NULL) return p;
// or fall back to the OS
- p = _mi_os_alloc(size, memid, &_mi_stats_main);
+ p = _mi_os_alloc(size, memid);
if (p == NULL) return NULL;
// zero the OS memory if needed
@@ -203,7 +203,7 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size) {
if (mi_memkind_is_os(memid.memkind)) {
- _mi_os_free(p, size, memid, &_mi_stats_main);
+ _mi_os_free(p, size, memid);
}
else {
mi_assert(memid.memkind == MI_MEM_STATIC);
@@ -220,10 +220,10 @@ void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
----------------------------------------------------------- */
// claim the `blocks_inuse` bits
-static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
+static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
{
size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
- if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) {
+ if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
return true;
};
@@ -236,13 +236,13 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
----------------------------------------------------------- */
static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
- bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
+ bool commit, mi_memid_t* memid)
{
MI_UNUSED(arena_index);
mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
mi_bitmap_index_t bitmap_index;
- if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL;
+ if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
// claimed it!
void* p = mi_arena_block_start(arena, bitmap_index);
@@ -272,7 +272,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
_mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
if (any_uncommitted) {
bool commit_zero = false;
- if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
+ if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero)) {
memid->initially_committed = false;
}
else {
@@ -290,7 +290,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
// allocate in a specific arena
static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
- bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+ bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid )
{
MI_UNUSED_RELEASE(alignment);
mi_assert(alignment <= MI_SEGMENT_ALIGN);
@@ -311,7 +311,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
}
// try to allocate
- void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
+ void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid);
mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
return p;
}
@@ -320,7 +320,7 @@ static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_no
// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
bool commit, bool allow_large,
- mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+ mi_arena_id_t req_arena_id, mi_memid_t* memid )
{
MI_UNUSED(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -330,21 +330,21 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
if (req_arena_id != _mi_arena_id_none()) {
// try a specific arena if requested
if (mi_arena_id_index(req_arena_id) < max_arena) {
- void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
else {
// try numa affine allocation
for (size_t i = 0; i < max_arena; i++) {
- void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
// try from another numa node instead..
if (numa_node >= 0) {  // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
for (size_t i = 0; i < max_arena; i++) {
- void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
@@ -389,18 +389,18 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
- mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+ mi_arena_id_t req_arena_id, mi_memid_t* memid)
{
- mi_assert_internal(memid != NULL && tld != NULL);
+ mi_assert_internal(memid != NULL);
mi_assert_internal(size > 0);
*memid = _mi_memid_none();
- const int numa_node = _mi_os_numa_node(tld); // current numa node
+ const int numa_node = _mi_os_numa_node(); // current numa node
// try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed?
if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
- void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
// otherwise, try to first eagerly reserve a new arena
@@ -409,7 +409,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
// and try allocate in there
mi_assert_internal(req_arena_id == _mi_arena_id_none());
- p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
}
}
@@ -424,16 +424,16 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
// finally, fall back to the OS
if (align_offset > 0) {
- return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
+ return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid);
}
else {
- return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
}
}
-void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid)
{
- return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
+ return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid);
}
@@ -459,7 +459,7 @@ static long mi_arena_purge_delay(void) {
// reset or decommit in an arena and update the committed/decommit bitmaps
// assumes we own the area (i.e. blocks_in_use is claimed by us)
-static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
mi_assert_internal(arena->blocks_committed != NULL);
mi_assert_internal(arena->blocks_purge != NULL);
mi_assert_internal(!arena->memid.is_pinned);
@@ -468,7 +468,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
bool needs_recommit;
if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
// all blocks are committed, we can purge freely
- needs_recommit = _mi_os_purge(p, size, stats);
+ needs_recommit = _mi_os_purge(p, size);
}
else {
// some blocks are not committed -- this can happen when a partially committed block is freed
@@ -476,7 +476,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
// and also undo the decommit stats (as it was already adjusted)
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
- needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
+ needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */);
if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); }
}
@@ -490,14 +490,14 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
// Note: assumes we (still) own the area as we may purge immediately
-static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks) {
mi_assert_internal(arena->blocks_purge != NULL);
const long delay = mi_arena_purge_delay();
if (delay < 0) return; // is purging allowed at all?
if (_mi_preloading() || delay == 0) {
// decommit directly
- mi_arena_purge(arena, bitmap_idx, blocks, stats);
+ mi_arena_purge(arena, bitmap_idx, blocks);
}
else {
// schedule decommit
@@ -515,7 +515,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
// purge a range of blocks
// return true if the full range was purged.
// assumes we own the area (i.e. blocks_in_use is claimed by us)
-static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
+static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge) {
const size_t endidx = startidx + bitlen;
size_t bitidx = startidx;
bool all_purged = false;
@@ -528,7 +528,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
if (count > 0) {
// found range to be purged
const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
- mi_arena_purge(arena, range_idx, count, stats);
+ mi_arena_purge(arena, range_idx, count);
if (count == bitlen) {
all_purged = true;
}
@@ -539,7 +539,7 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
}
// returns true if anything was purged
-static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
+static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
{
if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
@@ -575,7 +575,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
if (bitlen > 0) {
// read purge again now that we have the in_use bits
purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
- if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
+ if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge)) {
full_purge = false;
}
any_purged = true;
@@ -595,7 +595,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
return any_purged;
}
-static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
+static void mi_arenas_try_purge( bool force, bool visit_all ) {
if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
@@ -610,7 +610,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
for (size_t i = 0; i < max_arena; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
if (arena != NULL) {
- if (mi_arena_try_purge(arena, now, force, stats)) {
+ if (mi_arena_try_purge(arena, now, force)) {
if (max_purge_count <= 1) break;
max_purge_count--;
}
@@ -624,8 +624,8 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
Arena free
----------------------------------------------------------- */
-void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
- mi_assert_internal(size > 0 && stats != NULL);
+void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid) {
+ mi_assert_internal(size > 0);
mi_assert_internal(committed_size <= size);
if (p==NULL) return;
if (size==0) return;
@@ -640,7 +640,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
// if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
_mi_stat_decrease(&_mi_stats_main.committed, committed_size);
}
- _mi_os_free(p, size, memid, stats);
+ _mi_os_free(p, size, memid);
}
else if (memid.memkind == MI_MEM_ARENA) {
// allocated in an arena
@@ -685,7 +685,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
// works (as we should never reset decommitted parts).
}
// (delay) purge the entire range
- mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
+ mi_arena_schedule_purge(arena, bitmap_idx, blocks);
}
// and make it available to others again
@@ -701,7 +701,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
}
// purge expired decommits
- mi_arenas_try_purge(false, false, stats);
+ mi_arenas_try_purge(false, false);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
@@ -715,7 +715,7 @@ static void mi_arenas_unsafe_destroy(void) {
mi_lock_done(&arena->abandoned_visit_lock);
if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
- _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
+ _mi_os_free(arena->start, mi_arena_size(arena), arena->memid);
}
else {
new_max_arena = i;
@@ -730,15 +730,15 @@ static void mi_arenas_unsafe_destroy(void) {
}
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
-void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) {
- mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats);
+void _mi_arenas_collect(bool force_purge) {
+ mi_arenas_try_purge(force_purge, force_purge /* visit all? */);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
-void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
+void _mi_arena_unsafe_destroy_all(void) {
mi_arenas_unsafe_destroy();
- _mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas
+ _mi_arenas_collect(true /* force purge */); // purge non-owned arenas
}
// Is a pointer inside any of our arenas?
@@ -842,11 +842,11 @@ int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exc
if (arena_id != NULL) *arena_id = _mi_arena_id_none();
size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
mi_memid_t memid;
- void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
+ void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid);
if (start == NULL) return ENOMEM;
const bool is_large = memid.is_pinned; // todo: use separate is_large field?
if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
- _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
+ _mi_os_free_ex(start, size, commit, memid);
_mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
return ENOMEM;
}
@@ -942,7 +942,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_m
_mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
- _mi_os_free(p, hsize, memid, &_mi_stats_main);
+ _mi_os_free(p, hsize, memid);
return ENOMEM;
}
return 0;
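
A note on the purge logic threaded through the arena hunks above: everything keys off `mi_arena_purge_delay()`. A negative delay disables purging entirely, a zero delay (or running while preloading) purges immediately, and a positive delay schedules the purge for later so bursts of frees coalesce. A minimal stand-alone sketch of that three-way policy (hypothetical helper names, not mimalloc's API):

    #include <stdbool.h>
    #include <stdio.h>

    static long purge_delay = 10;     // stands in for mi_option_get(mi_option_purge_delay)
    static bool preloading  = false;  // stands in for _mi_preloading()

    static void purge_now(int block)            { printf("purge block %d now\n", block); }
    static void purge_later(int block, long ms) { printf("purge block %d in %ld ms\n", block, ms); }

    static void schedule_purge(int block) {
      const long delay = purge_delay;
      if (delay < 0) return;            // negative delay: purging is disabled altogether
      if (preloading || delay == 0) {
        purge_now(block);               // decommit directly
      }
      else {
        purge_later(block, delay);      // delay so repeated decommit/commit calls coalesce
      }
    }

    int main(void) {
      schedule_purge(7);
      return 0;
    }
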
diff --git a/src/bitmap.c b/src/bitmap.c
index 4b6be66b..084082fb 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -200,7 +200,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Try to atomically claim a sequence of `count` bits starting from the field
// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`)
-static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats)
+static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
{
mi_assert_internal(bitmap_idx != NULL);
@@ -260,7 +260,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
// claimed!
- mi_stat_counter_increase(stats->arena_crossover_count,1);
+ mi_stat_counter_increase(_mi_stats_main.arena_crossover_count,1);
*bitmap_idx = mi_bitmap_index_create(idx, initial_idx);
return true;
@@ -280,10 +280,10 @@ rollback:
newmap = (map & ~initial_mask);
} while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
}
- mi_stat_counter_increase(stats->arena_rollback_count,1);
+ mi_stat_counter_increase(_mi_stats_main.arena_rollback_count,1);
// retry? (we make a recursive call instead of goto to be able to use const declarations)
if (retries <= 2) {
- return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx, stats);
+ return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx);
}
else {
return false;
@@ -293,7 +293,7 @@ rollback:
// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
-bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) {
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
mi_assert_internal(count > 0);
if (count <= 2) {
// we don't bother with crossover fields for small counts
@@ -313,7 +313,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
}
*/
// if that fails, then try to claim across fields
- if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx, stats)) {
+ if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx)) {
return true;
}
}
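
As the rollback comment above notes, the claim-across path retries by calling itself with `retries+1` instead of jumping back with `goto`, which lets each attempt re-establish its `const` locals. A self-contained sketch of the same bounded-retry shape (illustrative only, not mimalloc's bitmap types):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic long field;

    // Try to move `field` from 0 to `want`; at most 3 attempts in total.
    // Recursion (rather than goto) means every attempt gets fresh const locals.
    static bool try_claim(long want, size_t retries) {
      const long initial = 0;           // re-established per attempt
      long expected = initial;
      if (atomic_compare_exchange_strong(&field, &expected, want)) return true;
      if (retries <= 2) return try_claim(want, retries + 1);
      return false;
    }

    int main(void) {
      printf("claimed: %d\n", try_claim(42, 0) ? 1 : 0);
      return 0;
    }
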
diff --git a/src/bitmap.h b/src/bitmap.h
index 367da739..f098dd8f 100644
--- a/src/bitmap.h
+++ b/src/bitmap.h
@@ -103,7 +103,7 @@ bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
-bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats);
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
diff --git a/src/heap.c b/src/heap.c
index 154d4b80..6cdedb74 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -98,7 +98,7 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
if (collect == MI_FORCE) {
// note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
mi_segment_t* segment = _mi_page_segment(page);
- _mi_segment_collect(segment, true /* force? */, &heap->tld->segments);
+ _mi_segment_collect(segment, true /* force? */);
}
if (mi_page_all_free(page)) {
// no more used blocks, free the page.
@@ -173,7 +173,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
}
// collect arenas (this is program wide so don't force purges on abandonment of threads)
- _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats);
+ _mi_arenas_collect(collect == MI_FORCE /* force purge? */);
}
void _mi_heap_collect_abandon(mi_heap_t* heap) {
@@ -458,6 +458,12 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
mi_heap_reset_pages(from);
}
+// are two heaps compatible with respect to heap tag, exclusive arena, etc.?
+static bool mi_heaps_are_compatible(mi_heap_t* heap1, mi_heap_t* heap2) {
+ return (heap1->tag == heap2->tag && // store same kind of objects
+ heap1->arena_id == heap2->arena_id); // same arena preference
+}
+
// Safe delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
@@ -466,9 +472,10 @@ void mi_heap_delete(mi_heap_t* heap)
mi_assert_expensive(mi_heap_is_valid(heap));
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
- if (!mi_heap_is_backing(heap)) {
+ mi_heap_t* bheap = heap->tld->heap_backing;
+ if (bheap != heap && mi_heaps_are_compatible(bheap,heap)) {
// transfer still used pages to the backing heap
- mi_heap_absorb(heap->tld->heap_backing, heap);
+ mi_heap_absorb(bheap, heap);
}
else {
// the backing heap abandons its pages
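
The new `mi_heaps_are_compatible` guard changes `mi_heap_delete`: pages migrate to the backing heap only when both heaps store the same tag and prefer the same arena; otherwise the deleted heap abandons its pages, just as the backing heap itself does. A simplified sketch of the decision (hypothetical cut-down types):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct heap_s { int tag; int arena_id; } heap_t;

    static bool heaps_are_compatible(const heap_t* a, const heap_t* b) {
      return a->tag == b->tag             // store the same kind of objects
          && a->arena_id == b->arena_id;  // and share the same arena preference
    }

    static void heap_delete(heap_t* heap, heap_t* backing) {
      if (heap != backing && heaps_are_compatible(backing, heap)) {
        printf("absorb pages into the backing heap\n");  // safe: tags and arenas match
      }
      else {
        printf("abandon pages\n");  // mixing would put wrongly-tagged pages in the backing heap
      }
    }

    int main(void) {
      heap_t backing = { 0, 1 };
      heap_t mine    = { 0, 2 };    // same tag, different arena preference
      heap_delete(&mine, &backing); // prints "abandon pages"
      return 0;
    }
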
diff --git a/src/init.c b/src/init.c
index 3f431ee4..4f572023 100644
--- a/src/init.c
+++ b/src/init.c
@@ -135,14 +135,12 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
static mi_decl_cache_align mi_subproc_t mi_subproc_default;
#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
-#define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os)))
mi_decl_cache_align static const mi_tld_t tld_empty = {
0,
false,
NULL, NULL,
- { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, tld_empty_stats, tld_empty_os }, // segments
- { 0, tld_empty_stats }, // os
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, tld_empty_stats }, // segments
{ MI_STATS_NULL } // stats
};
@@ -158,8 +156,7 @@ extern mi_heap_t _mi_heap_main;
static mi_decl_cache_align mi_tld_t tld_main = {
0, false,
&_mi_heap_main, & _mi_heap_main,
- { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, &tld_main.stats, &tld_main.os }, // segments
- { 0, &tld_main.stats }, // os
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, &tld_main.stats }, // segments
{ MI_STATS_NULL } // stats
};
@@ -341,10 +338,10 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
// if that fails, allocate as meta data
if (td == NULL) {
mi_memid_t memid;
- td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+ td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
if (td == NULL) {
// if this fails, try once more. (issue #257)
- td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+ td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid);
if (td == NULL) {
// really out of memory
_mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
@@ -374,7 +371,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
}
}
// if that fails, just free it directly
- _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
+ _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid);
}
void _mi_thread_data_collect(void) {
@@ -384,7 +381,7 @@ void _mi_thread_data_collect(void) {
if (td != NULL) {
td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
if (td != NULL) {
- _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main);
+ _mi_os_free(td, sizeof(mi_thread_data_t), td->memid);
}
}
}
@@ -420,9 +417,7 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
tld->heap_backing = bheap;
tld->heaps = NULL;
tld->segments.subproc = &mi_subproc_default;
- tld->segments.stats = &tld->stats;
- tld->segments.os = &tld->os;
- tld->os.stats = &tld->stats;
+ tld->segments.stats = &tld->stats;
}
// Free the thread local default heap (called from `mi_thread_done`)
@@ -709,7 +704,7 @@ void mi_cdecl _mi_process_done(void) {
if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
mi_collect(true /* force */);
_mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!)
- _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats);
+ _mi_arena_unsafe_destroy_all();
}
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
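
The `init.c` hunks are the structural counterpart of the os.c change below: since the OS layer no longer needs a per-thread stats sink, `mi_tld_t` drops its `os` sub-structure, every static initializer loses one field, and `_mi_tld_init` sheds the back-pointer bookkeeping. The shape of the simplification, in miniature (hypothetical much-reduced types):

    #include <stdio.h>

    typedef struct stats_s { long allocs; } stats_t;

    // before: the tld carried an os sub-tld whose only job was pointing back at the stats
    // after : the os layer uses a global stats object, so only the stats field remains
    typedef struct tld_s { stats_t stats; } tld_t;

    static tld_t tld_main = { { 0 } };   // one field fewer to initialize and keep consistent

    int main(void) {
      tld_main.stats.allocs++;
      printf("allocs=%ld\n", tld_main.stats.allocs);
      return 0;
    }
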
diff --git a/src/os.c b/src/os.c
index 967f5663..6a884fd0 100644
--- a/src/os.c
+++ b/src/os.c
@@ -9,6 +9,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc/atomic.h"
#include "mimalloc/prim.h"
+#define os_stats (&_mi_stats_main)
/* -----------------------------------------------------------
Initialization.
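
The single added line above carries most of the os.c diff: `os_stats` makes the main stats object look like a local `stats` pointer, so every `stats->x` update below can become `os_stats->x` and the defensive `if (stats == NULL) stats = &_mi_stats_main;` checks disappear. The trick in isolation (sketch with reduced types):

    #include <stdio.h>

    typedef struct stats_s { long reserved; long committed; } stats_t;

    static stats_t stats_main;        // stands in for _mi_stats_main

    #define os_stats (&stats_main)    // same idea as `#define os_stats (&_mi_stats_main)`

    int main(void) {
      os_stats->reserved  += 4096;    // reads like a pointer dereference...
      os_stats->committed += 4096;    // ...but resolves to the global at compile time
      printf("reserved=%ld committed=%ld\n", os_stats->reserved, os_stats->committed);
      return 0;
    }
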
@@ -85,8 +86,8 @@ void _mi_os_init(void) {
/* -----------------------------------------------------------
Util
-------------------------------------------------------------- */
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
+bool _mi_os_decommit(void* addr, size_t size);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero);
/* -----------------------------------------------------------
@@ -146,23 +147,20 @@ void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
Free memory
-------------------------------------------------------------- */
-static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats);
+static void mi_os_free_huge_os_pages(void* p, size_t size);
-static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) {
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
+static void mi_os_prim_free(void* addr, size_t size, bool still_committed) {
mi_assert_internal((size % _mi_os_page_size()) == 0);
if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
int err = _mi_prim_free(addr, size);
if (err != 0) {
_mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
}
- if (still_committed) { _mi_stat_decrease(&stats->committed, size); }
- _mi_stat_decrease(&stats->reserved, size);
+ if (still_committed) { _mi_stat_decrease(&os_stats->committed, size); }
+ _mi_stat_decrease(&os_stats->reserved, size);
}
-void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats) {
- if (stats == NULL) stats = &_mi_stats_main;
+void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid) {
if (mi_memkind_is_os(memid.memkind)) {
size_t csize = _mi_os_good_alloc_size(size);
void* base = addr;
@@ -176,10 +174,10 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
// free it
if (memid.memkind == MI_MEM_OS_HUGE) {
mi_assert(memid.is_pinned);
- mi_os_free_huge_os_pages(base, csize, stats);
+ mi_os_free_huge_os_pages(base, csize);
}
else {
- mi_os_prim_free(base, csize, still_committed, stats);
+ mi_os_prim_free(base, csize, still_committed);
}
}
else {
@@ -188,9 +186,8 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
}
}
-void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
- if (stats == NULL) stats = &_mi_stats_main;
- _mi_os_free_ex(p, size, true, memid, stats);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid) {
+ _mi_os_free_ex(p, size, true, memid);
}
@@ -200,7 +197,7 @@ void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
// Also `hint_addr` is a hint and may be ignored.
-static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) {
+static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(is_zero != NULL);
mi_assert_internal(is_large != NULL);
@@ -214,13 +211,13 @@ static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignm
_mi_warning_message("unable to allocate OS memory (error: %d (0x%x), addr: %p, size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, hint_addr, size, try_alignment, commit, allow_large);
}
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- mi_stat_counter_increase(stats->mmap_calls, 1);
+
+ mi_stat_counter_increase(os_stats->mmap_calls, 1);
if (p != NULL) {
- _mi_stat_increase(&stats->reserved, size);
+ _mi_stat_increase(&os_stats->reserved, size);
if (commit) {
- _mi_stat_increase(&stats->committed, size);
+ _mi_stat_increase(&os_stats->committed, size);
// seems needed for asan (or `mimalloc-test-api` fails)
#ifdef MI_TRACK_ASAN
if (*is_zero) { mi_track_mem_defined(p,size); }
@@ -231,14 +228,14 @@ static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignm
return p;
}
-static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) {
- return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero, tld_stats);
+static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) {
+ return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero);
}
// Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned.
-static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) {
+static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base) {
mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
mi_assert_internal(is_large != NULL);
@@ -249,7 +246,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
size = _mi_align_up(size, _mi_os_page_size());
// try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD)
- void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
+ void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero);
if (p == NULL) return NULL;
// aligned already?
@@ -261,13 +258,13 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
#if !MI_TRACK_ASAN
_mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
#endif
- mi_os_prim_free(p, size, commit, stats);
+ mi_os_prim_free(p, size, commit);
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
const size_t over_size = size + alignment;
if (!mi_os_mem_config.has_partial_free) { // win32 VirtualAlloc cannot free parts of an allocated block
// over-allocate uncommitted (virtual) memory
- p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
+ p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero);
if (p == NULL) return NULL;
// set p to the aligned part in the full region
@@ -278,12 +275,12 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
// explicitly commit only the aligned part
if (commit) {
- _mi_os_commit(p, size, NULL, stats);
+ _mi_os_commit(p, size, NULL);
}
}
else { // mmap can free inside an allocation
// overallocate...
- p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
+ p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero);
if (p == NULL) return NULL;
// and selectively unmap parts around the over-allocated area.
@@ -292,8 +289,8 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
size_t mid_size = _mi_align_up(size, _mi_os_page_size());
size_t post_size = over_size - pre_size - mid_size;
mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size);
- if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit, stats); }
- if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
+ if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit); }
+ if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit); }
// we can return the aligned pointer on `mmap` systems
p = aligned_p;
*base = aligned_p; // since we freed the pre part, `*base == p`.
@@ -309,33 +306,31 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
OS API: alloc and alloc_aligned
----------------------------------------------------------- */
-void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
+void* _mi_os_alloc(size_t size, mi_memid_t* memid) {
*memid = _mi_memid_none();
if (size == 0) return NULL;
- if (stats == NULL) stats = &_mi_stats_main;
size = _mi_os_good_alloc_size(size);
bool os_is_large = false;
bool os_is_zero = false;
- void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
+ void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero);
if (p != NULL) {
*memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
}
return p;
}
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats)
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid)
{
MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
*memid = _mi_memid_none();
if (size == 0) return NULL;
- if (stats == NULL) stats = &_mi_stats_main;
size = _mi_os_good_alloc_size(size);
alignment = _mi_align_up(alignment, _mi_os_page_size());
bool os_is_large = false;
bool os_is_zero = false;
void* os_base = NULL;
- void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, stats );
+ void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base );
if (p != NULL) {
*memid = _mi_memid_create_os(commit, os_is_zero, os_is_large);
memid->mem.os.base = os_base;
@@ -352,29 +347,28 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
to use the actual start of the memory region.
----------------------------------------------------------- */
-void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) {
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid) {
mi_assert(offset <= MI_SEGMENT_SIZE);
mi_assert(offset <= size);
mi_assert((alignment % _mi_os_page_size()) == 0);
*memid = _mi_memid_none();
- if (stats == NULL) stats = &_mi_stats_main;
if (offset > MI_SEGMENT_SIZE) return NULL;
if (offset == 0) {
// regular aligned allocation
- return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, stats);
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
}
else {
// overallocate to align at an offset
const size_t extra = _mi_align_up(offset, alignment) - offset;
const size_t oversize = size + extra;
- void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, stats);
+ void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid);
if (start == NULL) return NULL;
void* const p = (uint8_t*)start + extra;
mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
// decommit the overallocation at the start
if (commit && extra > _mi_os_page_size()) {
- _mi_os_decommit(start, extra, stats);
+ _mi_os_decommit(start, extra);
}
return p;
}
@@ -408,12 +402,10 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
return mi_os_page_align_areax(true, addr, size, newsize);
}
-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero) {
if (is_zero != NULL) { *is_zero = false; }
- _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit
- _mi_stat_counter_increase(&stats->commit_calls, 1);
+ _mi_stat_increase(&os_stats->committed, size); // use size for precise commit vs. decommit
+ _mi_stat_counter_increase(&os_stats->commit_calls, 1);
// page align range
size_t csize;
@@ -439,11 +431,8 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
return true;
}
-static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) {
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- mi_assert_internal(needs_recommit!=NULL);
- _mi_stat_decrease(&stats->committed, size);
+static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit) {
+  mi_assert_internal(needs_recommit!=NULL);
+ _mi_stat_decrease(&os_stats->committed, size);
// page align
size_t csize;
@@ -460,9 +449,9 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
return (err == 0);
}
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
+bool _mi_os_decommit(void* addr, size_t size) {
bool needs_recommit;
- return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats);
+ return mi_os_decommit_ex(addr, size, &needs_recommit);
}
@@ -470,13 +459,13 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
+bool _mi_os_reset(void* addr, size_t size) {
// page align conservatively within the range
size_t csize;
void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr)
- _mi_stat_increase(&stats->reset, csize);
- _mi_stat_counter_increase(&stats->reset_calls, 1);
+ _mi_stat_increase(&os_stats->reset, csize);
+ _mi_stat_counter_increase(&os_stats->reset_calls, 1);
#if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN
memset(start, 0, csize); // pretend it is eagerly reset
@@ -492,22 +481,22 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
-bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset)
{
if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed?
- _mi_stat_counter_increase(&stats->purge_calls, 1);
- _mi_stat_increase(&stats->purged, size);
+ _mi_stat_counter_increase(&os_stats->purge_calls, 1);
+ _mi_stat_increase(&os_stats->purged, size);
if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
!_mi_preloading()) // don't decommit during preloading (unsafe)
{
bool needs_recommit = true;
- mi_os_decommit_ex(p, size, &needs_recommit, stats);
+ mi_os_decommit_ex(p, size, &needs_recommit);
return needs_recommit;
}
else {
if (allow_reset) { // resetting is sometimes not allowed when the range is not fully committed
- _mi_os_reset(p, size, stats);
+ _mi_os_reset(p, size);
}
return false; // needs no recommit
}
@@ -515,8 +504,8 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
// either resets or decommits memory, returns true if the memory needs
// to be recommitted if it is to be re-used later on.
-bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) {
- return _mi_os_purge_ex(p, size, true, stats);
+bool _mi_os_purge(void* p, size_t size) {
+ return _mi_os_purge_ex(p, size, true);
}
// Protect a region in memory to be not accessible.
@@ -623,15 +612,15 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// no success, issue a warning and break
if (p != NULL) {
_mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
- mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main);
+ mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true);
}
break;
}
// success, record it
page++; // increase before timeout check (see issue #711)
- _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
- _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
+ _mi_stat_increase(&os_stats->committed, MI_HUGE_OS_PAGE_SIZE);
+ _mi_stat_increase(&os_stats->reserved, MI_HUGE_OS_PAGE_SIZE);
// check for timeout
if (max_msecs > 0) {
@@ -665,11 +654,11 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// free every huge page in a range individually (as we allocated per page)
// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
-static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) {
+static void mi_os_free_huge_os_pages(void* p, size_t size) {
if (p==NULL || size==0) return;
uint8_t* base = (uint8_t*)p;
while (size >= MI_HUGE_OS_PAGE_SIZE) {
- mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats);
+ mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true);
size -= MI_HUGE_OS_PAGE_SIZE;
base += MI_HUGE_OS_PAGE_SIZE;
}
@@ -698,8 +687,7 @@ size_t _mi_os_numa_node_count_get(void) {
return count;
}
-int _mi_os_numa_node_get(mi_os_tld_t* tld) {
- MI_UNUSED(tld);
+int _mi_os_numa_node_get(void) {
size_t numa_count = _mi_os_numa_node_count();
if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
// never more than the node count and >= 0
diff --git a/src/page.c b/src/page.c
index 06f7ddaf..d4db9fba 100644
--- a/src/page.c
+++ b/src/page.c
@@ -276,7 +276,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
mi_assert_internal(mi_heap_contains_queue(heap, pq));
mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
#endif
- mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
+ mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments);
if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
return NULL;
diff --git a/src/segment-map.c b/src/segment-map.c
index 2c3964fe..399f221c 100644
--- a/src/segment-map.c
+++ b/src/segment-map.c
@@ -55,11 +55,11 @@ static mi_segmap_part_t* mi_segment_map_index_of(const mi_segment_t* segment, bo
if (part == NULL) {
if (!create_on_demand) return NULL;
mi_memid_t memid;
- part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid, NULL);
+ part = (mi_segmap_part_t*)_mi_os_alloc(sizeof(mi_segmap_part_t), &memid);
if (part == NULL) return NULL;
mi_segmap_part_t* expected = NULL;
if (!mi_atomic_cas_ptr_strong_release(mi_segmap_part_t, &mi_segment_map[segindex], &expected, part)) {
- _mi_os_free(part, sizeof(mi_segmap_part_t), memid, NULL);
+ _mi_os_free(part, sizeof(mi_segmap_part_t), memid);
part = expected;
if (part == NULL) return NULL;
}
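
The segment-map hunk is a textbook lock-free install: allocate a candidate off to the side, try to CAS it into the shared slot, and if another thread got there first, free the candidate and adopt the winner's. A stand-alone sketch of the pattern (not mimalloc's types):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct part_s { int index; } part_t;

    static _Atomic(part_t*) slot;       // shared, created on demand

    static part_t* get_or_create(int index) {
      part_t* part = atomic_load(&slot);
      if (part != NULL) return part;    // fast path: already installed
      part = malloc(sizeof(part_t));    // build a candidate privately
      if (part == NULL) return NULL;
      part->index = index;
      part_t* expected = NULL;
      if (!atomic_compare_exchange_strong(&slot, &expected, part)) {
        free(part);                     // lost the race: discard our candidate...
        part = expected;                // ...and use the one that won
      }
      return part;
    }

    int main(void) {
      part_t* part = get_or_create(3);
      printf("index=%d\n", part ? part->index : -1);
      return 0;
    }
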
diff --git a/src/segment.c b/src/segment.c
index b8810167..a93ea218 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -17,7 +17,7 @@ terms of the MIT license. A copy of the license can be found in the file
// -------------------------------------------------------------------
-static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats);
+static void mi_segment_try_purge(mi_segment_t* segment, bool force);
// -------------------------------------------------------------------
@@ -410,7 +410,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
const size_t size = mi_segment_size(segment);
const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
- _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats);
+ _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid);
}
/* -----------------------------------------------------------
@@ -467,7 +467,7 @@ static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uin
mi_commit_mask_create(bitidx, bitcount, cm);
}
-static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
+static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size) {
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
// commit liberal
@@ -483,7 +483,7 @@ static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi
mi_commit_mask_t cmask;
mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
_mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
- if (!_mi_os_commit(start, full_size, &is_zero, stats)) return false;
+ if (!_mi_os_commit(start, full_size, &is_zero)) return false;
mi_commit_mask_set(&segment->commit_mask, &mask);
}
@@ -497,15 +497,15 @@ static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi
return true;
}
-static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
+static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size) {
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
// note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow
if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask)) return true; // fully committed
mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
- return mi_segment_commit(segment, p, size, stats);
+ return mi_segment_commit(segment, p, size);
}
-static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
+static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size) {
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask));
if (!segment->allow_purge) return true;
@@ -520,7 +520,7 @@ static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_
// purging
mi_assert_internal((void*)start != (void*)segment);
mi_assert_internal(segment->allow_decommit);
- const bool decommitted = _mi_os_purge(start, full_size, stats); // reset or decommit
+ const bool decommitted = _mi_os_purge(start, full_size); // reset or decommit
if (decommitted) {
mi_commit_mask_t cmask;
mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
@@ -534,11 +534,11 @@ static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_
return true;
}
-static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
+static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size) {
if (!segment->allow_purge) return;
if (mi_option_get(mi_option_purge_delay) == 0) {
- mi_segment_purge(segment, p, size, stats);
+ mi_segment_purge(segment, p, size);
}
else {
// register for future purge in the purge mask
@@ -561,7 +561,7 @@ static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t
else if (segment->purge_expire <= now) {
// previous purge mask already expired
if (segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) {
- mi_segment_try_purge(segment, true, stats);
+ mi_segment_try_purge(segment, true);
}
else {
segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of frees
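
`mi_segment_schedule_purge` maintains one `purge_expire` deadline per segment: the first scheduled purge starts the clock, a free arriving long after expiry forces the purge, and a free arriving just past expiry merely extends the deadline so a burst of frees collapses into a single purge. The timing policy, schematically (sketch with a fake clock and stand-in options):

    #include <stdio.h>

    static long purge_delay  = 100;  // stands in for mi_option_get(mi_option_purge_delay)
    static long extend_delay = 10;   // stands in for mi_option_get(mi_option_purge_extend_delay)
    static long purge_expire = 0;    // 0 means: no purge scheduled

    static void try_purge(long now) { printf("purge at t=%ld\n", now); purge_expire = 0; }

    static void schedule_purge(long now) {
      if (purge_expire == 0) {
        purge_expire = now + purge_delay;          // first free: start the clock
      }
      else if (purge_expire <= now) {
        if (purge_expire + extend_delay <= now) {
          try_purge(now);                          // long expired: purge right away
        }
        else {
          purge_expire = now + extend_delay;       // just expired: wait a tiny bit longer
        }
      }                                            // not yet expired: keep the earlier deadline
    }

    int main(void) {
      schedule_purge(0);    // schedules a purge for t=100
      schedule_purge(105);  // just past expiry: extends the deadline to t=115
      schedule_purge(130);  // well past expiry: purges now
      return 0;
    }
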
@@ -574,7 +574,7 @@ static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t
}
}
-static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats) {
+static void mi_segment_try_purge(mi_segment_t* segment, bool force) {
if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return;
mi_msecs_t now = _mi_clock_now();
if (!force && now < segment->purge_expire) return;
@@ -590,7 +590,7 @@ static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t*
if (count > 0) {
uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE);
size_t size = count * MI_COMMIT_SIZE;
- mi_segment_purge(segment, p, size, stats);
+ mi_segment_purge(segment, p, size);
}
}
mi_commit_mask_foreach_end()
@@ -599,8 +599,8 @@ static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t*
// called from `mi_heap_collect_ex`
// this can be called per-page so it is important that try_purge has a fast exit path
-void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
- mi_segment_try_purge(segment, force, tld->stats);
+void _mi_segment_collect(mi_segment_t* segment, bool force) {
+ mi_segment_try_purge(segment, force);
}
/* -----------------------------------------------------------
@@ -635,7 +635,7 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size
// perhaps decommit
if (allow_purge) {
- mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats);
+ mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE);
}
// and push it on the free page queue (if it was not a huge page)
@@ -712,13 +712,13 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
----------------------------------------------------------- */
// Note: may still return NULL if committing the memory failed
-static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) {
+static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count) {
mi_assert_internal(slice_index < segment->slice_entries);
mi_slice_t* const slice = &segment->slices[slice_index];
mi_assert_internal(slice->block_size==0 || slice->block_size==1);
// commit before changing the slice data
- if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) {
+ if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE)) {
return NULL; // commit failed!
}
@@ -791,7 +791,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
mi_segment_slice_split(segment, slice, slice_count, tld);
}
mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0);
- mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld);
+ mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count);
if (page == NULL) {
// commit failed; return NULL but first restore the slice
mi_segment_span_free_coalesce(slice, tld);
@@ -814,7 +814,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id,
size_t* psegment_slices, size_t* pinfo_slices,
- bool commit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+ bool commit, mi_segments_tld_t* tld)
{
mi_memid_t memid;
@@ -835,7 +835,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
}
const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
- mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, os_tld);
+ mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid);
if (segment == NULL) {
return NULL; // failed to allocate
}
@@ -851,8 +851,8 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
mi_assert_internal(commit_needed>0);
mi_commit_mask_create(0, commit_needed, &commit_mask);
mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE);
- if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) {
- _mi_arena_free(segment,segment_size,0,memid,tld->stats);
+ if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL)) {
+ _mi_arena_free(segment,segment_size,0,memid);
return NULL;
}
}
@@ -874,7 +874,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
-static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
+static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_page_t** huge_page)
{
mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
@@ -892,7 +892,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
// Allocate the segment from the OS
mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id,
- &segment_slices, &info_slices, commit, tld, os_tld);
+ &segment_slices, &info_slices, commit, tld);
if (segment == NULL) return NULL;
// zero the segment info? -- not always needed as it may be zero initialized from the OS
@@ -923,14 +923,14 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
size_t os_pagesize = _mi_os_page_size();
_mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
- mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats);
+ mi_segment_ensure_committed(segment, end, os_pagesize);
_mi_os_protect(end, os_pagesize);
if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-(
guard_slices = 1;
}
// reserve first slices for segment info
- mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld);
+ mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices);
mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance
mi_assert_internal(segment->used == 1);
segment->used = 0; // don't count our internal slices towards usage
@@ -944,7 +944,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
mi_assert_internal(huge_page!=NULL);
mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask));
mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask));
- *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld);
+ *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices);
mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance
}
@@ -1011,7 +1011,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) {
size_t psize;
uint8_t* start = _mi_segment_page_start(segment, page, &psize);
- _mi_os_reset(start, psize, tld->stats);
+ _mi_os_reset(start, psize);
}
// zero the page data, but not the segment fields and heap tag
@@ -1050,7 +1050,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
}
else {
// perform delayed purges
- mi_segment_try_purge(segment, false /* force? */, tld->stats);
+ mi_segment_try_purge(segment, false /* force? */);
}
}
@@ -1100,7 +1100,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
// Only abandoned segments in arena memory can be reclaimed without a free
// so if a segment is not from an arena we force purge here to be conservative.
const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge);
- mi_segment_try_purge(segment, force_purge, tld->stats);
+ mi_segment_try_purge(segment, force_purge);
// all pages in the segment are abandoned; add it to the abandoned list
_mi_stat_increase(&tld->stats->segments_abandoned, 1);
@@ -1350,7 +1350,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
}
else {
// otherwise, push it on the visited list so it is not looked at again too quickly
- mi_segment_try_purge(segment, false /* true force? */, tld->stats); // force purge if needed as we may not visit soon again
+ mi_segment_try_purge(segment, false /* true force? */); // force purge if needed as we may not visit soon again
_mi_arena_segment_mark_abandoned(segment);
}
}
@@ -1375,7 +1375,7 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
else {
// otherwise, purge if needed and push on the visited list
// note: forced purge can be expensive if many threads are destroyed/created as in mstress.
- mi_segment_try_purge(segment, force, tld->stats);
+ mi_segment_try_purge(segment, force);
_mi_arena_segment_mark_abandoned(segment);
}
}
@@ -1434,7 +1434,7 @@ static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* t
}
else {
// perform delayed purges
- mi_segment_try_purge(segment, false /* force? */, tld->stats);
+ mi_segment_try_purge(segment, false /* force? */);
}
}
@@ -1483,7 +1483,7 @@ void mi_collect_reduce(size_t target_size) mi_attr_noexcept {
Reclaim or allocate
----------------------------------------------------------- */
-static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld)
{
mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
@@ -1503,7 +1503,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_
return segment;
}
// 2. otherwise allocate a fresh segment
- return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL);
+ return mi_segment_alloc(0, 0, heap->arena_id, tld, NULL);
}
@@ -1511,7 +1511,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_
Page allocation
----------------------------------------------------------- */
-static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld)
{
mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE);
@@ -1522,18 +1522,18 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki
mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
if (page==NULL) {
// no free page, allocate a new segment and try again
- if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) {
+ if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld) == NULL) {
// OOM or reclaimed a good page in the heap
return NULL;
}
else {
// otherwise try again
- return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld);
+ return mi_segments_page_alloc(heap, page_kind, required, block_size, tld);
}
}
mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size);
mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id());
- mi_segment_try_purge(_mi_ptr_segment(page), false, tld->stats);
+ mi_segment_try_purge(_mi_ptr_segment(page), false);
return page;
}
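
The retry above is a plain tail call; an equivalent loop form (a sketch of the same control flow, not a proposed rewrite):

mi_page_t* page;
while ((page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld)) == NULL) {
  // no free page: reclaim an abandoned segment or allocate a fresh one, then retry;
  // NULL means OOM, or that a good page was already reclaimed directly into the heap.
  if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld) == NULL) return NULL;
}
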
@@ -1543,10 +1543,10 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki
Huge page allocation
----------------------------------------------------------- */
-static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld)
{
mi_page_t* page = NULL;
- mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,os_tld,&page);
+ mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,&page);
if (segment == NULL || page==NULL) return NULL;
mi_assert_internal(segment->used==1);
mi_assert_internal(mi_page_block_size(page) >= size);
@@ -1568,7 +1568,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
mi_assert_internal(psize - (aligned_p - start) >= size);
uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
ptrdiff_t decommit_size = aligned_p - decommit_start;
- _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
+ _mi_os_reset(decommit_start, decommit_size); // note: cannot use segment_decommit on huge segments
}
return page;
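
For intuition on the reset range: everything between the free-list block header at `start` and the aligned user pointer is handed back to the OS. With assumed example numbers (illustrative only, not mimalloc's actual constants):

// start              = 0x10000   (page data start)
// aligned_p          = 0x20000   (first address satisfying page_alignment)
// sizeof(mi_block_t) = 16        (assumed for illustration)
// decommit_start     = start + 16 = 0x10010   (keep the free-list link committed)
// decommit_size      = aligned_p - decommit_start = 0xFFF0 bytes reset
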
@@ -1615,7 +1615,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
if (csize > sizeof(mi_block_t)) {
csize = csize - sizeof(mi_block_t);
uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
- _mi_os_reset(p, csize, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
+ _mi_os_reset(p, csize); // note: cannot use segment_decommit on huge segments
}
}
}
@@ -1624,25 +1624,25 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
/* -----------------------------------------------------------
Page allocation and free
----------------------------------------------------------- */
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld) {
mi_page_t* page;
if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
mi_assert_internal(_mi_is_power_of_two(page_alignment));
mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
- page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld);
+ page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld);
}
else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
- page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld);
+ page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld);
}
else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
- page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld);
+ page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld);
}
else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
- page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld);
+ page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld);
}
else {
- page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld);
+ page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld);
}
mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid));
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
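
In summary, `_mi_segment_page_alloc` now dispatches on `tld` plus the block size alone; restating the branches above (thresholds are the internal `MI_*_OBJ_SIZE_MAX` constants):

page_alignment > MI_BLOCK_ALIGNMENT_MAX -> mi_segment_huge_page_alloc (segment-aligned)
block_size <= MI_SMALL_OBJ_SIZE_MAX     -> mi_segments_page_alloc(MI_PAGE_SMALL)
block_size <= MI_MEDIUM_OBJ_SIZE_MAX    -> mi_segments_page_alloc(MI_PAGE_MEDIUM)
block_size <= MI_LARGE_OBJ_SIZE_MAX     -> mi_segments_page_alloc(MI_PAGE_LARGE)
otherwise                               -> mi_segment_huge_page_alloc
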
diff --git a/test/main-override-static.c b/test/main-override-static.c
index ccaba543..4ead333a 100644
--- a/test/main-override-static.c
+++ b/test/main-override-static.c
@@ -316,11 +316,11 @@ static void test_large_pages(void) {
static inline uint8_t mi_bsr32(uint32_t x);
#if defined(_MSC_VER)
-#include <windows.h>
+//#include <windows.h>
#include <intrin.h>
static inline uint8_t mi_bsr32(uint32_t x) {
- uint32_t idx;
- _BitScanReverse((DWORD*)&idx, x);
+ unsigned long idx;  // _BitScanReverse writes through an unsigned long* (DWORD*)
+ _BitScanReverse(&idx, x);
return idx;
}
#elif defined(__GNUC__) || defined(__clang__)
@@ -344,7 +344,7 @@ static inline uint8_t mi_bsr32(uint32_t x) {
}
#endif
-/*
+
// Bit scan reverse: return the index of the highest bit.
uint8_t _mi_bsr(uintptr_t x) {
if (x == 0) return 0;
@@ -357,7 +357,7 @@ uint8_t _mi_bsr(uintptr_t x) {
# error "define bsr for non-32 or 64-bit platforms"
#endif
}
-*/
+
static inline size_t _mi_wsize_from_size(size_t size) {
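
For reference, the re-enabled `_mi_bsr` builds on `mi_bsr32`; a portable, standalone version for platforms without the MSVC or GCC/clang intrinsics used above (a sketch, not part of the patch):

#include <stdint.h>

// bit scan reverse: index of the highest set bit (caller ensures x != 0)
static inline uint8_t bsr32_portable(uint32_t x) {
  uint8_t idx = 0;
  while (x >>= 1) idx++;
  return idx;   // e.g. bsr32_portable(0x80) == 7
}
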
@@ -434,11 +434,20 @@ static inline uint8_t _mi_bin4(size_t size) {
return bin;
}
-static size_t _mi_binx4(size_t bsize) {
- if (bsize==0) return 0;
- uint8_t b = mi_bsr32((uint32_t)bsize);
- if (b <= 1) return bsize;
- size_t bin = ((b << 1) | (bsize >> (b - 1))&0x01);
+static size_t _mi_binx4(size_t wsize) {
+ size_t bin;
+ if (wsize <= 1) {
+ bin = 1;
+ }
+ else if (wsize <= 8) {
+ // bin = (wsize+1)&~1; // round to double word sizes
+ bin = (uint8_t)wsize;
+ }
+ else {
+ uint8_t b = mi_bsr32((uint32_t)wsize);
+ if (b <= 1) return wsize;
+ bin = ((b << 1) | (wsize >> (b - 1))&0x01) + 3;
+ }
return bin;
}
@@ -450,22 +459,40 @@ static size_t _mi_binx8(size_t bsize) {
return bin;
}
+
+static inline size_t mi_bin(size_t wsize) {
+ uint8_t bin;
+ if (wsize <= 1) {
+ bin = 1;
+ }
+ else if (wsize <= 8) {
+ // bin = (wsize+1)&~1; // round to double word sizes
+ bin = (uint8_t)wsize;
+ }
+ else {
+ wsize--;
+ // find the highest bit
+ uint8_t b = (uint8_t)mi_bsr32((uint32_t)wsize); // note: wsize != 0
+ // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
+ // - adjust with 3 because we do not round the first 8 sizes
+ // which each get an exact bin
+ bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
+ }
+ return bin;
+}
+
+
static void mi_bins(void) {
//printf(" QNULL(1), /* 0 */ \\\n ");
size_t last_bin = 0;
- size_t min_bsize = 0;
- size_t last_bsize = 0;
- for (size_t bsize = 1; bsize < 2*1024; bsize++) {
- size_t size = bsize * 64 * 1024;
- size_t bin = _mi_binx8(bsize);
+ for (size_t wsize = 1; wsize <= (4*1024*1024) / 8 + 1024; wsize++) {
+ size_t bin = mi_bin(wsize);
if (bin != last_bin) {
- printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_bsize, last_bsize, last_bin);
- //printf("QNULL(%6zd), ", wsize);
- //if (last_bin%8 == 0) printf("/* %i */ \\\n ", last_bin);
+ //printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_wsize, last_wsize, last_bin);
+ printf("QNULL(%6zd), ", wsize-1);
+ if (last_bin%8 == 0) printf("/* %zu */ \\\n ", last_bin);
last_bin = bin;
- min_bsize = bsize;
}
- last_bsize = bsize;
}
}
#endif
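
To see the binning in action, a standalone demo in the same spirit (the bin formula is copied from `mi_bin` above; `bsr32_portable` is the portable sketch from earlier, and this program is illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static inline uint8_t bsr32_portable(uint32_t x) {
  uint8_t idx = 0;
  while (x >>= 1) idx++;
  return idx;
}

static size_t bin_of(size_t wsize) {
  if (wsize <= 1) return 1;
  if (wsize <= 8) return wsize;                 // first 8 word sizes get exact bins
  wsize--;
  uint8_t b = bsr32_portable((uint32_t)wsize);  // highest bit; wsize != 0 here
  return ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
}

int main(void) {
  // e.g. wsize 9 and 10 share bin 9, 11 and 12 share bin 10, ...
  // (~12.5% worst-case internal fragmentation, as the comment above notes)
  for (size_t w = 1; w <= 20; w++) {
    printf("wsize %2zu -> bin %zu\n", w, bin_of(w));
  }
  return 0;
}
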
diff --git a/test/main-override.cpp b/test/main-override.cpp
index fc9c3f22..15da6d4d 100644
--- a/test/main-override.cpp
+++ b/test/main-override.cpp
@@ -47,8 +47,8 @@ static void test_stl_allocators();
int main() {
- // mi_stats_reset(); // ignore earlier allocations
-
+ mi_stats_reset(); // ignore earlier allocations
+ various_tests();
test_mixed1();
//test_std_string();
//test_thread_local();
diff --git a/test/test-stress.c b/test/test-stress.c
index 0e8b45a2..b1eedba8 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -23,7 +23,7 @@ terms of the MIT license.
#include
// #define MI_GUARDED
-// #define USE_STD_MALLOC
+#define USE_STD_MALLOC
// > mimalloc-test-stress [THREADS] [SCALE] [ITER]
//
@@ -36,13 +36,13 @@ static int ITER = 400;
static int THREADS = 8;
static int SCALE = 25;
static int ITER = 20;
-#elif defined(xMI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
+#elif defined(MI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits
static int THREADS = 8;
static int SCALE = 10;
static int ITER = 10;
#else
static int THREADS = 32; // more repeatable if THREADS <= #processors
-static int SCALE = 25; // scaling factor
+static int SCALE = 50; // scaling factor
static int ITER = 50; // N full iterations destructing and re-creating all threads
#endif
@@ -50,7 +50,7 @@ static int ITER = 50; // N full iterations destructing and re-creating a
#define STRESS // undefine for leak test
-static bool allow_large_objects = true; // allow very large objects? (set to `true` if SCALE>100)
+static bool allow_large_objects = false; // allow very large objects? (set to `true` if SCALE>100)
static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`?
static bool main_participates = false; // main thread participates as a worker too
@@ -326,6 +326,7 @@ int main(int argc, char** argv) {
#endif
mi_stats_print(NULL);
#endif
+ mi_stats_print(NULL);
//bench_end_program();
return 0;
}