diff --git a/.gitignore b/.gitignore
index 3639d324..f8b7f5eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ ide/vs20??/*.opendb
ide/vs20??/*.user
ide/vs20??/*.vcxproj.filters
ide/vs20??/.vs
+ide/vs20??/VTune*
out/
docs/
*.zip
diff --git a/ide/vs2022/mimalloc-override-test.vcxproj b/ide/vs2022/mimalloc-override-test.vcxproj
new file mode 100644
index 00000000..a3c56f7b
--- /dev/null
+++ b/ide/vs2022/mimalloc-override-test.vcxproj
@@ -0,0 +1,190 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ 15.0
+ {FEF7868F-750E-4C21-A04D-22707CC66879}
+ mimalloc-override-test
+ 10.0
+ mimalloc-override-test
+
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ MultiThreadedDebugDLL
+ Sync
+ Default
+ false
+
+
+ Console
+ kernel32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ MultiThreadedDebugDLL
+ Sync
+ Default
+ false
+
+
+ Console
+
+
+ kernel32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ MultiThreadedDLL
+
+
+ true
+ true
+ Console
+ kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ MultiThreadedDLL
+
+
+ true
+ true
+ Console
+
+
+ kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
+
+
+
+
+
+
+
+
+
+
+
+ {abb5eae7-b3e6-432e-b636-333449892ea7}
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc-override.vcxproj b/ide/vs2022/mimalloc-override.vcxproj
new file mode 100644
index 00000000..f10376c7
--- /dev/null
+++ b/ide/vs2022/mimalloc-override.vcxproj
@@ -0,0 +1,256 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ 15.0
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}
+ mimalloc-override
+ 10.0
+ mimalloc-override
+
+
+
+ DynamicLibrary
+ true
+ v143
+
+
+ DynamicLibrary
+ false
+ v143
+
+
+ DynamicLibrary
+ true
+ v143
+
+
+ DynamicLibrary
+ false
+ v143
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc-override
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc-override
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc-override
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .dll
+ mimalloc-override
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ../../include
+ MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
+ MultiThreadedDebugDLL
+ false
+ Default
+
+
+ $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies)
+
+
+
+
+ Default
+ false
+
+
+ COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath)
+
+
+ Copy mimalloc-redirect32.dll to the output directory
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ../../include
+ MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
+ MultiThreadedDebugDLL
+ false
+ Default
+
+
+ $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies)
+
+
+
+
+ Default
+ false
+
+
+ COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath)
+
+
+ copy mimalloc-redirect.dll to the output directory
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ ../../include
+ MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ MultiThreadedDLL
+ Default
+ false
+
+
+ true
+ true
+ $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies)
+
+
+ Default
+ false
+
+
+ COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath)
+
+
+ Copy mimalloc-redirect32.dll to the output directory
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ ../../include
+ MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ MultiThreadedDLL
+ Default
+ false
+
+
+ true
+ true
+ $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies)
+
+
+ Default
+ false
+
+
+ COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath)
+
+
+ copy mimalloc-redirect.dll to the output directory
+
+
+
+
+
+
+
+
+
+
+
+
+
+ false
+ false
+ false
+ false
+
+
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc-test-api.vcxproj b/ide/vs2022/mimalloc-test-api.vcxproj
new file mode 100644
index 00000000..6023c251
--- /dev/null
+++ b/ide/vs2022/mimalloc-test-api.vcxproj
@@ -0,0 +1,155 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ 15.0
+ {FFF7958F-750E-4C21-A04D-22707CC66878}
+ mimalloc-test-api
+ 10.0
+ mimalloc-test-api
+
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+
+
+ true
+ true
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+
+
+ true
+ true
+ Console
+
+
+
+
+
+
+
+
+ {abb5eae7-b3e6-432e-b636-333449892ea6}
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc-test-stress.vcxproj b/ide/vs2022/mimalloc-test-stress.vcxproj
new file mode 100644
index 00000000..c7e820df
--- /dev/null
+++ b/ide/vs2022/mimalloc-test-stress.vcxproj
@@ -0,0 +1,159 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ 15.0
+ {FEF7958F-750E-4C21-A04D-22707CC66878}
+ mimalloc-test-stress
+ 10.0
+ mimalloc-test-stress
+
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+
+
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+
+
+ true
+ true
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ %(PreprocessorDefinitions);NDEBUG
+
+
+ true
+ true
+ Console
+
+
+
+
+ false
+ false
+ false
+ false
+
+
+
+
+ {abb5eae7-b3e6-432e-b636-333449892ea6}
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc-test.vcxproj b/ide/vs2022/mimalloc-test.vcxproj
new file mode 100644
index 00000000..506dd7d4
--- /dev/null
+++ b/ide/vs2022/mimalloc-test.vcxproj
@@ -0,0 +1,158 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ 15.0
+ {FEF7858F-750E-4C21-A04D-22707CC66878}
+ mimalloctest
+ 10.0
+ mimalloc-test
+
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+ Application
+ true
+ v143
+
+
+ Application
+ false
+ v143
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ stdcpp17
+
+
+ Console
+
+
+
+
+ Level3
+ Disabled
+ true
+ true
+ ..\..\include
+ stdcpp17
+
+
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ stdcpp17
+
+
+ true
+ true
+ Console
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ true
+ ..\..\include
+ _MBCS;%(PreprocessorDefinitions);NDEBUG
+ stdcpp17
+
+
+ true
+ true
+ Console
+
+
+
+
+ {abb5eae7-b3e6-432e-b636-333449892ea6}
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ide/vs2022/mimalloc.sln b/ide/vs2022/mimalloc.sln
new file mode 100644
index 00000000..fcb938a4
--- /dev/null
+++ b/ide/vs2022/mimalloc.sln
@@ -0,0 +1,81 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 16
+VisualStudioVersion = 16.0.29709.97
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32
+ {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
+ {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A}
+ EndGlobalSection
+EndGlobal
diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
new file mode 100644
index 00000000..0a45006c
--- /dev/null
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -0,0 +1,254 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Release
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ 15.0
+ {ABB5EAE7-B3E6-432E-B636-333449892EA6}
+ mimalloc
+ 10.0
+ mimalloc
+
+
+
+ StaticLibrary
+ true
+ v143
+
+
+ StaticLibrary
+ false
+ v143
+ true
+
+
+ StaticLibrary
+ true
+ v143
+
+
+ StaticLibrary
+ false
+ v143
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
+
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
+ $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
+ .lib
+ mimalloc-static
+
+
+
+ Level4
+ Disabled
+ true
+ true
+ ../../include
+ MI_DEBUG=3;%(PreprocessorDefinitions);
+ CompileAsCpp
+ false
+ Default
+
+
+
+
+
+
+
+
+
+
+ Level4
+ Disabled
+ true
+ true
+ ../../include
+ MI_DEBUG=3;%(PreprocessorDefinitions);
+ CompileAsCpp
+ false
+ stdcpp20
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Level4
+ MaxSpeed
+ true
+ true
+ ../../include
+ %(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ false
+ Default
+ CompileAsCpp
+ true
+ Default
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+ Level4
+ MaxSpeed
+ true
+ true
+ ../../include
+ %(PreprocessorDefinitions);NDEBUG
+ AssemblyAndSourceCode
+ $(IntDir)
+ false
+ false
+ Default
+ CompileAsCpp
+ true
+ stdcpp20
+
+
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ false
+ false
+ false
+ false
+
+
+ true
+ true
+ true
+ true
+
+
+ true
+ true
+ true
+ true
+
+
+
+
+
+ false
+
+
+
+
+
+ true
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index 3a78f355..b94bf2a3 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -121,7 +121,7 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t*
// "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept mi_attr_malloc;
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
void _mi_page_unfull(mi_page_t* page);
@@ -153,12 +153,11 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
// "alloc.c"
-void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
+void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
void _mi_show_block_trace_with_predecessor(const mi_page_t* page, const mi_block_t* block, const char* msg);
#if MI_DEBUG>1
@@ -171,8 +170,11 @@ bool _mi_page_is_valid(mi_page_t* page);
// ------------------------------------------------------
#if defined(__GNUC__) || defined(__clang__)
-#define mi_unlikely(x) __builtin_expect(!!(x),false)
-#define mi_likely(x) __builtin_expect(!!(x),true)
+#define mi_unlikely(x) (__builtin_expect(!!(x),false))
+#define mi_likely(x) (__builtin_expect(!!(x),true))
+#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+#define mi_unlikely(x) (x) [[unlikely]]
+#define mi_likely(x) (x) [[likely]]
#else
#define mi_unlikely(x) (x)
#define mi_likely(x) (x)
@@ -296,8 +298,8 @@ static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
*total = count * size;
- return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
- && size > 0 && (SIZE_MAX / size) < count);
+ // note: gcc/clang optimize this to directly check the overflow flag
+ return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
}
#endif
@@ -307,8 +309,10 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
*total = size;
return false;
}
- else if (mi_unlikely(mi_mul_overflow(count, size, total))) {
+ else if mi_unlikely(mi_mul_overflow(count, size, total)) {
+ #if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
+ #endif
*total = SIZE_MAX;
return true;
}
@@ -379,7 +383,7 @@ extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate
static inline mi_heap_t* mi_get_default_heap(void) {
#if defined(MI_TLS_SLOT)
mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
- if (mi_unlikely(heap == NULL)) {
+ if mi_unlikely(heap == NULL) {
#ifdef __GNUC__
__asm(""); // prevent conditional load of the address of _mi_heap_empty
#endif
@@ -492,8 +496,8 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
// Get the block size of a page (special case for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) {
const size_t bsize = page->xblock_size;
- mi_assert_internal(bsize > 0);
- if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
+ mi_assert_internal(bsize > 0);
+ if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
return bsize;
}
else {
@@ -656,11 +660,11 @@ static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
- return (mi_unlikely(p==null) ? NULL : p);
+ return (p==null ? NULL : p);
}
static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
- uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p);
+ uintptr_t x = (uintptr_t)(p==NULL ? null : p);
return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
@@ -687,7 +691,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
mi_block_t* next = mi_block_nextx(page,block,page->keys);
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
- if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
+ if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
_mi_show_block_trace_with_predecessor(page, block, "free block");
_mi_error_message(EFAULT, "corrupted free list entry of size %zu at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
next = NULL;
@@ -787,12 +791,12 @@ size_t _mi_os_numa_node_count_get(void);
extern _Atomic(size_t) _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
- if (mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1)) return 0;
+ if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
- if (mi_likely(count>0)) return count;
+ if mi_likely(count > 0) { return count; }
else return _mi_os_numa_node_count_get();
}
@@ -1020,7 +1024,15 @@ static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
__movsb((unsigned char*)dst, (const unsigned char*)src, n);
}
else {
- memcpy(dst, src, n); // todo: use noinline?
+ memcpy(dst, src, n);
+ }
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+ if (_mi_cpu_has_fsrm) {
+ __stosb((unsigned char*)dst, 0, n);
+ }
+ else {
+ memset(dst, 0, n);
}
}
#else
@@ -1028,6 +1040,9 @@ static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
memcpy(dst, src, n);
}
+static inline void _mi_memzero(void* dst, size_t n) {
+ memset(dst, 0, n);
+}
#endif
@@ -1045,12 +1060,23 @@ static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
_mi_memcpy(adst, asrc, n);
}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+ _mi_memzero(adst, n);
+}
#else
// Default fallback on `_mi_memcpy`
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
_mi_memcpy(dst, src, n);
}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ _mi_memzero(dst, n);
+}
#endif
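// --- Illustration (editor's sketch, not part of the patch) -------------------
// The mi_likely/mi_unlikely macros above now carry their own parentheses and,
// under C++20, expand to `(x) [[likely]]` / `(x) [[unlikely]]`. That is why
// every call site in this patch drops the outer parentheses: `if mi_unlikely(x) {...}`
// expands either to `if (__builtin_expect(!!(x),false)) {...}` or to
// `if (x) [[unlikely]] {...}`. A minimal standalone example of the same pattern
// (my_unlikely and checked_div are illustrative names, not mimalloc APIs):
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
  #define my_unlikely(x)  (__builtin_expect(!!(x),false))
#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
  #define my_unlikely(x)  (x) [[unlikely]]
#else
  #define my_unlikely(x)  (x)
#endif

static int checked_div(int a, int b) {
  if my_unlikely(b == 0) { return 0; }   // `if (b == 0) [[unlikely]] { ... }` in C++20, __builtin_expect otherwise
  return a / b;
}

int main(void) {
  printf("%d\n", checked_div(10, 2));    // prints 5
  return 0;
}
// -----------------------------------------------------------------------------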
diff --git a/include/mimalloc.h b/include/mimalloc.h
index c752ac24..c776efeb 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -166,7 +166,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
// Note that `alignment` always follows `size` for consistency with unaligned
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
// -------------------------------------------------------------------------------------
-#define MI_ALIGNMENT_MAX (1024*1024UL) // maximum supported alignment is 1MiB
+#define MI_ALIGNMENT_MAX (16*1024*1024UL) // maximum supported alignment is 16MiB
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
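// --- Illustration (editor's sketch, not part of the patch) -------------------
// MI_ALIGNMENT_MAX is raised from 1 MiB to 16 MiB, so a request such as a 2 MiB
// (huge-page) alignment now succeeds instead of being rejected with NULL:
#include <mimalloc.h>
#include <stdint.h>
#include <assert.h>

int main(void) {
  const size_t align = 2 * 1024 * 1024;              // 2 MiB: above the old 1 MiB limit, below the new 16 MiB one
  void* p = mi_malloc_aligned(1000, align);
  assert(p != NULL && ((uintptr_t)p % align) == 0);  // address at the requested alignment
  mi_free(p);
  return 0;
}
// -----------------------------------------------------------------------------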
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index fce0fd74..2c8d8f3a 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -49,19 +49,19 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
{
// note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
mi_assert(alignment > 0);
- if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) { // require power-of-two (see )
+ if mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see )
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
#endif
return NULL;
}
- if (mi_unlikely(alignment > MI_ALIGNMENT_MAX)) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
+ if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
#endif
return NULL;
}
- if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see )
+ if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see )
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
#endif
@@ -71,18 +71,17 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check
// try first if there happens to be a small block available with just the right alignment
- if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
+ if mi_likely(padsize <= MI_SMALL_SIZE_MAX) {
mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
- if (mi_likely(page->free != NULL && is_aligned))
+ if mi_likely(page->free != NULL && is_aligned)
{
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc, size);
#endif
- void* p = _mi_page_malloc(heap, page, padsize); // TODO: inline _mi_page_malloc
+ void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
- if (zero) { _mi_block_zero_init(page, p, size); }
return p;
}
}
@@ -95,19 +94,19 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
// ------------------------------------------------------
-mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}
-mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
#if !MI_PADDING
// without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
if (!_mi_is_power_of_two(alignment)) return NULL;
- if (mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX))
+ if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
#else
// with padding, we can only guarantee this for fixed alignments
- if (mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
- && size <= MI_SMALL_SIZE_MAX))
+ if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
+ && size <= MI_SMALL_SIZE_MAX)
#endif
{
// fast path for common alignment and size
@@ -122,45 +121,45 @@ mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size
// Aligned Allocation
// ------------------------------------------------------
-mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}
-mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}
-mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}
-mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}
-mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
-mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
}
-mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
-mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
}
-mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
}
-mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
}
@@ -207,55 +206,55 @@ static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsi
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}
-void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}
-void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}
-void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}
-void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}
-void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}
-void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}
-void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
-void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
-void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
-void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
-void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}
-void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}
diff --git a/src/alloc-override-osx.c b/src/alloc-override-osx.c
index 41d0a386..ba2313a2 100644
--- a/src/alloc-override-osx.c
+++ b/src/alloc-override-osx.c
@@ -254,7 +254,7 @@ static malloc_zone_t mi_malloc_zone = {
static inline malloc_zone_t* mi_get_default_zone(void)
{
static bool init;
- if (mi_unlikely(!init)) {
+ if mi_unlikely(!init) {
init = true;
malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see )
}
diff --git a/src/alloc-posix.c b/src/alloc-posix.c
index 176e7ec3..e1b4a286 100644
--- a/src/alloc-posix.c
+++ b/src/alloc-posix.c
@@ -83,7 +83,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcep
}
mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
- if (mi_unlikely((size&(alignment-1)) != 0)) { // C11 requires alignment>0 && integral multiple, see
+ if mi_unlikely((size&(alignment-1)) != 0) { // C11 requires alignment>0 && integral multiple, see
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment);
#endif
@@ -109,7 +109,7 @@ mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_att
}
void** op = (void**)p;
void* newp = mi_reallocarray(*op, count, size);
- if (mi_unlikely(newp == NULL)) return errno;
+ if mi_unlikely(newp == NULL) { return errno; }
*op = newp;
return 0;
}
diff --git a/src/alloc.c b/src/alloc.c
index 6b6c5945..4bf5bd53 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -25,11 +25,11 @@ terms of the MIT license. A copy of the license can be found in the file
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
-extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
mi_block_t* const block = page->free;
- if (mi_unlikely(block == NULL)) {
- return _mi_malloc_generic(heap, size);
+ if mi_unlikely(block == NULL) {
+ return _mi_malloc_generic(heap, size, zero);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
// pop from the free list
@@ -37,10 +37,17 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
+ // zero the block? note: we need to zero the full block size (issue #63)
+ if mi_unlikely(zero) {
+ mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+ const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
+ _mi_memzero_aligned(block, zsize);
+ }
+
#if (MI_DEBUG>0)
- if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
+ if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size); }
#elif (MI_SECURE!=0)
- block->next = 0; // don't leak internal data
+ if (!zero) { block->next = 0; } // don't leak internal data
#endif
#if (MI_STAT>0)
@@ -76,9 +83,8 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
return block;
}
-// allocate a small block
-extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
- mi_assert(heap!=NULL);
+static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+ mi_assert(heap != NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
mi_assert(size <= MI_SMALL_SIZE_MAX);
void* p;
@@ -90,13 +96,13 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
#if (MI_PADDING_EXTRA > 0 || MI_DEBUG_TRACE > 0)
// with extra padding it is not guaranteed the size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE, so we need an extra check
if (size + MI_PADDING_SIZE > MI_SMALL_SIZE_MAX) {
- p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);
+ p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero);
}
else
#endif
{
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
- p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
+ p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
}
mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
@@ -104,24 +110,29 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
- #endif
+#endif
return p;
}
-extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
+// allocate a small block
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(heap, size, false);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
return mi_heap_malloc_small(mi_get_default_heap(), size);
}
// The main allocation function
-extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
- if (mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE)) {
+ if mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE) {
-    return mi_heap_malloc_small(heap, size);
+    return mi_heap_malloc_small_zero(heap, size, zero);
}
else
{
mi_assert(heap!=NULL);
- mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
- void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE); // note: size can overflow but it is detected in malloc_generic
+ mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+ void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero); // note: size can overflow but it is detected in malloc_generic
mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
@@ -133,51 +144,24 @@ extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size
}
}
-extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return _mi_heap_malloc_zero(heap, size, false);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
return mi_heap_malloc(mi_get_default_heap(), size);
}
-
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
- // note: we need to initialize the whole usable block size to zero, not just the requested size,
- // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
- MI_UNUSED(size);
- mi_assert_internal(p != NULL);
- mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
- mi_assert_internal(_mi_ptr_page(p)==page);
- if (page->is_zero && size > sizeof(mi_block_t)) {
- // already zero initialized memory
- ((mi_block_t*)p)->next = 0; // clear the free list pointer
- mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
- }
- else {
- // otherwise memset
- memset(p, 0, mi_usable_size(p));
- }
-}
-
// zero initialized small block
-mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
- void* p = mi_malloc_small(size);
- if (p != NULL) {
- _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
- }
- return p;
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(mi_get_default_heap(), size, true);
}
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
- void* p = mi_heap_malloc(heap,size);
- if (zero && p != NULL) {
- _mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
- }
- return p;
-}
-
-extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heap_malloc_zero(heap, size, true);
}
-mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
return mi_heap_zalloc(mi_get_default_heap(),size);
}
@@ -449,7 +433,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
do {
use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
- if (mi_unlikely(use_delayed)) {
+ if mi_unlikely(use_delayed) {
// unlikely: this only happens on the first concurrent free in a page that is in the full list
tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
}
@@ -460,7 +444,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
}
} while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
- if (mi_unlikely(use_delayed)) {
+ if mi_unlikely(use_delayed) {
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
mi_assert_internal(heap != NULL);
@@ -486,9 +470,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
// and push it on the free list
- if (mi_likely(local)) {
+ if mi_likely(local) {
// owning thread can free a block directly
- if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+ if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
@@ -496,10 +480,10 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
- if (mi_unlikely(mi_page_all_free(page))) {
+ if mi_unlikely(mi_page_all_free(page)) {
_mi_page_retire(page);
}
- else if (mi_unlikely(mi_page_is_in_full(page))) {
+ else if mi_unlikely(mi_page_is_in_full(page)) {
_mi_page_unfull(page);
}
}
@@ -532,26 +516,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
{
MI_UNUSED(msg);
#if (MI_DEBUG>0)
- if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+ if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
return NULL;
}
#endif
mi_segment_t* const segment = _mi_ptr_segment(p);
- if (mi_unlikely(segment == NULL)) return NULL; // checks also for (p==NULL)
+ if mi_unlikely(segment == NULL) return NULL; // checks also for (p==NULL)
#if (MI_DEBUG>0)
- if (mi_unlikely(!mi_is_in_heap_region(p))) {
+ if mi_unlikely(!mi_is_in_heap_region(p)) {
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
- if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
+ if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
- if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+ if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
_mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
return NULL;
}
@@ -563,15 +547,15 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
void mi_free(void* p) mi_attr_noexcept
{
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
- if (mi_unlikely(segment == NULL)) return;
+ if mi_unlikely(segment == NULL) return;
mi_threadid_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
- if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
+ if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
mi_block_t* block = (mi_block_t*)(p);
- if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+ if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
#if (MI_DEBUG!=0)
@@ -579,7 +563,7 @@ void mi_free(void* p) mi_attr_noexcept
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
- if (mi_unlikely(--page->used == 0)) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+ if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
_mi_page_retire(page);
}
}
@@ -625,7 +609,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
if (segment==NULL) return 0; // also returns 0 if `p == NULL`
const mi_page_t* const page = _mi_segment_page_of(segment, p);
- if (mi_likely(!mi_page_has_aligned(page))) {
+ if mi_likely(!mi_page_has_aligned(page)) {
const mi_block_t* block = (const mi_block_t*)p;
return mi_page_usable_size_of(page, block);
}
@@ -635,7 +619,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
}
}
-size_t mi_usable_size(const void* p) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
return _mi_usable_size(p, "mi_usable_size");
}
@@ -647,6 +631,7 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept {
#ifdef __cplusplus
void* _mi_externs[] = {
(void*)&_mi_page_malloc,
+ (void*)&_mi_heap_malloc_zero,
(void*)&mi_malloc,
(void*)&mi_malloc_small,
(void*)&mi_zalloc_small,
@@ -679,24 +664,24 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
mi_free(p);
}
-extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count,size,&total)) return NULL;
return mi_heap_zalloc(heap,total);
}
-mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_calloc(mi_get_default_heap(),count,size);
}
// Uninitialized `calloc`
-extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_malloc(heap, total);
}
-mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_mallocn(mi_get_default_heap(),count,size);
}
@@ -715,37 +700,34 @@ void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
}
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept {
- const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL
- if (mi_unlikely(newsize <= size && newsize >= (size / 2))) {
+ // if p == NULL then behave as malloc.
+ // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
+ // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
+ const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
+ if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
// todo: adjust potential padding to reflect the new size?
return p; // reallocation still fits and not more than 50% waste
}
void* newp = mi_heap_malloc(heap,newsize);
- if (mi_likely(newp != NULL)) {
+ if mi_likely(newp != NULL) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
memset((uint8_t*)newp + start, 0, newsize - start);
}
- if (mi_likely(p != NULL)) {
- const size_t copysize = (newsize > size ? size : newsize);
- if (mi_likely(((uintptr_t)p % MI_INTPTR_SIZE) == 0)) {
- _mi_memcpy_aligned(newp, p, copysize);
- }
- else {
- _mi_memcpy(newp, p, copysize);
- }
+ if mi_likely(p != NULL) {
+ _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
mi_free(p); // only free the original pointer if successful
}
}
return newp;
}
-void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, false);
}
-void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_realloc(heap, p, total);
@@ -753,41 +735,41 @@ void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_a
// Reallocate but free `p` on errors
-void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* newp = mi_heap_realloc(heap, p, newsize);
if (newp==NULL && p!=NULL) mi_free(p);
return newp;
}
-void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, true);
}
-void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_rezalloc(heap, p, total);
}
-void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_realloc(mi_get_default_heap(),p,newsize);
}
-void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
}
// Reallocate but free `p` on errors
-void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
}
-void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}
-void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}
@@ -798,7 +780,7 @@ void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
// ------------------------------------------------------
// `strdup` using mi_malloc
-mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t n = strlen(s);
char* t = (char*)mi_heap_malloc(heap,n+1);
@@ -806,12 +788,12 @@ mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_no
return t;
}
-mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
return mi_heap_strdup(mi_get_default_heap(), s);
}
// `strndup` using mi_malloc
-mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
if (s == NULL) return NULL;
const char* end = (const char*)memchr(s, 0, n); // find end of string in the first `n` characters (returns NULL if not found)
const size_t m = (end != NULL ? (size_t)(end - s) : n); // `m` is the minimum of `n` or the end-of-string
@@ -823,7 +805,7 @@ mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n)
return t;
}
-mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
return mi_heap_strndup(mi_get_default_heap(),s,n);
}
@@ -834,7 +816,7 @@ mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
-mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
// todo: use GetFullPathNameW to allow longer file names
char buf[PATH_MAX];
DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
@@ -880,7 +862,7 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
}
#endif
-mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
}
#endif
@@ -956,19 +938,19 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
return p;
}
-mi_decl_restrict void* mi_new(size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
void* p = mi_malloc(size);
- if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
+ if mi_unlikely(p == NULL) return mi_try_new(size,false);
return p;
}
-mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
void* p = mi_malloc(size);
- if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+ if mi_unlikely(p == NULL) return mi_try_new(size, true);
return p;
}
-mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -977,7 +959,7 @@ mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
return p;
}
-mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -986,9 +968,9 @@ mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_
return p;
}
-mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
size_t total;
- if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
+ if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
@@ -997,7 +979,7 @@ mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
}
}
-void* mi_new_realloc(void* p, size_t newsize) {
+mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
void* q;
do {
q = mi_realloc(p, newsize);
@@ -1005,9 +987,9 @@ void* mi_new_realloc(void* p, size_t newsize) {
return q;
}
-void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
+mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
size_t total;
- if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+ if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
diff --git a/src/arena.c b/src/arena.c
index 6b1e951f..516e9946 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -155,7 +155,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
const size_t bcount = mi_block_count_of_size(size);
- if (mi_likely(max_arena == 0)) return NULL;
+ if mi_likely(max_arena == 0) return NULL;
mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);
// try numa affine allocation
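[Editor's note] The repeated `if (mi_likely(x))` to `if mi_likely(x)` rewrites in this and the following hunks only parse if the macro now supplies its own parentheses, which also leaves room for a C++20 `[[likely]]`/`[[unlikely]]` expansion after the condition. The definition is not shown in this diff; one plausible sketch:

    /* Sketch only -- not taken from this patch. */
    #if defined(__GNUC__) || defined(__clang__)
      #define mi_likely(x)     (__builtin_expect(!!(x), 1))
      #define mi_unlikely(x)   (__builtin_expect(!!(x), 0))
    #elif defined(__cplusplus) && (__cplusplus >= 202002L)
      #define mi_likely(x)     (x) [[likely]]      /* `if mi_likely(x) { ... }` => `if (x) [[likely]] { ... }` */
      #define mi_unlikely(x)   (x) [[unlikely]]
    #else
      #define mi_likely(x)     (x)
      #define mi_unlikely(x)   (x)
    #endif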
diff --git a/src/bitmap.c b/src/bitmap.c
index af6de0a1..4e85d687 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -283,7 +283,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
MI_UNUSED_RELEASE(bitmap_fields);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
- if (mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS)) {
+ if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
*pre_mask = mi_bitmap_mask_(count, bitidx);
*mid_mask = 0;
*post_mask = 0;
diff --git a/src/heap.c b/src/heap.c
index 816d961a..cc2b646e 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -200,7 +200,7 @@ mi_heap_t* mi_heap_get_backing(void) {
return bheap;
}
-mi_heap_t* mi_heap_new(void) {
+mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
mi_heap_t* bheap = mi_heap_get_backing();
mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
if (heap==NULL) return NULL;
@@ -421,7 +421,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
mi_segment_t* segment = _mi_ptr_segment(p);
bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(valid);
- if (mi_unlikely(!valid)) return NULL;
+ if mi_unlikely(!valid) return NULL;
return mi_page_heap(_mi_segment_page_of(segment,p));
}
@@ -543,7 +543,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
xarea.area.reserved = page->reserved * bsize;
xarea.area.committed = page->capacity * bsize;
xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
- xarea.area.used = page->used * bsize;
+ xarea.area.used = page->used; // number of blocks in use (#553)
xarea.area.block_size = ubsize;
xarea.area.full_block_size = bsize;
return fun(heap, &xarea, arg);
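[Editor's note] The heap.c hunk redefines `area.used` as a block count rather than a byte count (issue #553). A hedged sketch of a visitor that consumes the field under the new meaning; the `mi_heap_visit_blocks` signature and its area-only mode are assumed from the public mimalloc API, not taken from this patch:

    #include <stdio.h>
    #include <stdbool.h>
    #include <mimalloc.h>

    /* Illustration only: print per-area statistics, treating `used` as a block count. */
    static bool print_area(const mi_heap_t* heap, const mi_heap_area_t* area,
                           void* block, size_t block_size, void* arg) {
      (void)heap; (void)block; (void)block_size; (void)arg;
      if (area != NULL) {
        printf("area: %zu blocks in use of %zu bytes each (~%zu bytes)\n",
               area->used, area->block_size, area->used * area->block_size);
      }
      return true;  /* continue visiting */
    }

    /* usage (areas only): mi_heap_visit_blocks(mi_heap_get_default(), false, &print_area, NULL); */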
diff --git a/src/options.c b/src/options.c
index ea2e7709..63d629eb 100644
--- a/src/options.c
+++ b/src/options.c
@@ -120,7 +120,7 @@ mi_decl_nodiscard long mi_option_get(mi_option_t option) {
if (option < 0 || option >= _mi_option_last) return 0;
mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
- if (mi_unlikely(desc->init == UNINIT)) {
+ if mi_unlikely(desc->init == UNINIT) {
mi_option_init(desc);
}
return desc->value;
diff --git a/src/os.c b/src/os.c
index 72959d81..f8e56ba6 100644
--- a/src/os.c
+++ b/src/os.c
@@ -122,7 +122,7 @@ size_t _mi_os_good_alloc_size(size_t size) {
else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
else align_size = 4*MI_MiB;
- if (mi_unlikely(size >= (SIZE_MAX - align_size))) return size; // possible overflow?
+ if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow?
return _mi_align_up(size, align_size);
}
diff --git a/src/page.c b/src/page.c
index fd6c5397..73a9725c 100644
--- a/src/page.c
+++ b/src/page.c
@@ -131,7 +131,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
tfreex = mi_tf_set_delayed(tfree, delay);
old_delay = mi_tf_delayed(tfree);
- if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) {
+ if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
// tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
}
@@ -199,7 +199,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
// and the local free list
if (page->local_free != NULL) {
- if (mi_likely(page->free == NULL)) {
+ if mi_likely(page->free == NULL) {
// usual case
page->free = page->local_free;
page->local_free = NULL;
@@ -403,7 +403,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
- if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
+ if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page)) {
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
@@ -812,8 +812,8 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
// huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
- if (mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) {
- if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see )
+ if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE)) {
+ if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see )
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
return NULL;
}
@@ -830,15 +830,15 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
{
mi_assert_internal(heap != NULL);
// initialize if necessary
- if (mi_unlikely(!mi_heap_is_initialized(heap))) {
+ if mi_unlikely(!mi_heap_is_initialized(heap)) {
mi_thread_init(); // calls `_mi_heap_init` in turn
heap = mi_get_default_heap();
- if (mi_unlikely(!mi_heap_is_initialized(heap))) { return NULL; }
+ if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
}
mi_assert_internal(mi_heap_is_initialized(heap));
@@ -850,12 +850,12 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
// find (or allocate) a page of the right size
mi_page_t* page = mi_find_page(heap, size);
- if (mi_unlikely(page == NULL)) { // first time out of memory, try to collect and retry the allocation once more
+ if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
mi_heap_collect(heap, true /* force */);
page = mi_find_page(heap, size);
}
- if (mi_unlikely(page == NULL)) { // out of memory
+ if mi_unlikely(page == NULL) { // out of memory
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
_mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
return NULL;
@@ -864,6 +864,15 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(mi_page_block_size(page) >= size);
- // and try again, this time succeeding! (i.e. this should never recurse)
- return _mi_page_malloc(heap, page, size);
+ // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
+ if mi_unlikely(zero && page->xblock_size == 0) {
+ // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
+ void* p = _mi_page_malloc(heap, page, size, false);
+ mi_assert_internal(p != NULL);
+ _mi_memzero_aligned(p, mi_page_usable_block_size(page));
+ return p;
+ }
+ else {
+ return _mi_page_malloc(heap, page, size, zero);
+ }
}
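[Editor's note] The page.c hunk threads a `zero` flag through the slow path and, for huge pages (`xblock_size == 0`), zeroes the block after allocation instead. A sketch of how a zero-initializing entry point could rely on this; the wrapper name is hypothetical, and only the `_mi_malloc_generic(heap, size, zero)` signature comes from this patch:

    /* Sketch only: a zeroing wrapper no longer needs its own memset on the slow path,
       since _mi_malloc_generic(heap, size, true) zeroes small/medium blocks in
       _mi_page_malloc and handles huge blocks with the explicit memzero shown above. */
    static inline void* my_generic_zalloc(mi_heap_t* heap, size_t size) {
      return _mi_malloc_generic(heap, size, true /* zero */);
    }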
diff --git a/src/segment-cache.c b/src/segment-cache.c
index aacdbc11..eac8f843 100644
--- a/src/segment-cache.c
+++ b/src/segment-cache.c
@@ -283,7 +283,7 @@ static mi_segment_t* _mi_segment_of(const void* p) {
size_t index = mi_segment_map_index_of(segment, &bitidx);
// fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge
const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
- if (mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0)) {
+ if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) {
return segment; // yes, allocated by us
}
if (index==MI_SEGMENT_MAP_WSIZE) return NULL;
@@ -324,7 +324,7 @@ static mi_segment_t* _mi_segment_of(const void* p) {
mi_assert_internal((void*)segment < p);
bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(cookie_ok);
- if (mi_unlikely(!cookie_ok)) return NULL;
+ if mi_unlikely(!cookie_ok) return NULL;
if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range
mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment));
return segment;
diff --git a/src/segment.c b/src/segment.c
index 800d4fc3..63dd5ac0 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -1149,8 +1149,8 @@ static mi_segment_t* mi_abandoned_pop(void) {
// Check efficiently if it is empty (or if the visited list needs to be moved)
mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
segment = mi_tagged_segment_ptr(ts);
- if (mi_likely(segment == NULL)) {
- if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL
+ if mi_likely(segment == NULL) {
+ if mi_likely(!mi_abandoned_visited_revisit()) { // try to swap in the visited list on NULL
return NULL;
}
}
diff --git a/test/test-api-fill.c b/test/test-api-fill.c
index 0e5a65dc..ef50acc2 100644
--- a/test/test-api-fill.c
+++ b/test/test-api-fill.c
@@ -27,39 +27,39 @@ int main(void) {
// ---------------------------------------------------
// Zeroing allocation
// ---------------------------------------------------
- CHECK_BODY("zeroinit-zalloc-small", {
+ CHECK_BODY("zeroinit-zalloc-small") {
size_t zalloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size);
result = check_zero_init(p, zalloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-zalloc-large", {
+ };
+ CHECK_BODY("zeroinit-zalloc-large") {
size_t zalloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size);
result = check_zero_init(p, zalloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-zalloc_small", {
+ };
+ CHECK_BODY("zeroinit-zalloc_small") {
size_t zalloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_zalloc_small(zalloc_size);
result = check_zero_init(p, zalloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("zeroinit-calloc-small", {
+ CHECK_BODY("zeroinit-calloc-small") {
size_t calloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1);
result = check_zero_init(p, calloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-calloc-large", {
+ };
+ CHECK_BODY("zeroinit-calloc-large") {
size_t calloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1);
result = check_zero_init(p, calloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("zeroinit-rezalloc-small", {
+ CHECK_BODY("zeroinit-rezalloc-small") {
size_t zalloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size);
result = check_zero_init(p, zalloc_size);
@@ -67,8 +67,8 @@ int main(void) {
p = (uint8_t*)mi_rezalloc(p, zalloc_size);
result &= check_zero_init(p, zalloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-rezalloc-large", {
+ };
+ CHECK_BODY("zeroinit-rezalloc-large") {
size_t zalloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size);
result = check_zero_init(p, zalloc_size);
@@ -76,9 +76,9 @@ int main(void) {
p = (uint8_t*)mi_rezalloc(p, zalloc_size);
result &= check_zero_init(p, zalloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("zeroinit-recalloc-small", {
+ CHECK_BODY("zeroinit-recalloc-small") {
size_t calloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1);
result = check_zero_init(p, calloc_size);
@@ -86,8 +86,8 @@ int main(void) {
p = (uint8_t*)mi_recalloc(p, calloc_size, 1);
result &= check_zero_init(p, calloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-recalloc-large", {
+ };
+ CHECK_BODY("zeroinit-recalloc-large") {
size_t calloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1);
result = check_zero_init(p, calloc_size);
@@ -95,38 +95,38 @@ int main(void) {
p = (uint8_t*)mi_recalloc(p, calloc_size, 1);
result &= check_zero_init(p, calloc_size);
mi_free(p);
- });
+ };
// ---------------------------------------------------
// Zeroing in aligned API
// ---------------------------------------------------
- CHECK_BODY("zeroinit-zalloc_aligned-small", {
+ CHECK_BODY("zeroinit-zalloc_aligned-small") {
size_t zalloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, zalloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-zalloc_aligned-large", {
+ };
+ CHECK_BODY("zeroinit-zalloc_aligned-large") {
size_t zalloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, zalloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("zeroinit-calloc_aligned-small", {
+ CHECK_BODY("zeroinit-calloc_aligned-small") {
size_t calloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, calloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-calloc_aligned-large", {
+ };
+ CHECK_BODY("zeroinit-calloc_aligned-large") {
size_t calloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, calloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("zeroinit-rezalloc_aligned-small", {
+ CHECK_BODY("zeroinit-rezalloc_aligned-small") {
size_t zalloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, zalloc_size);
@@ -134,8 +134,8 @@ int main(void) {
p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2);
result &= check_zero_init(p, zalloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-rezalloc_aligned-large", {
+ };
+ CHECK_BODY("zeroinit-rezalloc_aligned-large") {
size_t zalloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, zalloc_size);
@@ -143,9 +143,9 @@ int main(void) {
p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2);
result &= check_zero_init(p, zalloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("zeroinit-recalloc_aligned-small", {
+ CHECK_BODY("zeroinit-recalloc_aligned-small") {
size_t calloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, calloc_size);
@@ -153,8 +153,8 @@ int main(void) {
p = (uint8_t*)mi_recalloc_aligned(p, calloc_size, 1, MI_MAX_ALIGN_SIZE * 2);
result &= check_zero_init(p, calloc_size);
mi_free(p);
- });
- CHECK_BODY("zeroinit-recalloc_aligned-large", {
+ };
+ CHECK_BODY("zeroinit-recalloc_aligned-large") {
size_t calloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2);
result = check_zero_init(p, calloc_size);
@@ -162,33 +162,33 @@ int main(void) {
p = (uint8_t*)mi_recalloc_aligned(p, calloc_size, 1, MI_MAX_ALIGN_SIZE * 2);
result &= check_zero_init(p, calloc_size);
mi_free(p);
- });
+ };
#if MI_DEBUG >= 2
// ---------------------------------------------------
// Debug filling
// ---------------------------------------------------
- CHECK_BODY("uninit-malloc-small", {
+ CHECK_BODY("uninit-malloc-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
result = check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
- CHECK_BODY("uninit-malloc-large", {
+ };
+ CHECK_BODY("uninit-malloc-large") {
size_t malloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
result = check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("uninit-malloc_small", {
+ CHECK_BODY("uninit-malloc_small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc_small(malloc_size);
result = check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("uninit-realloc-small", {
+ CHECK_BODY("uninit-realloc-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
result = check_debug_fill_uninit(p, malloc_size);
@@ -196,8 +196,8 @@ int main(void) {
p = (uint8_t*)mi_realloc(p, malloc_size);
result &= check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
- CHECK_BODY("uninit-realloc-large", {
+ };
+ CHECK_BODY("uninit-realloc-large") {
size_t malloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
result = check_debug_fill_uninit(p, malloc_size);
@@ -205,22 +205,22 @@ int main(void) {
p = (uint8_t*)mi_realloc(p, malloc_size);
result &= check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("uninit-mallocn-small", {
+ CHECK_BODY("uninit-mallocn-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1);
result = check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
- CHECK_BODY("uninit-mallocn-large", {
+ };
+ CHECK_BODY("uninit-mallocn-large") {
size_t malloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1);
result = check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("uninit-reallocn-small", {
+ CHECK_BODY("uninit-reallocn-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1);
result = check_debug_fill_uninit(p, malloc_size);
@@ -228,8 +228,8 @@ int main(void) {
p = (uint8_t*)mi_reallocn(p, malloc_size, 1);
result &= check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
- CHECK_BODY("uninit-reallocn-large", {
+ };
+ CHECK_BODY("uninit-reallocn-large") {
size_t malloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1);
result = check_debug_fill_uninit(p, malloc_size);
@@ -237,22 +237,22 @@ int main(void) {
p = (uint8_t*)mi_reallocn(p, malloc_size, 1);
result &= check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("uninit-malloc_aligned-small", {
+ CHECK_BODY("uninit-malloc_aligned-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
- CHECK_BODY("uninit-malloc_aligned-large", {
+ };
+ CHECK_BODY("uninit-malloc_aligned-large") {
size_t malloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("uninit-realloc_aligned-small", {
+ CHECK_BODY("uninit-realloc_aligned-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_debug_fill_uninit(p, malloc_size);
@@ -260,8 +260,8 @@ int main(void) {
p = (uint8_t*)mi_realloc_aligned(p, malloc_size, MI_MAX_ALIGN_SIZE * 2);
result &= check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
- CHECK_BODY("uninit-realloc_aligned-large", {
+ };
+ CHECK_BODY("uninit-realloc_aligned-large") {
size_t malloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2);
result = check_debug_fill_uninit(p, malloc_size);
@@ -269,23 +269,23 @@ int main(void) {
p = (uint8_t*)mi_realloc_aligned(p, malloc_size, MI_MAX_ALIGN_SIZE * 2);
result &= check_debug_fill_uninit(p, malloc_size);
mi_free(p);
- });
+ };
- CHECK_BODY("fill-freed-small", {
+ CHECK_BODY("fill-freed-small") {
size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
mi_free(p);
// First sizeof(void*) bytes will contain housekeeping data, skip these
result = check_debug_fill_freed(p + sizeof(void*), malloc_size - sizeof(void*));
- });
- CHECK_BODY("fill-freed-large", {
+ };
+ CHECK_BODY("fill-freed-large") {
size_t malloc_size = MI_SMALL_SIZE_MAX * 2;
uint8_t* p = (uint8_t*)mi_malloc(malloc_size);
mi_free(p);
// First sizeof(void*) bytes will contain housekeeping data, skip these
result = check_debug_fill_freed(p + sizeof(void*), malloc_size - sizeof(void*));
- });
+ };
#endif
// ---------------------------------------------------
diff --git a/test/test-api.c b/test/test-api.c
index 0302464e..c56fb88d 100644
--- a/test/test-api.c
+++ b/test/test-api.c
@@ -56,75 +56,77 @@ int main(void) {
// Malloc
// ---------------------------------------------------
- CHECK_BODY("malloc-zero",{
- void* p = mi_malloc(0); mi_free(p);
- });
- CHECK_BODY("malloc-nomem1",{
+ CHECK_BODY("malloc-zero") {
+ void* p = mi_malloc(0);
+ result = (p != NULL);
+ mi_free(p);
+ };
+ CHECK_BODY("malloc-nomem1") {
result = (mi_malloc((size_t)PTRDIFF_MAX + (size_t)1) == NULL);
- });
- CHECK_BODY("malloc-null",{
+ };
+ CHECK_BODY("malloc-null") {
mi_free(NULL);
- });
- CHECK_BODY("calloc-overflow",{
+ };
+ CHECK_BODY("calloc-overflow") {
// use (size_t)&mi_calloc to get some number without triggering compiler warnings
result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL);
- });
- CHECK_BODY("calloc0",{
+ };
+ CHECK_BODY("calloc0") {
result = (mi_usable_size(mi_calloc(0,1000)) <= 16);
- });
- CHECK_BODY("malloc-large",{ // see PR #544.
+ };
+ CHECK_BODY("malloc-large") { // see PR #544.
void* p = mi_malloc(67108872);
mi_free(p);
- });
+ };
// ---------------------------------------------------
// Extended
// ---------------------------------------------------
- CHECK_BODY("posix_memalign1", {
+ CHECK_BODY("posix_memalign1") {
void* p = &p;
int err = mi_posix_memalign(&p, sizeof(void*), 32);
result = ((err==0 && (uintptr_t)p % sizeof(void*) == 0) || p==&p);
mi_free(p);
- });
- CHECK_BODY("posix_memalign_no_align", {
+ };
+ CHECK_BODY("posix_memalign_no_align") {
void* p = &p;
int err = mi_posix_memalign(&p, 3, 32);
result = (err==EINVAL && p==&p);
- });
- CHECK_BODY("posix_memalign_zero", {
+ };
+ CHECK_BODY("posix_memalign_zero") {
void* p = &p;
int err = mi_posix_memalign(&p, sizeof(void*), 0);
mi_free(p);
result = (err==0);
- });
- CHECK_BODY("posix_memalign_nopow2", {
+ };
+ CHECK_BODY("posix_memalign_nopow2") {
void* p = &p;
int err = mi_posix_memalign(&p, 3*sizeof(void*), 32);
result = (err==EINVAL && p==&p);
- });
- CHECK_BODY("posix_memalign_nomem", {
+ };
+ CHECK_BODY("posix_memalign_nomem") {
void* p = &p;
int err = mi_posix_memalign(&p, sizeof(void*), SIZE_MAX);
result = (err==ENOMEM && p==&p);
- });
+ };
// ---------------------------------------------------
// Aligned API
// ---------------------------------------------------
- CHECK_BODY("malloc-aligned1", {
+ CHECK_BODY("malloc-aligned1") {
void* p = mi_malloc_aligned(32,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p);
- });
- CHECK_BODY("malloc-aligned2", {
+ };
+ CHECK_BODY("malloc-aligned2") {
void* p = mi_malloc_aligned(48,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p);
- });
- CHECK_BODY("malloc-aligned3", {
+ };
+ CHECK_BODY("malloc-aligned3") {
void* p1 = mi_malloc_aligned(48,32); bool result1 = (p1 != NULL && (uintptr_t)(p1) % 32 == 0);
void* p2 = mi_malloc_aligned(48,32); bool result2 = (p2 != NULL && (uintptr_t)(p2) % 32 == 0);
mi_free(p2);
mi_free(p1);
result = (result1&&result2);
- });
- CHECK_BODY("malloc-aligned4", {
+ };
+ CHECK_BODY("malloc-aligned4") {
void* p;
bool ok = true;
for (int i = 0; i < 8 && ok; i++) {
@@ -132,11 +134,11 @@ int main(void) {
ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p);
}
result = ok;
- });
- CHECK_BODY("malloc-aligned5", {
+ };
+ CHECK_BODY("malloc-aligned5") {
void* p = mi_malloc_aligned(4097,4096); size_t usable = mi_usable_size(p); result = usable >= 4097 && usable < 10000; mi_free(p);
- });
- CHECK_BODY("malloc-aligned6", {
+ };
+ CHECK_BODY("malloc-aligned6") {
bool ok = true;
for (size_t align = 1; align <= MI_ALIGNMENT_MAX && ok; align *= 2) {
void* ps[8];
@@ -151,20 +153,20 @@ int main(void) {
}
}
result = ok;
- });
- CHECK_BODY("malloc-aligned7", {
+ };
+ CHECK_BODY("malloc-aligned7") {
void* p = mi_malloc_aligned(1024,MI_ALIGNMENT_MAX); mi_free(p);
- });
- CHECK_BODY("malloc-aligned8", {
+ };
+ CHECK_BODY("malloc-aligned8") {
void* p = mi_malloc_aligned(1024,2*MI_ALIGNMENT_MAX); mi_free(p);
- });
- CHECK_BODY("malloc-aligned-at1", {
+ };
+ CHECK_BODY("malloc-aligned-at1") {
void* p = mi_malloc_aligned_at(48,32,0); result = (p != NULL && ((uintptr_t)(p) + 0) % 32 == 0); mi_free(p);
- });
- CHECK_BODY("malloc-aligned-at2", {
+ };
+ CHECK_BODY("malloc-aligned-at2") {
void* p = mi_malloc_aligned_at(50,32,8); result = (p != NULL && ((uintptr_t)(p) + 8) % 32 == 0); mi_free(p);
- });
- CHECK_BODY("memalign1", {
+ };
+ CHECK_BODY("memalign1") {
void* p;
bool ok = true;
for (int i = 0; i < 8 && ok; i++) {
@@ -172,8 +174,36 @@ int main(void) {
ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p);
}
result = ok;
- });
+ };
+ // ---------------------------------------------------
+ // Reallocation
+ // ---------------------------------------------------
+ CHECK_BODY("realloc-null") {
+ void* p = mi_realloc(NULL,4);
+ result = (p != NULL);
+ mi_free(p);
+ };
+
+ CHECK_BODY("realloc-null-sizezero") {
+ void* p = mi_realloc(NULL,0); // "If ptr is NULL, the behavior is the same as calling malloc(new_size)."
+ result = (p != NULL);
+ mi_free(p);
+ };
+
+ CHECK_BODY("realloc-sizezero") {
+ void* p = mi_malloc(4);
+ void* q = mi_realloc(p, 0);
+ result = (q != NULL);
+ mi_free(q);
+ };
+
+ CHECK_BODY("reallocarray-null-sizezero") {
+ void* p = mi_reallocarray(NULL,0,16); // issue #574
+ result = (p != NULL && errno == 0);
+ mi_free(p);
+ };
+
// ---------------------------------------------------
// Heaps
// ---------------------------------------------------
@@ -185,11 +215,11 @@ int main(void) {
// ---------------------------------------------------
// various
// ---------------------------------------------------
- CHECK_BODY("realpath", {
+ CHECK_BODY("realpath") {
char* s = mi_realpath( ".", NULL );
// printf("realpath: %s\n",s);
mi_free(s);
- });
+ };
CHECK("stl_allocator1", test_stl_allocator1());
CHECK("stl_allocator2", test_stl_allocator2());
diff --git a/test/test-stress.c b/test/test-stress.c
index 15df0e3c..d5091f5c 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -267,7 +267,7 @@ int main(int argc, char** argv) {
//mi_debug_show_arenas();
#endif
mi_stats_print(NULL);
-#endif
+#endif
//bench_end_program();
return 0;
}
diff --git a/test/testhelper.h b/test/testhelper.h
index 46d63a00..44776b74 100644
--- a/test/testhelper.h
+++ b/test/testhelper.h
@@ -7,7 +7,9 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef TESTHELPER_H_
#define TESTHELPER_H_
+#include <stdbool.h>
#include <stdio.h>
+#include <errno.h>
// ---------------------------------------------------------------------------
// Test macros: CHECK(name,predicate) and CHECK_BODY(name,body)
@@ -15,27 +17,25 @@ terms of the MIT license. A copy of the license can be found in the file
static int ok = 0;
static int failed = 0;
-#define CHECK_BODY(name,body) \
- do { \
- fprintf(stderr,"test: %s... ", name ); \
- bool result = true; \
- do { body } while(false); \
- if (!(result)) { \
- failed++; \
- fprintf(stderr, \
- "\n FAILED: %s:%d:\n %s\n", \
- __FILE__, \
- __LINE__, \
- #body); \
- /* exit(1); */ \
- } \
- else { \
- ok++; \
- fprintf(stderr,"ok.\n"); \
- } \
- } while (false)
+static bool check_result(bool result, const char* testname, const char* fname, long lineno) {
+ if (!(result)) {
+ failed++;
+ fprintf(stderr,"\n FAILED: %s: %s:%ld\n", testname, fname, lineno);
+ /* exit(1); */
+ }
+ else {
+ ok++;
+ fprintf(stderr, "ok.\n");
+ }
+ return true;
+}
-#define CHECK(name,expr) CHECK_BODY(name,{ result = (expr); })
+#define CHECK_BODY(name) \
+ fprintf(stderr,"test: %s... ", name ); \
+ errno = 0; \
+ for(bool done = false, result = true; !done; done = check_result(result,name,__FILE__,__LINE__))
+
+#define CHECK(name,expr) CHECK_BODY(name){ result = (expr); }
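[Editor's note] The rewritten macros turn each test into an ordinary block statement: `CHECK_BODY("x") { ... };` runs the block once inside a single-iteration for-loop and reports the result via `check_result`. A rough expansion, derived directly from the definitions above:

    /* CHECK_BODY("x") { result = (1 + 1 == 2); };   expands to roughly: */
    fprintf(stderr, "test: %s... ", "x");
    errno = 0;
    for (bool done = false, result = true; !done;
         done = check_result(result, "x", __FILE__, __LINE__)) {
      result = (1 + 1 == 2);   /* user block, executed exactly once */
    };                         /* the trailing `;` after the block is harmless */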
// Print summary of test. Return value can be directly use as a return value for main().
static inline int print_test_summary(void)