Compare commits


880 commits
v1.9.2 ... main

Author SHA1 Message Date
Daan
af21001f7a clarify to use as the PR branch 2025-04-02 12:17:45 -07:00
Daan
9f5a2969b8 clarify v3 tag 2025-04-02 12:08:26 -07:00
Daan
13cd3f7f05 Merge branch 'dev2' 2025-03-28 14:20:20 -07:00
Daan
94036de6fe Merge branch 'dev' into dev2 2025-03-28 14:19:07 -07:00
Daan
8a81fc73c8 update readme 2025-03-28 14:18:28 -07:00
Daan
401a043849 Merge branch 'dev' into dev2 2025-03-28 13:28:16 -07:00
Daan
f46c1d2624 update readme 2025-03-28 13:28:10 -07:00
Daan
b843fead22 update mimalloc-redirect to v1.3.3; fix issue #1049 2025-03-28 13:11:37 -07:00
Daan
1052c30f03 fix parenthesis in #if condition 2025-03-28 13:09:24 -07:00
Daan
eb992b1d19
Merge pull request #1050 from vfazio/vfazio-mi_yield_atomic-armeb
atomic: fix mi_atomic_yield for big-endian arm32
2025-03-28 13:07:25 -07:00
Vincent Fazio
23fbee7ec6 atomic: fix mi_atomic_yield for big-endian arm32
Previously, `mi_atomic_yield` would not be defined on ARM32 big-endian
architectures if they were pre-ARMv7.

Rework the #ifdef guard to be more readable and collapse the ARM guards
so both little and big endian are handled via the same mechanism.

Now, ARMv7+ will utilize `yield` while older targets will use `nop`
regardless of endianness.

Signed-off-by: Vincent Fazio <vfazio@gmail.com>
2025-03-28 08:04:07 -05:00
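For reference, a minimal sketch in C of the guard structure this commit describes, assuming GCC-style inline assembly and the ACLE `__ARM_ARCH` macro; the surrounding macro plumbing in mimalloc's atomic header is omitted, so this is not the verbatim source:

```c
// Sketch only: on 32-bit ARM, ARMv7+ spins with `yield` while older
// targets fall back to `nop`; endianness no longer selects the mechanism.
#if defined(__arm__)
static inline void mi_atomic_yield(void) {
#if defined(__ARM_ARCH) && (__ARM_ARCH >= 7)
  __asm__ volatile ("yield" ::: "memory");  // ARMv7+ spin-wait hint
#else
  __asm__ volatile ("nop");                 // pre-ARMv7 has no `yield`
#endif
}
#endif
```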
Daan
e1da869d9b Merge branch 'dev' into dev2 2025-03-25 16:06:14 -07:00
Daan
632eab958b fix for atomic_yield on arm 32-bit, issue #1046 2025-03-25 16:02:29 -07:00
Daan
881d5e3c06
Merge pull request #1047 from ognevny/patch-1
cmake: don't change properties of import lib on Windows/MinGW
2025-03-25 15:43:16 -07:00
Daan
246b3bc120
Merge pull request #1045 from SquallATF/mingw-fix
remove the `lib` prefix when enabling mimalloc-redirect for mingw
2025-03-25 15:42:53 -07:00
Maksim Bondarenkov
797ca19ba9
cmake: don't change properties of import lib on Windows/MinGW
CMake handles the import lib for it automatically, and using the `.dll.lib` extension is an MSVC-specific hack
2025-03-24 08:35:15 +03:00
Peiyuan Song
34cc5c8fd9 remove the lib prefix when enabling mimalloc-redirect for mingw 2025-03-24 09:39:42 +08:00
Daan
9155fe98f1 Merge branch 'dev' into dev2 2025-03-21 20:07:29 -07:00
Daan
26b792d93b fix aligned malloc_requested statistic 2025-03-21 20:07:16 -07:00
Daan
ea2c7c6e56 merge from dev statistics update 2025-03-21 19:43:26 -07:00
Daan
a077311a5e improve tracking of malloc_requested count 2025-03-21 19:40:44 -07:00
Daan
d48bafe2bb print statistics nicer 2025-03-21 19:21:41 -07:00
Daan
02607f2b8d reduce test sizes for 32-bit 2025-03-21 17:22:36 -07:00
Daan
1463ead070 Merge branch 'dev' into dev2 2025-03-21 16:57:34 -07:00
Daan
6ed451c555 fix linux compile by including linux/prctl.h 2025-03-21 16:48:50 -07:00
Daan
01ee3568c1 name anonymous mmap address ranges for debugging on Linux (based on PR #1032 by @zhuker) 2025-03-21 16:19:54 -07:00
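As a hedged illustration of the technique in the commit above (labeling anonymous mmap ranges so they appear by name in `/proc/self/maps`), a self-contained sketch using the Linux `PR_SET_VMA_ANON_NAME` prctl (kernel 5.17+ with `CONFIG_ANON_VMA_NAME`); the helper name is hypothetical, not mimalloc's API:

```c
#include <stddef.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <linux/prctl.h>   // PR_SET_VMA, PR_SET_VMA_ANON_NAME

// Hypothetical helper: map anonymous memory and label the range for
// debugging. On kernels without anon-VMA naming the prctl fails with
// EINVAL and the mapping simply stays unnamed, so this is best effort.
static void* mmap_named(size_t len, const char* name) {
  void* p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return NULL;
  (void)prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
              (unsigned long)p, len, (unsigned long)name);
  return p;  // visible as "[anon:<name>]" in /proc/self/maps
}
```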
Daan
fdde679de1
Merge pull request #1040 from jbatez/dev
support MI_OPT_ARCH when using CMAKE_OSX_ARCHITECTURES with non-Apple Clang
2025-03-21 16:01:37 -07:00
Jo Bates
cf08c27d2b support MI_OPT_ARCH when using CMAKE_OSX_ARCHITECTURES with non-Apple Clang 2025-03-20 11:24:59 -07:00
Daan
891f9f4cf6 fix conflict marker (issue #1038) 2025-03-19 20:38:19 -07:00
Daan
e46877a5e1 merge from dev 2025-03-19 20:34:34 -07:00
Daan
660d749d77 do not default to MI_DEBUG=2 in release mode builds, even when NDEBUG is not defined, by defining MI_BUILD_RELEASE (issue #1037) 2025-03-19 20:29:29 -07:00
Daan
7eafaa9685 fix visibility warning (issue #1031) 2025-03-19 20:22:02 -07:00
Daan
b2dcab58f7 fix assertion failure (issue #1031) 2025-03-19 20:20:36 -07:00
Daan
acba60a248 Merge branch 'dev' into dev2 2025-03-19 19:17:03 -07:00
Daan
afbc581f8d add Windows x86 to the build pipeline 2025-03-19 19:16:10 -07:00
Daan
1aa88e0d9a try to fix pipeline trigger 2025-03-19 19:11:38 -07:00
Daan
c49c8acfda Merge branch 'dev' into dev2 2025-03-19 19:06:28 -07:00
Daan
47bf3a5b1b potential fix for sporadic assertion failure on random returning 0 (issue #1039) 2025-03-19 19:06:21 -07:00
Daan
992ebd4eae Merge branch 'dev' into dev2 2025-03-19 18:50:59 -07:00
Daan
26fa8be427 improved accounting of committed bytes (issue #1035) 2025-03-19 18:50:53 -07:00
Daan
6e5ed1ea67 Merge branch 'dev' into dev2 2025-03-19 16:12:23 -07:00
Daan
9a35bca556 possible fix for wrong accounting of committed bytes (issue #1035) 2025-03-19 16:12:17 -07:00
Daan
3d6065f987 merge from dev 2025-03-10 14:21:50 -07:00
Daan Leijen
f11732acdf set default compilation to c++ mode on msvc 2025-03-10 12:39:09 -07:00
daanx
4aae566191 fix link error with msvc in C mode (issue #1030) 2025-03-10 12:17:46 -07:00
Daan
2b895f4e97 bump version to 2.2.3 for further development 2025-03-06 21:04:32 -08:00
Daan
2fc6b14bab bump version to 1.9.3 for further development 2025-03-06 21:03:51 -08:00
Daan
e112300059 Merge branch 'master' of https://github.com/microsoft/mimalloc 2025-03-06 20:59:26 -08:00
Daan
a62932135a merge from dev2: v2.2.2 2025-03-06 20:59:12 -08:00
Daan
f81bf1b31a Merge branch 'dev' into dev2 2025-03-06 20:56:33 -08:00
Daan
2697d55fa5 Merge branch 'dev' into dev2 2025-03-06 20:41:05 -08:00
Daan
26aade92cf merge from dev 2025-03-06 20:15:32 -08:00
Daan
bd4ee09dc6 Merge branch 'dev' into dev2 2025-03-06 19:52:19 -08:00
Daan
a3bbb32ab1 Merge branch 'dev' into dev2 2025-03-06 19:21:45 -08:00
Daan
ddff485628 Merge branch 'dev' into dev2 2025-03-06 19:11:32 -08:00
daanx
5bed67cda9 Merge branch 'dev' into dev2 2025-03-05 16:09:43 -08:00
Daan Leijen
3e69b5323a Merge branch 'dev' into dev2 2025-03-05 15:37:54 -08:00
Daan
2b9385911b Merge branch 'dev' into dev2 2025-03-05 09:59:45 -08:00
Daan
346a5cb3ee Merge branch 'dev' into dev2 2025-03-05 09:56:17 -08:00
Daan
a7a3d317ef Merge branch 'dev' into dev2 2025-03-02 17:41:30 -08:00
Daan Leijen
a24d71f374 fix compile warning 2025-03-02 17:10:24 -08:00
Daan Leijen
2d81b6fee9 Merge branch 'dev' into dev2 2025-03-02 17:06:35 -08:00
Daan Leijen
fcc2b561e9 merge new statistics from dev 2025-03-02 15:52:52 -08:00
Daan Leijen
cd6737a1ee merge from dev 2025-03-01 19:58:44 -08:00
Daan Leijen
7758bb1067 merge from dev 2025-03-01 16:58:57 -08:00
Daan Leijen
2e52ef2f69 Merge branch 'dev' into dev2 2025-03-01 16:29:45 -08:00
Daan
7a4d7b8d18 Merge branch 'dev' into dev2 2025-02-24 10:50:39 -08:00
Daan
b0c1765ab9 Merge branch 'dev' into dev2 2025-02-20 19:36:48 -08:00
Daan
26a0299537 Merge branch 'dev' into dev2 2025-02-13 11:12:40 -08:00
Daan
b7779c7770 merge from dev 2025-02-11 16:30:03 -08:00
Daan
482d8e1ae7 Merge branch 'dev' into dev2 2025-02-11 09:52:10 -08:00
Daan
0d7956c7c2 Merge branch 'dev' into dev2 2025-02-10 20:55:41 -08:00
Daan
5ce6f9f407 Merge branch 'dev' into dev2 2025-02-10 20:29:46 -08:00
Daan
34a66514c0 Merge branch 'dev' into dev2 2025-02-09 19:27:07 -08:00
Daan
8199a7f952 Merge branch 'dev' into dev2 2025-02-09 18:44:29 -08:00
Daan
5f43fe91e0 Merge branch 'dev' into dev2 2025-02-09 18:34:53 -08:00
Daan
06ced84829 Merge branch 'dev2' 2025-02-08 13:18:09 -08:00
Daan Leijen
f945d35d27 Merge branch 'dev' into dev2 2025-02-08 12:51:34 -08:00
Daan
65a89854d1 Merge branch 'dev' into dev2 2025-02-08 12:06:48 -08:00
Daan
402a560c31 Merge branch 'dev' into dev2 2025-01-16 13:06:52 -08:00
Daan
81af010484 Merge branch 'dev2' of /Volumes/T9/dev/mimalloc into dev2 2025-01-16 13:06:31 -08:00
daanx
5e109f8b3f Merge branch 'dev' into dev2 2025-01-15 12:31:42 -08:00
daanx
39b7af9d5f merge from dev 2025-01-15 12:12:24 -08:00
Daan
67f733281f merge from dev 2025-01-13 16:59:56 -08:00
Daan
0ef19762fe bump vcpkg sha 2025-01-13 16:57:17 -08:00
Daan
e2db21e9ba remove INTERFACE_INCLUDE_DIRECTORIES 2025-01-13 16:55:56 -08:00
daanx
03e501bddb Merge branch 'dev' into dev2 2025-01-13 16:03:44 -08:00
daanx
2e597fbbf3 Merge branch 'dev' into dev2 2025-01-13 15:33:07 -08:00
daanx
65c3a5c015 Merge branch 'dev' into dev2 2025-01-13 15:03:38 -08:00
Daan
34b5d3c779 update vcpkg hash 2025-01-10 09:53:11 -08:00
Daan
191ea046e4 merge from dev 2025-01-10 09:50:30 -08:00
Daan Leijen
fab7397e6b merge from dev 2025-01-07 19:34:24 -08:00
Daan Leijen
24297c8dba Merge branch 'dev' into dev2 2025-01-07 19:20:10 -08:00
Daan Leijen
1f99d3d91b Merge branch 'dev' into dev2 2025-01-07 19:14:25 -08:00
daanx
248d8aad71 Merge branch 'dev' into dev2 2025-01-06 12:08:43 -08:00
Daan Leijen
c23fbaa16a bump version for further development 2025-01-05 15:46:11 -08:00
daanx
53d8b771aa Merge branch 'dev' into dev2 2025-01-05 11:42:23 -08:00
daanx
4ded84afdc Merge branch 'dev' into dev2 2025-01-05 11:12:34 -08:00
daanx
8d8f2ad190 Merge branch 'dev' into dev2 2025-01-04 17:45:22 -08:00
Daan Leijen
1d8348b411 Merge branch 'dev2' 2025-01-03 20:28:03 -08:00
Daan Leijen
a3070dc57f Merge branch 'dev' into dev2 2025-01-03 20:20:32 -08:00
Daan Leijen
adc4daa64e Merge branch 'dev' into dev2 2025-01-03 20:07:03 -08:00
Daan Leijen
6f90b55093 merge from dev 2025-01-03 18:45:30 -08:00
Daan Leijen
17dd7e1901 bump version to 2.1.9 for further development 2025-01-03 18:45:00 -08:00
Daan Leijen
c9b8b82bf6 merge from dev2 2025-01-03 18:22:26 -08:00
Daan Leijen
d984042ca6 Merge branch 'dev' into dev2 2025-01-03 18:18:37 -08:00
Daan Leijen
49c3dbec63 Merge branch 'dev' into dev2 2025-01-03 18:10:56 -08:00
Daan Leijen
7cac1e4a16 Merge branch 'dev' into dev-slice 2025-01-03 18:03:06 -08:00
Daan Leijen
fcdec6dec7 Merge branch 'dev' into dev-slice 2025-01-03 08:52:40 -08:00
Daan Leijen
3ad1461db0 Merge branch 'dev' into dev-slice 2025-01-02 17:21:41 -08:00
daanx
c761d2f933 Merge branch 'dev' into dev-slice 2025-01-02 15:02:51 -08:00
Daan Leijen
7ebdfac18b merge from dev 2024-12-31 14:20:00 -08:00
Daan
fbae6a98d5 Merge branch 'dev' into dev-slice 2024-12-26 11:16:47 -08:00
daanx
1f0ddbf59b Merge branch 'dev' into dev-slice 2024-12-26 10:33:34 -08:00
daanx
cb66bd7055 Merge branch 'dev' into dev-slice 2024-12-26 10:24:27 -08:00
daanx
7407057aca merge from dev 2024-12-26 10:10:45 -08:00
daanx
34cdf1a49f fix eager delayed setting 2024-12-25 13:56:38 -08:00
daanx
a2cb1d5d19 Merge branch 'dev' into dev-slice 2024-12-25 13:36:02 -08:00
daanx
a1cfe9667c Merge branch 'dev' into dev-slice 2024-12-25 13:31:08 -08:00
daanx
75459a1bd7 Merge branch 'dev' into dev-slice 2024-12-21 15:38:36 -08:00
daanx
2d01c22cd8 Merge branch 'dev' into dev-slice 2024-12-21 15:33:47 -08:00
Daan Leijen
3903f09b28 merge from dev 2024-12-20 13:57:24 -08:00
Daan Leijen
e22f19ed61 Merge branch 'dev' into dev-slice 2024-12-20 13:05:37 -08:00
Daan Leijen
9c5c628f99 merge from dev 2024-12-20 12:58:46 -08:00
Daan Leijen
41ccb766f6 Merge branch 'dev' into dev-slice 2024-12-20 12:55:50 -08:00
Daan Leijen
b6019897c1 Merge branch 'dev' into dev-slice 2024-12-19 11:50:42 -08:00
Daan Leijen
a188fe1a5e Merge branch 'dev' into dev-slice 2024-12-19 11:31:01 -08:00
daanx
27959c2403 Merge branch 'dev' into dev-slice 2024-12-18 14:45:51 -08:00
daanx
c9eafa8536 Merge branch 'dev' into dev-slice 2024-12-18 14:41:40 -08:00
daanx
6a930f542d Merge branch 'dev' into dev-slice 2024-12-17 17:54:00 -08:00
Daan Leijen
615043af7c Merge branch 'dev' into dev-slice 2024-12-17 00:35:47 -08:00
Daan Leijen
c57873ede7 merge from dev 2024-12-17 00:09:33 -08:00
Daan Leijen
806bf8ea7e merge from dev 2024-12-16 23:44:56 -08:00
Daan
577246d9ed Merge branch 'dev' into dev-slice 2024-11-25 19:29:49 -08:00
Daan
e333491952 Merge branch 'dev-slice-steal' into dev-slice 2024-11-25 19:29:26 -08:00
daanx
25f84f5fd1 merge from dev-steal 2024-11-25 19:17:25 -08:00
daanx
829ae9fdc3 merge from dev-steal 2024-11-25 16:59:15 -08:00
Daan
c58990d4eb fix syntax error (issue #963) 2024-11-22 13:55:10 -08:00
daanx
bb83080289 Merge branch 'dev-steal' into dev-slice-steal 2024-11-18 16:12:54 -08:00
daanx
5f585c83cd Merge branch 'dev-slice' into dev-slice-steal 2024-11-18 16:12:48 -08:00
daanx
9bb29b177a Merge branch 'dev' into dev-slice 2024-11-18 16:12:37 -08:00
daanx
ea3ac0750e merge from dev-steal 2024-11-18 15:08:17 -08:00
daanx
0fa99d41fc merge from dev-slice 2024-11-18 11:13:29 -08:00
Daan
54c5af5862 Merge branch 'dev' into dev-slice 2024-11-17 23:37:32 -08:00
daanx
54ee4e7632 merge from dev 2024-11-17 23:29:21 -08:00
Daan
d2b6455749 Merge branch 'dev' into dev-slice 2024-11-05 02:07:58 -08:00
Daan
826425d5ab fix merge error, issue #955 2024-11-02 06:24:28 -07:00
Daan
4f46cf7d5a ensure we don't reclaim a segment on a free if that would go above the target segment count 2024-10-29 22:40:58 -07:00
Daan
5cd09cec76 merge from dev 2024-10-29 22:24:24 -07:00
Daan
3f3aee918a Merge branch 'dev-slice' into dev-slice-steal 2024-10-29 20:09:12 -07:00
Daan
deba02e7be Merge branch 'dev' into dev-slice 2024-10-29 20:08:45 -07:00
Daan
eda16d7c91 remove wrong assertion 2024-10-29 20:07:35 -07:00
Daan
471bc768e3 Merge branch 'dev-slice' into dev-slice-steal 2024-10-27 22:20:19 -07:00
Daan
c0e1132674 merge from dev 2024-10-27 22:17:23 -07:00
Daan
06ee1d7949 Merge branch 'dev' into dev-slice 2024-10-27 21:11:04 -07:00
Daan
d3455ea29e Merge branch 'dev' into dev-slice 2024-10-27 18:17:38 -07:00
Daan
164bd8c06c Merge branch 'dev' into dev-slice 2024-10-21 05:10:21 -07:00
Daan
e58e41c8da merge from dev 2024-10-21 05:07:12 -07:00
Daan
81da26d7d3 make target test for stealing one less since we are about to reclaim_or_alloc a fresh segment 2024-10-11 10:52:35 -07:00
Daan
19ce2c6461 restore randomization when trying to reclaim abandoned segments 2024-10-11 10:44:43 -07:00
daanx
723869014f add ability to abandon segments after a threshold 2024-10-09 21:24:20 -07:00
Daan Leijen
4913c2c65b Merge branch 'dev-steal' into dev-slice-steal 2024-10-09 15:16:13 -07:00
daanx
428e3b9ee0 Merge branch 'dev-steal' into dev-slice-steal 2024-10-09 15:05:48 -07:00
daanx
933ac5c14c Merge branch 'dev-steal' into dev-slice-steal 2024-10-09 14:41:25 -07:00
daanx
03fbaedec5 Merge branch 'dev-steal' into dev-slice-steal 2024-10-09 14:35:49 -07:00
daanx
b3d502179a Merge branch 'dev-steal' into dev-slice-steal 2024-10-09 11:28:33 -07:00
Daan
ad02086d3b remove default MI_DEBUG_GUARDED 2024-08-21 17:07:01 -07:00
Daan
60206ec075 merge from dev 2024-08-21 17:01:46 -07:00
Daan
532141fda2 Merge branch 'dev' into dev-slice 2024-08-21 15:39:31 -07:00
Daan Leijen
3ec3aaf858 merge from dev 2024-08-21 11:33:11 -07:00
Daan Leijen
6a21db1017 Merge branch 'dev-guarded' into dev-slice-guarded 2024-08-21 11:31:10 -07:00
daanx
4ea9df3ee4 Merge branch 'dev-slice' into dev-slice-guarded 2024-08-20 13:13:10 -07:00
daanx
f163164d36 ensure start-offset in a segment respects minimal alignment 2024-08-20 13:12:51 -07:00
Daan Leijen
9731941c7b merge from dev-guarded 2024-08-20 13:11:54 -07:00
Daan Leijen
1ab67d44c8 merge from dev-guarded 2024-08-20 12:57:20 -07:00
Daan
01503df7f3 move declaration to avoid gcc warning, see issue #919 2024-08-12 13:51:39 -07:00
daanx
349b7dc2f4 merge from dev 2024-06-17 16:22:44 -07:00
daanx
3c5e480ce7 fix alignment test 2024-06-17 16:21:46 -07:00
daanx
3726cf94ba merge from dev 2024-06-17 16:18:03 -07:00
Daan
64f3afdda4 merge from dev 2024-06-03 21:31:36 -07:00
daanx
f79ea2461a merge from dev 2024-06-03 21:01:23 -07:00
daanx
614c4944e5 merge from dev 2024-06-03 18:27:33 -07:00
Daan Leijen
7c67114fd4 merge from dev 2024-06-03 18:14:44 -07:00
daanx
043df00a97 Merge branch 'dev' into dev-slice 2024-06-03 11:43:49 -07:00
Daan
3f2b6e6df9 merge from dev 2024-06-03 11:34:54 -07:00
Daan
52a4ff2cb9 Merge branch 'dev' into dev-slice 2024-06-03 09:26:08 -07:00
Daan
f77adf4a18 merge from dev (visit abandoned, upstream of python/cpython#114133) 2024-06-02 17:03:13 -07:00
Daan
556b81b2cc bump version to v2.1.8 for further development 2024-05-21 12:32:38 -07:00
Daan
2765ec9302 Merge branch 'dev-slice' 2024-05-21 12:27:13 -07:00
Daan
8c532c32c3 Merge branch 'dev' into dev-slice 2024-05-21 12:25:21 -07:00
Daan
b907d76439 Merge branch 'dev' into dev-slice 2024-05-21 12:15:00 -07:00
Daan
998401b6d7 Merge branch 'dev' into dev-slice 2024-05-21 11:58:10 -07:00
Daan
1462cc4e9a Merge branch 'dev' into dev-slice 2024-05-19 15:43:26 -07:00
Daan
65a0c82ac9 merge from dev (support for heap tag, upstream of python/cpython#113742) 2024-05-19 08:10:35 -07:00
Daan
b822a44cfe merge from dev, upstream of python/cpython#113263 2024-05-18 17:47:07 -07:00
Daan
d4a7c0ffcc merge from dev 2024-05-18 16:57:35 -07:00
Daan
1daa4ea627 Merge branch 'dev' into dev-slice 2024-05-18 10:01:31 -07:00
Daan
21434a2a9e Merge branch 'dev' into dev-slice 2024-05-17 09:07:24 -07:00
Daan
69f81732f8 merge from dev, bump version to v2.1.7 2024-05-16 19:07:14 -07:00
Daan
d468745b6b Merge branch 'dev' into dev-slice 2024-05-16 16:09:35 -07:00
Daan
c27b7c4fae Merge branch 'dev' into dev-slice 2024-05-16 14:41:21 -07:00
Daan
4a26a4568e fix out-of-bounds write on span free in huge segments 2024-05-16 14:26:05 -07:00
Daan
44b65b19df remove pre_size parameter for slices 2024-05-16 13:30:33 -07:00
Daan
a1b284de0a Merge branch 'dev-slice' 2024-05-13 10:18:56 -07:00
Daan
6141843614 Merge branch 'dev' into dev-slice 2024-05-13 10:17:32 -07:00
Daan
d824b9db2b fix page collection where a freed segment could be accessed 2024-05-13 10:11:57 -07:00
Daan
7b2e9c6e77 bump version to v2.1.6 2024-05-13 09:17:04 -07:00
Daan
8b15203950 merge from dev-slice 2024-05-13 09:15:44 -07:00
Daan
16d6baddf4 Merge branch 'dev' into dev-slice 2024-05-13 09:14:05 -07:00
Daan
f94f101d22 Merge branch 'dev' into dev-slice 2024-05-13 09:02:51 -07:00
Daan
ee2167da55 Merge branch 'dev' into dev-slice 2024-05-11 07:39:18 -07:00
Daan
2b42f64b19 merge from dev 2024-05-11 07:23:43 -07:00
Daan
e8206e9f6f merge from dev: improved aligned allocation 2024-05-11 07:08:48 -07:00
Daan
e17b99de77 merge from dev 2024-05-10 20:27:09 -07:00
Daan
5b3ccc3cf4 Merge branch 'dev' into dev-slice 2024-05-10 17:33:15 -07:00
Daan
6c5d6e1f72 fix max allocation size on 32-bit systems (issue #882) 2024-05-10 17:24:52 -07:00
Daan
3b4c61b4cf Merge branch 'dev' into dev-slice 2024-05-10 17:11:21 -07:00
Daan
bc5dca39ba Merge branch 'dev' into dev-slice 2024-04-26 23:40:45 -07:00
Daan
bbcfe0f6c7 Merge branch 'dev' into dev-slice 2024-04-24 19:48:16 -07:00
Daan
45de947be5 bump version to 2.1.5 for further development 2024-04-22 11:26:15 -07:00
Daan
229ec9cbdc merge from dev-slice v2.1.4 2024-04-22 11:10:56 -07:00
Daan
1b3cb0258f Merge branch 'dev' into dev-slice 2024-04-22 11:09:16 -07:00
Daan
6ba166f528 merge from dev 2024-04-22 11:05:01 -07:00
Daan
79aa5bbb7a Merge branch 'dev' into dev-slice 2024-04-20 19:59:19 -07:00
Daan
70782c3670 merge from dev 2024-04-20 16:47:48 -07:00
Daan
19626c5381 Merge branch 'dev' into dev-slice 2024-04-19 13:44:40 -07:00
Daan
25002c03df Merge branch 'dev' into dev-slice 2024-04-19 13:31:56 -07:00
Daan
83af243bca merge from dev 2024-04-19 12:50:32 -07:00
Daan
35658681e9 purge segments on forced collect (issue #878) 2024-04-19 12:29:49 -07:00
Daan
833121dbc0 Merge branch 'dev' into dev-slice 2024-04-19 12:07:06 -07:00
Daan
e688d5cbc8 merge from dev 2024-04-19 12:03:33 -07:00
Daan
01ba98b183 Merge branch 'dev' into dev-slice 2024-04-19 10:54:47 -07:00
Daan
a7c033caed avoid unused warning 2024-04-19 10:14:27 -07:00
Daan
da1d64f05a Merge branch 'dev' into dev-slice 2024-04-19 10:08:57 -07:00
Daan
78418b3d24 fix overflow of slice count, PR #877 2024-04-19 10:03:14 -07:00
Daan
f199b888b4 Merge branch 'dev' into dev-slice 2024-03-29 11:17:36 -07:00
Daan
bcf975c028 merge from dev 2024-03-25 17:00:06 -07:00
Daan
c6077376d4 merge from dev 2024-03-25 16:33:04 -07:00
Daan
415127ba31 Merge branch 'dev' into dev-slice 2024-03-25 16:28:30 -07:00
daanx
630521e8db merge from dev 2024-03-25 16:05:39 -07:00
Daan Leijen
c1d7d7f563 merge from dev 2024-03-25 15:32:56 -07:00
Daan Leijen
cc8d89a085 update comments 2024-03-25 07:35:49 -07:00
Daan Leijen
7c17c3d33e optimize page struct layout 2024-03-24 22:41:33 -07:00
Daan Leijen
af3f2f9168 fix assertions and warnings on gcc 2024-03-24 22:32:32 -07:00
Daan Leijen
3c85983a35 merge from dev with new page fields (block_size and is_huge) 2024-03-24 22:12:33 -07:00
Daan Leijen
e363f477a7 Merge branch 'dev' into dev-slice 2024-03-24 10:39:39 -07:00
Daan Leijen
6399dbdc30 merge from dev (new free.c) 2024-03-24 09:56:26 -07:00
Daan
2cca58dfc8 Merge branch 'dev' into dev-slice 2024-03-03 18:08:27 -08:00
daanx
abb8eab9b2 merge from dev 2024-03-03 17:43:52 -08:00
Daan
757f15fb7c merge from dev-slice 2024-03-03 14:17:27 -08:00
Daan
bcb8ce94f1 merge from dev-abandon 2024-03-03 14:17:07 -08:00
Daan
ac5d261e08 Merge branch 'dev' into dev-slice 2024-03-03 13:22:33 -08:00
Daan
c8f840741a Merge branch 'dev' into dev-slice 2024-03-03 13:19:35 -08:00
Daan
7986e92e9f merge from dev 2024-03-03 09:45:49 -08:00
Daan
e5283eda92 Merge branch 'dev-abandon' into dev-slice-abandon 2024-03-03 08:47:05 -08:00
Daan
61cc89a98d Merge branch 'dev-slice' into dev-slice-abandon 2024-03-03 08:46:58 -08:00
Daan
0a5cf933fc Merge branch 'dev' into dev-slice 2024-03-03 08:46:40 -08:00
daanx
5a2ed6d977 fix assertion 2024-03-02 18:27:06 -08:00
daanx
8e1f8f4d5c Merge branch 'dev-abandon' into dev-slice-abandon 2024-03-02 18:25:09 -08:00
daanx
656c81a096 Merge branch 'dev-abandon' into dev-slice-abandon 2024-03-02 17:36:26 -08:00
daanx
ccd2ac644d merge from dev-slice 2024-03-02 17:36:23 -08:00
Daan
7ff4607f6c merge from dev 2024-03-02 17:26:38 -08:00
daanx
4482237a33 Merge branch 'dev-abandon' into dev-slice-abandon 2024-03-01 18:59:39 -08:00
daanx
2c433a2b23 merge from dev-abandon 2024-03-01 18:54:29 -08:00
daanx
2845f0cab3 merge from dev-abandon 2024-03-01 16:26:46 -08:00
daanx
280123bd5c purge on page free as well 2024-03-01 15:25:57 -08:00
daanx
f6320bd3be merge from dev-abandon 2024-03-01 15:15:24 -08:00
daanx
e5d1cb3092 merge from dev-abandon 2024-03-01 10:56:43 -08:00
daanx
f57c9e16bd merge from dev-abandon 2024-02-29 18:34:50 -08:00
daanx
8fb51aae4d merge from dev-abandon 2024-02-29 15:51:37 -08:00
Daan Leijen
10efe291af Merge branch 'dev' into dev-slice 2023-06-30 12:21:19 -07:00
Daan Leijen
578d20f237 Merge branch 'dev' into dev-slice 2023-06-30 12:16:51 -07:00
microsoft-github-policy-service[bot]
4e50d6714d
Auto merge mandatory file PR
This PR is auto-merged as it contains a mandatory file and has been open for more than 10 days.
2023-06-12 18:55:34 +00:00
microsoft-github-policy-service[bot]
5c90133021
Microsoft mandatory file 2023-06-02 17:40:26 +00:00
daanx
24668b9daf Merge branch 'dev' into dev-slice 2023-05-19 21:22:24 -07:00
daanx
c569b77b77 Merge branch 'dev' into dev-slice 2023-05-19 17:53:02 -07:00
daanx
2f8fb6aade Merge branch 'dev' into dev-slice 2023-05-19 09:12:59 -07:00
daanx
5e09f1b051 Merge branch 'dev' into dev-slice 2023-04-24 22:30:55 -07:00
daanx
2cbf68b5e7 Merge branch 'dev-slice' 2023-04-24 09:32:44 -07:00
daanx
43ce4bd7fd Merge branch 'dev' into dev-slice 2023-04-24 09:08:36 -07:00
daanx
56c0a8025a merge from dev 2023-04-24 09:06:56 -07:00
daanx
074adc14e5 Merge branch 'dev' into dev-slice 2023-04-23 20:53:39 -07:00
daanx
c93a0775cb Merge branch 'dev' into dev-slice 2023-04-23 20:32:32 -07:00
daanx
95c2059e89 fix asan mem tracking 2023-04-23 19:48:49 -07:00
daanx
c0bc8abe14 Merge branch 'dev' into dev-slice 2023-04-23 19:45:38 -07:00
daanx
8f768ac841 merge from dev 2023-04-23 19:33:25 -07:00
daanx
a6e7baec73 merge from dev 2023-04-23 16:08:27 -07:00
daanx
c0695fefd0 merge from dev 2023-04-23 13:14:01 -07:00
daanx
2d85abdece merge from dev 2023-04-23 13:06:50 -07:00
daanx
6303cd46cf Merge branch 'dev' into dev-slice 2023-04-22 21:53:00 -07:00
daanx
f22801bc15 Merge branch 'dev' into dev-slice 2023-04-22 21:32:54 -07:00
daanx
426d2b3643 Merge branch 'dev' into dev-slice 2023-04-22 17:26:59 -07:00
daanx
063f25ba11 merge from dev 2023-04-22 10:20:24 -07:00
daanx
6faff49998 merge from dev 2023-04-21 11:35:48 -07:00
daanx
672e3dde8f Merge branch 'dev' into dev-slice 2023-04-18 19:19:51 -07:00
daanx
eff7940f30 merge from dev 2023-04-18 17:54:07 -07:00
daanx
fb07276d48 merge from dev 2023-04-18 17:33:01 -07:00
daanx
3d7635b7b3 Merge branch 'dev' into dev-slice 2023-04-17 13:01:19 -07:00
daanx
015aac05a5 keep tsan enabled for dev-slice 2023-04-17 12:24:51 -07:00
daanx
61fc830014 Merge branch 'dev' into dev-slice 2023-04-17 12:24:19 -07:00
daanx
a200f013bf Merge branch 'dev' into dev-slice 2023-04-17 12:14:38 -07:00
daanx
15e6b6e634 Merge branch 'dev' into dev-slice 2023-04-17 11:36:25 -07:00
daanx
6d42f2ac39 increase pipeline timeout to 10min for tsan 2023-04-17 11:35:51 -07:00
daanx
a2954397d9 Merge branch 'dev' into dev-slice 2023-04-17 10:19:55 -07:00
daanx
c3200d1623 merge from dev 2023-04-17 10:18:46 -07:00
daanx
866d402d0f Merge branch 'dev-reset' into dev-slice-reset 2023-04-17 09:49:49 -07:00
daanx
7de3201767 Merge branch 'dev-slice' into dev-slice-reset 2023-04-17 09:49:43 -07:00
daanx
4dce9c0f00 merge from dev 2023-04-17 09:37:27 -07:00
daanx
66aa7a17ac further fix for atomic build error suggested by Davide Di Gennaro (issue #729, PR #724) 2023-04-17 09:33:13 -07:00
Daan
f890679316 Merge branch 'dev' into dev-slice 2023-04-17 08:58:18 -07:00
daanx
5693506cb3 merge from dev-reset 2023-04-16 19:43:57 -07:00
daanx
689147e089 merge from dev-reset 2023-04-16 16:43:50 -07:00
daanx
0be48b19a7 Merge branch 'dev-reset' into dev-slice-reset 2023-04-16 12:33:06 -07:00
daanx
becf379ecd merge from dev-reset 2023-04-16 12:32:20 -07:00
daanx
b0104ef4fd merge from dev 2023-04-16 11:01:25 -07:00
daanx
6f531a61ef Merge branch 'dev-reset' into dev-slice-reset 2023-04-15 17:59:26 -07:00
Daan
62708b9843 Merge branch 'dev' into dev-slice 2023-04-14 10:03:48 -07:00
daanx
e35e919ea4 remove segment-cache as it is superseded by better arena management 2023-04-13 15:37:54 -07:00
daanx
e6681f2d4b Merge branch 'dev-reset' into dev-slice-reset 2023-04-13 15:27:33 -07:00
daanx
991d04b2b1 merge from dev-reset 2023-04-13 13:37:56 -07:00
daanx
96b02dda1f fix accidental cmake move 2023-04-08 17:55:07 -07:00
daanx
269e0ea80b merge from dev-reset 2023-04-08 17:51:50 -07:00
daanx
fa621d5224 Merge branch 'dev-reset' into dev-slice-reset 2023-04-05 11:57:43 -07:00
daanx
2715191f58 merge from dev-reset 2023-04-05 11:22:36 -07:00
daanx
940e890dd0 Merge branch 'dev-reset' into dev-slice-reset 2023-04-04 19:18:58 -07:00
daanx
1e36f7efe9 Merge branch 'dev-reset' into dev-slice-reset 2023-04-04 19:03:29 -07:00
daanx
c0c81a1b7b merge from dev-reset 2023-04-04 18:47:55 -07:00
daanx
9f0da5c195 merge ide files 2023-04-04 16:48:02 -07:00
daanx
24034c997c merge from dev-reset 2023-04-04 16:44:07 -07:00
daanx
b6603c2ee0 merge from dev-reset 2023-04-04 13:02:06 -07:00
daanx
a836d233ff merge from dev-reset 2023-04-04 12:27:47 -07:00
daanx
09297ba8cf wip: purgeable arenas 2023-04-04 11:46:02 -07:00
daanx
33d7503fdb rename to arena_eager_commit 2023-04-03 19:57:26 -07:00
daanx
d22a13c990 wip: purgeable arenas, various fixes 2023-04-03 17:58:28 -07:00
daanx
a9f42376b7 small changes; make minimal commit most fine grained 2023-04-03 16:17:02 -07:00
daanx
f5ab38f87b wip: use purge throughout for segments and arenas; more aggressive delays 2023-04-03 15:06:09 -07:00
daanx
94a867869e wip: purgeable arenas; fix asan warnings 2023-04-03 13:53:43 -07:00
daanx
fcec09832a Merge branch 'dev-reset' into dev-slice-reset 2023-04-03 12:48:36 -07:00
daanx
e96af1dba0 Merge branch 'dev-slice' into dev-slice-reset 2023-04-03 12:48:30 -07:00
daanx
5e4f7d332e bump version for further development 2023-04-03 12:45:30 -07:00
Daan Leijen
5ac9e36ed6 Merge branch 'dev-slice' 2023-04-03 12:39:35 -07:00
Daan Leijen
3e313478d9 merge from dev 2023-04-03 12:32:28 -07:00
Daan Leijen
80d7267dad Merge branch 'dev' into dev-slice 2023-04-01 11:35:35 -07:00
Daan Leijen
c344bf5c20 wip: work on purgeable arenas 2023-03-31 21:18:50 -07:00
Daan Leijen
f4e006fa76 merge from dev-reset 2023-03-31 21:10:25 -07:00
Daan Leijen
92ab16d5eb Merge branch 'dev-reset' into dev-slice-reset 2023-03-31 10:44:19 -07:00
Daan Leijen
f8faa8f2a1 Merge branch 'dev-slice' into dev-slice-reset 2023-03-31 10:44:14 -07:00
Daan Leijen
b9e28eb23a merge from dev 2023-03-31 10:34:19 -07:00
Daan Leijen
14fdd9a102 Merge branch 'dev' into dev-slice 2023-03-31 10:25:49 -07:00
Daan Leijen
8c526622ff merge from dev-reset 2023-03-30 16:24:10 -07:00
Daan Leijen
9c544aba41 bump version for further development 2023-03-30 09:27:21 -07:00
Daan Leijen
acdd35290e Merge branch 'dev-slice' 2023-03-29 16:43:22 -07:00
Daan Leijen
ee7814dfe9 Merge branch 'dev' into dev-slice 2023-03-29 16:43:11 -07:00
Daan Leijen
70450b80d2 fix readme links 2023-03-29 16:40:50 -07:00
Daan Leijen
5021918c20 Merge branch 'dev' into dev-slice 2023-03-29 16:40:14 -07:00
Daan Leijen
c2e5031710 merge from dev-slice 2023-03-29 16:33:27 -07:00
Daan Leijen
8f0901b887 Merge branch 'dev' into dev-slice 2023-03-29 16:31:47 -07:00
Daan Leijen
c4220e43b6 merge from dev 2023-03-29 16:24:55 -07:00
Daan Leijen
72f4e0aedd Merge branch 'dev' into dev-slice 2023-03-29 13:00:59 -07:00
Daan
5a8f26359c Merge branch 'dev' into dev-slice 2023-03-29 12:40:03 -07:00
Daan Leijen
42c8015cbc merge from dev 2023-03-29 12:31:49 -07:00
Daan Leijen
cbd7f94a44 merge from dev 2023-03-29 11:48:46 -07:00
Daan Leijen
ec97e2ab37 Merge branch 'dev' into dev-slice 2023-03-28 16:48:59 -07:00
Daan Leijen
6dd3073a75 avoid caching segments in pinned arenas; happens with huge OS page reservations 2023-03-28 10:16:19 -07:00
Daan Leijen
391f8bbd72 merge from dev 2023-03-28 10:00:18 -07:00
Daan Leijen
90600188a8 remove superfluous prototypes 2023-03-28 09:58:31 -07:00
Daan Leijen
4c681cffe0 merge from dev 2023-03-28 09:27:06 -07:00
Daan Leijen
6a8b158a9f merge from dev 2023-03-28 09:15:01 -07:00
Daan Leijen
165b847051 improve segment_cache assertions 2023-03-23 16:11:38 -07:00
Daan Leijen
1cbc55f2b8 fix initialization of decommit mask for huge pages 2023-03-23 13:05:10 -07:00
Daan Leijen
b0e4309210 Merge branch 'dev' into dev-slice 2023-03-23 11:21:57 -07:00
Daan
564222e737 Merge branch 'dev' into dev-slice 2023-03-22 09:56:51 -07:00
Daan
0f07900601 Merge branch 'dev' into dev-slice 2023-03-22 09:49:45 -07:00
Daan
65660a83be merge from dev 2023-03-22 09:49:43 -07:00
Daan Leijen
be04391ab7 Merge branch 'dev' into dev-slice 2023-03-21 19:43:27 -07:00
Daan Leijen
bdf1021886 merge from dev 2023-03-20 14:31:01 -07:00
Daan Leijen
b0ba746307 merge from dev 2023-03-20 14:28:21 -07:00
Daan
0877c941de merge from dev 2023-03-20 14:20:48 -07:00
Daan Leijen
268dceaa12 Merge branch 'dev' into dev-slice 2023-03-20 13:55:47 -07:00
Daan Leijen
a582d760ed refine start offset in a page 2023-03-20 12:39:15 -07:00
Daan Leijen
c935521bf9 fix test and project 2023-03-20 12:32:41 -07:00
Daan Leijen
4bf63300b3 fix alignment issue #700 2023-03-20 12:29:36 -07:00
Daan Leijen
90f866c5bc fix warnings for issue #709 2023-03-20 11:45:34 -07:00
Daan Leijen
dc0dddcb7d Merge branch 'dev' into dev-slice 2023-03-20 11:43:41 -07:00
Daan Leijen
4a18fa3775 Merge branch 'dev-platform' into dev-slice-platform 2023-03-20 11:31:03 -07:00
Daan Leijen
d40a26a536 merge from dev 2023-03-20 11:22:11 -07:00
Daan Leijen
e0763f81bc Merge branch 'dev-platform' into dev-slice-platform 2023-03-20 11:11:13 -07:00
Daan Leijen
993c0a49b4 fix includes 2023-03-20 11:06:28 -07:00
Daan Leijen
3fc30c4a1e merge from dev, version bump to 2.1.0 2023-03-20 11:05:18 -07:00
Daan Leijen
1a5afd9976 Merge branch 'dev-platform' into dev-slice-platform 2023-03-20 10:23:14 -07:00
Daan Leijen
99c9f55511 simplify primitives API 2023-03-19 20:21:20 -07:00
Daan Leijen
8fbe7aae50 update process info primitive api 2023-03-19 19:11:43 -07:00
Daan Leijen
eca98ac056 Merge branch 'dev-platform' into dev-slice-platform 2023-03-16 20:17:01 -07:00
Daan Leijen
92358f850d Merge branch 'dev-platform' into dev-slice-platform 2023-03-16 20:14:07 -07:00
Daan Leijen
8a560908ea Merge branch 'dev-slice' into dev-slice-platform 2023-03-16 20:13:54 -07:00
Daan Leijen
33880f9a23 Merge branch 'dev' into dev-slice 2023-03-16 20:13:27 -07:00
Daan Leijen
1e8769ec95 Merge branch 'dev-platform' into dev-slice-platform 2023-03-16 20:11:59 -07:00
Daan Leijen
61ae0d1d5e merge from dev-platform 2023-03-15 20:42:59 -07:00
Daan Leijen
4da64ac904 Merge branch 'dev-slice' into dev-slice-platform 2023-03-15 20:33:52 -07:00
Daan Leijen
c80e2d5b03 Merge branch 'dev' into dev-slice 2023-03-15 20:33:16 -07:00
Daan Leijen
ea40b8fcda merge from dev-platform 2023-03-15 19:17:20 -07:00
Daan Leijen
348800600a Merge branch 'dev-platform' into dev-slice-platform 2023-03-15 19:07:48 -07:00
Daan Leijen
65bbe4014f merge from dev-platform 2023-03-15 15:10:12 -07:00
Daan Leijen
a90737a7fa fix valgrind tracking for zero initialized segments 2023-03-06 10:44:43 -08:00
Daan Leijen
8184e9de1f Merge branch 'dev' into dev-slice 2023-03-06 10:29:27 -08:00
Daan Leijen
7ec798e197 make test-stress match the one in dev 2023-03-05 22:54:10 -08:00
Daan Leijen
43533fa968 Merge branch 'dev' into dev-slice 2023-03-05 22:29:54 -08:00
Daan Leijen
d0eebedfbf merge from dev 2023-03-05 22:15:07 -08:00
Daan Leijen
6f31115c7f fix segment defined memory for valgrind 2023-03-05 22:11:42 -08:00
Daan
b3176ada74 merge from dev, fix commit size in asan tracking 2023-03-05 11:17:39 -08:00
Daan
f32b42e6cc Merge branch 'dev' into dev-slice 2023-03-04 16:03:22 -08:00
Daan
e4b9ea918f merge from dev 2023-03-04 14:52:30 -08:00
Daan Leijen
5fe4a3480f revert default max align commit back to 16 2023-02-20 12:21:06 -08:00
daan
8be4cee418 change max align size to 8 2023-02-20 12:15:26 -08:00
Daan Leijen
e7b941a136 Merge branch 'dev' into dev-slice 2023-02-07 11:08:10 -08:00
Daan Leijen
1a136c7e3d Merge branch 'dev' into dev-slice 2023-02-01 11:28:22 -08:00
Daan Leijen
efb7a159d5 Merge branch 'dev' into dev-slice 2023-02-01 11:23:37 -08:00
Daan Leijen
a6f092a6f5 Merge branch 'dev' into dev-slice 2023-01-31 21:08:55 -08:00
Daan Leijen
1e4b6b734e fix assertion that was too strict (issue #691) 2023-01-31 16:02:35 -08:00
Daan Leijen
dd7348066f Merge branch 'dev-slice' 2022-12-23 13:35:58 -08:00
Daan Leijen
7bb34e056c fix readme 2022-12-23 13:35:50 -08:00
Daan Leijen
df6e288519 merge from dev-slice v2.0.9 2022-12-23 13:34:21 -08:00
Daan Leijen
28cf67e5b6 bump version to 2.0.9 2022-12-23 13:31:56 -08:00
Daan Leijen
e87badaa1b Merge branch 'dev' into dev-slice 2022-12-23 13:05:05 -08:00
Daan Leijen
d1fff1119a reorganize span free code 2022-12-21 12:19:09 -08:00
Daan Leijen
a873ddc4fa merge from dev 2022-12-21 12:10:46 -08:00
Daan Leijen
0f796a56a9 fix bug where eager committed memory would be immediately decommitted; possible fix for issue #669 2022-12-20 18:59:55 -08:00
Daan Leijen
11ddba7a06 Merge branch 'dev' into dev-slice 2022-12-19 18:59:45 -08:00
Daan Leijen
9b2dd0d757 Merge branch 'dev' into dev-slice 2022-12-19 18:02:13 -08:00
Daan Leijen
d862c8a3eb Merge branch 'dev' into dev-slice 2022-12-19 17:53:51 -08:00
Daan Leijen
35997c0384 Merge branch 'dev' into dev-slice 2022-12-19 17:38:53 -08:00
Daan
9b558e2a07
Merge pull request #655 from rganesan/patch-1
Fix typo
2022-12-19 17:26:55 -08:00
Daan Leijen
92ffc25d79 merge from dev 2022-12-19 17:08:45 -08:00
Ganesan Rajagopal
aea0de4777
Fix typo 2022-12-03 16:27:33 +05:30
Daan Leijen
6304bbec6e Merge branch 'dev' into dev-slice 2022-11-28 11:12:52 -08:00
Daan Leijen
911ea81630 Merge branch 'dev' into dev-slice 2022-11-28 10:55:35 -08:00
Daan Leijen
90c8f0516c Merge branch 'dev' into dev-slice 2022-11-28 09:18:04 -08:00
Daan Leijen
447c2f18c5 Merge branch 'dev' into dev-slice 2022-11-27 13:00:30 -08:00
Daan Leijen
6988bbcca0 fix duplicate definition (issue #652) 2022-11-27 12:03:16 -08:00
Daan
afb5468ded Merge branch 'dev' into dev-slice 2022-11-25 16:38:46 -08:00
Daan Leijen
55dac20805 Merge branch 'dev' into dev-slice 2022-11-25 14:28:06 -08:00
Daan Leijen
58d12723d6 make mi_collect(true) actually free the segment caches 2022-11-23 10:34:19 -08:00
Daan Leijen
c613c1de94 merge from dev 2022-11-23 10:10:55 -08:00
Daan Leijen
9e56567d23 fix decommit for huge objects 2022-11-23 09:50:29 -08:00
Daan Leijen
20880807ce remove comment 2022-11-22 22:05:18 -08:00
Daan Leijen
ed82aa90ea merge from dev where huge objects are now part of page queues again 2022-11-22 21:54:58 -08:00
Daan Leijen
85b5fa11bc merge segment_init refactoring from dev 2022-11-22 19:03:26 -08:00
Daan
d01ecc272b Merge branch 'dev' into dev-slice 2022-11-22 10:58:40 -08:00
Daan Leijen
83c027c4bf fix medium page size to 512k 2022-11-21 18:56:56 -08:00
Daan Leijen
c007747169 back to 64k pages but 32MiB segments and a 1MiB minimal commit size 2022-11-21 15:03:15 -08:00
Daan Leijen
3ccf849c1a more refined decommit extend delay 2022-11-21 15:02:41 -08:00
Daan Leijen
961778f0a7 Merge branch 'dev' into dev-slice 2022-11-21 10:25:36 -08:00
Daan Leijen
1a7f6f376d move threadid field 2022-11-21 10:22:50 -08:00
Daan Leijen
163afcce75 merge from dev with the destroy_on_exit option 2022-11-21 10:03:52 -08:00
Daan Leijen
7ebd1c6daf merge from dev 2022-11-18 10:22:45 -08:00
Daan Leijen
82a765a255 experiment with 32KiB slices and increased MI_MIN_EXTEND 2022-11-18 09:38:01 -08:00
Daan Leijen
b940543cd5 experiment with smaller segment size (32MiB) and finer minimal commit (1MiB) 2022-11-17 18:57:45 -08:00
Daan
c7d4a099d9
Merge pull request #641 from ofek/patch-1
Fix typo
2022-11-07 18:00:10 -08:00
daan
ba8c0f8903 avoid warning for large aligned blocks on linux 2022-11-07 17:21:03 -08:00
daan
67439bb4e5 add NULL check in _mi_segment_of 2022-11-07 17:12:14 -08:00
daan
89ba6cc2fa merge from dev 2022-11-07 16:48:00 -08:00
daan
a27637acb3 merge from dev 2022-11-07 14:58:17 -08:00
daan
29405c7d70 fix initializer 2022-11-07 14:53:27 -08:00
daan
c26c5da016 Merge branch 'dev' into dev-slice 2022-11-07 14:51:18 -08:00
daan
5d6b149ea9 bump version to v2.0.8 for further development 2022-11-07 11:37:22 -08:00
daan
c55cc260ab merge from dev-align 2022-11-07 11:29:03 -08:00
daan
96f1574faf fix huge page aligned allocation size in secure mode 2022-11-07 10:51:15 -08:00
daan
1632dd73c9 remove superfluous asserts 2022-11-06 21:03:23 -08:00
daan
651a99b35d refine last slice setting for large alignments 2022-11-06 20:57:27 -08:00
daan
562efed54d fix full SEGMENT_SIZE internal alignment by adding one more slice entry 2022-11-06 20:36:51 -08:00
daan
4814a649be merge from dev-align 2022-11-06 16:23:42 -08:00
Ofek Lev
18a4b90501
Fix typo 2022-11-05 16:29:18 -04:00
Daan
0e3d543a13
Update readme.md 2022-11-03 17:11:21 -07:00
daan
91ba1f374d merge from dev 2022-11-03 17:06:34 -07:00
daan
f859190cba update to v2.0.7 2022-11-03 17:05:38 -07:00
daan
e4630e7985 Merge branch 'dev' into dev-slice 2022-11-02 10:56:26 -07:00
daan
63397d857e Merge branch 'dev' into dev-slice 2022-11-02 10:25:59 -07:00
daan
6f8e115980 Merge branch 'dev' into dev-slice 2022-11-02 09:51:03 -07:00
daan
b3b479490e Merge branch 'dev' into dev-slice 2022-11-01 16:34:24 -07:00
daan
cb3b73ba36 merge from dev 2022-11-01 16:33:50 -07:00
daan
933713292c merge from dev 2022-11-01 16:27:50 -07:00
daan
9f36808a7f initial api for heaps restricted to a certain arena 2022-11-01 16:22:51 -07:00
daan
e961ef705e merge arena_id from dev 2022-11-01 14:16:49 -07:00
daan
43ce102425 Merge branch 'dev' into dev-slice 2022-10-31 16:17:17 -07:00
daan
923ef1ba74 Merge branch 'dev' into dev-slice 2022-10-31 15:49:48 -07:00
daan
0b1012aee0 Merge branch 'dev' into dev-slice 2022-10-31 15:30:54 -07:00
daan
3f122692eb Merge branch 'dev' into dev-slice 2022-10-31 11:01:10 -07:00
daan
4442fda895 Merge branch 'dev' into dev-slice 2022-10-31 10:35:05 -07:00
daan
cf2c2bac85 Merge branch 'dev' into dev-slice 2022-10-31 10:12:20 -07:00
daan
c128cf69be fix alignment_max for 32-bit systems (unfortunately, we need to include stdint.h now) 2022-10-30 19:47:54 -07:00
daan
24aac114e9 Merge branch 'dev' into dev-slice 2022-10-30 19:18:14 -07:00
daan
a3415079ec Merge branch 'dev' into dev-slice 2022-10-30 19:11:59 -07:00
daan
de21d04ba5 Merge branch 'dev' into dev-slice 2022-10-30 14:53:42 -07:00
daan
66525ccae3 merge from dev-track 2022-10-30 14:31:21 -07:00
Daan Leijen
3d6017de7c Merge branch 'dev' into dev-slice 2022-05-21 10:21:55 -07:00
Daan Leijen
cacb387a61 Merge branch 'dev' into dev-slice 2022-04-20 17:34:56 -07:00
Daan Leijen
83d84b8703 increase max alignment limit to 16MiB (issue #576) 2022-04-20 09:54:24 -07:00
Daan Leijen
c48c275a8f Merge branch 'dev' into dev-slice 2022-04-19 20:16:59 -07:00
Daan Leijen
9459513813 Merge branch 'dev' into dev-slice 2022-04-19 19:59:51 -07:00
Daan Leijen
a90b98a144 update to vs2022 2022-04-19 19:57:57 -07:00
Daan Leijen
eb5deccea8 Merge branch 'dev' into dev-slice 2022-04-19 19:57:00 -07:00
Daan Leijen
413141ae29 merge from dev 2022-04-19 19:55:03 -07:00
Daan Leijen
487b401b26 Merge branch 'dev' into dev-slice 2022-04-19 18:43:32 -07:00
Daan Leijen
a949c9321c update vs2022 solution 2022-04-19 11:17:53 -07:00
Daan Leijen
5c64f51503 Merge branch 'dev' into dev-slice 2022-04-19 11:07:41 -07:00
Daan Leijen
f2712f4a8f Merge branch 'dev' into dev-slice 2022-04-14 16:54:04 -07:00
Daan Leijen
f819dbb4e4 fix trailing comma 2022-04-14 16:12:02 -07:00
Daan Leijen
12a3a4c51a merge from dev 2022-04-14 16:11:29 -07:00
Daan Leijen
f9416ce71c merge from dev 2022-04-14 16:09:12 -07:00
Daan Leijen
b86bbbff00 merge from dev 2022-04-14 16:07:57 -07:00
Daan
dd929659ab fix wrong assertion 2022-04-14 11:28:40 -07:00
Daan Leijen
4b95e8ea1d Merge branch 'dev' into dev-slice 2022-04-10 13:02:38 -07:00
Daan Leijen
a3ced56b18 merge from dev 2022-04-09 16:22:10 -07:00
Daan Leijen
0a1d0bbcbf Merge branch 'dev' into dev-slice 2022-04-09 15:59:11 -07:00
Daan Leijen
7e492f4420 merge from dev 2022-04-09 15:07:07 -07:00
Daan Leijen
157c9b0966 Merge branch 'dev' into dev-slice 2022-04-09 14:08:36 -07:00
Daan Leijen
12c91999ac Merge branch 'dev' into dev-slice 2022-04-09 13:48:30 -07:00
Daan Leijen
774d12f12e merge from dev 2022-04-09 13:26:38 -07:00
Daan Leijen
ea0f5b8779 use new MI_ATOMIC_VAR_INIT 2022-04-08 14:52:15 -07:00
Daan Leijen
2d8f13fb93 Merge branch 'dev-slice' of https://github.com/microsoft/mimalloc into dev-slice 2022-04-08 14:46:33 -07:00
Daan
862f07bc76 Merge branch 'dev' into dev-slice 2022-04-08 14:44:35 -07:00
Daan
131b62283b Merge branch 'dev' into dev-slice 2022-04-08 14:10:08 -07:00
daan
984e946f76 Merge branch 'dev' into dev-slice 2022-04-07 20:26:43 -07:00
daan
196ceeac59 merge from dev 2022-04-07 20:18:52 -07:00
Daan Leijen
6431176f4e Merge branch 'dev' into dev-slice 2022-04-07 19:09:39 -07:00
Daan
2a4a3dfa23 Merge branch 'dev' into dev-slice 2022-04-07 16:12:30 -07:00
Daan
0075a81879 Merge branch 'dev' into dev-slice 2022-04-07 13:02:53 -07:00
Daan Leijen
88f9c94101 Merge branch 'dev' into dev-slice 2022-04-07 12:35:34 -07:00
Daan Leijen
0cda8b02d5 fix stats for large objects that were off by the block size padding 2022-04-07 11:08:54 -07:00
Daan Leijen
332346b685 remove unneeded MI_HUGE_OBJ_SIZE_MAX 2022-04-07 10:38:31 -07:00
Daan Leijen
1e4f0c58dc Merge branch 'dev' into dev-slice 2022-04-07 10:22:08 -07:00
Daan Leijen
8509ce2096 Merge branch 'dev' into dev-slice 2022-04-07 10:19:33 -07:00
Daan Leijen
9f6cbc50ee use heap_stat_decrease when possible 2022-04-07 09:48:08 -07:00
Daan Leijen
5a90a2a9a1 merge from dev 2022-04-04 17:40:29 -07:00
Daan Leijen
1f089e99f6 Merge branch 'dev' into dev-slice 2022-04-02 11:42:02 -07:00
Daan
18c1891708 Merge branch 'dev' into dev-slice 2022-02-22 16:46:06 -08:00
Daan
10da1af59b merge from dev 2022-02-14 16:48:30 -08:00
Daan
b89b4fd18a fix v2.0.5 version 2022-02-14 16:44:33 -08:00
Daan
19edc880da merge from dev 2022-02-14 16:36:03 -08:00
Daan
a1310047c4 Merge branch 'dev-slice' of https://github.com/microsoft/mimalloc into dev-slice 2022-02-14 16:16:30 -08:00
Daan
e91ee4c384 Merge branch 'dev' into dev-slice 2022-02-14 16:16:03 -08:00
daan
26695dc582 Merge branch 'dev' into dev-slice 2022-02-14 15:45:10 -08:00
daan
221f96ac2c Merge branch 'dev' into dev-slice 2022-02-10 11:59:28 -08:00
daan
96008c55d0 fix ubsan warning on huge allocations (issue #543) 2022-02-10 11:57:30 -08:00
daan
352d8be237 Merge branch 'dev' into dev-slice 2022-02-10 11:46:43 -08:00
daan
e87b1d2298 add extra huge allocation test 2022-02-10 11:08:13 -08:00
daan
f2b6938d64 fix start adjustment for the commit mask 2022-02-05 17:36:14 -08:00
daan
47f8caad4d improve commit chunk alignment 2022-02-05 17:23:28 -08:00
daan
8ec83f6945 increase min commit to 2 mib 2022-02-05 11:21:47 -08:00
daan
e11100a137 add minimal commit size for increased efficiency (decommit fine grained, commit coarse grained) 2022-02-05 10:57:15 -08:00
daan
9ca363d0e4 merge from dev 2022-02-04 16:13:12 -08:00
daan
0e2df71829 increase minimal commit size to 8*slice-size and add decommit_extend_delay as option 2022-02-04 16:11:38 -08:00
daan
fb418831df only delay eager commit after the first thread 2022-02-04 16:10:51 -08:00
Daan
0dd5a2e0a5 Merge branch 'dev' into dev-slice 2022-02-03 15:59:49 -08:00
Daan
0e1beb0018 check for decommit allowed before purging the segment cache 2022-02-03 15:51:27 -08:00
Daan
cbcee4dce4 merge from dev 2022-02-03 15:49:27 -08:00
daan
741d39a004 fix over aggressive decommit of abandoned pages 2022-02-03 14:26:56 -08:00
Daan
b365623b13 merge from dev 2022-02-02 19:21:15 -08:00
Daan
4e65b5018f clean up options 2022-02-02 19:01:41 -08:00
Daan
932f866105 decommit segment cache on force collect 2022-02-02 18:28:02 -08:00
Daan
ccfe005731 decommit in abandoned pages on mi_collect 2022-02-02 17:08:05 -08:00
Daan
bd2ac3c92e collect segment cache on mi_collect 2022-02-02 16:17:21 -08:00
Daan
05aa7648bb merge from dev 2022-02-02 16:17:06 -08:00
Daan
bfea3e2fc2 Merge branch 'dev' into dev-slice 2022-01-22 13:12:40 -08:00
Daan
3b93554ce6 merge from dev 2022-01-22 13:09:18 -08:00
Daan Leijen
1718fc811e merge from dev 2022-01-16 12:41:23 -08:00
Daan Leijen
44e7eb12d6 Merge branch 'dev' into dev-slice 2022-01-12 17:00:04 -08:00
Daan Leijen
df01e463b6 Merge branch 'dev' into dev-slice 2022-01-11 15:42:36 -08:00
Daan Leijen
e115a655dc Merge branch 'dev' into dev-slice 2022-01-10 16:57:23 -08:00
daan
a74c05c6c0 Merge branch 'dev' into dev-slice 2022-01-10 16:21:15 -08:00
Daan Leijen
a763b6310d merge from dev 2022-01-10 15:40:22 -08:00
daan
ae1c06d940 merge from dev 2022-01-10 15:29:49 -08:00
Daan
f317225a70 ignore reset_decommits option in the 2.x / dev-slice version 2022-01-10 12:10:18 -08:00
Daan
0842004b61 Merge branch 'dev' into dev-slice 2022-01-10 12:04:47 -08:00
Daan
9f9c77e6b6 Merge branch 'dev' into dev-slice 2022-01-10 11:41:12 -08:00
daan
3eac4a912c Merge branch 'dev' into dev-slice 2022-01-01 16:24:41 -08:00
Daan Leijen
c4b934c2ae Merge branch 'dev' into dev-slice 2021-12-20 12:34:13 -08:00
Daan
43ed851006 Merge branch 'dev' into dev-slice 2021-12-19 15:37:57 -08:00
daan
af854570cd Merge branch 'dev' into dev-slice 2021-12-18 16:36:58 -08:00
daan
72a33c37ef merge from dev 2021-12-18 11:34:02 -08:00
Daan Leijen
78e2e580f8 Merge branch 'dev' into dev-slice 2021-12-18 11:11:54 -08:00
daan
3d35147aba Merge branch 'dev' into dev-slice 2021-12-17 13:25:44 -08:00
daan
abbff9c030 merge from dev (MI_ALIGNED_MAX) 2021-12-17 13:23:24 -08:00
daan
e6400bcc27 Merge branch 'dev' into dev-slice 2021-12-16 15:36:03 -08:00
daan
7f7ae1a749 Merge branch 'dev' into dev-slice 2021-12-16 15:35:04 -08:00
daan
8d9336dfa6 Merge branch 'dev' into dev-slice 2021-12-16 15:11:58 -08:00
daan
bc79abb7d5 Merge branch 'dev-slice' of https://github.com/microsoft/mimalloc into dev-slice 2021-12-15 19:29:12 -08:00
daan
2af1db7f3a Merge branch 'dev' into dev-slice 2021-12-15 19:29:04 -08:00
Daan
f21841e926 Merge branch 'dev' into dev-slice 2021-12-15 16:05:20 -08:00
daan
60ca554413 Merge branch 'dev' into dev-slice 2021-12-15 08:47:00 -08:00
daan
f24a0b1019 merge from dev 2021-12-15 08:35:15 -08:00
Daan
d15f5fae64 merge from dev 2021-12-14 18:29:58 -08:00
Daan Leijen
775c10da3b Merge branch 'dev' into dev-slice 2021-12-09 16:18:43 -08:00
daan
67e8df6a5c Merge branch 'dev' into dev-slice 2021-11-24 12:55:07 -08:00
daan
5f6246b2cb merge from dev 2021-11-23 19:05:19 -08:00
daan
03526e5535 Merge branch 'dev' into dev-slice 2021-11-23 18:39:13 -08:00
daan
ef6ea7e718 merge from dev 2021-11-23 18:00:12 -08:00
daan
6efd78c5e0 remove O3 flag 2021-11-15 10:52:39 -08:00
daan
4a456ba054 Merge branch 'dev' into dev-slice 2021-11-15 10:52:17 -08:00
daan
9f1b25e07d Merge branch 'dev' into dev-slice 2021-11-15 10:10:58 -08:00
daan
f412df7a2b make segment size smaller on 32-bit 2021-11-14 16:52:10 -08:00
daan
5a1c3c8a4a Merge branch 'dev' into dev-slice 2021-11-14 16:48:04 -08:00
daan
7cd5b22ca7 Merge branch 'dev' into dev-slice 2021-11-14 16:41:32 -08:00
Daan
18fc788201 merge from dev 2021-11-14 15:39:05 -08:00
Daan
5a05fd446a fix compilation on macos 2021-11-14 14:38:24 -08:00
daan
e4f0a95a56 Merge branch 'dev-slice-cmask' into dev-slice 2021-11-14 14:35:46 -08:00
daan
c520901069 fix slice count comment 2021-11-14 12:10:07 -08:00
daan
70547b5f16 fix slice count 2021-11-14 12:09:20 -08:00
daan
32170897dd make decommit size equal to slice size 2021-11-14 11:45:28 -08:00
daan
c46a6f66c6 Merge branch 'dev-slice' into dev-slice-cmask 2021-11-14 11:26:47 -08:00
daan
f039774cf5 adjust decommit delay 2021-11-14 11:26:30 -08:00
daan
a4ea2205ba merge from dev 2021-11-14 11:25:51 -08:00
daan
511a8996f3 increase commit mask blocks to 2xslice size 2021-11-13 20:12:03 -08:00
daan
7e22e5ce6e Merge branch 'dev-slice' into dev-slice-cmask 2021-11-13 19:44:05 -08:00
daan
fa66db840d increase decommit hysteresis 2021-11-13 19:43:52 -08:00
daan
fb5645a30d increase decommit hysteresis 2021-11-13 19:41:41 -08:00
daan
7a3cf405d3 Merge branch 'dev-slice' into dev-slice-cmask 2021-11-13 17:12:42 -08:00
daan
cdfbd6d08f decommit when abandoned segments move to the visited list 2021-11-13 17:12:21 -08:00
daan
12bfd18ba7 fix commit mask for huge segments 2021-11-13 16:15:03 -08:00
daan
627892852c merge from dev-slice 2021-11-13 15:53:57 -08:00
daan
b72065f04b move commit mask functions to segment.c 2021-11-13 15:50:26 -08:00
daan
4f9d5f7dc6 merge from dev-slice 2021-11-13 15:33:03 -08:00
daan
f1ce9228a1 use size_t for bitmask 2021-11-13 15:29:57 -08:00
daan
88e6b52b88 fix types to size_t 2021-11-13 15:25:51 -08:00
daan
f9597ba7cb merge from dev-slice 2021-11-13 15:18:56 -08:00
daan
83ffd92b2b merge from dev 2021-11-13 15:16:23 -08:00
daan
721486c82b merge from dev 2021-11-13 14:52:11 -08:00
daan
0a86b45a91 Merge branch 'dev' into dev-slice 2021-11-13 14:13:12 -08:00
daan
9afc253726 add comments, renaming 2021-11-13 14:03:16 -08:00
daan
8bf16746e9 Merge branch 'dev-slice' into dev-slice-cmask 2021-11-13 13:31:00 -08:00
daan
97a1584bb5 Merge branch 'dev' into dev-slice 2021-11-13 13:30:17 -08:00
daan
5dc4ec48fe lower default reset delay 2021-11-12 21:15:11 -08:00
daan
53e2260ca0 merge 2021-11-12 20:14:03 -08:00
daan
a2b08664f7 merge from dev 2021-11-12 20:00:43 -08:00
daan
f58b4d923a comment 2021-11-12 19:58:49 -08:00
daan
9322123a97 start eager commit delay at N>2 2021-11-12 19:32:57 -08:00
daan
6ace2fe4e0 Merge branch 'dev-slice' into dev-slice-cmask 2021-11-12 19:04:35 -08:00
daan
5c08f75d69 merge from dev 2021-11-12 19:04:18 -08:00
daan
9e6ace6bcc Merge branch 'dev-slice' into dev-slice-cmask 2021-11-12 18:46:38 -08:00
daan
e5a3f3d7c4 merge from dev 2021-11-12 18:46:16 -08:00
daan
335d554438 merge from dev-slice 2021-11-12 18:38:14 -08:00
daan
c6b82a4b37 wip: change decommit expiration 2021-11-12 17:31:21 -08:00
daan
b1aff903f5 fix decommit bug 2021-11-11 17:45:41 -08:00
daan
998c2de633 merge from dev-slice 2021-11-10 16:49:43 -08:00
daan
ba6b4bf622 merge from dev 2021-11-10 16:33:42 -08:00
daan
49d64dbc95 save decommit_mask for segments in the segment cache 2021-11-10 16:30:21 -08:00
daan
8cc7d0c019 increase segment size to 64MiB 2021-11-10 16:29:53 -08:00
daan
49c75a3157 wip: increase commit mask resolution 2021-11-09 20:19:31 -08:00
Daan
865baa3bb1 Merge branch 'dev-slice' of https://github.com/microsoft/mimalloc into dev-slice 2021-11-06 14:19:32 -07:00
Daan
a4e7ff8608 Merge branch 'dev' into dev-slice 2021-11-06 14:19:26 -07:00
daan
c17878d1a7 Merge branch 'dev' into dev-slice 2021-11-04 19:10:31 -07:00
Daan
464cba833e Merge branch 'dev' into dev-slice 2021-11-04 18:55:34 -07:00
Daan
f3ffa663f1 merge from dev 2021-11-02 22:42:25 -07:00
Daan Leijen
9c3e6a25f6 Merge branch 'dev' into dev-slice 2021-10-27 19:06:42 -07:00
Daan
db223e4adb merge from dev 2021-10-27 18:09:16 -07:00
Daan
7756e1b5fe fix assertion 2021-10-27 10:45:19 -07:00
Daan
e477633779 fix assertion 2021-10-27 10:41:14 -07:00
Daan
1568dbb9e4 fix mi_is_valid_pointer bit index search (related to issue #478) 2021-10-27 10:35:16 -07:00
Daan
54b65a556c fix mi_cfree assertion failure for NULL pointer, issue #478 2021-10-27 10:15:12 -07:00
Daan
6d9e79a498 merge from dev 2021-10-27 10:11:51 -07:00
Daan
725fe2ac7d Merge branch 'dev' into dev-slice 2021-10-21 16:17:31 -07:00
Daan
de00de96fd merge with dev 2021-10-20 09:56:03 -07:00
Daan
b47d0802d1 Merge branch 'dev' into dev-slice 2021-10-20 09:36:08 -07:00
Daan Leijen
d4397ce16c merge from dev 2021-10-19 15:13:53 -07:00
Daan
3bf7b4313c add comment 2021-10-19 14:03:48 -07:00
Daan
2583ab73dc remove region.c which belongs in dev only 2021-10-19 13:57:36 -07:00
Daan
35b928b08f use MADV_DONTNEED instead of mmap fixed, for simplification and possibly better performance on Linux 2021-10-19 13:18:54 -07:00
Daan
aeb73b0cd4 merge from dev 2021-10-19 12:55:10 -07:00
Daan
f945dbb390 add space after _Atomic to prevent errors on msvc without /TP (see PR #452) 2021-10-19 10:18:44 -07:00
Daan
a4078df9d5 Merge branch 'dev' into dev-slice 2021-10-19 10:17:53 -07:00
Daan Leijen
8d2a21eb78 Merge branch 'dev' into dev-slice 2021-10-18 16:46:18 -07:00
Daan Leijen
54659aec9e merge from dev 2021-10-18 16:28:08 -07:00
Daan Leijen
e6b58052da add start offset to pages to reduce cache/page effects 2021-10-02 11:13:00 -07:00
Daan Leijen
262022c1d1 fix segment map for 32-bit systems (including wasm) 2021-10-01 15:10:11 -07:00
Daan Leijen
d7ac4478a8 Merge branch 'dev' into dev-slice 2021-10-01 15:05:41 -07:00
Daan Leijen
080cffe064 Merge branch 'dev' into dev-slice 2021-06-17 20:20:28 -07:00
Daan Leijen
b3b0fb5832 merge from dev 2021-06-17 20:05:40 -07:00
Daan Leijen
5869c85749 merge from dev 2021-06-17 19:18:57 -07:00
Daan Leijen
e592360d4d revert relative includes 2021-06-07 17:53:03 -07:00
Daan Leijen
6ba9387bf8 Merge branch 'dev' into dev-slice 2021-06-07 17:51:42 -07:00
Daan Leijen
d7eb0bab75 Merge branch 'dev' into dev-slice 2021-06-07 17:01:00 -07:00
Daan
8af2511e66
Merge pull request #412 from diorszeng/dev-slice
fix typo
2021-06-07 16:55:03 -07:00
Daan Leijen
9974b0ee23 Merge branch 'dev' into dev-slice 2021-06-07 16:51:14 -07:00
Daan Leijen
069b3276df merge from dev 2021-06-06 20:33:55 -07:00
Daan Leijen
7b595bd957 Merge branch 'dev' into dev-slice 2021-06-06 20:31:53 -07:00
diorszeng
f4e1563c4c
Merge pull request #1 from diorszeng/diorszeng-patch-1
Update mimalloc-types.h
2021-05-31 15:03:01 +08:00
diorszeng
0611058974
Update mimalloc-types.h
fix typo
2021-05-31 15:02:17 +08:00
Daan Leijen
54b2c3525c merge with dev 2021-05-21 15:36:30 -07:00
Daan Leijen
10ce8839fa merge from dev 2021-04-28 13:23:46 -07:00
Daan Leijen
34ba03951e merge from dev 2021-04-06 11:01:06 -07:00
Daan Leijen
c6f5092287 merge from dev 2021-04-06 11:00:28 -07:00
Daan Leijen
dc6bce256d bump version to v2.0.1 2021-04-06 10:58:12 -07:00
Daan Leijen
4e643b6d31 merge from dev 2021-02-24 15:53:26 -08:00
Daan Leijen
ad96d220f4 merge from dev 2021-02-24 15:17:35 -08:00
Daan Leijen
47050371a1 fix issue #363 and disable assertion for now 2021-02-22 15:05:47 -08:00
Daan Leijen
8f69e7095d Merge branch 'dev' into dev-slice 2021-02-22 14:28:22 -08:00
Daan Leijen
1b22da3c28 Merge branch 'dev' into dev-slice 2021-02-02 10:46:43 -08:00
Daan Leijen
ba84aa2783 Merge branch 'dev' into dev-slice 2021-02-01 15:47:37 -08:00
Daan Leijen
2762784364 Merge branch 'dev' into dev-slice 2021-01-31 14:12:51 -08:00
Daan Leijen
bd56782f26 bump version to 2.0.0 2021-01-31 14:02:06 -08:00
Daan Leijen
8bcc60edd9 Merge branch 'dev' into dev-slice 2021-01-31 13:57:35 -08:00
Daan Leijen
2aebb37fb0 merge from dev 2021-01-30 17:15:24 -08:00
Daan Leijen
36b7a3cb03 merge from dev 2021-01-30 16:37:38 -08:00
Daan Leijen
b93cba3b05 merge from dev 2021-01-29 16:53:52 -08:00
Daan Leijen
3bade4b1bd fix accounting of abandoned pages 2021-01-29 15:42:52 -08:00
Daan Leijen
542f577c81 Merge branch 'dev' into dev-slice 2021-01-29 15:23:36 -08:00
Daan Leijen
72559c5c49 merge from dev 2021-01-29 13:08:00 -08:00
Daan Leijen
f02643d9f2 Merge branch 'dev' into dev-slice 2021-01-29 12:33:52 -08:00
Daan Leijen
1e9a5c2d78 Merge branch 'dev' into dev-slice 2021-01-28 17:37:13 -08:00
Daan Leijen
e314699ee0 add debug view of arenas 2021-01-28 17:32:42 -08:00
Daan Leijen
217871cb45 fix search_idx start in managed arenas 2021-01-22 11:24:25 -08:00
Daan Leijen
da79629308 Merge branch 'dev' into dev-slice 2020-12-17 14:11:50 -08:00
Daan Leijen
3c70317393 merge from dev 2020-12-15 16:07:23 -08:00
Daan Leijen
b803095b83 merge from dev 2020-12-10 13:17:56 -08:00
unknown
ad05829195 remove shadow warning when building in static mode 2020-11-06 17:49:10 -08:00
daan
10aca1cfb9 merge from dev 2020-10-15 20:01:38 -07:00
daan
7e96634da4 merge from dev 2020-10-11 13:38:12 -07:00
daan
e1c38eef76 use allow_decommit option for both the segment cache and pages 2020-09-24 17:20:39 -07:00
daan
b149099bf3 use relaxed load for last search position in an arena 2020-09-24 16:55:00 -07:00
daan
2822e5c1f3 Merge branch 'dev' into dev-slice 2020-09-24 16:33:22 -07:00
daan
b59abce8ea Merge branch 'dev' into dev-slice 2020-09-24 10:16:54 -07:00
daan
680c9266bf Merge branch 'dev' into dev-slice 2020-09-24 09:29:43 -07:00
daan
165b64f553 Merge branch 'dev-exp' into dev-slice 2020-09-24 09:11:58 -07:00
daan
fbaa70e1eb increase default test load to 25% to increase azure pipeline test load 2020-09-14 11:01:17 -07:00
Daan Leijen
b1cc3d550c fix valid pointer detection on mac 2020-09-14 10:55:44 -07:00
daan
fba65c440c merge from dev-exp 2020-09-14 09:05:16 -07:00
daan
01307a25ff fix assertion 2020-09-11 11:00:19 -07:00
daan
1d946146cc fix all_committed 2020-09-11 10:40:22 -07:00
daan
fa01875eb2 merge from dev (with is_pinned/is_large separation) 2020-09-08 17:54:58 -07:00
daan
d87933a3b5 update comments 2020-09-08 15:50:37 -07:00
daan
037285ac09 refactor segment cache and map in a separate source file 2020-09-08 13:27:34 -07:00
daan
161f9a7751 refactor arena allocation 2020-09-08 11:12:44 -07:00
daan
97629cefaa tune performance options with longer reset delay 2020-09-08 11:12:23 -07:00
daan
a948724340 merge from dev (bitmap split) 2020-09-08 10:33:30 -07:00
daan
6b013d5f38 test for arena count early; skip test in bitmap_mask_ for perf 2020-09-07 22:55:36 -07:00
daan
371532ff02 merge from dev 2020-09-07 21:43:05 -07:00
daan
313008ecaa ensure page->retire_expire is always 1 2020-09-07 15:20:59 -07:00
daan
953bbde089 fix is_in_same_page check 2020-09-06 15:09:51 -07:00
daan
3826132240 use dynamic initial commit 2020-09-06 14:51:20 -07:00
daan
b7046934e5 Merge branch 'dev' into dev-slice 2020-09-06 13:53:30 -07:00
daan
45300ac43d merge from dev 2020-09-06 13:24:47 -07:00
daan
8c838a949f Merge branch 'dev' into dev-slice 2020-09-06 13:22:44 -07:00
daan
8e0d846b40 consistent commit order 2020-09-06 12:19:05 -07:00
daan
828613a694 use MADV_DONTNEED for commit/decommit on macOS 2020-09-06 12:06:56 -07:00
daan
5ae01fe4d9 experiment with commit strategy on macOS 2020-09-06 09:39:16 -07:00
daan
e2ae9f3125 fix pipeline script for macOS 2020-09-06 09:14:32 -07:00
daan
c821e5144a Merge branch 'dev' into dev-slice 2020-09-06 09:13:14 -07:00
daan
803e6f9e46 merge from dev 2020-09-06 09:09:55 -07:00
daan
e703bfc319 build windows pipeline in parallel 2020-09-06 09:02:15 -07:00
daan
a372847ccf verbose ctest on Linux pipeline 2020-09-06 08:57:56 -07:00
daan
4f7bc7d98e Merge branch 'dev' into dev-slice 2020-09-06 08:50:44 -07:00
daan
500a9208d5 Merge branch 'dev' into dev-slice 2020-09-05 22:55:52 -07:00
daan
f9ca7cd05a use proper file descriptor in mmap for decommit 2020-09-05 22:16:58 -07:00
daan
f7dc4847f2 keep commit_mask live in the cache for better reuse 2020-09-05 21:58:32 -07:00
daan
63a9f45ba6 add initial mi_commit_mask abstraction 2020-09-05 19:39:10 -07:00
daan
36da7e91c5 Merge branch 'dev' into dev-slice 2020-09-05 18:17:22 -07:00
daan
c1778acb93 Merge branch 'dev' into dev-slice 2020-09-05 15:03:54 -07:00
daan
8834fe02da again try to fix verbose ctest on mac pipeline 2020-09-05 12:31:28 -07:00
daan
7a08ca4dc6 again try to fix verbose ctest on mac pipeline 2020-09-05 12:30:13 -07:00
daan
5fe80671a2 again try to fix verbose ctest on mac pipeline 2020-09-05 12:26:47 -07:00
daan
0c5f03559d fix verbose ctest on mac pipeline 2020-09-05 12:22:52 -07:00
daan
a0370f347c more verbose ctest on mac pipeline 2020-09-05 12:20:21 -07:00
daan
85a8c138fc enable verbose ctest on mac pipeline 2020-09-05 12:18:09 -07:00
daan
3d708aa7e1 fix warning in g++ 2020-09-05 12:16:46 -07:00
daan
5f31f5c2b9 Merge branch 'dev' into dev-slice 2020-09-05 12:05:00 -07:00
daan
13bbb78907 add dev-slice to azure test pipeline 2020-09-05 11:48:23 -07:00
daan
a8539f6772 Merge branch 'dev' into dev-slice 2020-09-05 11:47:48 -07:00
daan
4df01218e2 fix msvc compilation with new atomics 2020-09-05 10:03:37 -07:00
daan
644e453709 Merge branch 'dev' into dev-slice 2020-09-05 09:37:38 -07:00
daan
dc858f6d29 fix c++ compilation with new atomics for dev-slice 2020-09-05 09:23:22 -07:00
daan
7c2b79bef0 Merge branch 'dev' into dev-slice 2020-09-05 09:17:59 -07:00
daan
97f56b1e08 merge from dev 2020-09-04 14:21:33 -07:00
daan
b22401deb3 layout 2020-09-03 20:31:11 -07:00
daan
f6109765d8 update whitespace and comments 2020-09-03 15:04:40 -07:00
Daan Leijen
7058e501cb use atomic ops for the expire field; passes TSAN now 2020-09-03 13:53:56 -07:00
daan
228b5f6e9d use atomic load for segment map 2020-09-03 12:19:04 -07:00
daan
03071dec0f merge from dev-atomic with new atomic interface 2020-09-03 12:13:09 -07:00
daan
c1a834e886 add checks for when memory commit fails to return NULL 2020-08-28 10:40:46 -07:00
daan
e4ddc75069 set delayed decommit mask more precisely to only decommit currently committed blocks 2020-08-28 08:46:51 -07:00
daan
2cffc3b851 merge from dev 2020-08-27 22:43:57 -07:00
daan
38c264ccdf merge from dev 2020-06-17 19:25:03 -07:00
daan
cb05ef9f2c merge from dev 2020-05-19 10:43:46 -07:00
daan
82e29f47b3 weaken assertion, #245 2020-05-18 18:51:06 -07:00
daan
53aa46890a merge from dev 2020-05-05 10:54:59 -07:00
daan
74ea69b784 increase default arena reset delay (behaves better on 36+ core systems) 2020-05-03 16:33:29 -07:00
daan
fd0891f224 merge from dev 2020-05-03 11:44:55 -07:00
daan
cce998a835 fix assertion for huge blocks 2020-05-03 11:42:49 -07:00
daan
30799bce73 fix assertion for huge segments 2020-05-03 11:42:38 -07:00
daan
28f4f1ce04 nice cache initialization 2020-05-03 10:45:46 -07:00
daan
f8dc2a3130 Merge branch 'dev' into dev-arena 2020-05-02 22:23:11 -07:00
daan
e5b72cdfe7 reduce segment size and increase cache 2020-05-02 22:22:35 -07:00
daan
ea92fb2fe4 lower arena reset delay 2020-05-02 21:40:14 -07:00
Daan
a4b7baf6fd
Update readme with descriptions of secure and debug mode 2020-05-02 18:08:31 -07:00
daan
69158f2c76 roll back again to new arena cache: previous perf regression was caused due to accidentally making secure mode default 2020-05-02 12:04:36 -07:00
daan
18d697a1e6 roll back to old arena cache as it seems to do better on AMD 2020-05-02 11:57:33 -07:00
daan
66e5484c1c fix assertions for huge pages in secure mode 2020-05-02 11:23:25 -07:00
daan
b8846f7a27 fix unprotect of guard pages 2020-05-02 10:51:10 -07:00
daan
37b43e4cea improved arena cache 2020-05-02 10:37:33 -07:00
daan
1b158d8e80 set max retire size to MAX_MEDIUM_OBJ_SIZE 2020-05-02 10:37:07 -07:00
daan
84e1f7c92e merge from dev 2020-05-02 00:23:22 -07:00
daan
dd18852946 reduce page retire cycles 2020-05-02 00:13:40 -07:00
daan
01ad553978 set default reset delay to 250ms 2020-05-02 00:13:03 -07:00
daan
79da2728c4 reduce cache 2020-05-02 00:12:45 -07:00
daan
8bfd5ec865 improve arena cache to avoid full scans 2020-05-01 23:00:17 -07:00
daan
dcb3574cf0 fix assertions for huge segment free 2020-05-01 21:14:41 -07:00
daan
dad3be3c64 update comments 2020-04-30 17:21:36 -07:00
daan
c609248f0e do delayed decommit if not reclaiming abandoned blocks 2020-04-30 13:30:19 -07:00
daan
0d25493c39 segment size to 16MiB to improve perf on mstress and rptest 2020-04-28 16:50:03 -07:00
daan
f86519bca6 make lazy commit default; add commit check on segment allocation 2020-04-28 16:46:00 -07:00
daan
1b0de9b4cf merge from dev 2020-04-28 16:22:38 -07:00
daan
1f396e64a0 merge from dev 2020-03-16 16:41:21 -07:00
daan
d221a4b904 merge from dev-exp 2020-01-27 23:36:53 -08:00
daan
54e206a0a1 increase retire page size 2020-01-27 22:41:24 -08:00
daan
09b98e0f7f merge from dev-exp; resolve conflicts 2020-01-27 22:14:10 -08:00
daan
b50bec463d merge from dev-exp; better abandoned reclamation 2020-01-27 22:12:23 -08:00
daan
a46d20a681 merge with new atomic macros 2020-01-22 20:53:44 -08:00
daan
e226ebcc97 Merge branch 'dev' into dev-arena 2020-01-22 20:39:33 -08:00
Daan Leijen
caa5e51a67 align size of page_t, increase slices per segment 2020-01-22 11:29:32 -08:00
daan
0028272cf4 small fixes, reduced segment size, fix merge conflicts 2020-01-20 22:33:29 -08:00
daan
394a7a92ab merge from dev 2020-01-20 19:06:08 -08:00
daan
88b141cf1f ensure proper padding for the page structure 2020-01-13 20:48:37 -08:00
daan
94bff89347 ensure page reset flag is always reset 2020-01-13 20:48:18 -08:00
daan
2808c9f4c8 default to non-eager commit 2020-01-13 18:01:52 -08:00
daan
4a27ea1643 merge from dev 2020-01-13 18:01:34 -08:00
daan
b5fbdb7180 merge from dev 2019-11-25 11:16:39 -08:00
daan
41af533a34 define commit unit in terms of segment size 2019-11-24 19:17:56 -08:00
daan
ec0005b919 more fine grained commit tracking per MiB 2019-11-24 19:09:15 -08:00
daan
128cdd1dfb merge from dev 2019-11-24 18:51:09 -08:00
daan
f45ec667a3 Merge branch 'dev' into dev-arena 2019-11-22 09:29:00 -08:00
daan
7da00c1220 wip: full decommit delay, for arena cache as well 2019-11-21 20:57:32 -08:00
daan
321e18777e wip: delayed decommit on segments 2019-11-21 19:53:43 -08:00
daan
1066be1594 merge from dev-exp 2019-11-21 17:03:30 -08:00
daan
aa61e6381d Merge branch 'dev-arena' of https://github.com/microsoft/mimalloc into dev-arena 2019-11-10 10:47:55 -08:00
Daan Leijen
b04206a9d3 add os cache to arena 2019-11-10 10:10:10 -08:00
Daan Leijen
268698b9ef fix vs2019 project 2019-11-10 08:00:51 -08:00
Daan Leijen
fed0068dac merge from dev-exp; bitmap based arena 2019-11-10 07:56:40 -08:00
daan
c3ef23e4f6 Merge branch 'dev-exp' into dev-arena 2019-11-04 09:40:25 -08:00
daan
62df2e2df9 merge from dev-exp 2019-11-04 08:56:42 -08:00
daan
2b005addd3 merge from dev-exp 2019-11-03 13:37:03 -08:00
daan
1a6d150687 merge from dev-exp 2019-11-03 12:21:22 -08:00
daan
5bdcda30b0 merge from dev-exp 2019-11-02 20:12:22 -07:00
daan
e0b8ec7f54 merge with dev-exp 2019-11-02 11:56:19 -07:00
daan
ae092e05a2 Merge branch 'dev-exp' into dev-arena 2019-11-02 10:39:27 -07:00
daan
b0182b2376 Merge branch 'dev-exp' into dev-arena 2019-11-02 10:30:33 -07:00
daan
08c4726043 merge from dev-exp 2019-11-01 22:04:20 -07:00
daan
6916e6590f Merge branch 'dev-exp' into dev-arena 2019-11-01 20:30:32 -07:00
daan
4be5b14869 merge from dev-exp 2019-11-01 20:19:32 -07:00
daan
6b26f0cb17 merge from dev-exp (numa support) 2019-11-01 20:08:56 -07:00
daan
eed42445e8 merge from dev-exp 2019-10-31 20:40:02 -07:00
daan
a74e072a9a set test-stress scale to 20 again 2019-10-31 19:00:26 -07:00
daan
62984c0a24 merge from dev-exp 2019-10-31 18:44:48 -07:00
daan
bbca1cd8d9 allow decommit by default 2019-10-31 12:42:23 -07:00
daan
6695f8ae91 add allow_decommit option 2019-10-31 10:59:50 -07:00
daan
ed4f60fc7e respect large pages for arena allocation 2019-10-31 10:59:40 -07:00
daan
28cb19148c fixed memory arena allocation for huge pages 2019-10-31 09:10:58 -07:00
daan
f7d2c45af3 initial experiment with fixed memory arena and sliced segments 2019-10-31 00:40:41 -07:00
daan
c7ec30ae25 fix secure mode 2019-10-30 15:36:13 -07:00
daan
93ae3e26b1 Merge branch 'dev' into dev-win-exp 2019-10-30 15:22:56 -07:00
daan
b73beede34 merge from dev 2019-10-30 15:19:34 -07:00
daan
9d4f57abf3 merge from dev-win 2019-10-28 12:33:01 -07:00
daan
4b15e2ed97 merge from dev 2019-10-17 18:24:35 -07:00
daan
25dca38ef9 merge from dev-win 2019-08-26 12:47:16 -07:00
daan
b0e38d5697 merge from dev-win 2019-08-25 13:12:57 -07:00
daan
80a36f1d7c reduce page retire words to 32 2019-08-24 17:02:32 -07:00
daan
19f473e49a merge from dev; free huge objects directly and keep them abandoned 2019-08-24 16:16:09 -07:00
daan
6f5492cef8 enable initial lazy commit and optional decommit to reduce commit charge with many threads 2019-08-24 15:00:55 -07:00
daan
612b2cc9b7 clean up segment slice handling 2019-08-24 12:20:32 -07:00
daan
cce38bc147 more conservative setting to avoid internal fragmentation 2019-08-24 07:32:23 -07:00
daan
082f012a91 merge from dev-win 2019-08-23 21:56:28 -07:00
daan
3e01eac105 Merge branch 'dev-win' into dev-win-exp 2019-08-21 14:38:58 -07:00
daan
5c912f16d4 merge from remote 2019-08-21 11:35:09 -07:00
daan
a3c4b1c95b merge from dev-win 2019-08-21 11:18:05 -07:00
daan
cd52d0a6d9 merge dev-win 2019-08-20 17:31:46 -07:00
Daan Leijen
fb12f298ca merge from dev-win, fix small object size check 2019-08-16 19:14:08 -07:00
Daan Leijen
91497e8d2d whitespace and warning fix 2019-08-16 17:49:49 -07:00
daan
a0b4ac2f66 new segment allocation; good results with Qas service 2019-08-15 23:19:52 -07:00
daan
f2ba95bc64 first working version of new segment allocation 2019-08-15 22:00:42 -07:00
daan
6ee248b012 wip: fixing bugs in new segment allocation 2019-08-15 14:40:15 -07:00
daan
f2bafbc57f wip: new segment allocation 2019-08-15 11:49:56 -07:00
daan
bbd81bbbd1 wip: new segment allocation with flexible large objects 2019-08-15 00:46:45 -07:00
40 changed files with 1890 additions and 1113 deletions

View file

@ -434,7 +434,7 @@ endif()
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM_NAME MATCHES "Haiku")
if(MI_OPT_ARCH)
if(APPLE AND CMAKE_C_COMPILER_ID STREQUAL "AppleClang" AND CMAKE_OSX_ARCHITECTURES) # to support multi-arch binaries (#999)
if(APPLE AND CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang" AND CMAKE_OSX_ARCHITECTURES) # to support multi-arch binaries (#999)
if("arm64" IN_LIST CMAKE_OSX_ARCHITECTURES)
list(APPEND MI_OPT_ARCH_FLAGS "-Xarch_arm64;-march=armv8.1-a")
endif()
@ -532,7 +532,9 @@ if(MI_TRACK_ASAN)
endif()
string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC)
list(APPEND mi_defines "MI_CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE_LC}") #todo: multi-config project needs $<CONFIG> ?
if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$"))
if(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$")
list(APPEND mi_defines MI_BUILD_RELEASE)
else()
set(mi_libname "${mi_libname}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. -debug) if not a release version
endif()
@ -582,7 +584,7 @@ if(MI_BUILD_SHARED)
install(TARGETS mimalloc EXPORT mimalloc ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT mimalloc DESTINATION ${mi_install_cmakedir})
if(WIN32)
if(WIN32 AND NOT MINGW)
# On windows, the import library name for the dll would clash with the static mimalloc.lib library
# so we postfix the dll import library with `.dll.lib` (and also the .pdb debug file)
set_property(TARGET mimalloc PROPERTY ARCHIVE_OUTPUT_NAME "${mi_libname}.dll" )
@ -592,6 +594,9 @@ if(MI_BUILD_SHARED)
# install(FILES "$<TARGET_FILE_DIR:mimalloc>/${mi_libname}.dll.pdb" DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
if(WIN32 AND MI_WIN_REDIRECT)
if(MINGW)
set_property(TARGET mimalloc PROPERTY PREFIX "")
endif()
# On windows, link and copy the mimalloc redirection dll too.
if(CMAKE_GENERATOR_PLATFORM STREQUAL "arm64ec")
set(MIMALLOC_REDIRECT_SUFFIX "-arm64ec")

View file

@ -7,9 +7,9 @@ trigger:
branches:
include:
- master
- dev
- dev2
- dev3
- dev2
- dev
tags:
include:
- v*
@ -34,6 +34,14 @@ jobs:
BuildType: secure
cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
MSBuildConfiguration: Release
Debug x86:
BuildType: debug
cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -A Win32
MSBuildConfiguration: Debug
Release x86:
BuildType: release
cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -A Win32
MSBuildConfiguration: Release
steps:
- task: CMake@1
inputs:
@ -161,6 +169,7 @@ jobs:
- script: ctest --verbose --timeout 240
workingDirectory: $(BuildType)
displayName: CTest
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-macos-$(BuildType)

BIN
bin/mimalloc-redirect-arm64.dll Normal file → Executable file

Binary file not shown.

BIN
bin/mimalloc-redirect-arm64.lib Normal file → Executable file

Binary file not shown.

BIN
bin/mimalloc-redirect-arm64ec.dll Normal file → Executable file

Binary file not shown.

BIN
bin/mimalloc-redirect-arm64ec.lib Normal file → Executable file

Binary file not shown.

BIN
bin/mimalloc-redirect.dll Normal file → Executable file

Binary file not shown.

BIN
bin/mimalloc-redirect.lib Normal file → Executable file

Binary file not shown.

BIN
bin/mimalloc-redirect32.dll Normal file → Executable file

Binary file not shown.

BIN
bin/mimalloc-redirect32.lib Normal file → Executable file

Binary file not shown.

View file

@ -1,6 +1,6 @@
set(mi_version_major 1)
set(mi_version_minor 9)
set(mi_version_patch 2)
set(mi_version_major 2)
set(mi_version_minor 2)
set(mi_version_patch 3)
set(mi_version ${mi_version_major}.${mi_version_minor})
set(PACKAGE_VERSION ${mi_version})

View file

@ -5,11 +5,11 @@ vcpkg_from_github(
# The "REF" can be a commit hash, branch name (dev2), or a version (v2.2.1).
# REF "v${VERSION}"
REF 866ce5b89db1dbc3e66bbf89041291fd16329518
REF e2db21e9ba9fb9172b7b0aa0fe9b8742525e8774
# The sha512 is the hash of the tar.gz bundle.
# (To get the sha512, run `vcpkg install mimalloc[override] --overlay-ports=<dir of this file>` and copy the sha from the error message.)
SHA512 0b0e5ff823c49b9534b8c32800679806c5d7c29020af058da043c3e6e36ae3c32a1cdd5a21ece97dd60bc7dd4703967f683beac435dbb8514638a6cc55e5dea8
SHA512 8cbb601fdf8b46dd6a9c0d314d6da9d4960699853829e96d2470753867f90689fb4caeaf30d628943fd388670dc11902dbecc9cc7c329b99a510524a09bdb612
)
vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS

View file

@ -1,7 +1,7 @@
{
"name": "mimalloc",
"version": "1.9.2",
"port-version": 2,
"version": "2.2.2",
"port-version": 1,
"description": "Compact general purpose allocator with excellent performance",
"homepage": "https://github.com/microsoft/mimalloc",
"license": "MIT",

View file

@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
#define MI_MALLOC_VERSION 192 // major + 2 digits minor
#define MI_MALLOC_VERSION 223 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@ -97,6 +97,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include <stddef.h> // size_t
#include <stdbool.h> // bool
#include <stdint.h> // INTPTR_MAX
#ifdef __cplusplus
extern "C" {

View file

@ -266,6 +266,13 @@ static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int6
return current;
#endif
}
static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) {
const int64_t add = *padd;
if (add != 0) {
mi_atomic_addi64_relaxed((volatile _Atomic(int64_t)*)p, add);
}
}
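The new helper skips the atomic read-modify-write entirely when the addend is zero; a brief hedged usage sketch (the stat names here are illustrative, not from the sources):
static void merge_freed_stat(volatile int64_t* global_freed, const int64_t* thread_freed) {
  // performs no atomic operation at all when *thread_freed == 0
  mi_atomic_void_addi64_relaxed(global_freed, thread_freed);
}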
static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
int64_t current;
do {
@ -363,8 +370,9 @@ static inline void mi_atomic_yield(void) {
_mm_pause();
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
(defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \
defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__)
(defined(__x86_64__) || defined(__i386__) || \
defined(__aarch64__) || defined(__arm__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__))
#if defined(__x86_64__) || defined(__i386__)
static inline void mi_atomic_yield(void) {
__asm__ volatile ("pause" ::: "memory");
@ -373,10 +381,16 @@ static inline void mi_atomic_yield(void) {
static inline void mi_atomic_yield(void) {
__asm__ volatile("wfe");
}
#elif (defined(__arm__) && __ARM_ARCH__ >= 7)
#elif defined(__arm__)
#if __ARM_ARCH >= 7
static inline void mi_atomic_yield(void) {
__asm__ volatile("yield" ::: "memory");
}
#else
static inline void mi_atomic_yield(void) {
__asm__ volatile ("nop" ::: "memory");
}
#endif
#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
#ifdef __APPLE__
static inline void mi_atomic_yield(void) {
@ -387,10 +401,6 @@ static inline void mi_atomic_yield(void) {
__asm__ __volatile__ ("or 27,27,27" ::: "memory");
}
#endif
#elif defined(__armel__) || defined(__ARMEL__)
static inline void mi_atomic_yield(void) {
__asm__ volatile ("nop" ::: "memory");
}
#endif
#elif defined(__sun)
// Fallback for other archs
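For context, `mi_atomic_yield` is a back-off hint used inside spin loops; a minimal sketch of such a loop (the function name and flag are illustrative, and inclusion of mimalloc's atomic header is assumed):
#include <stdatomic.h>

static void spin_until_set(volatile _Atomic(int)* flag) {
  while (atomic_load_explicit(flag, memory_order_acquire) == 0) {
    mi_atomic_yield();  // `pause` on x86, `yield` on ARMv7+, `nop` on pre-v7 ARM
  }
}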

View file

@ -127,6 +127,7 @@ bool _mi_os_has_virtual_reserve(void);
bool _mi_os_reset(void* addr, size_t size);
bool _mi_os_commit(void* p, size_t size, bool* is_zero);
bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
bool _mi_os_decommit(void* addr, size_t size);
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
@ -177,10 +178,11 @@ void _mi_segment_map_freed_at(const mi_segment_t* segment);
void _mi_segment_map_unsafe_destroy(void);
// "segment.c"
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size);
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
void _mi_segment_collect(mi_segment_t* segment, bool force);
#if MI_HUGE_PAGE_ABANDON
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
@ -188,10 +190,11 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, m
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#endif
void _mi_segments_collect(bool force, mi_segments_tld_t* tld);
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
@ -341,12 +344,28 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
}
}
// Align downwards
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
uintptr_t mask = alignment - 1;
if ((alignment & mask) == 0) { // power of two?
return (sz & ~mask);
}
else {
return ((sz / alignment) * alignment);
}
}
// Align a pointer upwards
static inline void* mi_align_up_ptr(void* p, size_t alignment) {
return (void*)_mi_align_up((uintptr_t)p, alignment);
}
// Align a pointer downwards
static inline void* mi_align_down_ptr(void* p, size_t alignment) {
return (void*)_mi_align_down((uintptr_t)p, alignment);
}
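A quick hedged sanity check of the two code paths above (power-of-two mask versus plain division); the values are illustrative:
#include <assert.h>

int main(void) {
  assert(_mi_align_down(70, 64) == 64);   // power of two: 70 & ~63
  assert(_mi_align_down(70, 24) == 48);   // general case: (70/24)*24
  assert(_mi_align_up(70, 64) == 128);    // rounds up to the next boundary
  return 0;
}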
// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
@ -370,6 +389,7 @@ static inline bool mi_mem_is_zero(const void* p, size_t size) {
return true;
}
// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
@ -464,29 +484,44 @@ static inline mi_segment_t* _mi_ptr_segment(const void* p) {
#endif
}
static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
mi_assert_internal(s->slice_offset == 0 && s->slice_count > 0);
return (mi_page_t*)(s);
}
static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
mi_assert_internal(p->slice_offset == 0 && p->slice_count > 0);
return (mi_slice_t*)(p);
}
// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
mi_assert_internal(page!=NULL);
mi_segment_t* segment = _mi_ptr_segment(page);
mi_assert_internal(segment == NULL || page == &segment->pages[page->segment_idx]);
mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
return segment;
}
// used internally
static inline size_t _mi_segment_page_idx_of(const mi_segment_t* segment, const void* p) {
// if (segment->page_size > MI_SEGMENT_SIZE) return &segment->pages[0]; // huge pages
ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
mi_assert_internal(diff >= 0 && (size_t)diff <= MI_SEGMENT_SIZE /* for huge alignment it can be equal */);
size_t idx = (size_t)diff >> segment->page_shift;
mi_assert_internal(idx < segment->capacity);
mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || idx == 0);
return idx;
static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
mi_assert_internal(start->slice_offset == 0);
mi_assert_internal(start + start->slice_count > slice);
return start;
}
// Get the page containing the pointer
// Get the page containing the pointer (performance critical as it is called in mi_free)
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
size_t idx = _mi_segment_page_idx_of(segment, p);
return &((mi_segment_t*)segment)->pages[idx];
mi_assert_internal(p > (void*)segment);
ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
mi_assert_internal(idx <= segment->slice_entries);
mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
mi_assert_internal(slice->slice_offset == 0);
mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
return mi_slice_to_page(slice);
}
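To make the new lookup concrete, a hedged sketch of its arithmetic, assuming the 64-bit default `MI_SEGMENT_SLICE_SHIFT == 16` (the function name is illustrative):
#include <stddef.h>
#include <stdint.h>

// e.g. a pointer at byte offset 0x2A123 into the segment lands in slice 2;
// that slice's `slice_offset` then leads back to the slice holding the page info
static size_t slice_index_of(const void* segment, const void* p) {
  ptrdiff_t diff = (const uint8_t*)p - (const uint8_t*)segment;
  return (size_t)diff >> 16;  // MI_SEGMENT_SLICE_SHIFT on 64-bit
}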
// Quick page start for initialized pages
@ -509,8 +544,8 @@ static inline size_t mi_page_block_size(const mi_page_t* page) {
}
static inline bool mi_page_is_huge(const mi_page_t* page) {
mi_assert_internal((page->is_huge && _mi_page_segment(page)->page_kind == MI_PAGE_HUGE) ||
(!page->is_huge && _mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
mi_assert_internal((page->is_huge && _mi_page_segment(page)->kind == MI_SEGMENT_HUGE) ||
(!page->is_huge && _mi_page_segment(page)->kind != MI_SEGMENT_HUGE));
return page->is_huge;
}
@ -522,7 +557,11 @@ static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
// size of a segment
static inline size_t mi_segment_size(mi_segment_t* segment) {
return segment->segment_size;
return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
}
static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
return (uint8_t*)segment + mi_segment_size(segment);
}
// Thread free access
@ -677,12 +716,13 @@ static inline bool mi_is_in_same_segment(const void* p, const void* q) {
}
static inline bool mi_is_in_same_page(const void* p, const void* q) {
mi_segment_t* segmentp = _mi_ptr_segment(p);
mi_segment_t* segmentq = _mi_ptr_segment(q);
if (segmentp != segmentq) return false;
size_t idxp = _mi_segment_page_idx_of(segmentp, p);
size_t idxq = _mi_segment_page_idx_of(segmentq, q);
return (idxp == idxq);
mi_segment_t* segment = _mi_ptr_segment(p);
if (_mi_ptr_segment(q) != segment) return false;
// assume q may be invalid // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
mi_page_t* page = _mi_segment_page_of(segment, p);
size_t psize;
uint8_t* start = _mi_segment_page_start(segment, page, &psize);
return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
}
static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
@ -764,6 +804,50 @@ static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, c
}
// -------------------------------------------------------------------
// commit mask
// -------------------------------------------------------------------
static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
cm->mask[i] = 0;
}
}
static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
cm->mask[i] = ~((size_t)0);
}
}
static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
if (cm->mask[i] != 0) return false;
}
return true;
}
static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
if (cm->mask[i] != ~((size_t)0)) return false;
}
return true;
}
// defined in `segment.c`:
size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
#define mi_commit_mask_foreach(cm,idx,count) \
idx = 0; \
while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
#define mi_commit_mask_foreach_end() \
idx += count; \
}
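As a usage sketch, the macro pair iterates over runs of set bits; for example, summing committed bytes much as the `_mi_commit_mask_committed_size` declaration above suggests (the function name here is illustrative):
static size_t committed_bytes(const mi_commit_mask_t* cm) {
  size_t idx;
  size_t count;
  size_t total = 0;
  mi_commit_mask_foreach(cm, idx, count)
    total += count * MI_COMMIT_SIZE;  // each run covers `count` commit chunks
  mi_commit_mask_foreach_end()
  return total;
}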
/* -----------------------------------------------------------
memory id's
----------------------------------------------------------- */
@ -947,6 +1031,21 @@ static inline size_t mi_bsr(size_t x) {
return (x==0 ? MI_SIZE_BITS : MI_SIZE_BITS - 1 - mi_clz(x));
}
size_t _mi_popcount_generic(size_t x);
static inline size_t mi_popcount(size_t x) {
if (x<=1) return x;
if (x==SIZE_MAX) return MI_SIZE_BITS;
#if defined(__GNUC__)
#if (SIZE_MAX == ULONG_MAX)
return __builtin_popcountl(x);
#else
return __builtin_popcountll(x);
#endif
#else
return _mi_popcount_generic(x);
#endif
}
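For reference, `_mi_popcount_generic` can be implemented portably; a hedged sketch in the classic Kernighan style (the actual definition lives in the sources, this is only illustrative):
static size_t popcount_sketch(size_t x) {
  size_t count = 0;
  while (x != 0) {
    x &= (x - 1);  // clear the lowest set bit
    count++;
  }
  return count;
}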
// ---------------------------------------------------------------------------------
// Provide our own `_mi_memcpy` for potential performance optimizations.

View file

@ -13,8 +13,9 @@ terms of the MIT license. A copy of the license can be found in the file
// mi_heap_t : all data for a thread-local heap, contains
// lists of all managed heap pages.
// mi_segment_t : a larger chunk of memory (32GiB) from where pages
// are allocated.
// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from
// are allocated. A segment is divided into slices (64KiB) from
// which pages are allocated.
// mi_page_t : a "mimalloc" page (usually 64KiB or 512KiB) from
// where objects are allocated.
// Note: we write "OS page" for OS memory pages while
// using plain "page" for mimalloc pages (`mi_page_t`).
@ -66,10 +67,10 @@ terms of the MIT license. A copy of the license can be found in the file
// #define MI_DEBUG 2 // + internal assertion checks
// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
#if !defined(NDEBUG) || defined(_DEBUG)
#define MI_DEBUG 2
#else
#if defined(MI_BUILD_RELEASE) || defined(NDEBUG)
#define MI_DEBUG 0
#else
#define MI_DEBUG 2
#endif
#endif
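A hedged sketch of how the default now resolves for a translation unit (the program is illustrative; `MI_BUILD_RELEASE` comes from the CMake change earlier in this diff):
#include <stdio.h>

int main(void) {
#if defined(MI_BUILD_RELEASE) || defined(NDEBUG)
  printf("MI_DEBUG defaults to 0: internal assertions off\n");
#else
  printf("MI_DEBUG defaults to 2: internal assertion checks on\n");
#endif
  return 0;
}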
@ -167,38 +168,40 @@ typedef int32_t mi_ssize_t;
// ------------------------------------------------------
// Main tuning parameters for segment and page sizes
// Sizes for 64-bit, divide by two for 32-bit
// Sizes for 64-bit (usually divide by two for 32-bit)
#ifndef MI_SEGMENT_SLICE_SHIFT
#define MI_SEGMENT_SLICE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB (32KiB on 32-bit)
#endif
#ifndef MI_SEGMENT_SHIFT
#if MI_INTPTR_SIZE > 4
#define MI_SEGMENT_SHIFT ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB
#else
#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit
#endif
#endif
#ifndef MI_SMALL_PAGE_SHIFT
#define MI_SMALL_PAGE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB
#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB
#endif
#ifndef MI_MEDIUM_PAGE_SHIFT
#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB
#endif
#ifndef MI_LARGE_PAGE_SHIFT
#define MI_LARGE_PAGE_SHIFT ( 3 + MI_MEDIUM_PAGE_SHIFT) // 4MiB
#endif
#ifndef MI_SEGMENT_SHIFT
#define MI_SEGMENT_SHIFT ( MI_LARGE_PAGE_SHIFT) // 4MiB -- must be equal to `MI_LARGE_PAGE_SHIFT`
#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB
#endif
// Derived constants
#define MI_SEGMENT_SIZE (MI_ZU(1)<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_ALIGN (MI_SEGMENT_SIZE)
#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
#define MI_SEGMENT_MASK ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
#define MI_SEGMENT_SLICE_SIZE (MI_ZU(1)<< MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 512 (on 64-bit)
#define MI_SMALL_PAGE_SIZE (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
#define MI_LARGE_PAGE_SIZE (MI_ZU(1)<<MI_LARGE_PAGE_SHIFT)
#define MI_SMALL_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_SMALL_PAGE_SIZE)
#define MI_MEDIUM_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_MEDIUM_PAGE_SIZE)
#define MI_LARGE_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_LARGE_PAGE_SIZE)
// The max object sizes are checked so as not to waste more than 12.5% internally over the page sizes.
// (Except for large pages since huge objects are allocated in 4MiB chunks)
#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/8) // 8 KiB
#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/8) // 64 KiB
#define MI_LARGE_OBJ_SIZE_MAX (MI_LARGE_PAGE_SIZE/4) // 1 MiB
#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/8) // 8 KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/8) // 64 KiB on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX (MI_SEGMENT_SIZE/2) // 16 MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
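Working out the 64-bit defaults above (`MI_INTPTR_SHIFT == 3`), a hedged set of sanity checks with the constants spelled out numerically:
_Static_assert((1UL << (13 + 3)) == 64 * 1024, "slice size = 64KiB");
_Static_assert((1UL << (9 + 16)) == 32UL * 1024 * 1024, "segment size = 32MiB");
_Static_assert((512 * 1024) / 8 == 64 * 1024, "MI_MEDIUM_OBJ_SIZE_MAX = 64KiB");
_Static_assert((32UL * 1024 * 1024) / 2 == 16UL * 1024 * 1024, "MI_LARGE_OBJ_SIZE_MAX = 16MiB");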
// Maximum number of size classes. (spaced exponentially in 12.5% increments)
@ -206,18 +209,27 @@ typedef int32_t mi_ssize_t;
#error "mimalloc internal: expecting 73 bins"
#endif
#if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "mimalloc internal: define more bins"
#endif
// Maximum block size for which blocks are guaranteed to be block size aligned. (see `segment.c:_mi_segment_page_start`)
#define MI_MAX_ALIGN_GUARANTEE (MI_MEDIUM_OBJ_SIZE_MAX)
#define MI_MAX_ALIGN_GUARANTEE (MI_MEDIUM_OBJ_SIZE_MAX)
// Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_BLOCK_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
#define MI_BLOCK_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
// We never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
// Maximum slice count (255) for which we can find the page for interior pointers
#define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
// we never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
// on 64-bit+ systems we also limit the maximum allocation size such that the slice count fits in 32-bits. (issue #877)
#if (PTRDIFF_MAX > INT32_MAX) && (PTRDIFF_MAX >= (MI_SEGMENT_SLICE_SIZE * UINT32_MAX))
#define MI_MAX_ALLOC_SIZE (MI_SEGMENT_SLICE_SIZE * (UINT32_MAX-1))
#else
#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX
#endif
// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
@ -296,8 +308,8 @@ typedef uintptr_t mi_thread_free_t;
// Notes:
// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
// - Using `uint16_t` does not seem to slow things down
// - The size is 10 words on 64-bit which helps the page index calculations
// (and 12 words on 32-bit, and encoded free lists add 2 words)
// - The size is 12 words on 64-bit which helps the page index calculations
// (and 14 words on 32-bit, and encoded free lists add 2 words)
// - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
// concurrent frees where only the first concurrent free adds to the owning
// heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
@ -307,12 +319,12 @@ typedef uintptr_t mi_thread_free_t;
// will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
// "owned" by the segment
uint8_t segment_idx; // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
uint8_t segment_in_use:1; // `true` if the segment allocated this page
uint32_t slice_count; // slices in this page (0 if not a page)
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
uint8_t is_committed:1; // `true` if the page virtual memory is committed
uint8_t is_zero_init:1; // `true` if the page was initially zero initialized
uint8_t is_huge:1; // `true` if the page is in a huge segment
uint8_t is_huge:1; // `true` if the page is in a huge segment (`segment->kind == MI_SEGMENT_HUGE`)
// padding
// layout like this to optimize access in `mi_malloc` and `mi_free`
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
uint16_t reserved; // number of blocks reserved in memory
@ -336,12 +348,11 @@ typedef struct mi_page_s {
_Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads
_Atomic(uintptr_t) xheap;
struct mi_page_s* next; // next page owned by the heap with the same `block_size`
struct mi_page_s* prev; // previous page owned by the heap with the same `block_size`
struct mi_page_s* next; // next page owned by this thread with the same `block_size`
struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
#if MI_INTPTR_SIZE==4 // pad to 12 words on 32-bit
// 64-bit 11 words, 32-bit 13 words, (+2 for secure)
void* padding[1];
#endif
} mi_page_t;
@ -354,10 +365,44 @@ typedef enum mi_page_kind_e {
MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
MI_PAGE_MEDIUM, // medium blocks go into 512KiB pages inside a segment
MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment
MI_PAGE_HUGE // a huge page is a single page in a segment of variable size (but still 2MiB aligned)
// used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
MI_PAGE_HUGE // a huge page is a single page in a segment of variable size
// used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
} mi_page_kind_t;
typedef enum mi_segment_kind_e {
MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
MI_SEGMENT_HUGE, // segment with just one huge page inside.
} mi_segment_kind_t;
// ------------------------------------------------------
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
// size. If it is equal we have the most fine grained
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------
#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
#endif
typedef struct mi_commit_mask_s {
size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
} mi_commit_mask_t;
typedef mi_page_t mi_slice_t;
typedef int64_t mi_msecs_t;
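With the 64-bit defaults this works out to 32MiB / 64KiB = 512 mask bits held in 512 / 64 = 8 `size_t` fields; a hedged check (assuming the definitions above are in scope):
_Static_assert(MI_COMMIT_MASK_BITS == 512, "512 commit chunks per segment on 64-bit");
_Static_assert(MI_COMMIT_MASK_FIELD_COUNT == 8, "mask fits in 8 size_t words");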
// ---------------------------------------------------------------
// a memory id tracks the provenance of arena/OS allocated memory
@ -401,43 +446,57 @@ typedef struct mi_memid_s {
} mi_memid_t;
// ---------------------------------------------------------------
// Segments contain mimalloc pages
// ---------------------------------------------------------------
// -----------------------------------------------------------------------------------------
// Segments are large allocated memory blocks (32MiB on 64-bit) from arenas or the OS.
//
// Inside segments we allocate fixed size mimalloc pages (`mi_page_t`) that contain blocks.
// The start of a segment is this structure with a fixed number of slice entries (`slices`)
// usually followed by a guard OS page and the actual allocation area with pages.
// While a page is not allocated, we view its data as a `mi_slice_t` (instead of a `mi_page_t`).
// In any free area, the first slice has the info and `slice_offset == 0`; for any subsequent
// slices that are part of the area, the `slice_offset` is the byte offset back to the first slice
// (so we can quickly find the page info on a free, see `internal.h:_mi_segment_page_of`).
// For slices, the `block_size` field is repurposed to signify if a slice is used (`1`) or not (`0`).
// Small and medium pages use a fixed number of slices to reduce slice fragmentation, while
// large and huge pages span a variable number of slices.
typedef struct mi_subproc_s mi_subproc_t;
// Segments are large allocated memory blocks (2MiB on 64 bit) from the OS.
// Inside segments we allocated fixed size _pages_ that contain blocks.
typedef struct mi_segment_s {
// constant fields
mi_memid_t memid; // memory id to track provenance
bool allow_decommit;
bool allow_purge;
size_t segment_size; // for huge pages this may be different from `MI_SEGMENT_SIZE`
mi_subproc_t* subproc; // segment belongs to sub process
mi_memid_t memid; // memory id for arena/OS allocation
bool allow_decommit; // can we decommit the memory
bool allow_purge; // can we purge the memory (reset or decommit)
size_t segment_size;
mi_subproc_t* subproc; // segment belongs to sub process
// segment fields
struct mi_segment_s* next; // must be the first (non-constant) segment field -- see `segment.c:segment_init`
struct mi_segment_s* prev;
bool was_reclaimed; // true if it was reclaimed (used to limit reclaim-on-free reclamation)
bool dont_free; // can be temporarily true to ensure the segment is not freed
mi_msecs_t purge_expire; // purge slices in the `purge_mask` after this time
mi_commit_mask_t purge_mask; // slices that can be purged
mi_commit_mask_t commit_mask; // slices that are currently committed
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
size_t abandoned_visits; // count how often this segment is visited for reclaiming (to force reclaim if it is too long)
// from here is zero initialized
struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation)
bool dont_free; // can be temporarily true to ensure the segment is not freed
size_t used; // count of pages in use (`used <= capacity`)
size_t capacity; // count of available pages (`#free + used`)
size_t segment_info_size;// space we are using from the first page for segment meta-data and possible guard pages.
uintptr_t cookie; // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
size_t abandoned_visits; // count how often this segment is visited during abandoned reclamation (to force reclaim if it takes too long)
size_t used; // count of pages in use
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
struct mi_segment_s* abandoned_os_next; // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
struct mi_segment_s* abandoned_os_prev;
size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
size_t segment_info_slices; // initial count of slices that we are using for segment info and possible guard pages.
// layout like this to optimize access in `mi_free`
mi_segment_kind_t kind;
size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
_Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
size_t page_shift; // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
mi_page_kind_t page_kind; // kind of pages: small, medium, large, or huge
mi_page_t pages[1]; // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one extra final entry for huge blocks with large alignment
} mi_segment_t;
@ -541,20 +600,19 @@ struct mi_subproc_s {
// Thread Local data
// ------------------------------------------------------
// Milliseconds as in `int64_t` to avoid overflows
typedef int64_t mi_msecs_t;
// A "span" is is an available range of slices. The span queues keep
// track of slice spans of at most the given `slice_count` (but more than the previous size class).
typedef struct mi_span_queue_s {
mi_slice_t* first;
mi_slice_t* last;
size_t slice_count;
} mi_span_queue_t;
// Queue of segments
typedef struct mi_segment_queue_s {
mi_segment_t* first;
mi_segment_t* last;
} mi_segment_queue_t;
#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
// Segments thread local data
typedef struct mi_segments_tld_s {
mi_segment_queue_t small_free; // queue of segments with free small pages
mi_segment_queue_t medium_free; // queue of segments with free medium pages
mi_page_queue_t pages_purge; // queue of freed pages that are delay purged
mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments
size_t count; // current number of segments;
size_t peak_count; // peak number of segments
size_t current_size; // current size of all segments
@ -625,22 +683,25 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line
// add to stat keeping track of the peak
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount);
// counters can just be increased
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
#if (MI_STAT)
#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
#define mi_stat_adjust_decrease(stat,amount) _mi_stat_adjust_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount) ((void)0)
#define mi_stat_decrease(stat,amount) ((void)0)
#define mi_stat_adjust_decrease(stat,amount) ((void)0)
#define mi_stat_counter_increase(stat,amount) ((void)0)
#endif
#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_adjust_decrease(heap,stat,amount) mi_stat_adjust_decrease( (heap)->tld->stats.stat, amount)
#endif

View file

@ -12,9 +12,9 @@ is a general purpose allocator with excellent [performance](#performance) charac
Initially developed by Daan Leijen for the runtime systems of the
[Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages.
Latest release : `v3.0.2` (beta) (2025-03-06).
Latest v2 release: `v2.2.2` (2025-03-06).
Latest v1 release: `v1.9.2` (2024-03-06).
Latest release : `v3.0.3` (beta) (2025-03-28).
Latest v2 release: `v2.2.3` (2025-03-28).
Latest v1 release: `v1.9.3` (2025-03-28).
mimalloc is a drop-in replacement for `malloc` and can be used in other programs
without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:
@ -73,7 +73,7 @@ Enjoy!
### Branches
* `master`: latest stable release (still based on `dev2`).
* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's.
* `dev`: development branch for mimalloc v1. **Use this branch for submitting PR's**.
* `dev2`: development branch for mimalloc v2. This branch is downstream of `dev`
(and is essentially equal to `dev` except for `src/segment.c`). Uses larger sliced segments to manage
mimalloc pages that can reduce fragmentation.
@ -84,6 +84,9 @@ Enjoy!
### Releases
* 2025-03-28, `v1.9.3`, `v2.2.3`, `v3.0.3` (beta): Various small bug and build fixes, including:
fix arm32 pre-v7 builds, fix the mingw build, get runtime statistics, improve statistic commit counts,
fix execution on non-BMI1 x64 systems.
* 2025-03-06, `v1.9.2`, `v2.2.2`, `v3.0.2-beta`: Various small bug and build fixes.
Add `mi_options_print`, `mi_arenas_print`, and the experimental `mi_stat_get` and `mi_stat_get_json`.
Add `mi_thread_set_in_threadpool` and `mi_heap_set_numa_affinity` (v3 only). Add vcpkg portfile.

View file

@ -115,7 +115,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
// now zero the block if needed
if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
// for the tracker, on huge aligned allocations only from the start of the large block is defined
// for the tracker, on huge aligned allocations only the memory from the start of the large block is defined
mi_track_mem_undefined(aligned_p, size);
if (zero) {
_mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));
@ -191,9 +191,6 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
if mi_likely(is_aligned)
{
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc_requested, size);
#endif
void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
@ -220,6 +217,11 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap,
return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
}
// ensure a definition is emitted
#if defined(__cplusplus)
void* _mi_extern_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
#endif
// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------

View file

@ -30,6 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
mi_assert_internal(size >= MI_PADDING_SIZE);
mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
// check the free list
@ -82,12 +83,13 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
#if (MI_STAT>0)
const size_t bsize = mi_page_usable_block_size(page);
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
mi_heap_stat_increase(heap, malloc_normal, bsize);
mi_heap_stat_counter_increase(heap, malloc_normal_count, 1);
#if (MI_STAT>1)
const size_t bin = _mi_bin(bsize);
mi_heap_stat_increase(heap, malloc_bins[bin], 1);
mi_heap_stat_increase(heap, malloc_requested, size - MI_PADDING_SIZE);
#endif
}
#endif
@ -146,12 +148,6 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
}
#endif
#if MI_DEBUG>3
if (p != NULL && zero) {
mi_assert_expensive(mi_mem_is_zero(p, size));
@ -188,12 +184,6 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
}
#endif
#if MI_DEBUG>3
if (p != NULL && zero) {
mi_assert_expensive(mi_mem_is_zero(p, size));
@ -640,7 +630,7 @@ static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) {
// if the offset is too large, give up on placing it right in front of the guard page
offset = MI_BLOCK_ALIGNMENT_MAX;
}
void* p = (uint8_t*)block + offset;
void* p = (uint8_t*)block + offset;
mi_track_align(block, p, offset, obj_size);
mi_track_mem_defined(block, sizeof(mi_block_t));
return p;
@ -662,11 +652,12 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
void* const p = mi_block_ptr_set_guarded(block, obj_size);
// stats
mi_track_malloc(p, size, zero);
mi_track_malloc(p, size, zero);
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size);
mi_heap_stat_increase(heap, malloc_requested, size);
#endif
_mi_stat_counter_increase(&heap->tld->stats.malloc_guarded_count, 1);
}
@ -694,7 +685,7 @@ void* _mi_externs[] = {
(void*)&mi_zalloc_small,
(void*)&mi_heap_malloc,
(void*)&mi_heap_zalloc,
(void*)&mi_heap_malloc_small
(void*)&mi_heap_malloc_small,
// (void*)&mi_heap_alloc_new,
// (void*)&mi_heap_alloc_new_n
};

View file

@ -99,6 +99,10 @@ bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_i
}
}
bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
return (memid.memkind == MI_MEM_OS);
}
size_t mi_arena_get_count(void) {
return mi_atomic_load_relaxed(&mi_arena_count);
}
@ -255,7 +259,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
// set the dirty bits (todo: no need for an atomic op here?)
if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL, NULL);
}
// set commit state
@ -267,10 +271,14 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
// commit requested, but the range may not be committed as a whole: ensure it is committed now
memid->initially_committed = true;
bool any_uncommitted;
_mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
size_t already_committed = 0;
_mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed);
if (any_uncommitted) {
mi_assert_internal(already_committed < needed_bcount);
const size_t commit_size = mi_arena_block_size(needed_bcount);
const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed);
bool commit_zero = false;
if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero)) {
if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) {
memid->initially_committed = false;
}
else {
@ -280,7 +288,14 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
}
else {
// no need to commit, but check if already fully committed
memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
size_t already_committed = 0;
memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &already_committed);
if (!memid->initially_committed && already_committed > 0) {
// partially committed: as it will be committed at some time, adjust the stats and pretend the range is fully uncommitted.
mi_assert_internal(already_committed < needed_bcount);
_mi_stat_decrease(&_mi_stats_main.committed, mi_arena_block_size(already_committed));
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
}
}
return p;
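The net effect of the hunk above is that only the still-uncommitted remainder of a claimed range is charged to the commit statistic. A standalone sketch of the arithmetic, using a hypothetical block size rather than mimalloc's real arena block constant:

#include <stdio.h>
#include <stddef.h>

#define BLOCK_SIZE (32u * 1024 * 1024)  // hypothetical arena block size, illustration only

int main(void) {
  size_t needed_bcount     = 4;   // blocks being claimed
  size_t already_committed = 1;   // blocks the bitmap reports as committed
  size_t commit_size      = needed_bcount * (size_t)BLOCK_SIZE;             // range passed to the OS commit
  size_t stat_commit_size = commit_size - already_committed * (size_t)BLOCK_SIZE; // newly charged to stats
  printf("commit %zu bytes, charge %zu bytes\n", commit_size, stat_commit_size);
  return 0;
}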
@ -464,17 +479,19 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks)
const size_t size = mi_arena_block_size(blocks);
void* const p = mi_arena_block_start(arena, bitmap_idx);
bool needs_recommit;
if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
size_t already_committed = 0;
if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx, &already_committed)) {
// all blocks are committed, we can purge freely
mi_assert_internal(already_committed == blocks);
needs_recommit = _mi_os_purge(p, size);
}
else {
// some blocks are not committed -- this can happen when a partially committed block is freed
// in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
// and also undo the decommit stats (as it was already adjusted)
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
mi_assert_internal(already_committed < blocks);
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, 0);
needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
}
// clear the purged blocks
@ -508,7 +525,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
else {
// already an expiration was set
}
_mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
_mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL, NULL);
}
}
@ -648,15 +665,16 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
if (p==NULL) return;
if (size==0) return;
const bool all_committed = (committed_size == size);
const size_t decommitted_size = (committed_size <= size ? size - committed_size : 0);
// need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
mi_track_mem_undefined(p,size);
if (mi_memkind_is_os(memid.memkind)) {
// was a direct OS allocation, pass through
if (!all_committed && committed_size > 0) {
// if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
_mi_stat_decrease(&_mi_stats_main.committed, committed_size);
if (!all_committed && decommitted_size > 0) {
// if partially committed, adjust the committed stats (as `_mi_os_free` will decrease commit by the full size)
_mi_stat_increase(&_mi_stats_main.committed, decommitted_size);
}
_mi_os_free(p, size, memid);
}
@ -690,14 +708,14 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
mi_assert_internal(arena->blocks_purge != NULL);
if (!all_committed) {
// mark the entire range as no longer committed (so we recommit the full range when re-using)
// mark the entire range as no longer committed (so we will recommit the full range when re-using)
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
mi_track_mem_noaccess(p,size);
if (committed_size > 0) {
//if (committed_size > 0) {
// if partially committed, adjust the committed stats (as it will be recommitted when re-using)
// in the delayed purge, we now need to not count a decommit if the range is not marked as committed.
// in the delayed purge, we no longer decrease the commit if the range is not marked entirely as committed.
_mi_stat_decrease(&_mi_stats_main.committed, committed_size);
}
//}
// note: if not all committed, it may be that the purge will reset/decommit the entire range
// that contains already decommitted parts. Since purge consistently uses reset or decommit that
// works (as we should never reset decommitted parts).

View file

@ -34,17 +34,17 @@ static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
}
/* -----------------------------------------------------------
Claim a bit sequence atomically
----------------------------------------------------------- */
// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
{
mi_assert_internal(bitmap_idx != NULL);
mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
mi_assert_internal(count > 0);
mi_bitmap_field_t* field = &bitmap[idx];
size_t map = mi_atomic_load_relaxed(field);
if (map==MI_BITMAP_FIELD_FULL) return false; // short cut
@ -94,9 +94,9 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
return false;
}
// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
size_t idx = start_field_idx;
for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
@ -108,6 +108,24 @@ bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fiel
return false;
}
// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
const size_t start_field_idx, const size_t count,
mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
mi_bitmap_index_t* bitmap_idx) {
size_t idx = start_field_idx;
for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
if (idx >= bitmap_fields) idx = 0; // wrap
if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) {
return true;
}
// predicate returned false, unclaim and look further
_mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx);
}
}
return false;
}
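The predicate hook lets a caller reject an otherwise successful claim and keep searching. A minimal sketch of such a predicate, assuming the declarations from mimalloc's internal bitmap header; the filter itself (restricting claims to low field indices) is hypothetical, not the in-tree check:

// hypothetical filter: only accept a claim in the first `max_fields` fields
typedef struct { size_t max_fields; } low_fields_env_t;

static bool mi_cdecl claim_in_low_fields(mi_bitmap_index_t bitmap_idx, void* pred_arg) {
  low_fields_env_t* env = (low_fields_env_t*)pred_arg;
  return (mi_bitmap_index_field(bitmap_idx) < env->max_fields);
}

// usage sketch:
//   low_fields_env_t env = { 2 };
//   mi_bitmap_index_t idx;
//   if (_mi_bitmap_try_find_from_claim_pred(bitmap, field_count, 0, 4, &claim_in_low_fields, &env, &idx)) {
//     ... // claimed 4 bits, and they lie within the first two fields
//   }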
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
@ -228,7 +246,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
// intermediate fields
while (++field < final_field) {
newmap = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
newmap = MI_BITMAP_FIELD_FULL;
map = 0;
if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
}
@ -250,7 +268,7 @@ rollback:
// (we just failed to claim `field` so decrement first)
while (--field > initial_field) {
newmap = 0;
map = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
map = MI_BITMAP_FIELD_FULL;
mi_assert_internal(mi_atomic_load_relaxed(field) == map);
mi_atomic_store_release(field, newmap);
}
@ -351,7 +369,7 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) {
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set) {
size_t idx = mi_bitmap_index_field(bitmap_idx);
size_t pre_mask;
size_t mid_mask;
@ -359,28 +377,31 @@ bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t co
size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
bool all_zero = true;
bool any_zero = false;
size_t one_count = 0;
_Atomic(size_t)*field = &bitmap[idx];
size_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
if ((prev & pre_mask) != 0) all_zero = false;
if ((prev & pre_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & pre_mask); }
if ((prev & pre_mask) != pre_mask) any_zero = true;
while (mid_count-- > 0) {
prev = mi_atomic_or_acq_rel(field++, mid_mask);
if ((prev & mid_mask) != 0) all_zero = false;
if ((prev & mid_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & mid_mask); }
if ((prev & mid_mask) != mid_mask) any_zero = true;
}
if (post_mask!=0) {
prev = mi_atomic_or_acq_rel(field, post_mask);
if ((prev & post_mask) != 0) all_zero = false;
if ((prev & post_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & post_mask); }
if ((prev & post_mask) != post_mask) any_zero = true;
}
if (pany_zero != NULL) { *pany_zero = any_zero; }
if (already_set != NULL) { *already_set = one_count; }
mi_assert_internal(all_zero ? one_count == 0 : one_count <= count);
return all_zero;
}
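The pre/mid/post masks used above split a range that may span several fields. A standalone worked example with 64-bit fields (this mirrors what `mi_bitmap_mask_across` computes; the split formulas here assume the range really does cross a field boundary):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void) {
  const size_t bits = 64;
  size_t bitidx = 60, count = 70;                  // start bit within field 0, total bits to claim
  size_t pre_bits  = bits - bitidx;                // 4 bits in the first field
  size_t rest      = count - pre_bits;             // 66 bits remain
  size_t mid_count = rest / bits;                  // 1 completely covered field
  size_t post_bits = rest % bits;                  // 2 bits in the final field
  uint64_t pre_mask  = ~UINT64_C(0) << bitidx;         // bits 60..63 set
  uint64_t post_mask = (UINT64_C(1) << post_bits) - 1; // bits 0..1 set
  printf("pre=%zu mid_fields=%zu post=%zu\n", pre_bits, mid_count, post_bits);
  printf("pre_mask=%016llx post_mask=%016llx\n",
         (unsigned long long)pre_mask, (unsigned long long)post_mask);
  return 0;  // 4 + 64 + 2 == 70 bits covered in total
}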
// Returns `true` if all `count` bits were 1.
// `any_ones` is `true` if there was at least one bit set to one.
static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) {
static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones, size_t* already_set) {
size_t idx = mi_bitmap_index_field(bitmap_idx);
size_t pre_mask;
size_t mid_mask;
@ -388,30 +409,33 @@ static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_field
size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
bool all_ones = true;
bool any_ones = false;
size_t one_count = 0;
mi_bitmap_field_t* field = &bitmap[idx];
size_t prev = mi_atomic_load_relaxed(field++);
if ((prev & pre_mask) != pre_mask) all_ones = false;
if ((prev & pre_mask) != 0) any_ones = true;
if ((prev & pre_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & pre_mask); }
while (mid_count-- > 0) {
prev = mi_atomic_load_relaxed(field++);
if ((prev & mid_mask) != mid_mask) all_ones = false;
if ((prev & mid_mask) != 0) any_ones = true;
if ((prev & mid_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & mid_mask); }
}
if (post_mask!=0) {
prev = mi_atomic_load_relaxed(field);
if ((prev & post_mask) != post_mask) all_ones = false;
if ((prev & post_mask) != 0) any_ones = true;
if ((prev & post_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & post_mask); }
}
if (pany_ones != NULL) { *pany_ones = any_ones; }
if (already_set != NULL) { *already_set = one_count; }
mi_assert_internal(all_ones ? one_count == count : one_count < count);
return all_ones;
}
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL);
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set) {
return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL, already_set);
}
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
bool any_ones;
mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones, NULL);
return any_ones;
}

View file

@ -44,6 +44,11 @@ static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx
return mi_bitmap_index_create_ex(idx,bitidx);
}
// Create a bit index.
static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS);
}
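For example, with 64-bit fields a full bit index of 130 lands in field 2 at bit 2:

// mi_bitmap_index_create_from_bit(130)
//   == mi_bitmap_index_create(130 / 64, 130 % 64)
//   == mi_bitmap_index_create(2, 2)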
// Get the field index from a bit index.
static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
return (bitmap_idx / MI_BITMAP_FIELD_BITS);
@ -71,6 +76,10 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
@ -102,9 +111,9 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set);
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set);
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
#endif

View file

@ -35,7 +35,9 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
mi_check_padding(page, block);
if (track_stats) { mi_stat_free(page, block); }
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
if (!mi_page_is_huge(page)) { // huge page content may be already decommitted
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
}
#endif
if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster than mi_usable_size as we already know the page and that p is unaligned
@ -121,10 +123,16 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
#if (MI_DEBUG>0)
if mi_unlikely(!mi_is_in_heap_region(p)) {
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
#if (MI_INTPTR_SIZE == 8 && defined(__linux__))
if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F0000000000 (issue #640)
#else
{
#endif
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
}
#endif
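The new guard suppresses this (debug-only) warning for pointers in the address range where 64-bit Linux typically places large mmap allocations. A standalone check of the heuristic (the sample addresses are made up):

#include <stdio.h>
#include <stdint.h>

int main(void) {
  uintptr_t p1 = 0x7f3a12345000;     // typical mmap'd address: top byte of 48 bits is 0x7F
  uintptr_t p2 = 0x00005555deadb000; // typical heap/executable address: top byte differs
  printf("%d %d\n", (int)((p1 >> 40) == 0x7F), (int)((p2 >> 40) == 0x7F)); // prints: 1 0
  return 0;
}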
@ -272,7 +280,7 @@ static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* seg
// for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
_mi_padding_shrink(page, block, sizeof(mi_block_t));
if (segment->page_kind == MI_PAGE_HUGE) {
if (segment->kind == MI_SEGMENT_HUGE) {
#if MI_HUGE_PAGE_ABANDON
// huge page segments are always abandoned and can be freed immediately
_mi_segment_huge_page_free(segment, page, block);
@ -514,24 +522,24 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
#if (MI_STAT < 2)
MI_UNUSED(block);
#endif
mi_heap_t* const heap = mi_heap_get_default();
const size_t bsize = mi_page_usable_block_size(page);
#if (MI_STAT>1)
const size_t usize = mi_page_usable_size_of(page, block);
mi_heap_stat_decrease(heap, malloc_requested, usize);
#endif
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
// #if (MI_STAT>1)
// const size_t usize = mi_page_usable_size_of(page, block);
// mi_heap_stat_decrease(heap, malloc_requested, usize);
// #endif
if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, malloc_normal, bsize);
#if (MI_STAT > 1)
#if (MI_STAT > 1)
mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], 1);
#endif
#endif
}
//else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
// mi_heap_stat_decrease(heap, malloc_large, bsize);
//}
else {
const size_t bpsize = mi_page_block_size(page); // match stat in page.c:mi_huge_page_alloc
mi_heap_stat_decrease(heap, malloc_huge, bpsize);
mi_heap_stat_decrease(heap, malloc_huge, bsize);
}
}
#else

View file

@ -95,6 +95,11 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
mi_collect_t collect = *((mi_collect_t*)arg_collect);
_mi_page_free_collect(page, collect >= MI_FORCE);
if (collect == MI_FORCE) {
// note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
mi_segment_t* segment = _mi_page_segment(page);
_mi_segment_collect(segment, true /* force? */);
}
if (mi_page_all_free(page)) {
// no more used blocks, free the page.
// note: this will free retired pages as well.
@ -127,14 +132,15 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());
// note: never reclaim on collect but leave it to threads that need storage to reclaim
if (
#ifdef NDEBUG
const bool force_main =
#ifdef NDEBUG
collect == MI_FORCE
#else
#else
collect >= MI_FORCE
#endif
&& is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim)
{
#endif
&& is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;
if (force_main) {
// the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
// if all memory is freed by now, all segments should be freed.
// note: this only collects in the current subprocess
@ -157,8 +163,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
// collect segments (purge pages, this can be expensive so don't force on abandonment)
_mi_segments_collect(collect == MI_FORCE, &heap->tld->segments);
// collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
// note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
_mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);
// if forced, collect thread data cache on program-exit (or shared library unload)
if (force && is_main_thread && mi_heap_is_backing(heap)) {
@ -328,20 +335,26 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
// stats
const size_t bsize = mi_page_block_size(page);
if (bsize > MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, malloc_huge, bsize);
if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
//if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
// mi_heap_stat_decrease(heap, malloc_large, bsize);
//}
//else
{
mi_heap_stat_decrease(heap, malloc_huge, bsize);
}
}
#if (MI_STAT)
#if (MI_STAT>0)
_mi_page_free_collect(page, false); // update used count
const size_t inuse = page->used;
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse);
#if (MI_STAT>1)
#if (MI_STAT>1)
mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse);
#endif
#endif
}
mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks...
#endif
// mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks...
#endif
/// pretend it is all free now
mi_assert_internal(mi_page_thread_free(page) == NULL);

View file

@ -34,13 +34,12 @@ const mi_page_t _mi_page_empty = {
MI_ATOMIC_VAR_INIT(0), // xthread_free
MI_ATOMIC_VAR_INIT(0), // xheap
NULL, NULL
#if MI_INTPTR_SIZE==4
, { NULL }
#endif
, { 0 } // padding
};
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
#if (MI_SMALL_WSIZE_MAX==128)
#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif (MI_PADDING>0)
@ -48,7 +47,9 @@ const mi_page_t _mi_page_empty = {
#else
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif
#else
#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX"
#endif
// Empty page queues for every bin
#define QNULL(sz) { NULL, NULL, (sz)*sizeof(uintptr_t) }
@ -63,8 +64,8 @@ const mi_page_t _mi_page_empty = {
QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
QNULL(MI_LARGE_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
QNULL(MI_LARGE_OBJ_WSIZE_MAX + 2) /* Full queue */ }
QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }
#define MI_STAT_COUNT_NULL() {0,0,0}
@ -86,6 +87,18 @@ const mi_page_t _mi_page_empty = {
{ MI_INIT74(MI_STAT_COUNT_NULL) }, \
{ MI_INIT74(MI_STAT_COUNT_NULL) }
// Empty slice span queues for every bin
#define SQNULL(sz) { NULL, NULL, sz }
#define MI_SEGMENT_SPAN_QUEUES_EMPTY \
{ SQNULL(1), \
SQNULL( 1), SQNULL( 2), SQNULL( 3), SQNULL( 4), SQNULL( 5), SQNULL( 6), SQNULL( 7), SQNULL( 10), /* 8 */ \
SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \
SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \
SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \
SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ }
// --------------------------------------------------------
// Statically allocate an empty heap as the initial
// thread local value for the default heap,
@ -95,7 +108,7 @@ const mi_page_t _mi_page_empty = {
// may lead to allocation itself on some platforms)
// --------------------------------------------------------
mi_decl_hidden mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
NULL,
MI_ATOMIC_VAR_INIT(NULL),
0, // tid
@ -116,6 +129,17 @@ mi_decl_hidden mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
MI_PAGE_QUEUES_EMPTY
};
static mi_decl_cache_align mi_subproc_t mi_subproc_default;
#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
mi_decl_cache_align static const mi_tld_t tld_empty = {
0,
false,
NULL, NULL,
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, tld_empty_stats }, // segments
{ MI_STAT_VERSION, MI_STATS_NULL } // stats
};
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
return _mi_prim_thread_id();
@ -126,15 +150,10 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
extern mi_decl_hidden mi_heap_t _mi_heap_main;
static mi_decl_cache_align mi_subproc_t mi_subproc_default;
static mi_decl_cache_align mi_tld_t tld_main = {
0, false,
&_mi_heap_main, &_mi_heap_main,
{ { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
0, 0, 0, 0, 0, &mi_subproc_default,
&tld_main.stats
}, // segments
&_mi_heap_main, &_mi_heap_main,
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, &tld_main.stats }, // segments
{ MI_STAT_VERSION, MI_STATS_NULL } // stats
};
@ -391,7 +410,7 @@ static bool _mi_thread_heap_init(void) {
// initialize thread local data
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
_mi_memzero_aligned(tld,sizeof(mi_tld_t));
_mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t));
tld->heap_backing = bheap;
tld->heaps = NULL;
tld->segments.subproc = &mi_subproc_default;
@ -432,7 +451,10 @@ static bool _mi_thread_heap_done(mi_heap_t* heap) {
// free if not the main thread
if (heap != &_mi_heap_main) {
mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
// the following assertion does not always hold for huge segments as those are always treated
// as abandoned: one may allocate it in one thread, but deallocate it in another, in which case
// the count can be too large or negative. todo: perhaps not count huge segments? see issue #363
// mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
mi_thread_data_free((mi_thread_data_t*)heap);
}
else {
@ -647,7 +669,7 @@ void mi_process_init(void) mi_attr_noexcept {
if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
long ksize = mi_option_get(mi_option_reserve_os_memory);
if (ksize > 0) {
mi_reserve_os_memory((size_t)ksize*MI_KiB, true, true);
mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */);
}
}
}

View file

@ -275,3 +275,60 @@ int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) {
va_end(args);
return written;
}
#if MI_SIZE_SIZE == 4
#define mi_mask_even_bits32 (0x55555555)
#define mi_mask_even_pairs32 (0x33333333)
#define mi_mask_even_nibbles32 (0x0F0F0F0F)
// sum of all the bytes in `x` if it is guaranteed that the sum < 256!
static size_t mi_byte_sum32(uint32_t x) {
// perform `x * 0x01010101`: the highest byte contains the sum of all bytes.
x += (x << 8);
x += (x << 16);
return (size_t)(x >> 24);
}
static size_t mi_popcount_generic32(uint32_t x) {
// first count each 2-bit group `a`, where: a==0b00 -> 00, a==0b01 -> 01, a==0b10 -> 01, a==0b11 -> 10
// in other words, `a - (a>>1)`; to do this in parallel, we need to mask to prevent spilling a bit pair
// into the lower bit-pair:
x = x - ((x >> 1) & mi_mask_even_bits32);
// add the 2-bit pair results
x = (x & mi_mask_even_pairs32) + ((x >> 2) & mi_mask_even_pairs32);
// add the 4-bit nibble results
x = (x + (x >> 4)) & mi_mask_even_nibbles32;
// each byte now has a count of its bits, we can sum them now:
return mi_byte_sum32(x);
}
mi_decl_noinline size_t _mi_popcount_generic(size_t x) {
return mi_popcount_generic32(x);
}
#else
#define mi_mask_even_bits64 (0x5555555555555555)
#define mi_mask_even_pairs64 (0x3333333333333333)
#define mi_mask_even_nibbles64 (0x0F0F0F0F0F0F0F0F)
// sum of all the bytes in `x` if it is guaranteed that the sum < 256!
static size_t mi_byte_sum64(uint64_t x) {
x += (x << 8);
x += (x << 16);
x += (x << 32);
return (size_t)(x >> 56);
}
static size_t mi_popcount_generic64(uint64_t x) {
x = x - ((x >> 1) & mi_mask_even_bits64);
x = (x & mi_mask_even_pairs64) + ((x >> 2) & mi_mask_even_pairs64);
x = (x + (x >> 4)) & mi_mask_even_nibbles64;
return mi_byte_sum64(x);
}
mi_decl_noinline size_t _mi_popcount_generic(size_t x) {
return mi_popcount_generic64(x);
}
#endif
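A self-contained version of the same SWAR popcount, with the byte sum expressed as the equivalent multiply, plus a few checks (4, 32, and 0 bits set):

#include <stdio.h>
#include <stdint.h>

static unsigned popcount32(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);               // per 2-bit pair: count of set bits
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u); // per nibble
  x = (x + (x >> 4)) & 0x0F0F0F0Fu;               // per byte
  return (x * 0x01010101u) >> 24;  // multiply folds all byte counts into the top byte
}

int main(void) {
  printf("%u %u %u\n", popcount32(0xF0u), popcount32(0xFFFFFFFFu), popcount32(0u)); // 4 32 0
  return 0;
}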

View file

@ -106,11 +106,11 @@ typedef struct mi_option_desc_s {
static mi_option_desc_t options[_mi_option_last] =
{
// stable options
#if MI_DEBUG || defined(MI_SHOW_ERRORS)
#if MI_DEBUG || defined(MI_SHOW_ERRORS)
{ 1, UNINIT, MI_OPTION(show_errors) },
#else
#else
{ 0, UNINIT, MI_OPTION(show_errors) },
#endif
#endif
{ 0, UNINIT, MI_OPTION(show_stats) },
{ MI_DEFAULT_VERBOSE, UNINIT, MI_OPTION(verbose) },
@ -129,7 +129,7 @@ static mi_option_desc_t options[_mi_option_last] =
UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`)
{ 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
{ 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_purge) }, // purge free page memory when a thread terminates
{ 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) }, // reset free page memory when a thread terminates
{ 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit)
#if defined(__NetBSD__)
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed

View file

@ -91,21 +91,6 @@ void _mi_os_init(void) {
bool _mi_os_decommit(void* addr, size_t size);
bool _mi_os_commit(void* addr, size_t size, bool* is_zero);
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
uintptr_t mask = alignment - 1;
if ((alignment & mask) == 0) { // power of two?
return (sz & ~mask);
}
else {
return ((sz / alignment) * alignment);
}
}
static void* mi_align_down_ptr(void* p, size_t alignment) {
return (void*)_mi_align_down((uintptr_t)p, alignment);
}
/* -----------------------------------------------------------
aligned hinting
@ -519,7 +504,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size)
mi_os_stat_increase(purged, size);
if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
!_mi_preloading()) // don't decommit during preloading (unsafe)
!_mi_preloading()) // don't decommit during preloading (unsafe)
{
bool needs_recommit = true;
mi_os_decommit_ex(p, size, &needs_recommit, stat_size);
@ -539,7 +524,6 @@ bool _mi_os_purge(void* p, size_t size) {
return _mi_os_purge_ex(p, size, true, size);
}
// Protect a region in memory to be not accessible.
static bool mi_os_protectx(void* addr, size_t size, bool protect) {
// page align conservatively within the range

View file

@ -12,7 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
// include to help an IDE
#include "mimalloc.h"
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#endif
@ -38,15 +38,15 @@ terms of the MIT license. A copy of the license can be found in the file
static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+sizeof(uintptr_t)));
return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}
static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}
static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
return (pq->block_size > MI_LARGE_OBJ_SIZE_MAX);
return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
/* -----------------------------------------------------------
@ -58,7 +58,7 @@ static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t mi_bin(size_t size) {
size_t wsize = _mi_wsize_from_size(size);
size_t wsize = _mi_wsize_from_size(size);
#if defined(MI_ALIGN4W)
if mi_likely(wsize <= 4) {
return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes
@ -72,7 +72,7 @@ static inline size_t mi_bin(size_t size) {
return (wsize == 0 ? 1 : wsize);
}
#endif
else if mi_unlikely(wsize > MI_LARGE_OBJ_WSIZE_MAX) {
else if mi_unlikely(wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
return MI_BIN_HUGE;
}
else {
@ -107,7 +107,7 @@ size_t _mi_bin_size(size_t bin) {
// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
if (size <= MI_LARGE_OBJ_SIZE_MAX) {
if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE));
}
else {
@ -136,6 +136,10 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
}
#endif
static inline bool mi_page_is_large_or_huge(const mi_page_t* page) {
return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page));
}
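With the former large-object bins folded away, size routing reduces to a single threshold: anything at most MI_MEDIUM_OBJ_SIZE_MAX goes to a binned queue, while bigger blocks (or requests with an oversized alignment) take the huge path. A sketch with made-up constants, not the real mimalloc limits:

#include <stdio.h>
#include <stddef.h>

#define MEDIUM_OBJ_SIZE_MAX (128u * 1024)  // assumed value, for illustration only
#define BLOCK_ALIGNMENT_MAX (64u * 1024)   // assumed value, for illustration only

static const char* route(size_t size, size_t alignment) {
  if (size > MEDIUM_OBJ_SIZE_MAX || alignment > BLOCK_ALIGNMENT_MAX) return "huge queue";
  return "binned queue";
}

int main(void) {
  printf("%s\n", route(1024, 0));           // binned queue
  printf("%s\n", route(512u * 1024, 0));    // huge queue
  return 0;
}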
static size_t mi_page_bin(const mi_page_t* page) {
const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
mi_assert_internal(bin <= MI_BIN_FULL);
@ -147,7 +151,7 @@ static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t*
const size_t bin = mi_page_bin(page);
mi_page_queue_t* pq = &heap->pages[bin];
mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
(mi_page_is_huge(page) && mi_page_queue_is_huge(pq)) ||
(mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
return pq;
}
@ -210,10 +214,11 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(queue, page));
mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
(mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
(mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_heap_t* heap = mi_page_heap(page);
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == queue->last) queue->last = page->prev;
@ -235,10 +240,10 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(!mi_page_queue_contains(queue, page));
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
#endif
mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
(mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
(mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@ -277,8 +282,8 @@ static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t*
mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
(bsize == to->block_size && mi_page_queue_is_full(from)) ||
(bsize == from->block_size && mi_page_queue_is_full(to)) ||
(mi_page_is_huge(page) && mi_page_queue_is_huge(to)) ||
(mi_page_is_huge(page) && mi_page_queue_is_full(to)));
(mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) ||
(mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));
mi_heap_t* heap = mi_page_heap(page);
@ -317,8 +322,8 @@ static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t*
page->prev = to->first;
page->next = next;
to->first->next = page;
if (next != NULL) {
next->prev = page;
if (next != NULL) {
next->prev = page;
}
else {
to->last = page;

View file

@ -82,11 +82,9 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
mi_assert_internal(page->used <= page->capacity);
mi_assert_internal(page->capacity <= page->reserved);
// const size_t bsize = mi_page_block_size(page);
mi_segment_t* segment = _mi_page_segment(page);
uint8_t* start = mi_page_start(page);
mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
mi_assert_internal(page->is_huge == (segment->page_kind == MI_PAGE_HUGE));
mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL));
mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE));
//mi_assert_internal(start + page->capacity*page->block_size == page->top);
mi_assert_internal(mi_page_list_is_valid(page,page->free));
@ -123,14 +121,15 @@ bool _mi_page_is_valid(mi_page_t* page) {
#endif
if (mi_page_heap(page)!=NULL) {
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0);
mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
#if MI_HUGE_PAGE_ABANDON
if (segment->page_kind != MI_PAGE_HUGE)
if (segment->kind != MI_SEGMENT_HUGE)
#endif
{
mi_page_queue_t* pq = mi_page_queue_of(page);
mi_assert_internal(mi_page_queue_contains(pq, page));
mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
}
}
@ -257,10 +256,11 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
// called from segments when reclaiming abandoned pages
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
mi_assert_expensive(mi_page_is_valid_init(page));
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
#endif
// TODO: push on full queue immediately if it is full?
@ -274,7 +274,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
#if !MI_HUGE_PAGE_ABANDON
mi_assert_internal(pq != NULL);
mi_assert_internal(mi_heap_contains_queue(heap, pq));
mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_OBJ_SIZE_MAX || block_size == pq->block_size);
mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
#endif
mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments);
if (page == NULL) {
@ -284,6 +284,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
#endif
mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
// a fresh page was found, initialize it
const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
@ -426,6 +427,7 @@ void _mi_page_force_abandon(mi_page_t* page) {
}
}
// Free a page with no more free blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
mi_assert_internal(page != NULL);
@ -449,7 +451,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_segment_page_free(page, force, segments_tld);
}
#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE
#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE
#define MI_RETIRE_CYCLES (16)
// Retire a page with no more used blocks
@ -623,7 +625,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
#if (MI_SECURE>0)
#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many
#else
#define MI_MIN_EXTEND (1)
#define MI_MIN_EXTEND (4)
#endif
// Extend the capacity (up to reserved) by initializing a free list
@ -632,6 +634,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
// allocations but this did not speed up any benchmark (due to an
// extra test in malloc? or cache effects?)
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
MI_UNUSED(tld);
mi_assert_expensive(mi_page_is_valid_init(page));
#if (MI_SECURE<=2)
mi_assert(page->free == NULL);
@ -640,9 +643,6 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
#endif
if (page->capacity >= page->reserved) return;
size_t page_size;
//uint8_t* page_start =
_mi_segment_page_start(_mi_page_segment(page), page, &page_size);
mi_stat_counter_increase(tld->stats.pages_extended, 1);
// calculate the extend count
@ -688,6 +688,8 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
size_t page_size;
page->page_start = _mi_segment_page_start(segment, page, &page_size);
mi_track_mem_noaccess(page->page_start,page_size);
mi_assert_internal(mi_page_block_size(page) <= page_size);
mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
mi_assert_internal(page_size / block_size < (1L<<16));
page->reserved = (uint16_t)(page_size / block_size);
mi_assert_internal(page->reserved > 0);
@ -702,6 +704,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size));
}
#endif
mi_assert_internal(page->is_committed);
if (block_size > 0 && _mi_is_power_of_two(block_size)) {
page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
}
@ -824,7 +827,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
}
if (page == NULL) {
_mi_heap_collect_retired(heap, false); // perhaps make a page available
_mi_heap_collect_retired(heap, false); // perhaps make a page available?
page = mi_page_fresh(heap, pq);
if (page == NULL && first_try) {
// out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
@ -902,31 +905,47 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
General allocation
----------------------------------------------------------- */
// Huge pages contain just one block, and the segment contains just that page.
// Large and huge page allocation.
// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`).
// Huge pages are also used if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
size_t block_size = _mi_os_good_alloc_size(size);
mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
#if MI_HUGE_PAGE_ABANDON
mi_page_queue_t* pq = NULL;
mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
#else
mi_page_queue_t* pq = mi_page_queue(heap, MI_LARGE_OBJ_SIZE_MAX+1); // always in the huge queue regardless of the block size
mi_assert_internal(mi_page_queue_is_huge(pq));
mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_LARGE_OBJ_SIZE_MAX+1 : block_size);
mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
#endif
mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
if (page != NULL) {
mi_assert_internal(mi_page_block_size(page) >= size);
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(mi_page_is_huge(page));
mi_assert_internal(_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
mi_assert_internal(_mi_page_segment(page)->used==1);
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
mi_page_set_heap(page, NULL);
#endif
mi_heap_stat_increase(heap, malloc_huge, mi_page_block_size(page));
mi_heap_stat_counter_increase(heap, malloc_huge_count, 1);
if (is_huge) {
mi_assert_internal(mi_page_is_huge(page));
mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
mi_assert_internal(_mi_page_segment(page)->used==1);
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
mi_page_set_heap(page, NULL);
#endif
}
else {
mi_assert_internal(!mi_page_is_huge(page));
}
const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding
/*if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_increase(heap, malloc_large, bsize);
mi_heap_stat_counter_increase(heap, malloc_large_count, 1);
}
else */
{
_mi_stat_increase(&heap->tld->stats.malloc_huge, bsize);
_mi_stat_counter_increase(&heap->tld->stats.malloc_huge_count, 1);
}
}
return page;
}
@ -937,13 +956,13 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
// huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
if mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) {
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
return NULL;
}
else {
return mi_huge_page_alloc(heap,size,huge_alignment);
return mi_large_huge_page_alloc(heap,size,huge_alignment);
}
}
else {

View file

@ -31,11 +31,12 @@ terms of the MIT license. A copy of the license can be found in the file
#if defined(__linux__)
#include <features.h>
#include <linux/prctl.h> // PR_SET_VMA
//#if defined(MI_NO_THP)
#include <sys/prctl.h> // THP disable
#include <sys/prctl.h> // THP disable
//#endif
#if defined(__GLIBC__)
#include <linux/mman.h> // linux mmap flags
#include <linux/mman.h> // linux mmap flags
#else
#include <sys/mman.h>
#endif
@ -205,14 +206,24 @@ static int unix_madvise(void* addr, size_t size, int advice) {
return (res==0 ? 0 : errno);
}
static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
static void* unix_mmap_prim(void* addr, size_t size, int protect_flags, int flags, int fd) {
void* p = mmap(addr, size, protect_flags, flags, fd, 0 /* offset */);
#if (defined(__linux__) && defined(PR_SET_VMA))
if (p!=MAP_FAILED && p!=NULL) {
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "mimalloc");
}
#endif
return p;
}
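The PR_SET_VMA call above labels mimalloc's anonymous mappings so they are identifiable in /proc. A standalone sketch of the effect, assuming a Linux 5.17+ kernel built with CONFIG_ANON_VMA_NAME (on older kernels the prctl simply fails and the mapping stays unnamed):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

int main(void) {
  size_t size = 1 << 20;
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;
#ifdef PR_SET_VMA
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "mimalloc"); // may fail on old kernels
#endif
  // a named mapping shows up as "[anon:mimalloc]" in the maps file
  char line[256];
  FILE* f = fopen("/proc/self/maps", "r");
  while (f != NULL && fgets(line, sizeof(line), f) != NULL) {
    if (strstr(line, "anon:mimalloc") != NULL) fputs(line, stdout);
  }
  if (f != NULL) fclose(f);
  return 0;
}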
static void* unix_mmap_prim_aligned(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
MI_UNUSED(try_alignment);
void* p = NULL;
#if defined(MAP_ALIGNED) // BSD
if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
size_t n = mi_bsr(try_alignment);
if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
p = unix_mmap_prim(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd);
if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
int err = errno;
_mi_trace_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
@ -223,7 +234,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
}
#elif defined(MAP_ALIGN) // Solaris
if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment
p = unix_mmap_prim((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd); // addr parameter is the required alignment
if (p!=MAP_FAILED) return p;
// fall back to regular mmap
}
@ -233,7 +244,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
if (addr == NULL) {
void* hint = _mi_os_get_aligned_hint(try_alignment, size);
if (hint != NULL) {
p = mmap(hint, size, protect_flags, flags, fd, 0);
p = unix_mmap_prim(hint, size, protect_flags, flags, fd);
if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
#if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly?
int err = 0;
@ -248,7 +259,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
}
#endif
// regular mmap
p = mmap(addr, size, protect_flags, flags, fd, 0);
p = unix_mmap_prim(addr, size, protect_flags, flags, fd);
if (p!=MAP_FAILED) return p;
// failed to allocate
return NULL;
@ -319,7 +330,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
if (large_only || lflags != flags) {
// try large OS page allocation
*is_large = true;
p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd);
#ifdef MAP_HUGE_1GB
if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) {
mi_huge_pages_available = false; // don't try huge 1GiB pages again
@ -327,7 +338,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
_mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno);
}
lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd);
}
#endif
if (large_only) return p;
@ -340,7 +351,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
// regular allocation
if (p == NULL) {
*is_large = false;
p = unix_mmap_prim(addr, size, try_alignment, protect_flags, flags, fd);
p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, flags, fd);
if (p != NULL) {
#if defined(MADV_HUGEPAGE)
// Many Linux systems don't allow MAP_HUGETLB but they support instead
@ -398,10 +409,6 @@ static void unix_mprotect_hint(int err) {
#endif
}
int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
// commit: ensure we can access the area
// note: we may think that *is_zero can be true since the memory

View file

@ -143,13 +143,17 @@ void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* ctx_new) {
uintptr_t _mi_random_next(mi_random_ctx_t* ctx) {
mi_assert_internal(mi_random_is_initialized(ctx));
#if MI_INTPTR_SIZE <= 4
return chacha_next32(ctx);
#elif MI_INTPTR_SIZE == 8
return (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx));
#else
# error "define mi_random_next for this platform"
#endif
uintptr_t r;
do {
#if MI_INTPTR_SIZE <= 4
r = chacha_next32(ctx);
#elif MI_INTPTR_SIZE == 8
r = (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx));
#else
# error "define mi_random_next for this platform"
#endif
} while (r==0);
return r;
}
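The loop guards against returning zero, presumably because zero serves as an "uninitialized" marker for cookies and keys; with a uniform 64-bit generator a retry is astronomically rare (about 2^-64 per draw). The pattern in isolation, with a stand-in xorshift generator instead of the ChaCha state:

#include <stdint.h>

static uint64_t state = 0x9E3779B97F4A7C15ULL;  // arbitrary nonzero seed

static uint64_t xorshift64(void) {  // stand-in generator, not mimalloc's chacha
  state ^= state << 13; state ^= state >> 7; state ^= state << 17;
  return state;
}

static uint64_t next_nonzero(uint64_t (*gen)(void)) {
  uint64_t r;
  do { r = gen(); } while (r == 0);  // same retry shape as _mi_random_next
  return r;
}

int main(void) { return (int)(next_nonzero(&xorshift64) == 0); }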
@ -163,7 +167,7 @@ uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
x ^= _mi_prim_clock_now();
// and do a few randomization steps
uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
for (uintptr_t i = 0; i < max; i++) {
for (uintptr_t i = 0; i < max || x==0; i++, x++) {
x = _mi_random_shuffle(x);
}
mi_assert_internal(x != 0);
@ -179,7 +183,7 @@ static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); }
#endif
uintptr_t x = _mi_os_random_weak(0);
for (size_t i = 0; i < 8; i++) { // key is eight 32-bit words.
for (size_t i = 0; i < 8; i++, x++) { // key is eight 32-bit words.
x = _mi_random_shuffle(x);
((uint32_t*)key)[i] = (uint32_t)x;
}

File diff suppressed because it is too large

View file

@ -30,6 +30,7 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
{
// add atomically (for abandoned pages)
int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount);
// if (stat == &_mi_stats_main.committed) { mi_assert_internal(current + amount >= 0); };
mi_atomic_maxi64_relaxed(&stat->peak, current + amount);
if (amount > 0) {
mi_atomic_addi64_relaxed(&stat->total,amount);
@ -61,6 +62,25 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
}
static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) {
if (amount == 0) return;
if mi_unlikely(mi_is_in_main(stat))
{
// adjust atomically
mi_atomic_addi64_relaxed(&stat->current, amount);
mi_atomic_addi64_relaxed(&stat->total,amount);
}
else {
// adjust local
stat->current += amount;
stat->total += amount;
}
}
void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) {
mi_stat_adjust(stat, -((int64_t)amount));
}
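Unlike a plain decrease, the adjust above shifts `current` and `total` together and deliberately leaves the peak untouched; the guarded-allocation hunk earlier uses it to back out an over-counted `malloc_requested`. The semantics in a standalone sketch (field names mirror `mi_stat_count_t`; this is not the mimalloc implementation):

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t total, peak, current; } stat_count_t;

static void stat_increase(stat_count_t* s, int64_t n) {
  s->current += n;
  if (s->current > s->peak) s->peak = s->current;
  s->total += n;
}
static void stat_adjust_decrease(stat_count_t* s, int64_t n) {
  s->current -= n;  // note: peak is intentionally left as-is
  s->total   -= n;
}

int main(void) {
  stat_count_t malloc_requested = { 0, 0, 0 };
  stat_increase(&malloc_requested, 100);        // over-counted request
  stat_adjust_decrease(&malloc_requested, 40);  // correct it without moving the peak
  printf("total=%lld peak=%lld current=%lld\n",
         (long long)malloc_requested.total, (long long)malloc_requested.peak,
         (long long)malloc_requested.current);  // total=60 peak=100 current=60
  return 0;
}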
// must be thread safe as it is called from stats_merge
static void mi_stat_count_add_mt(mi_stat_count_t* stat, const mi_stat_count_t* src) {
@ -94,8 +114,8 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
}
#endif
for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
mi_stat_count_add_mt(&stats->page_bins[i], &src->page_bins[i]);
}
mi_stat_count_add_mt(&stats->page_bins[i], &src->page_bins[i]);
}
}
#undef MI_STAT_COUNT
@ -198,6 +218,13 @@ static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int
_mi_fprintf(out, arg, "\n");
}
static void mi_stat_total_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) {
_mi_fprintf(out, arg, "%10s:", msg);
_mi_fprintf(out, arg, "%12s", " "); // no peak
mi_print_amount(stat->total, unit, out, arg);
_mi_fprintf(out, arg, "\n");
}
static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) {
_mi_fprintf(out, arg, "%10s:", msg);
mi_print_amount(stat->total, -1, out, arg);
@ -214,7 +241,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char*
static void mi_print_header(mi_output_fun* out, void* arg ) {
_mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "unit ", "total# ");
_mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "block ", "total# ");
}
#if MI_STAT>1
@ -283,18 +310,20 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
// and print using that
mi_print_header(out,arg);
#if MI_STAT>1
mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "normal",out,arg);
mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "bin",out,arg);
#endif
#if MI_STAT
mi_stat_print(&stats->malloc_normal, "normal", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
mi_stat_print(&stats->malloc_normal, "binned", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
// mi_stat_print(&stats->malloc_large, "large", (stats->malloc_large_count.total == 0 ? 1 : -1), out, arg);
mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 1 : -1), out, arg);
mi_stat_count_t total = { 0,0,0 };
mi_stat_count_add_mt(&total, &stats->malloc_normal);
// mi_stat_count_add(&total, &stats->malloc_large);
mi_stat_count_add_mt(&total, &stats->malloc_huge);
mi_stat_print_ex(&total, "total", 1, out, arg, "");
#endif
#if MI_STAT>1
mi_stat_print_ex(&stats->malloc_requested, "malloc req", 1, out, arg, "");
mi_stat_total_print(&stats->malloc_requested, "malloc req", 1, out, arg);
_mi_fprintf(out, arg, "\n");
#endif
mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, "");
@ -496,7 +525,12 @@ static void mi_heap_buf_print_count_bin(mi_heap_buf_t* hbuf, const char* prefix,
const size_t binsize = _mi_bin_size(bin);
const size_t pagesize = (binsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_SMALL_PAGE_SIZE :
(binsize <= MI_MEDIUM_OBJ_SIZE_MAX ? MI_MEDIUM_PAGE_SIZE :
(binsize <= MI_LARGE_OBJ_SIZE_MAX ? MI_LARGE_PAGE_SIZE : 0)));
#if MI_LARGE_PAGE_SIZE
(binsize <= MI_LARGE_OBJ_SIZE_MAX ? MI_LARGE_PAGE_SIZE : 0)
#else
0
#endif
));
char buf[128];
_mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"block_size\": %zu, \"page_size\": %zu }%s\n", prefix, stat->total, stat->peak, stat->current, binsize, pagesize, (add_comma ? "," : ""));
buf[127] = 0;
@ -589,7 +623,7 @@ char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept {
for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
mi_heap_buf_print_count_bin(&hbuf, " ", &stats->page_bins[i], i, i!=MI_BIN_HUGE);
}
mi_heap_buf_print(&hbuf, " ]\n");
mi_heap_buf_print(&hbuf, " ]\n");
mi_heap_buf_print(&hbuf, "}\n");
return hbuf.buf;
}

View file

@ -16,7 +16,7 @@ if (NOT CMAKE_BUILD_TYPE)
endif()
# Import mimalloc (if installed)
find_package(mimalloc 1.9 CONFIG REQUIRED)
find_package(mimalloc 2.2 CONFIG REQUIRED)
message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR} (${MIMALLOC_VERSION_DIR})")
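With the required package version bumped to 2.2, a consumer can sanity-check which library it actually linked at runtime; a small sketch (the digit encoding of mi_version()'s result, e.g. 220 for v2.2.0, is an assumption):

    #include <mimalloc.h>
    #include <stdio.h>

    // Print the runtime mimalloc version as major.minor.patch.
    int main(void) {
      const int v = mi_version();
      printf("mimalloc v%d.%d.%d\n", v / 100, (v % 100) / 10, v % 10);
      return 0;
    }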


@@ -10,7 +10,6 @@
#include <mimalloc.h>
#include <mimalloc-override.h> // redefines malloc etc.
static void mi_bins(void);
static void double_free1();
static void double_free2();
@@ -24,11 +23,12 @@ static void test_reserved(void);
static void negative_stat(void);
static void alloc_huge(void);
static void test_heap_walk(void);
static void test_heap_arena(void);
static void test_align(void);
static void test_canary_leak(void);
static void test_manage_os_memory(void);
// static void test_large_pages(void);
int main() {
mi_version();
mi_stats_reset();
@@ -50,8 +50,10 @@ int main() {
// negative_stat();
// test_heap_walk();
// alloc_huge();
// test_heap_walk();
// test_heap_arena();
// test_align();
void* p1 = malloc(78);
void* p2 = malloc(24);
free(p1);
@@ -67,7 +69,7 @@ int main() {
free(p1);
free(p2);
free(s);
/* now test if the override worked by allocating/freeing across the APIs */
//p1 = mi_malloc(32);
//free(p1);
@@ -82,6 +84,13 @@ int main() {
return 0;
}
static void test_align() {
void* p = mi_malloc_aligned(256, 256);
if (((uintptr_t)p % 256) != 0) {
fprintf(stderr, "%p is not 256-aligned!\n", p);
}
mi_free(p); // release the aligned block
}
static void invalid_free() {
free((void*)0xBADBEEF);
realloc((void*)0xBADBEEF,10);
@@ -239,6 +248,20 @@ static void test_heap_walk(void) {
mi_heap_visit_blocks(heap, true, &test_visit, NULL);
}
static void test_heap_arena(void) {
mi_arena_id_t arena_id;
int err = mi_reserve_os_memory_ex(100 * 1024 * 1024, false /* commit */, false /* allow large */, true /* exclusive */, &arena_id);
if (err) abort();
mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
for (int i = 0; i < 500000; i++) {
void* p = mi_heap_malloc(heap, 1024);
if (p == NULL) {
printf("out of memory after %d kb (expecting about 100_000kb)\n", i);
break;
}
}
}
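A hedged companion to test_heap_arena: the same reserve-and-allocate pattern with explicit cleanup. mi_heap_destroy is the documented way to release all blocks of a heap at once; whether the reserved arena memory itself is returned to the OS at that point is an assumption:

    #include <mimalloc.h>
    #include <stdlib.h>

    static void heap_arena_with_cleanup(void) {
      mi_arena_id_t arena_id;
      // Reserve 16 MiB: uncommitted, no large OS pages, exclusive to this arena.
      if (mi_reserve_os_memory_ex(16 * 1024 * 1024, false, false, true, &arena_id) != 0) abort();
      mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
      void* p = mi_heap_malloc(heap, 1024);
      mi_free(p);
      mi_heap_destroy(heap);  // frees any blocks still owned by the heap
    }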
static void test_canary_leak(void) {
char* p = mi_mallocn_tp(char,23);
for(int i = 0; i < 23; i++) {


@@ -27,9 +27,12 @@ static void heap_late_free(); // issue #204
static void padding_shrink(); // issue #209
static void various_tests();
static void test_mt_shutdown();
static void large_alloc(void); // issue #363
static void fail_aslr(); // issue #372
static void tsan_numa_test(); // issue #414
static void strdup_test(); // issue #445
static void bench_alloc_large(void); // issue #xxx
//static void test_large_migrate(void); // issue #691
static void heap_thread_free_huge();
static void test_std_string(); // issue #697
static void test_thread_local(); // issue #944
@@ -55,18 +58,20 @@ int main() {
//test_thread_local();
// heap_thread_free_huge();
/*
heap_thread_free_large();
heap_no_delete();
heap_late_free();
padding_shrink();
heap_thread_free_huge();
heap_thread_free_large();
heap_no_delete();
heap_late_free();
padding_shrink();
various_tests();
large_alloc();
tsan_numa_test();
strdup_test();
*/
// test_stl_allocators();
// test_mt_shutdown();
// test_large_migrate();
tsan_numa_test();
*/
/*
strdup_test();
test_stl_allocators();
test_mt_shutdown();
*/
//fail_aslr();
mi_stats_print(NULL);
return 0;
@@ -358,7 +363,7 @@ static void heap_thread_free_large_worker() {
static void heap_thread_free_large() {
for (int i = 0; i < 100; i++) {
shared_p = mi_malloc_aligned(2*1024*1024 + 1, 8);
shared_p = mi_malloc_aligned(2 * 1024 * 1024 + 1, 8);
auto t1 = std::thread(heap_thread_free_large_worker);
t1.join();
}
@@ -369,14 +374,13 @@ static void heap_thread_free_huge_worker() {
}
static void heap_thread_free_huge() {
for (int i = 0; i < 10; i++) {
for (int i = 0; i < 100; i++) {
shared_p = mi_malloc(1024 * 1024 * 1024);
auto t1 = std::thread(heap_thread_free_huge_worker);
t1.join();
}
}
static void test_mt_shutdown()
{
const int threads = 5;
@@ -401,6 +405,18 @@ static void test_mt_shutdown()
std::cout << "done" << std::endl;
}
// issue #363
using namespace std;
void large_alloc(void)
{
char* a = new char[1ull << 25];
thread th([&] {
delete[] a;
});
th.join();
}
// issue #372
static void fail_aslr() {
size_t sz = (size_t)(4ULL << 40); // 4TiB
@@ -421,6 +437,36 @@ static void tsan_numa_test() {
t1.join();
}
// issue #?
#include <chrono>
#include <random>
#include <iostream>
static void bench_alloc_large(void) {
static constexpr int kNumBuffers = 20;
static constexpr size_t kMinBufferSize = 5 * 1024 * 1024;
static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024;
std::unique_ptr<char[]> buffers[kNumBuffers];
std::random_device rd; (void)rd;
std::mt19937 gen(42); //rd());
std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize);
std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1);
static constexpr int kNumIterations = 2000;
const auto start = std::chrono::steady_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
int buffer_idx = buf_number_distribution(gen);
size_t new_size = size_distribution(gen);
buffers[buffer_idx] = std::make_unique<char[]>(new_size);
}
const auto end = std::chrono::steady_clock::now();
const auto num_ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
const auto us_per_allocation = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / kNumIterations;
std::cout << kNumIterations << " allocations done in " << num_ms << "ms." << std::endl;
std::cout << "Avg " << us_per_allocation << " us per allocation" << std::endl;
}
class MTest
{
@@ -447,4 +493,4 @@ void test_thread_local()
mi_stats_print(NULL);
}
return;
}
}


@@ -203,7 +203,11 @@ int main(void) {
CHECK_BODY("malloc-aligned9") { // test large alignments
bool ok = true;
void* p[8];
size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };
size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1,
#if SIZE_MAX > UINT32_MAX
2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX,
#endif
0 };
for (int i = 0; i < 28 && ok; i++) {
int align = (1 << i);
for (int j = 0; j < 8 && ok; j++) {
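A hedged reading of this change: on 32-bit targets the 2x and 8x MI_BLOCK_ALIGNMENT_MAX sizes can exceed what a 32-bit size_t and address space can serve, so they are only tested when SIZE_MAX > UINT32_MAX. A standalone probe of the same aligned-allocation API:

    #include <mimalloc.h>
    #include <stdint.h>
    #include <stdio.h>

    // Try one large aligned allocation and verify the returned alignment.
    int main(void) {
      const size_t align = (size_t)1 << 20;       // 1 MiB alignment
      void* p = mi_malloc_aligned(align, align);  // size == alignment here
      printf("aligned alloc %s (p=%p)\n", (p != NULL ? "ok" : "failed"), p);
      if (p != NULL && ((uintptr_t)p % align) != 0) { printf("misaligned!\n"); }
      mi_free(p);
      return 0;
    }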


@@ -320,11 +320,17 @@ int main(int argc, char** argv) {
// Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
srand(0x7feb352d);
// mi_stats_reset();
//mi_reserve_os_memory(512ULL << 20, true, true);
#if !defined(NDEBUG) && !defined(USE_STD_MALLOC)
mi_stats_reset();
#endif
#ifdef STRESS
test_stress();
test_stress();
#else
test_leak();
test_leak();
#endif
#ifndef USE_STD_MALLOC