Mirror of https://github.com/microsoft/mimalloc.git
Synced 2025-05-03 22:19:32 +03:00

Compare commits: v3.0.2-bet ... main (880 commits)
Commits af21001f7a … bbd81bbbd1 (880 total; SHA-only listing omitted).
40 changed files with 1890 additions and 1113 deletions
```diff
@@ -434,7 +434,7 @@ endif()
 if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM_NAME MATCHES "Haiku")
   if(MI_OPT_ARCH)
-    if(APPLE AND CMAKE_C_COMPILER_ID STREQUAL "AppleClang" AND CMAKE_OSX_ARCHITECTURES) # to support multi-arch binaries (#999)
+    if(APPLE AND CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang" AND CMAKE_OSX_ARCHITECTURES) # to support multi-arch binaries (#999)
       if("arm64" IN_LIST CMAKE_OSX_ARCHITECTURES)
         list(APPEND MI_OPT_ARCH_FLAGS "-Xarch_arm64;-march=armv8.1-a")
       endif()
```
```diff
@@ -532,7 +532,9 @@ if(MI_TRACK_ASAN)
 endif()
 string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC)
 list(APPEND mi_defines "MI_CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE_LC}") #todo: multi-config project needs $<CONFIG> ?
-if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$"))
+if(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$")
+  list(APPEND mi_defines MI_BUILD_RELEASE)
+else()
   set(mi_libname "${mi_libname}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. -debug) if not a release version
 endif()
```
```diff
@@ -582,7 +584,7 @@ if(MI_BUILD_SHARED)
   install(TARGETS mimalloc EXPORT mimalloc ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
   install(EXPORT mimalloc DESTINATION ${mi_install_cmakedir})

-  if(WIN32)
+  if(WIN32 AND NOT MINGW)
     # On windows, the import library name for the dll would clash with the static mimalloc.lib library
     # so we postfix the dll import library with `.dll.lib` (and also the .pdb debug file)
     set_property(TARGET mimalloc PROPERTY ARCHIVE_OUTPUT_NAME "${mi_libname}.dll" )
```
```diff
@@ -592,6 +594,9 @@ if(MI_BUILD_SHARED)
     # install(FILES "$<TARGET_FILE_DIR:mimalloc>/${mi_libname}.dll.pdb" DESTINATION ${CMAKE_INSTALL_LIBDIR})
   endif()
   if(WIN32 AND MI_WIN_REDIRECT)
+    if(MINGW)
+      set_property(TARGET mimalloc PROPERTY PREFIX "")
+    endif()
     # On windows, link and copy the mimalloc redirection dll too.
     if(CMAKE_GENERATOR_PLATFORM STREQUAL "arm64ec")
       set(MIMALLOC_REDIRECT_SUFFIX "-arm64ec")
```
```diff
@@ -7,9 +7,9 @@ trigger:
   branches:
     include:
       - master
-      - dev
-      - dev2
       - dev3
+      - dev2
+      - dev
   tags:
     include:
       - v*
```
```diff
@@ -34,6 +34,14 @@ jobs:
         BuildType: secure
         cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
         MSBuildConfiguration: Release
+      Debug x86:
+        BuildType: debug
+        cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -A Win32
+        MSBuildConfiguration: Debug
+      Release x86:
+        BuildType: release
+        cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -A Win32
+        MSBuildConfiguration: Release
   steps:
     - task: CMake@1
       inputs:
```
```diff
@@ -161,6 +169,7 @@ jobs:
     - script: ctest --verbose --timeout 240
+      workingDirectory: $(BuildType)
       displayName: CTest

 # - upload: $(Build.SourcesDirectory)/$(BuildType)
 #   artifact: mimalloc-macos-$(BuildType)
```
Binary files changed (contents not shown; file mode changed from Normal file to Executable file):

- bin/mimalloc-redirect-arm64.dll
- bin/mimalloc-redirect-arm64.lib
- bin/mimalloc-redirect-arm64ec.dll
- bin/mimalloc-redirect-arm64ec.lib
- bin/mimalloc-redirect.dll
- bin/mimalloc-redirect.lib
- bin/mimalloc-redirect32.dll
- bin/mimalloc-redirect32.lib
```diff
@@ -1,6 +1,6 @@
-set(mi_version_major 1)
-set(mi_version_minor 9)
-set(mi_version_patch 2)
+set(mi_version_major 2)
+set(mi_version_minor 2)
+set(mi_version_patch 3)
 set(mi_version ${mi_version_major}.${mi_version_minor})

 set(PACKAGE_VERSION ${mi_version})
```
```diff
@@ -5,11 +5,11 @@ vcpkg_from_github(

     # The "REF" can be a commit hash, branch name (dev2), or a version (v2.2.1).
     # REF "v${VERSION}"
-    REF 866ce5b89db1dbc3e66bbf89041291fd16329518
+    REF e2db21e9ba9fb9172b7b0aa0fe9b8742525e8774

     # The sha512 is the hash of the tar.gz bundle.
     # (To get the sha512, run `vcpkg install mimalloc[override] --overlay-ports=<dir of this file>` and copy the sha from the error message.)
-    SHA512 0b0e5ff823c49b9534b8c32800679806c5d7c29020af058da043c3e6e36ae3c32a1cdd5a21ece97dd60bc7dd4703967f683beac435dbb8514638a6cc55e5dea8
+    SHA512 8cbb601fdf8b46dd6a9c0d314d6da9d4960699853829e96d2470753867f90689fb4caeaf30d628943fd388670dc11902dbecc9cc7c329b99a510524a09bdb612
 )

 vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS
```
```diff
@@ -1,7 +1,7 @@
 {
   "name": "mimalloc",
-  "version": "1.9.2",
-  "port-version": 2,
+  "version": "2.2.2",
+  "port-version": 1,
   "description": "Compact general purpose allocator with excellent performance",
   "homepage": "https://github.com/microsoft/mimalloc",
   "license": "MIT",
```
```diff
@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
```
```diff
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #ifndef MIMALLOC_H
 #define MIMALLOC_H

-#define MI_MALLOC_VERSION 192   // major + 2 digits minor
+#define MI_MALLOC_VERSION 223   // major + 2 digits minor

 // ------------------------------------------------------
 // Compiler specific attributes
```
```diff
@@ -97,6 +97,7 @@ terms of the MIT license. A copy of the license can be found in the file

 #include <stddef.h>     // size_t
 #include <stdbool.h>    // bool
+#include <stdint.h>     // INTPTR_MAX

 #ifdef __cplusplus
 extern "C" {
```
```diff
@@ -266,6 +266,13 @@ static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int6
   return current;
   #endif
 }
+static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) {
+  const int64_t add = *padd;
+  if (add != 0) {
+    mi_atomic_addi64_relaxed((volatile _Atomic(int64_t)*)p, add);
+  }
+}
+
 static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
   int64_t current;
   do {
```
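The new `mi_atomic_void_addi64_relaxed` helper tests the addend before touching the atomic, so the common zero-delta case (for example when merging mostly-empty statistics) costs only a load and a branch. A standalone C11 sketch of the same guarded-add pattern; the names here are illustrative, not mimalloc's:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

// Guarded relaxed add: skip the atomic read-modify-write when there is
// nothing to add (the common case for sparse statistics merges).
static inline void add_if_nonzero(_Atomic(int64_t)* p, const int64_t* padd) {
  const int64_t add = *padd;
  if (add != 0) {
    atomic_fetch_add_explicit(p, add, memory_order_relaxed);
  }
}

int main(void) {
  _Atomic(int64_t) total = 0;
  int64_t deltas[3] = { 0, 5, 0 };   // only one non-zero merge
  for (int i = 0; i < 3; i++) { add_if_nonzero(&total, &deltas[i]); }
  printf("total = %lld\n", (long long)atomic_load(&total));  // total = 5
  return 0;
}
```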
```diff
@@ -363,8 +370,9 @@ static inline void mi_atomic_yield(void) {
   _mm_pause();
 }
 #elif (defined(__GNUC__) || defined(__clang__)) && \
-      (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \
-       defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__)
+      (defined(__x86_64__) || defined(__i386__) || \
+       defined(__aarch64__) || defined(__arm__) || \
+       defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__))
 #if defined(__x86_64__) || defined(__i386__)
 static inline void mi_atomic_yield(void) {
   __asm__ volatile ("pause" ::: "memory");
```
```diff
@@ -373,10 +381,16 @@ static inline void mi_atomic_yield(void) {
 static inline void mi_atomic_yield(void) {
   __asm__ volatile("wfe");
 }
-#elif (defined(__arm__) && __ARM_ARCH__ >= 7)
+#elif defined(__arm__)
+#if __ARM_ARCH >= 7
 static inline void mi_atomic_yield(void) {
   __asm__ volatile("yield" ::: "memory");
 }
+#else
+static inline void mi_atomic_yield(void) {
+  __asm__ volatile ("nop" ::: "memory");
+}
+#endif
 #elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
 #ifdef __APPLE__
 static inline void mi_atomic_yield(void) {
```
```diff
@@ -387,10 +401,6 @@ static inline void mi_atomic_yield(void) {
   __asm__ __volatile__ ("or 27,27,27" ::: "memory");
 }
 #endif
-#elif defined(__armel__) || defined(__ARMEL__)
-static inline void mi_atomic_yield(void) {
-  __asm__ volatile ("nop" ::: "memory");
-}
 #endif
 #elif defined(__sun)
 // Fallback for other archs
```
```diff
@@ -127,6 +127,7 @@ bool _mi_os_has_virtual_reserve(void);

 bool  _mi_os_reset(void* addr, size_t size);
 bool  _mi_os_commit(void* p, size_t size, bool* is_zero);
+bool  _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size);
 bool  _mi_os_decommit(void* addr, size_t size);
 bool  _mi_os_protect(void* addr, size_t size);
 bool  _mi_os_unprotect(void* addr, size_t size);
```
```diff
@@ -177,10 +178,11 @@ void _mi_segment_map_freed_at(const mi_segment_t* segment);
 void       _mi_segment_map_unsafe_destroy(void);

 // "segment.c"
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
-void       _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
-void       _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
-uint8_t*   _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size);
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld);
+void       _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
+void       _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
+bool       _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
+void       _mi_segment_collect(mi_segment_t* segment, bool force);

 #if MI_HUGE_PAGE_ABANDON
 void       _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
```
```diff
@@ -188,10 +190,11 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, m
 void       _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
 #endif

-void       _mi_segments_collect(bool force, mi_segments_tld_t* tld);
-void       _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
-bool       _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
-bool       _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
+uint8_t*   _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
+void       _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
+void       _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
+bool       _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
+bool       _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);

 // "page.c"
 void*      _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
```
```diff
@@ -341,12 +344,28 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
   }
 }

+// Align downwards
+static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
+  mi_assert_internal(alignment != 0);
+  uintptr_t mask = alignment - 1;
+  if ((alignment & mask) == 0) { // power of two?
+    return (sz & ~mask);
+  }
+  else {
+    return ((sz / alignment) * alignment);
+  }
+}
+
+// Align a pointer upwards
+static inline void* mi_align_up_ptr(void* p, size_t alignment) {
+  return (void*)_mi_align_up((uintptr_t)p, alignment);
+}
+
+// Align a pointer downwards
+static inline void* mi_align_down_ptr(void* p, size_t alignment) {
+  return (void*)_mi_align_down((uintptr_t)p, alignment);
+}
+

 // Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
 static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
```
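`_mi_align_down` (and the existing `_mi_align_up`) take a mask-based fast path only when the alignment is a power of two, falling back to integer division otherwise. A standalone sketch of both paths, with illustrative values:

```c
#include <stdint.h>
#include <stdio.h>

// Same two paths as _mi_align_down above: mask for powers of two,
// divide-and-multiply for everything else.
static uintptr_t align_down(uintptr_t sz, size_t alignment) {
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) return sz & ~mask;   // power of two
  else return (sz / alignment) * alignment;         // general case
}

int main(void) {
  printf("%lu\n", (unsigned long)align_down(100, 64));  // 64 (mask path)
  printf("%lu\n", (unsigned long)align_down(100, 24));  // 96 (divide path)
  return 0;
}
```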
```diff
@@ -370,6 +389,7 @@ static inline bool mi_mem_is_zero(const void* p, size_t size) {
   return true;
 }

+
 // Align a byte size to a size in _machine words_,
 // i.e. byte size == `wsize*sizeof(void*)`.
 static inline size_t _mi_wsize_from_size(size_t size) {
```
```diff
@@ -464,29 +484,44 @@ static inline mi_segment_t* _mi_ptr_segment(const void* p) {
   #endif
 }

+static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
+  mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0);
+  return (mi_page_t*)(s);
+}
+
+static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
+  mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0);
+  return (mi_slice_t*)(p);
+}
+
 // Segment belonging to a page
 static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
+  mi_assert_internal(page!=NULL);
   mi_segment_t* segment = _mi_ptr_segment(page);
-  mi_assert_internal(segment == NULL || page == &segment->pages[page->segment_idx]);
+  mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
   return segment;
 }

-// used internally
-static inline size_t _mi_segment_page_idx_of(const mi_segment_t* segment, const void* p) {
-  // if (segment->page_size > MI_SEGMENT_SIZE) return &segment->pages[0];  // huge pages
-  ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
-  mi_assert_internal(diff >= 0 && (size_t)diff <= MI_SEGMENT_SIZE /* for huge alignment it can be equal */);
-  size_t idx = (size_t)diff >> segment->page_shift;
-  mi_assert_internal(idx < segment->capacity);
-  mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || idx == 0);
-  return idx;
+static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
+  mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
+  mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
+  mi_assert_internal(start->slice_offset == 0);
+  mi_assert_internal(start + start->slice_count > slice);
+  return start;
 }

-// Get the page containing the pointer
+// Get the page containing the pointer (performance critical as it is called in mi_free)
 static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
-  size_t idx = _mi_segment_page_idx_of(segment, p);
-  return &((mi_segment_t*)segment)->pages[idx];
+  mi_assert_internal(p > (void*)segment);
+  ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
+  mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
+  size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
+  mi_assert_internal(idx <= segment->slice_entries);
+  mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
+  mi_slice_t* slice = mi_slice_first(slice0);  // adjust to the block that holds the page data
+  mi_assert_internal(slice->slice_offset == 0);
+  mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
+  return mi_slice_to_page(slice);
 }

 // Quick page start for initialized pages
```
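The rewritten `_mi_segment_page_of` replaces the old per-segment `page_shift` division with a slice-index shift plus the `slice_offset` back-pointer stored in every slice. A minimal standalone model of that back-pointer lookup; the struct layout and sizes are illustrative, not mimalloc's actual types:

```c
#include <stdint.h>
#include <stdio.h>

// Minimal model: each slice records a byte offset back to the first
// slice of its span, so any interior slice maps to the span head in O(1).
typedef struct slice_s {
  uint32_t slice_count;   // >0 only on the first slice of a span
  uint32_t slice_offset;  // bytes back to the first slice
} slice_t;

static slice_t* slice_first(slice_t* s) {
  return (slice_t*)((uint8_t*)s - s->slice_offset);
}

int main(void) {
  slice_t slices[8] = {0};
  // a span of 3 slices starting at index 2
  slices[2] = (slice_t){ .slice_count = 3, .slice_offset = 0 };
  slices[3] = (slice_t){ .slice_count = 0, .slice_offset = 1 * (uint32_t)sizeof(slice_t) };
  slices[4] = (slice_t){ .slice_count = 0, .slice_offset = 2 * (uint32_t)sizeof(slice_t) };
  printf("head = %d\n", (int)(slice_first(&slices[4]) - slices));  // head = 2
  return 0;
}
```

Because the back-offset is precomputed when the span is created, an interior pointer reaches its page header with one shift, one load, and one subtraction.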
```diff
@@ -509,8 +544,8 @@ static inline size_t mi_page_block_size(const mi_page_t* page) {
 }

 static inline bool mi_page_is_huge(const mi_page_t* page) {
-  mi_assert_internal((page->is_huge && _mi_page_segment(page)->page_kind == MI_PAGE_HUGE) ||
-                     (!page->is_huge && _mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
+  mi_assert_internal((page->is_huge && _mi_page_segment(page)->kind == MI_SEGMENT_HUGE) ||
+                     (!page->is_huge && _mi_page_segment(page)->kind != MI_SEGMENT_HUGE));
   return page->is_huge;
 }
```
```diff
@@ -522,7 +557,11 @@ static inline size_t mi_page_usable_block_size(const mi_page_t* page) {

 // size of a segment
 static inline size_t mi_segment_size(mi_segment_t* segment) {
-  return segment->segment_size;
+  return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
+}
+
+static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
+  return (uint8_t*)segment + mi_segment_size(segment);
 }

 // Thread free access
```
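`mi_segment_size` is now derived from the slice count instead of a stored byte size. Under the 64-bit defaults this is simple arithmetic; the slice count below is a hypothetical regular segment, used only to check the math:

```c
#include <stddef.h>
#include <stdio.h>

int main(void) {
  const size_t slice_size = 64 * 1024;   // MI_SEGMENT_SLICE_SIZE on 64-bit
  const size_t segment_slices = 512;     // hypothetical regular segment
  printf("%zu MiB\n", (segment_slices * slice_size) / (1024 * 1024));  // 32 MiB
  return 0;
}
```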
```diff
@@ -677,12 +716,13 @@ static inline bool mi_is_in_same_segment(const void* p, const void* q) {
 }

 static inline bool mi_is_in_same_page(const void* p, const void* q) {
-  mi_segment_t* segmentp = _mi_ptr_segment(p);
-  mi_segment_t* segmentq = _mi_ptr_segment(q);
-  if (segmentp != segmentq) return false;
-  size_t idxp = _mi_segment_page_idx_of(segmentp, p);
-  size_t idxq = _mi_segment_page_idx_of(segmentq, q);
-  return (idxp == idxq);
+  mi_segment_t* segment = _mi_ptr_segment(p);
+  if (_mi_ptr_segment(q) != segment) return false;
+  // assume q may be invalid // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
+  mi_page_t* page = _mi_segment_page_of(segment, p);
+  size_t psize;
+  uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+  return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
 }

 static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
```
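The new `mi_is_in_same_page` deliberately never computes the page of `q`, since `q` may be an invalid pointer; it only range-checks `q` against the start and size of `p`'s page. The core is a plain half-open interval test; a standalone sketch:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Half-open interval test, as used above: q belongs to the page iff
// start <= q < start + psize.
static bool in_page(const void* q, uint8_t* start, size_t psize) {
  return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
}

int main(void) {
  uint8_t page[64];
  printf("%d\n", in_page(&page[10], page, sizeof(page)));  // 1
  printf("%d\n", in_page(page + 64, page, sizeof(page)));  // 0 (one past the end)
  return 0;
}
```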
```diff
@@ -764,6 +804,50 @@ static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, c
 }


+// -------------------------------------------------------------------
+// commit mask
+// -------------------------------------------------------------------
+
+static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
+  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+    cm->mask[i] = 0;
+  }
+}
+
+static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
+  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+    cm->mask[i] = ~((size_t)0);
+  }
+}
+
+static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
+  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+    if (cm->mask[i] != 0) return false;
+  }
+  return true;
+}
+
+static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
+  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+    if (cm->mask[i] != ~((size_t)0)) return false;
+  }
+  return true;
+}
+
+// defined in `segment.c`:
+size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
+size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
+
+#define mi_commit_mask_foreach(cm,idx,count) \
+  idx = 0; \
+  while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
+
+#define mi_commit_mask_foreach_end() \
+  idx += count; \
+  }
+
+
 /* -----------------------------------------------------------
   memory id's
 ----------------------------------------------------------- */
```
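`mi_commit_mask_foreach` walks the mask as (index, run-length) pairs produced by `_mi_commit_mask_next_run`, so commit and purge operations can act on maximal contiguous slice ranges rather than single bits. A standalone single-word sketch of that run iteration (the real mask spans `MI_COMMIT_MASK_FIELD_COUNT` words, and `next_run` here is a simplified stand-in):

```c
#include <stddef.h>
#include <stdio.h>

// Find the next run of set bits at or after *idx; return its length
// (0 when no bits remain) and leave *idx at the run start.
static size_t next_run(size_t mask, size_t* idx) {
  const size_t nbits = 8 * sizeof(size_t);
  size_t i = *idx;
  while (i < nbits && ((mask >> i) & 1) == 0) i++;   // skip zeros
  *idx = i;
  size_t count = 0;
  while (i + count < nbits && ((mask >> (i + count)) & 1) != 0) count++;
  return count;
}

int main(void) {
  size_t mask = 0x0F06;  // set bits: 1-2 and 8-11
  size_t idx = 0, count;
  while ((count = next_run(mask, &idx)) > 0) {
    printf("run at %zu, length %zu\n", idx, count);  // (1,2) then (8,4)
    idx += count;
  }
  return 0;
}
```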
```diff
@@ -947,6 +1031,21 @@ static inline size_t mi_bsr(size_t x) {
   return (x==0 ? MI_SIZE_BITS : MI_SIZE_BITS - 1 - mi_clz(x));
 }

+size_t _mi_popcount_generic(size_t x);
+
+static inline size_t mi_popcount(size_t x) {
+  if (x<=1) return x;
+  if (x==SIZE_MAX) return MI_SIZE_BITS;
+  #if defined(__GNUC__)
+  #if (SIZE_MAX == ULONG_MAX)
+  return __builtin_popcountl(x);
+  #else
+  return __builtin_popcountll(x);
+  #endif
+  #else
+  return _mi_popcount_generic(x);
+  #endif
+}

 // ---------------------------------------------------------------------------------
 // Provide our own `_mi_memcpy` for potential performance optimizations.
```
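`mi_popcount` dispatches to the GCC/Clang builtin when available and otherwise calls `_mi_popcount_generic`, which is defined elsewhere. One conventional way to write such a generic fallback is the SWAR reduction below; this is a sketch, not necessarily mimalloc's actual implementation:

```c
#include <stdint.h>
#include <stdio.h>

// Portable 64-bit popcount via SWAR: sum bits pairwise, then in nibbles,
// then gather the per-byte counts with one multiply.
static size_t popcount_generic(uint64_t x) {
  x = x - ((x >> 1) & 0x5555555555555555ULL);
  x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
  x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
  return (size_t)((x * 0x0101010101010101ULL) >> 56);
}

int main(void) {
  printf("%zu\n", popcount_generic(0xFFULL));                // 8
  printf("%zu\n", popcount_generic(0x8000000000000001ULL));  // 2
  return 0;
}
```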
```diff
@@ -13,8 +13,9 @@ terms of the MIT license. A copy of the license can be found in the file
 // mi_heap_t      : all data for a thread-local heap, contains
 //                  lists of all managed heap pages.
 // mi_segment_t   : a larger chunk of memory (32GiB) from where pages
-//                  are allocated.
-// mi_page_t      : a mimalloc page (usually 64KiB or 512KiB) from
+//                  are allocated. A segment is divided in slices (64KiB) from
+//                  which pages are allocated.
+// mi_page_t      : a "mimalloc" page (usually 64KiB or 512KiB) from
 //                  where objects are allocated.
 // Note: we write "OS page" for OS memory pages while
 // using plain "page" for mimalloc pages (`mi_page_t`).
```
```diff
@@ -66,10 +67,10 @@ terms of the MIT license. A copy of the license can be found in the file
 // #define MI_DEBUG 2  // + internal assertion checks
 // #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
 #if !defined(MI_DEBUG)
-#if !defined(NDEBUG) || defined(_DEBUG)
-#define MI_DEBUG 2
-#else
+#if defined(MI_BUILD_RELEASE) || defined(NDEBUG)
 #define MI_DEBUG 0
+#else
+#define MI_DEBUG 2
 #endif
 #endif
```
```diff
@@ -167,38 +168,40 @@ typedef int32_t mi_ssize_t;
 // ------------------------------------------------------

 // Main tuning parameters for segment and page sizes
-// Sizes for 64-bit, divide by two for 32-bit
+// Sizes for 64-bit (usually divide by two for 32-bit)
+#ifndef MI_SEGMENT_SLICE_SHIFT
+#define MI_SEGMENT_SLICE_SHIFT  (13 + MI_INTPTR_SHIFT)        // 64KiB  (32KiB on 32-bit)
+#endif
+
+#ifndef MI_SEGMENT_SHIFT
+#if MI_INTPTR_SIZE > 4
+#define MI_SEGMENT_SHIFT        ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB
+#else
+#define MI_SEGMENT_SHIFT        ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit
+#endif
+#endif
+
 #ifndef MI_SMALL_PAGE_SHIFT
-#define MI_SMALL_PAGE_SHIFT     (13 + MI_INTPTR_SHIFT)        // 64KiB
+#define MI_SMALL_PAGE_SHIFT     (MI_SEGMENT_SLICE_SHIFT)      // 64KiB
 #endif
 #ifndef MI_MEDIUM_PAGE_SHIFT
 #define MI_MEDIUM_PAGE_SHIFT    ( 3 + MI_SMALL_PAGE_SHIFT)    // 512KiB
 #endif
-#ifndef MI_LARGE_PAGE_SHIFT
-#define MI_LARGE_PAGE_SHIFT     ( 3 + MI_MEDIUM_PAGE_SHIFT)   // 4MiB
-#endif
-#ifndef MI_SEGMENT_SHIFT
-#define MI_SEGMENT_SHIFT        ( MI_LARGE_PAGE_SHIFT)        // 4MiB -- must be equal to `MI_LARGE_PAGE_SHIFT`
-#endif

 // Derived constants
 #define MI_SEGMENT_SIZE         (MI_ZU(1)<<MI_SEGMENT_SHIFT)
-#define MI_SEGMENT_ALIGN        (MI_SEGMENT_SIZE)
+#define MI_SEGMENT_ALIGN        MI_SEGMENT_SIZE
 #define MI_SEGMENT_MASK         ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
+#define MI_SEGMENT_SLICE_SIZE   (MI_ZU(1)<< MI_SEGMENT_SLICE_SHIFT)
+#define MI_SLICES_PER_SEGMENT   (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024

 #define MI_SMALL_PAGE_SIZE      (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
 #define MI_MEDIUM_PAGE_SIZE     (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
-#define MI_LARGE_PAGE_SIZE      (MI_ZU(1)<<MI_LARGE_PAGE_SHIFT)
-
-#define MI_SMALL_PAGES_PER_SEGMENT  (MI_SEGMENT_SIZE/MI_SMALL_PAGE_SIZE)
-#define MI_MEDIUM_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_MEDIUM_PAGE_SIZE)
-#define MI_LARGE_PAGES_PER_SEGMENT  (MI_SEGMENT_SIZE/MI_LARGE_PAGE_SIZE)

 // The max object size are checked to not waste more than 12.5% internally over the page sizes.
 // (Except for large pages since huge objects are allocated in 4MiB chunks)
-#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/8)   // 8 KiB
-#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/8)  // 64 KiB
-#define MI_LARGE_OBJ_SIZE_MAX   (MI_LARGE_PAGE_SIZE/4)   // 1 MiB
+#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/8)   // 8 KiB on 64-bit
+#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/8)  // 64 KiB on 64-bit
+#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+#define MI_LARGE_OBJ_SIZE_MAX   (MI_SEGMENT_SIZE/2)      // 16 MiB on 64-bit
 #define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)

 // Maximum number of size classes. (spaced exponentially in 12.5% increments)
```
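Worked through on 64-bit (`MI_INTPTR_SHIFT == 3`), the new tuning gives 64 KiB slices, 32 MiB segments, and a large-object limit of half a segment. A compile-time check of that arithmetic; the constants are re-derived locally rather than included from mimalloc's headers:

```c
#include <assert.h>

enum {
  INTPTR_SHIFT        = 3,                         // 64-bit
  SEGMENT_SLICE_SHIFT = 13 + INTPTR_SHIFT,         // 16 -> 64 KiB slices
  SEGMENT_SHIFT       = 9 + SEGMENT_SLICE_SHIFT    // 25 -> 32 MiB segments
};

static_assert((1L << SEGMENT_SLICE_SHIFT) == 64 * 1024,       "slice = 64 KiB");
static_assert((1L << SEGMENT_SHIFT) == 32L * 1024 * 1024,     "segment = 32 MiB");
static_assert((1L << SEGMENT_SHIFT) / 2 == 16L * 1024 * 1024, "large obj max = 16 MiB");

int main(void) { return 0; }
```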
```diff
@@ -206,18 +209,27 @@ typedef int32_t mi_ssize_t;
 #error "mimalloc internal: expecting 73 bins"
 #endif

-#if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
+#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
 #error "mimalloc internal: define more bins"
 #endif

 // Maximum block size for which blocks are guaranteed to be block size aligned. (see `segment.c:_mi_segment_page_start`)
-#define MI_MAX_ALIGN_GUARANTEE     (MI_MEDIUM_OBJ_SIZE_MAX)
+#define MI_MAX_ALIGN_GUARANTEE        (MI_MEDIUM_OBJ_SIZE_MAX)

 // Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
-#define MI_BLOCK_ALIGNMENT_MAX     (MI_SEGMENT_SIZE >> 1)
+#define MI_BLOCK_ALIGNMENT_MAX        (MI_SEGMENT_SIZE >> 1)

-// We never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
-#define MI_MAX_ALLOC_SIZE          PTRDIFF_MAX
+// Maximum slice count (255) for which we can find the page for interior pointers
+#define MI_MAX_SLICE_OFFSET_COUNT     ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
+
+// we never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+// on 64-bit+ systems we also limit the maximum allocation size such that the slice count fits in 32-bits. (issue #877)
+#if (PTRDIFF_MAX > INT32_MAX) && (PTRDIFF_MAX >= (MI_SEGMENT_SLIZE_SIZE * UINT32_MAX))
+#define MI_MAX_ALLOC_SIZE   (MI_SEGMENT_SLICE_SIZE * (UINT32_MAX-1))
+#else
+#define MI_MAX_ALLOC_SIZE   PTRDIFF_MAX
+#endif


 // ------------------------------------------------------
 // Mimalloc pages contain allocated blocks
```
```diff
@@ -296,8 +308,8 @@ typedef uintptr_t mi_thread_free_t;
 // Notes:
 // - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
 // - Using `uint16_t` does not seem to slow things down
-// - The size is 10 words on 64-bit which helps the page index calculations
-//   (and 12 words on 32-bit, and encoded free lists add 2 words)
+// - The size is 12 words on 64-bit which helps the page index calculations
+//   (and 14 words on 32-bit, and encoded free lists add 2 words)
 // - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
 //   concurrent frees where only the first concurrent free adds to the owning
 //   heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
```
```diff
@@ -307,12 +319,12 @@ typedef uintptr_t mi_thread_free_t;
 // will be freed correctly even if only other threads free blocks.
 typedef struct mi_page_s {
   // "owned" by the segment
-  uint8_t               segment_idx;       // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
-  uint8_t               segment_in_use:1;  // `true` if the segment allocated this page
+  uint32_t              slice_count;       // slices in this page (0 if not a page)
+  uint32_t              slice_offset;      // distance from the actual page data slice (0 if a page)
   uint8_t               is_committed:1;    // `true` if the page virtual memory is committed
   uint8_t               is_zero_init:1;    // `true` if the page was initially zero initialized
-  uint8_t               is_huge:1;         // `true` if the page is in a huge segment
-
+  uint8_t               is_huge:1;         // `true` if the page is in a huge segment (`segment->kind == MI_SEGMENT_HUGE`)
+                                           // padding
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
   uint16_t              reserved;          // number of blocks reserved in memory
```
```diff
@@ -336,12 +348,11 @@ typedef struct mi_page_s {
   _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
   _Atomic(uintptr_t)        xheap;

-  struct mi_page_s*     next;              // next page owned by the heap with the same `block_size`
-  struct mi_page_s*     prev;              // previous page owned by the heap with the same `block_size`
+  struct mi_page_s*     next;              // next page owned by this thread with the same `block_size`
+  struct mi_page_s*     prev;              // previous page owned by this thread with the same `block_size`

-#if MI_INTPTR_SIZE==4 // pad to 12 words on 32-bit
+  // 64-bit 11 words, 32-bit 13 words, (+2 for secure)
   void* padding[1];
-#endif
 } mi_page_t;
```
```diff
@@ -354,10 +365,44 @@ typedef enum mi_page_kind_e {
   MI_PAGE_SMALL,    // small blocks go into 64KiB pages inside a segment
   MI_PAGE_MEDIUM,   // medium blocks go into 512KiB pages inside a segment
   MI_PAGE_LARGE,    // larger blocks go into a single page spanning a whole segment
-  MI_PAGE_HUGE      // a huge page is a single page in a segment of variable size (but still 2MiB aligned)
-                    // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an alignment `> MI_BLOCK_ALIGNMENT_MAX`.
+  MI_PAGE_HUGE      // a huge page is a single page in a segment of variable size
+                    // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an aligment `> MI_BLOCK_ALIGNMENT_MAX`.
 } mi_page_kind_t;

+typedef enum mi_segment_kind_e {
+  MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
+  MI_SEGMENT_HUGE,   // segment with just one huge page inside.
+} mi_segment_kind_t;
+
+// ------------------------------------------------------
+// A segment holds a commit mask where a bit is set if
+// the corresponding MI_COMMIT_SIZE area is committed.
+// The MI_COMMIT_SIZE must be a multiple of the slice
+// size. If it is equal we have the most fine grained
+// decommit (but setting it higher can be more efficient).
+// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
+// be committed in one go which can be set higher than
+// MI_COMMIT_SIZE for efficiency (while the decommit mask
+// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
+// ------------------------------------------------------
+
+#define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
+#define MI_COMMIT_SIZE              (MI_SEGMENT_SLICE_SIZE)            // 64KiB
+#define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+#define MI_COMMIT_MASK_FIELD_BITS    MI_SIZE_BITS
+#define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
+
+#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
+#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
+#endif
+
+typedef struct mi_commit_mask_s {
+  size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
+} mi_commit_mask_t;
+
+typedef mi_page_t  mi_slice_t;
+typedef int64_t    mi_msecs_t;
+

 // ---------------------------------------------------------------
 // a memory id tracks the provenance of arena/OS allocated memory
```
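With the 64-bit defaults above (32 MiB segment, one commit bit per 64 KiB slice), the commit mask is 512 bits stored in 8 `size_t` fields, which is exactly the divisibility the `#error` guards. A quick check, with the values re-derived locally as an assumption:

```c
#include <assert.h>

enum {
  SEGMENT_SIZE_KB = 32 * 1024,  // 32 MiB segment
  COMMIT_SIZE_KB  = 64,         // one 64 KiB slice per commit bit
  MASK_BITS   = SEGMENT_SIZE_KB / COMMIT_SIZE_KB,  // 512
  FIELD_BITS  = 64,                                // MI_SIZE_BITS on 64-bit
  FIELD_COUNT = MASK_BITS / FIELD_BITS             // 8
};

static_assert(MASK_BITS == FIELD_COUNT * FIELD_BITS, "exactly divisible");
static_assert(FIELD_COUNT == 8, "8 size_t fields per mask");

int main(void) { return 0; }
```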
```diff
@@ -401,43 +446,57 @@ typedef struct mi_memid_s {
 } mi_memid_t;


-// ---------------------------------------------------------------
-// Segments contain mimalloc pages
-// ---------------------------------------------------------------
+// -----------------------------------------------------------------------------------------
+// Segments are large allocated memory blocks (32mb on 64 bit) from arenas or the OS.
+//
+// Inside segments we allocated fixed size mimalloc pages (`mi_page_t`) that contain blocks.
+// The start of a segment is this structure with a fixed number of slice entries (`slices`)
+// usually followed by a guard OS page and the actual allocation area with pages.
+// While a page is not allocated, we view it's data as a `mi_slice_t` (instead of a `mi_page_t`).
+// Of any free area, the first slice has the info and `slice_offset == 0`; for any subsequent
+// slices part of the area, the `slice_offset` is the byte offset back to the first slice
+// (so we can quickly find the page info on a free, `internal.h:_mi_segment_page_of`).
+// For slices, the `block_size` field is repurposed to signify if a slice is used (`1`) or not (`0`).
+// Small and medium pages use a fixed amount of slices to reduce slice fragmentation, while
+// large and huge pages span a variable amount of slices.

 typedef struct mi_subproc_s mi_subproc_t;

-// Segments are large allocated memory blocks (2MiB on 64 bit) from the OS.
-// Inside segments we allocated fixed size _pages_ that contain blocks.
 typedef struct mi_segment_s {
   // constant fields
-  mi_memid_t           memid;              // memory id to track provenance
-  bool                 allow_decommit;
-  bool                 allow_purge;
-  size_t               segment_size;       // for huge pages this may be different from `MI_SEGMENT_SIZE`
-  mi_subproc_t*        subproc;            // segment belongs to sub process
+  mi_memid_t           memid;              // memory id for arena/OS allocation
+  bool                 allow_decommit;     // can we decommmit the memory
+  bool                 allow_purge;        // can we purge the memory (reset or decommit)
+  size_t               segment_size;
+  mi_subproc_t*        subproc;            // segment belongs to sub process

   // segment fields
-  struct mi_segment_s* next;               // must be the first (non-constant) segment field -- see `segment.c:segment_init`
-  struct mi_segment_s* prev;
-  bool                 was_reclaimed;      // true if it was reclaimed (used to limit reclaim-on-free reclamation)
-  bool                 dont_free;          // can be temporarily true to ensure the segment is not freed
+  mi_msecs_t           purge_expire;       // purge slices in the `purge_mask` after this time
+  mi_commit_mask_t     purge_mask;         // slices that can be purged
+  mi_commit_mask_t     commit_mask;        // slices that are currently committed

-  size_t               abandoned;          // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
-  size_t               abandoned_visits;   // count how often this segment is visited for reclaiming (to force reclaim if it is too long)
+  // from here is zero initialized
+  struct mi_segment_s* next;               // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
+  bool                 was_reclaimed;      // true if it was reclaimed (used to limit on-free reclamation)
+  bool                 dont_free;          // can be temporarily true to ensure the segment is not freed

-  size_t               used;               // count of pages in use (`used <= capacity`)
-  size_t               capacity;           // count of available pages (`#free + used`)
-  size_t               segment_info_size;  // space we are using from the first page for segment meta-data and possible guard pages.
-  uintptr_t            cookie;             // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
+  size_t               abandoned;          // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
+  size_t               abandoned_visits;   // count how often this segment is visited during abondoned reclamation (to force reclaim if it takes too long)
+  size_t               used;               // count of pages in use
+  uintptr_t            cookie;             // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

   struct mi_segment_s* abandoned_os_next;  // only used for abandoned segments outside arena's, and only if `mi_option_visit_abandoned` is enabled
   struct mi_segment_s* abandoned_os_prev;

+  size_t               segment_slices;      // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
+  size_t               segment_info_slices; // initial count of slices that we are using for segment info and possible guard pages.
+
   // layout like this to optimize access in `mi_free`
-  _Atomic(mi_threadid_t) thread_id;        // unique id of the thread owning this segment
-  size_t               page_shift;         // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
-  mi_page_kind_t       page_kind;          // kind of pages: small, medium, large, or huge
-  mi_page_t            pages[1];           // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
+  mi_segment_kind_t    kind;
+  size_t               slice_entries;      // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
+  _Atomic(mi_threadid_t) thread_id;        // unique id of the thread owning this segment
+
+  mi_slice_t           slices[MI_SLICES_PER_SEGMENT+1]; // one extra final entry for huge blocks with large alignment
 } mi_segment_t;
```
@@ -541,20 +600,19 @@ struct mi_subproc_s {
// Thread Local data
// ------------------------------------------------------

// Milliseconds as in `int64_t` to avoid overflows
typedef int64_t mi_msecs_t;

// A "span" is an available range of slices. The span queues keep
// track of slice spans of at most the given `slice_count` (but more than the previous size class).
typedef struct mi_span_queue_s {
  mi_slice_t* first;
  mi_slice_t* last;
  size_t      slice_count;
} mi_span_queue_t;
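Each queue holds spans up to its `slice_count` size class, linked through the slices themselves. A sketch of the push operation on such a queue (the node layout is simplified; the real `mi_slice_t` reuses page fields for its links):

```c
#include <stddef.h>

typedef struct span_s {
  struct span_s* next;
  struct span_s* prev;
  size_t slice_count;
} span_t;

typedef struct span_queue_s {
  span_t* first;
  span_t* last;
  size_t  slice_count;  /* largest span this queue holds */
} span_queue_t;

/* Push a freed span at the front of its size-class queue. */
static void span_queue_push(span_queue_t* sq, span_t* span) {
  span->prev = NULL;
  span->next = sq->first;
  if (sq->first != NULL) { sq->first->prev = span; }
  else                   { sq->last = span; }
  sq->first = span;
}
```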

// Queue of segments
typedef struct mi_segment_queue_s {
  mi_segment_t* first;
  mi_segment_t* last;
} mi_segment_queue_t;

#define MI_SEGMENT_BIN_MAX (35)     // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)

// Segments thread local data
typedef struct mi_segments_tld_s {
  mi_segment_queue_t small_free;    // queue of segments with free small pages
  mi_segment_queue_t medium_free;   // queue of segments with free medium pages
  mi_page_queue_t    pages_purge;   // queue of freed pages that are delay purged
  mi_span_queue_t    spans[MI_SEGMENT_BIN_MAX+1];  // free slice spans inside segments
  size_t             count;         // current number of segments
  size_t             peak_count;    // peak number of segments
  size_t             current_size;  // current size of all segments
@@ -625,22 +683,25 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line
// add to stat keeping track of the peak
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount);
// counters can just be increased
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);

#if (MI_STAT)
#define mi_stat_increase(stat,amount)          _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount)          _mi_stat_decrease( &(stat), amount)
#define mi_stat_adjust_decrease(stat,amount)   _mi_stat_adjust_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount)  _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount)          ((void)0)
#define mi_stat_decrease(stat,amount)          ((void)0)
#define mi_stat_adjust_decrease(stat,amount)   ((void)0)
#define mi_stat_counter_increase(stat,amount)  ((void)0)
#endif

#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)          mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)          mi_stat_decrease( (heap)->tld->stats.stat, amount)

#define mi_heap_stat_adjust_decrease(heap,stat,amount)   mi_stat_adjust_decrease( (heap)->tld->stats.stat, amount)

#endif
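With `MI_STAT` off, every macro expands to `((void)0)` and the accounting disappears from release builds entirely. The same pattern in isolation (the names here are illustrative, not mimalloc's):

```c
typedef struct stat_count_s { long total; long peak; long current; } stat_count_t;

static void stat_increase(stat_count_t* s, long amount) {
  s->current += amount;
  s->total   += amount;
  if (s->current > s->peak) { s->peak = s->current; }  /* track the peak */
}

#if MY_STAT
  #define my_stat_increase(s,n)  stat_increase(&(s), n)
#else
  #define my_stat_increase(s,n)  ((void)0)  /* call sites vanish entirely */
#endif
```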

readme.md (11 changes)
@@ -12,9 +12,9 @@ is a general purpose allocator with excellent [performance](#performance) charac
Initially developed by Daan Leijen for the runtime systems of the
[Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages.

Latest release : `v3.0.2` (beta) (2025-03-06).
Latest v2 release: `v2.2.2` (2025-03-06).
Latest v1 release: `v1.9.2` (2024-03-06).
Latest release : `v3.0.3` (beta) (2025-03-28).
Latest v2 release: `v2.2.3` (2025-03-28).
Latest v1 release: `v1.9.3` (2024-03-28).

mimalloc is a drop-in replacement for `malloc` and can be used in other programs
without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:

@@ -73,7 +73,7 @@ Enjoy!
### Branches

* `master`: latest stable release (still based on `dev2`).
* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's.
* `dev`: development branch for mimalloc v1. **Use this branch for submitting PR's**.
* `dev2`: development branch for mimalloc v2. This branch is downstream of `dev`
  (and is essentially equal to `dev` except for `src/segment.c`). Uses larger sliced segments to manage
  mimalloc pages that can reduce fragmentation.

@@ -84,6 +84,9 @@ Enjoy!
### Releases

* 2025-03-28, `v1.9.3`, `v2.2.3`, `v3.0.3` (beta): Various small bug and build fixes, including:
  fix arm32 pre v7 builds, fix mingw build, get runtime statistics, improve statistic commit counts,
  fix execution on non BMI1 x64 systems.
* 2025-03-06, `v1.9.2`, `v2.2.2`, `v3.0.2-beta`: Various small bug and build fixes.
  Add `mi_options_print`, `mi_arenas_print`, and the experimental `mi_stat_get` and `mi_stat_get_json`.
  Add `mi_thread_set_in_threadpool` and `mi_heap_set_numa_affinity` (v3 only). Add vcpkg portfile.
@@ -115,7 +115,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
  // now zero the block if needed
  if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
    // for the tracker, on huge aligned allocations only from the start of the large block is defined
    // for the tracker, on huge aligned allocations only the memory from the start of the large block is defined
    mi_track_mem_undefined(aligned_p, size);
    if (zero) {
      _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));

@@ -191,9 +191,6 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
  const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
  if mi_likely(is_aligned)
  {
    #if MI_STAT>1
    mi_heap_stat_increase(heap, malloc_requested, size);
    #endif
    void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize));  // call specific page malloc for better codegen
    mi_assert_internal(p != NULL);
    mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
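The fast path depends on `alignment` being a power of two so that `addr & (alignment-1)` isolates the misaligned low bits; adding `offset` first checks alignment at the offset position. As a standalone sketch:

```c
#include <stdbool.h>
#include <stdint.h>

/* True iff (p + offset) is aligned; `alignment` must be a power of two. */
static bool is_aligned_at(const void* p, uintptr_t offset, uintptr_t alignment) {
  const uintptr_t align_mask = alignment - 1;
  return (((uintptr_t)p + offset) & align_mask) == 0;
}
```

For example, with `alignment == 16`, a pointer value of `0x1008` passes the test with `offset == 8` but not with `offset == 4`.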
@@ -220,6 +217,11 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap,
  return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
}

// ensure a definition is emitted
#if defined(__cplusplus)
void* _mi_extern_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
#endif

// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------
src/alloc.c (25 changes)
@@ -30,6 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
  mi_assert_internal(size >= MI_PADDING_SIZE);
  mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);

  // check the free list

@@ -82,12 +83,13 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
  #if (MI_STAT>0)
  const size_t bsize = mi_page_usable_block_size(page);
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_increase(heap, malloc_normal, bsize);
    mi_heap_stat_counter_increase(heap, malloc_normal_count, 1);
    #if (MI_STAT>1)
    const size_t bin = _mi_bin(bsize);
    mi_heap_stat_increase(heap, malloc_bins[bin], 1);
    mi_heap_stat_increase(heap, malloc_requested, size - MI_PADDING_SIZE);
    #endif
  }
  #endif

@@ -146,12 +148,6 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
  void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
  mi_track_malloc(p,size,zero);

  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
  }
  #endif
  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));

@@ -188,12 +184,6 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
  void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment);  // note: size can overflow but it is detected in malloc_generic
  mi_track_malloc(p,size,zero);

  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
  }
  #endif
  #if MI_DEBUG>3
  if (p != NULL && zero) {
    mi_assert_expensive(mi_mem_is_zero(p, size));

@@ -640,7 +630,7 @@ static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) {
    // give up to place it right in front of the guard page if the offset is too large for unalignment
    offset = MI_BLOCK_ALIGNMENT_MAX;
  }
  void* p = (uint8_t*)block + offset;
  mi_track_align(block, p, offset, obj_size);
  mi_track_mem_defined(block, sizeof(mi_block_t));
  return p;

@@ -662,11 +652,12 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo
  void* const p = mi_block_ptr_set_guarded(block, obj_size);

  // stats
  mi_track_malloc(p, size, zero);
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
    #if MI_STAT>1
    mi_heap_stat_increase(heap, malloc_requested, mi_usable_size(p));
    mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size);
    mi_heap_stat_increase(heap, malloc_requested, size);
    #endif
    _mi_stat_counter_increase(&heap->tld->stats.malloc_guarded_count, 1);
  }

@@ -694,7 +685,7 @@ void* _mi_externs[] = {
  (void*)&mi_zalloc_small,
  (void*)&mi_heap_malloc,
  (void*)&mi_heap_zalloc,
  (void*)&mi_heap_malloc_small
  (void*)&mi_heap_malloc_small,
  // (void*)&mi_heap_alloc_new,
  // (void*)&mi_heap_alloc_new_n
};
src/arena.c (50 changes)
@@ -99,6 +99,10 @@ bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_i
  }
}

bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
  return (memid.memkind == MI_MEM_OS);
}

size_t mi_arena_get_count(void) {
  return mi_atomic_load_relaxed(&mi_arena_count);
}

@@ -255,7 +259,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
  // set the dirty bits (todo: no need for an atomic op here?)
  if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL, NULL);
  }

  // set commit state

@@ -267,10 +271,14 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
    // commit requested, but the range may not be committed as a whole: ensure it is committed now
    memid->initially_committed = true;
    bool any_uncommitted;
    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
    size_t already_committed = 0;
    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed);
    if (any_uncommitted) {
      mi_assert_internal(already_committed < needed_bcount);
      const size_t commit_size = mi_arena_block_size(needed_bcount);
      const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed);
      bool commit_zero = false;
      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero)) {
      if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) {
        memid->initially_committed = false;
      }
      else {

@@ -280,7 +288,14 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
  }
  else {
    // no need to commit, but check if already fully committed
    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
    size_t already_committed = 0;
    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &already_committed);
    if (!memid->initially_committed && already_committed > 0) {
      // partially committed: as it will be committed at some time, adjust the stats and pretend the range is fully uncommitted.
      mi_assert_internal(already_committed < needed_bcount);
      _mi_stat_decrease(&_mi_stats_main.committed, mi_arena_block_size(already_committed));
      _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
    }
  }

  return p;
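The accounting point in the hunks above: when part of the range was committed earlier, the commit statistic should grow only by the newly committed bytes, not by the whole range. Reduced to the arithmetic (an illustrative helper, not a mimalloc API):

```c
#include <stddef.h>

/* Newly committed bytes when `already` of `needed` equally sized blocks
   were committed before the call. */
static size_t stat_commit_bytes(size_t block_size, size_t needed, size_t already) {
  return block_size * (needed - already);
}
```

With 8 needed blocks of 64 KiB of which 3 were already committed, the statistic grows by 5 x 64 KiB rather than the full 512 KiB.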

@@ -464,17 +479,19 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks)
  const size_t size = mi_arena_block_size(blocks);
  void* const p = mi_arena_block_start(arena, bitmap_idx);
  bool needs_recommit;
  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
  size_t already_committed = 0;
  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx, &already_committed)) {
    // all blocks are committed, we can purge freely
    mi_assert_internal(already_committed == blocks);
    needs_recommit = _mi_os_purge(p, size);
  }
  else {
    // some blocks are not committed -- this can happen when a partially committed block is freed
    // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
    // and also undo the decommit stats (as it was already adjusted)
    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
    mi_assert_internal(already_committed < blocks);
    mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, 0);
    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
  }

  // clear the purged blocks

@@ -508,7 +525,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
  else {
    // already an expiration was set
  }
  _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
  _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL, NULL);
}
}

@@ -648,15 +665,16 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
  if (p==NULL) return;
  if (size==0) return;
  const bool all_committed = (committed_size == size);
  const size_t decommitted_size = (committed_size <= size ? size - committed_size : 0);

  // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
  mi_track_mem_undefined(p,size);

  if (mi_memkind_is_os(memid.memkind)) {
    // was a direct OS allocation, pass through
    if (!all_committed && committed_size > 0) {
      // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
      _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
    if (!all_committed && decommitted_size > 0) {
      // if partially committed, adjust the committed stats (as `_mi_os_free` will decrease commit by the full size)
      _mi_stat_increase(&_mi_stats_main.committed, decommitted_size);
    }
    _mi_os_free(p, size, memid);
  }

@@ -690,14 +708,14 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
    mi_assert_internal(arena->blocks_purge != NULL);

    if (!all_committed) {
      // mark the entire range as no longer committed (so we recommit the full range when re-using)
      // mark the entire range as no longer committed (so we will recommit the full range when re-using)
      _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
      mi_track_mem_noaccess(p,size);
      if (committed_size > 0) {
      //if (committed_size > 0) {
        // if partially committed, adjust the committed stats (as it will be recommitted when re-using)
        // in the delayed purge, we now need to not count a decommit if the range is not marked as committed.
        // in the delayed purge, we no longer decrease the commit if the range is not marked entirely as committed.
        _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
      }
      //}
      // note: if not all committed, it may be that the purge will reset/decommit the entire range
      // that contains already decommitted parts. Since purge consistently uses reset or decommit that
      // works (as we should never reset decommitted parts).
src/bitmap.c (58 changes)
@@ -34,17 +34,17 @@ static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
}

/* -----------------------------------------------------------
  Claim a bit sequence atomically
----------------------------------------------------------- */

// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
{
  mi_assert_internal(bitmap_idx != NULL);
  mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
  mi_assert_internal(count > 0);
  mi_bitmap_field_t* field = &bitmap[idx];
  size_t map = mi_atomic_load_relaxed(field);
  if (map==MI_BITMAP_FIELD_FULL) return false; // short cut

@@ -94,9 +94,9 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
  return false;
}

// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
  size_t idx = start_field_idx;
  for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {

@@ -108,6 +108,24 @@ bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fiel
  return false;
}

// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
                                         const size_t start_field_idx, const size_t count,
                                         mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
                                         mi_bitmap_index_t* bitmap_idx) {
  size_t idx = start_field_idx;
  for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
    if (idx >= bitmap_fields) idx = 0; // wrap
    if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
      if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) {
        return true;
      }
      // predicate returned false, unclaim and look further
      _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx);
    }
  }
  return false;
}
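A caller can thus reject a claimed range after the fact; the range is unclaimed again and the search continues. A sketch of a predicate matching the declared `mi_bitmap_pred_fun_t` shape (the restriction itself is a made-up example):

```c
#include <stdbool.h>
#include <stddef.h>
#include "bitmap.h"  /* mi_bitmap_index_t, mi_bitmap_index_field */

typedef struct pred_ctx_s { size_t limit_field; } pred_ctx_t;

/* Hypothetical predicate: accept a claim only within the first `limit_field` fields. */
static bool claim_in_low_fields(mi_bitmap_index_t bitmap_idx, void* pred_arg) {
  pred_ctx_t* ctx = (pred_ctx_t*)pred_arg;
  return (mi_bitmap_index_field(bitmap_idx) < ctx->limit_field);
}
```

Passing `claim_in_low_fields` and a `pred_ctx_t*` as `pred_fun`/`pred_arg` restricts where a claim may land; a `NULL` `pred_fun` degenerates to the plain search.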

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.

@@ -228,7 +246,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
  // intermediate fields
  while (++field < final_field) {
    newmap = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
    newmap = MI_BITMAP_FIELD_FULL;
    map = 0;
    if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
  }

@@ -250,7 +268,7 @@ rollback:
  // (we just failed to claim `field` so decrement first)
  while (--field > initial_field) {
    newmap = 0;
    map = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
    map = MI_BITMAP_FIELD_FULL;
    mi_assert_internal(mi_atomic_load_relaxed(field) == map);
    mi_atomic_store_release(field, newmap);
  }

@@ -351,7 +369,7 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) {
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set) {
  size_t idx = mi_bitmap_index_field(bitmap_idx);
  size_t pre_mask;
  size_t mid_mask;

@@ -359,28 +377,31 @@ bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t co
  size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
  bool all_zero = true;
  bool any_zero = false;
  size_t one_count = 0;
  _Atomic(size_t)*field = &bitmap[idx];
  size_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
  if ((prev & pre_mask) != 0) all_zero = false;
  if ((prev & pre_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & pre_mask); }
  if ((prev & pre_mask) != pre_mask) any_zero = true;
  while (mid_count-- > 0) {
    prev = mi_atomic_or_acq_rel(field++, mid_mask);
    if ((prev & mid_mask) != 0) all_zero = false;
    if ((prev & mid_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & mid_mask); }
    if ((prev & mid_mask) != mid_mask) any_zero = true;
  }
  if (post_mask!=0) {
    prev = mi_atomic_or_acq_rel(field, post_mask);
    if ((prev & post_mask) != 0) all_zero = false;
    if ((prev & post_mask) != 0) { all_zero = false; one_count += mi_popcount(prev & post_mask); }
    if ((prev & post_mask) != post_mask) any_zero = true;
  }
  if (pany_zero != NULL) { *pany_zero = any_zero; }
  if (already_set != NULL) { *already_set = one_count; }
  mi_assert_internal(all_zero ? one_count == 0 : one_count <= count);
  return all_zero;
}

// Returns `true` if all `count` bits were 1.
// `any_ones` is `true` if there was at least one bit set to one.
static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) {
static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones, size_t* already_set) {
  size_t idx = mi_bitmap_index_field(bitmap_idx);
  size_t pre_mask;
  size_t mid_mask;

@@ -388,30 +409,33 @@ static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_field
  size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
  bool all_ones = true;
  bool any_ones = false;
  size_t one_count = 0;
  mi_bitmap_field_t* field = &bitmap[idx];
  size_t prev = mi_atomic_load_relaxed(field++);
  if ((prev & pre_mask) != pre_mask) all_ones = false;
  if ((prev & pre_mask) != 0) any_ones = true;
  if ((prev & pre_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & pre_mask); }
  while (mid_count-- > 0) {
    prev = mi_atomic_load_relaxed(field++);
    if ((prev & mid_mask) != mid_mask) all_ones = false;
    if ((prev & mid_mask) != 0) any_ones = true;
    if ((prev & mid_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & mid_mask); }
  }
  if (post_mask!=0) {
    prev = mi_atomic_load_relaxed(field);
    if ((prev & post_mask) != post_mask) all_ones = false;
    if ((prev & post_mask) != 0) any_ones = true;
    if ((prev & post_mask) != 0) { any_ones = true; one_count += mi_popcount(prev & post_mask); }
  }
  if (pany_ones != NULL) { *pany_ones = any_ones; }
  if (already_set != NULL) { *already_set = one_count; }
  mi_assert_internal(all_ones ? one_count == count : one_count < count);
  return all_ones;
}

bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL);
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set) {
  return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL, already_set);
}

bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
  bool any_ones;
  mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones, NULL);
  return any_ones;
}
src/bitmap.h (13 changes)
@@ -44,6 +44,11 @@ static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx
  return mi_bitmap_index_create_ex(idx,bitidx);
}

// Create a bit index.
static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
  return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS);
}

// Get the field index from a bit index.
static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
  return (bitmap_idx / MI_BITMAP_FIELD_BITS);

@@ -71,6 +76,10 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);

// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);

// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

@@ -102,9 +111,9 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero, size_t* already_set);

bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, size_t* already_set);
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);

#endif
src/free.c (42 changes)
@@ -35,7 +35,9 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
  mi_check_padding(page, block);
  if (track_stats) { mi_stat_free(page, block); }
  #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED
  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
  if (!mi_page_is_huge(page)) {   // huge page content may be already decommitted
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
  }
  #endif
  if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); }  // faster than mi_usable_size as we already know the page and that p is unaligned

@@ -121,10 +123,16 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
  #if (MI_DEBUG>0)
  if mi_unlikely(!mi_is_in_heap_region(p)) {
    _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
      "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
    if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
      _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
    #if (MI_INTPTR_SIZE == 8 && defined(__linux__))
    if (((uintptr_t)p >> 40) != 0x7F) {  // linux tends to align large blocks above 0x7F000000000 (issue #640)
    #else
    {
    #endif
      _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
        "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
      if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
        _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
      }
    }
  }
  #endif
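The cookie test that backs this diagnostic stores, in each segment, a value derived from the segment address and a process-wide secret, so pointers into foreign memory almost never validate. A sketch of the idea (the XOR form and the seeding are assumptions, not the exact implementation):

```c
#include <stdbool.h>
#include <stdint.h>

static uintptr_t process_cookie;  /* assumed: initialized once from a random source */

static uintptr_t ptr_cookie(const void* segment) {
  return ((uintptr_t)segment ^ process_cookie);
}

/* On free: does the candidate segment carry the cookie we would have stored? */
static bool segment_cookie_ok(const void* segment, uintptr_t stored_cookie) {
  return (ptr_cookie(segment) == stored_cookie);
}
```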

@@ -272,7 +280,7 @@ static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* seg
  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
  _mi_padding_shrink(page, block, sizeof(mi_block_t));

  if (segment->page_kind == MI_PAGE_HUGE) {
  if (segment->kind == MI_SEGMENT_HUGE) {
    #if MI_HUGE_PAGE_ABANDON
    // huge page segments are always abandoned and can be freed immediately
    _mi_segment_huge_page_free(segment, page, block);

@@ -514,24 +522,24 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  #if (MI_STAT < 2)
  MI_UNUSED(block);
  #endif
  mi_heap_t* const heap = mi_heap_get_default();
  const size_t bsize = mi_page_usable_block_size(page);
  #if (MI_STAT>1)
  const size_t usize = mi_page_usable_size_of(page, block);
  mi_heap_stat_decrease(heap, malloc_requested, usize);
  #endif
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
  // #if (MI_STAT>1)
  // const size_t usize = mi_page_usable_size_of(page, block);
  // mi_heap_stat_decrease(heap, malloc_requested, usize);
  // #endif
  if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, malloc_normal, bsize);
    #if (MI_STAT > 1)
    mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], 1);
    #endif
  }
  //else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
  //  mi_heap_stat_decrease(heap, malloc_large, bsize);
  //}
  else {
    const size_t bpsize = mi_page_block_size(page);  // match stat in page.c:mi_huge_page_alloc
    mi_heap_stat_decrease(heap, malloc_huge, bpsize);
    mi_heap_stat_decrease(heap, malloc_huge, bsize);
  }
}
#else
src/heap.c (43 changes)
@@ -95,6 +95,11 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
  mi_collect_t collect = *((mi_collect_t*)arg_collect);
  _mi_page_free_collect(page, collect >= MI_FORCE);
  if (collect == MI_FORCE) {
    // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
    mi_segment_t* segment = _mi_page_segment(page);
    _mi_segment_collect(segment, true /* force? */);
  }
  if (mi_page_all_free(page)) {
    // no more used blocks, free the page.
    // note: this will free retired pages as well.

@@ -127,14 +132,15 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
  const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());

  // note: never reclaim on collect but leave it to threads that need storage to reclaim
  if (
  #ifdef NDEBUG
  const bool force_main =
  #ifdef NDEBUG
    collect == MI_FORCE
  #else
  #else
    collect >= MI_FORCE
  #endif
    && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim)
  {
  #endif
    && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim;

  if (force_main) {
    // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
    // if all memory is freed by now, all segments should be freed.
    // note: this only collects in the current subprocess

@@ -157,8 +163,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
  mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
  mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );

  // collect segments (purge pages, this can be expensive so don't force on abandonment)
  _mi_segments_collect(collect == MI_FORCE, &heap->tld->segments);
  // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
  // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);

  // if forced, collect thread data cache on program-exit (or shared library unload)
  if (force && is_main_thread && mi_heap_is_backing(heap)) {

@@ -328,20 +335,26 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
  // stats
  const size_t bsize = mi_page_block_size(page);
  if (bsize > MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, malloc_huge, bsize);
  if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
    //if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    //  mi_heap_stat_decrease(heap, malloc_large, bsize);
    //}
    //else
    {
      mi_heap_stat_decrease(heap, malloc_huge, bsize);
    }
  }
  #if (MI_STAT)
  #if (MI_STAT>0)
  _mi_page_free_collect(page, false);  // update used count
  const size_t inuse = page->used;
  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
    mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse);
    #if (MI_STAT>1)
    mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse);
    #endif
  }
  mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse);  // todo: off for aligned blocks...
  #endif
  // mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse);  // todo: off for aligned blocks...
  #endif

  // pretend it is all free now
  mi_assert_internal(mi_page_thread_free(page) == NULL);
src/init.c (56 changes)
@@ -34,13 +34,12 @@ const mi_page_t _mi_page_empty = {
  MI_ATOMIC_VAR_INIT(0), // xthread_free
  MI_ATOMIC_VAR_INIT(0), // xheap
  NULL, NULL
  #if MI_INTPTR_SIZE==4
  , { NULL }
  #endif
  , { 0 }  // padding
};

#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)

#if (MI_SMALL_WSIZE_MAX==128)
#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif (MI_PADDING>0)

@@ -48,7 +47,9 @@ const mi_page_t _mi_page_empty = {
#else
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif

#else
#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX"
#endif

// Empty page queues for every bin
#define QNULL(sz)  { NULL, NULL, (sz)*sizeof(uintptr_t) }

@@ -63,8 +64,8 @@ const mi_page_t _mi_page_empty = {
  QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
  QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
  QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
  QNULL(MI_LARGE_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
  QNULL(MI_LARGE_OBJ_WSIZE_MAX + 2) /* Full queue */ }
  QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
  QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }

#define MI_STAT_COUNT_NULL()  {0,0,0}

@@ -86,6 +87,18 @@ const mi_page_t _mi_page_empty = {
  { MI_INIT74(MI_STAT_COUNT_NULL) }, \
  { MI_INIT74(MI_STAT_COUNT_NULL) }

// Empty slice span queues for every bin
#define SQNULL(sz)  { NULL, NULL, sz }
#define MI_SEGMENT_SPAN_QUEUES_EMPTY \
  { SQNULL(1), \
    SQNULL(  1), SQNULL(  2), SQNULL(  3), SQNULL(  4), SQNULL(  5), SQNULL(  6), SQNULL(  7), SQNULL( 10), /*  8 */ \
    SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \
    SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \
    SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \
    SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ }

// --------------------------------------------------------
// Statically allocate an empty heap as the initial
// thread local value for the default heap,

@@ -95,7 +108,7 @@ const mi_page_t _mi_page_empty = {
// may lead to allocation itself on some platforms)
// --------------------------------------------------------

mi_decl_hidden mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
  NULL,
  MI_ATOMIC_VAR_INIT(NULL),
  0,                // tid

@@ -116,6 +129,17 @@ mi_decl_hidden mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
  MI_PAGE_QUEUES_EMPTY
};

static mi_decl_cache_align mi_subproc_t mi_subproc_default;

#define tld_empty_stats  ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))

mi_decl_cache_align static const mi_tld_t tld_empty = {
  0,
  false,
  NULL, NULL,
  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, tld_empty_stats }, // segments
  { MI_STAT_VERSION, MI_STATS_NULL }  // stats
};

mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  return _mi_prim_thread_id();

@@ -126,15 +150,10 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
extern mi_decl_hidden mi_heap_t _mi_heap_main;

static mi_decl_cache_align mi_subproc_t mi_subproc_default;

static mi_decl_cache_align mi_tld_t tld_main = {
  0, false,
  &_mi_heap_main, &_mi_heap_main,
  { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
    0, 0, 0, 0, 0, &mi_subproc_default,
    &tld_main.stats
  }, // segments
  &_mi_heap_main, & _mi_heap_main,
  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &mi_subproc_default, &tld_main.stats }, // segments
  { MI_STAT_VERSION, MI_STATS_NULL }  // stats
};

@@ -391,7 +410,7 @@ static bool _mi_thread_heap_init(void) {
// initialize thread local data
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
  _mi_memzero_aligned(tld,sizeof(mi_tld_t));
  _mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t));
  tld->heap_backing = bheap;
  tld->heaps = NULL;
  tld->segments.subproc = &mi_subproc_default;

@@ -432,7 +451,10 @@ static bool _mi_thread_heap_done(mi_heap_t* heap) {
  // free if not the main thread
  if (heap != &_mi_heap_main) {
    mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
    // the following assertion does not always hold for huge segments as those are always treated
    // as abandoned: one may allocate it in one thread, but deallocate in another in which case
    // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363
    // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
    mi_thread_data_free((mi_thread_data_t*)heap);
  }
  else {

@@ -647,7 +669,7 @@ void mi_process_init(void) mi_attr_noexcept {
  if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
    long ksize = mi_option_get(mi_option_reserve_os_memory);
    if (ksize > 0) {
      mi_reserve_os_memory((size_t)ksize*MI_KiB, true, true);
      mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */);
    }
  }
}
src/libc.c (57 changes)
@@ -275,3 +275,60 @@ int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) {
  va_end(args);
  return written;
}

#if MI_SIZE_SIZE == 4
#define mi_mask_even_bits32     (0x55555555)
#define mi_mask_even_pairs32    (0x33333333)
#define mi_mask_even_nibbles32  (0x0F0F0F0F)

// sum of all the bytes in `x` if it is guaranteed that the sum < 256!
static size_t mi_byte_sum32(uint32_t x) {
  // perform `x * 0x01010101`: the highest byte contains the sum of all bytes.
  x += (x << 8);
  x += (x << 16);
  return (size_t)(x >> 24);
}

static size_t mi_popcount_generic32(uint32_t x) {
  // first count each 2-bit group `a`, where: a==0b00 -> 00, a==0b01 -> 01, a==0b10 -> 01, a==0b11 -> 10
  // in other words, `a - (a>>1)`; to do this in parallel, we need to mask to prevent spilling a bit pair
  // into the lower bit-pair:
  x = x - ((x >> 1) & mi_mask_even_bits32);
  // add the 2-bit pair results
  x = (x & mi_mask_even_pairs32) + ((x >> 2) & mi_mask_even_pairs32);
  // add the 4-bit nibble results
  x = (x + (x >> 4)) & mi_mask_even_nibbles32;
  // each byte now has a count of its bits, we can sum them now:
  return mi_byte_sum32(x);
}

mi_decl_noinline size_t _mi_popcount_generic(size_t x) {
  return mi_popcount_generic32(x);
}

#else
#define mi_mask_even_bits64     (0x5555555555555555)
#define mi_mask_even_pairs64    (0x3333333333333333)
#define mi_mask_even_nibbles64  (0x0F0F0F0F0F0F0F0F)

// sum of all the bytes in `x` if it is guaranteed that the sum < 256!
static size_t mi_byte_sum64(uint64_t x) {
  x += (x << 8);
  x += (x << 16);
  x += (x << 32);
  return (size_t)(x >> 56);
}

static size_t mi_popcount_generic64(uint64_t x) {
  x = x - ((x >> 1) & mi_mask_even_bits64);
  x = (x & mi_mask_even_pairs64) + ((x >> 2) & mi_mask_even_pairs64);
  x = (x + (x >> 4)) & mi_mask_even_nibbles64;
  return mi_byte_sum64(x);
}

mi_decl_noinline size_t _mi_popcount_generic(size_t x) {
  return mi_popcount_generic64(x);
}
#endif
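One way to sanity-check the SWAR routine above is to compare it against Kernighan's loop, which clears one set bit per iteration. A small self-test sketch (it assumes the 64-bit variant above is in scope):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Reference count: x &= (x-1) clears the lowest set bit each round. */
static unsigned popcount_ref(uint64_t x) {
  unsigned n = 0;
  while (x != 0) { x &= (x - 1); n++; }
  return n;
}

static void popcount_selftest(void) {
  const uint64_t cases[] = { 0, 1, 0xFF, 0x8000000000000000ULL, 0xFFFFFFFFFFFFFFFFULL };
  for (size_t i = 0; i < sizeof(cases)/sizeof(cases[0]); i++) {
    assert(mi_popcount_generic64(cases[i]) == popcount_ref(cases[i]));
  }
}
```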

@@ -106,11 +106,11 @@ typedef struct mi_option_desc_s {
static mi_option_desc_t options[_mi_option_last] =
{
  // stable options
  #if MI_DEBUG || defined(MI_SHOW_ERRORS)
  { 1, UNINIT, MI_OPTION(show_errors) },
  #else
  { 0, UNINIT, MI_OPTION(show_errors) },
  #endif
  { 0, UNINIT, MI_OPTION(show_stats) },
  { MI_DEFAULT_VERBOSE, UNINIT, MI_OPTION(verbose) },

@@ -129,7 +129,7 @@ static mi_option_desc_t options[_mi_option_last] =
    UNINIT, MI_OPTION(reserve_os_memory) },            // reserve N KiB OS memory in advance (use `option_get_size`)
  { 0, UNINIT, MI_OPTION(deprecated_segment_cache) },  // cache N segments per thread
  { 0, UNINIT, MI_OPTION(deprecated_page_reset) },     // reset page memory on free
  { 0, UNINIT, MI_OPTION(abandoned_page_purge) },      // purge free page memory when a thread terminates
  { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) },  // reset free page memory when a thread terminates
  { 0, UNINIT, MI_OPTION(deprecated_segment_reset) },  // reset segment memory on free (needs eager commit)
  #if defined(__NetBSD__)
  { 0, UNINIT, MI_OPTION(eager_commit_delay) },        // the first N segments per thread are not eagerly committed
src/os.c (18 changes)
@@ -91,21 +91,6 @@ void _mi_os_init(void) {
bool _mi_os_decommit(void* addr, size_t size);
bool _mi_os_commit(void* addr, size_t size, bool* is_zero);

static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) { // power of two?
    return (sz & ~mask);
  }
  else {
    return ((sz / alignment) * alignment);
  }
}

static void* mi_align_down_ptr(void* p, size_t alignment) {
  return (void*)_mi_align_down((uintptr_t)p, alignment);
}
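The power-of-two test `(alignment & mask) == 0` selects the cheap mask path; otherwise integer division rounds down. A standalone copy with both paths exercised:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uintptr_t align_down(uintptr_t sz, size_t alignment) {
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) { return sz & ~mask; }  /* power of two */
  return (sz / alignment) * alignment;                 /* general case */
}

int main(void) {
  assert(align_down(0x1234, 0x1000) == 0x1000);  /* mask path */
  assert(align_down(10, 3) == 9);                /* division path */
  return 0;
}
```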

/* -----------------------------------------------------------
  aligned hinting

@@ -519,7 +504,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size)
  mi_os_stat_increase(purged, size);

  if (mi_option_is_enabled(mi_option_purge_decommits) &&  // should decommit?
      !_mi_preloading())                                  // don't decommit during preloading (unsafe)
  {
    bool needs_recommit = true;
    mi_os_decommit_ex(p, size, &needs_recommit, stat_size);

@@ -539,7 +524,6 @@ bool _mi_os_purge(void* p, size_t size) {
  return _mi_os_purge_ex(p, size, true, size);
}

// Protect a region in memory to be not accessible.
static bool mi_os_protectx(void* addr, size_t size, bool protect) {
  // page align conservatively within the range
@@ -12,7 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MI_IN_PAGE_C
#error "this file should be included from 'page.c'"
// include to help an IDE
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/atomic.h"
#endif

@@ -38,15 +38,15 @@ terms of the MIT license. A copy of the license can be found in the file
static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+sizeof(uintptr_t)));
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}

static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
  return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
  return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}

static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
  return (pq->block_size > MI_LARGE_OBJ_SIZE_MAX);
  return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
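The huge and full queues are keyed by sentinel block sizes that no regular page can have: one and two words past the maximum regular object size. The scheme in miniature (the MAX value below is illustrative):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define OBJ_SIZE_MAX    (128 * 1024 * sizeof(uintptr_t))       /* illustrative maximum */
#define HUGE_QUEUE_KEY  (OBJ_SIZE_MAX + 1 * sizeof(uintptr_t))
#define FULL_QUEUE_KEY  (OBJ_SIZE_MAX + 2 * sizeof(uintptr_t))

static bool queue_is_huge(size_t block_size)    { return block_size == HUGE_QUEUE_KEY; }
static bool queue_is_full(size_t block_size)    { return block_size == FULL_QUEUE_KEY; }
static bool queue_is_special(size_t block_size) { return block_size > OBJ_SIZE_MAX; }
```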

/* -----------------------------------------------------------

@@ -58,7 +58,7 @@ static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t mi_bin(size_t size) {
  size_t wsize = _mi_wsize_from_size(size);
  #if defined(MI_ALIGN4W)
  if mi_likely(wsize <= 4) {
    return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes
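`mi_bin` works in machine words rather than bytes, so a byte size is first rounded up to words. A sketch of that conversion consistent with the stated relation `byte size == wsize*sizeof(void*)` (the exact rounding form is an assumption):

```c
#include <stddef.h>
#include <stdint.h>

/* Round a byte size up to machine words. */
static size_t wsize_from_size(size_t size) {
  return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}
```

On a 64-bit system this gives `wsize_from_size(24) == 3` and `wsize_from_size(25) == 4`.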
@ -72,7 +72,7 @@ static inline size_t mi_bin(size_t size) {
|
|||
return (wsize == 0 ? 1 : wsize);
|
||||
}
|
||||
#endif
|
||||
else if mi_unlikely(wsize > MI_LARGE_OBJ_WSIZE_MAX) {
|
||||
else if mi_unlikely(wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
|
||||
return MI_BIN_HUGE;
|
||||
}
|
||||
else {
|
||||
|
@ -107,7 +107,7 @@ size_t _mi_bin_size(size_t bin) {
|
|||
|
||||
// Good size for allocation
|
||||
size_t mi_good_size(size_t size) mi_attr_noexcept {
|
||||
if (size <= MI_LARGE_OBJ_SIZE_MAX) {
|
||||
if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
|
||||
return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE));
|
||||
}
|
||||
else {
|
||||
|
@ -136,6 +136,10 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline bool mi_page_is_large_or_huge(const mi_page_t* page) {
|
||||
return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page));
|
||||
}
|
||||
|
||||
static size_t mi_page_bin(const mi_page_t* page) {
|
||||
const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
|
||||
mi_assert_internal(bin <= MI_BIN_FULL);
|
||||
|
@ -147,7 +151,7 @@ static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t*
|
|||
const size_t bin = mi_page_bin(page);
|
||||
mi_page_queue_t* pq = &heap->pages[bin];
|
||||
mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
|
||||
(mi_page_is_huge(page) && mi_page_queue_is_huge(pq)) ||
|
||||
(mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) ||
|
||||
(mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
|
||||
return pq;
|
||||
}
|
||||
|
@ -210,10 +214,11 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
|
|||
static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
|
||||
mi_assert_internal(page != NULL);
|
||||
mi_assert_expensive(mi_page_queue_contains(queue, page));
|
||||
mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
|
||||
(mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
|
||||
mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
|
||||
(mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
|
||||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
|
||||
mi_heap_t* heap = mi_page_heap(page);
|
||||
|
||||
if (page->prev != NULL) page->prev->next = page->next;
|
||||
if (page->next != NULL) page->next->prev = page->prev;
|
||||
if (page == queue->last) queue->last = page->prev;
|
||||
|
@ -235,10 +240,10 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
|
|||
mi_assert_internal(mi_page_heap(page) == heap);
|
||||
mi_assert_internal(!mi_page_queue_contains(queue, page));
|
||||
#if MI_HUGE_PAGE_ABANDON
|
||||
mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
|
||||
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
|
||||
#endif
|
||||
mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
|
||||
(mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
|
||||
(mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
|
||||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
|
||||
|
||||
mi_page_set_in_full(page, mi_page_queue_is_full(queue));
|
||||
|
@@ -277,8 +282,8 @@ static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t*
   mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
                      (bsize == to->block_size && mi_page_queue_is_full(from)) ||
                      (bsize == from->block_size && mi_page_queue_is_full(to)) ||
-                     (mi_page_is_huge(page) && mi_page_queue_is_huge(to)) ||
-                     (mi_page_is_huge(page) && mi_page_queue_is_full(to)));
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));

   mi_heap_t* heap = mi_page_heap(page);
@@ -317,8 +322,8 @@ static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t*
   page->prev = to->first;
   page->next = next;
   to->first->next = page;
   if (next != NULL) {
     next->prev = page;
   }
   else {
     to->last = page;

83  src/page.c

@@ -82,11 +82,9 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);

-  // const size_t bsize = mi_page_block_size(page);
-  mi_segment_t* segment = _mi_page_segment(page);
   uint8_t* start = mi_page_start(page);
-  mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
-  mi_assert_internal(page->is_huge == (segment->page_kind == MI_PAGE_HUGE));
+  mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL));
+  mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE));
   //mi_assert_internal(start + page->capacity*page->block_size == page->top);

   mi_assert_internal(mi_page_list_is_valid(page,page->free));
@@ -123,14 +121,15 @@ bool _mi_page_is_valid(mi_page_t* page) {
 #endif
   if (mi_page_heap(page)!=NULL) {
     mi_segment_t* segment = _mi_page_segment(page);
-    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0);
+
+    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
 #if MI_HUGE_PAGE_ABANDON
-    if (segment->page_kind != MI_PAGE_HUGE)
+    if (segment->kind != MI_SEGMENT_HUGE)
 #endif
     {
       mi_page_queue_t* pq = mi_page_queue_of(page);
       mi_assert_internal(mi_page_queue_contains(pq, page));
-      mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
+      mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
       mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
     }
   }
@@ -257,10 +256,11 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
 // called from segments when reclaiming abandoned pages
 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
+
   mi_assert_internal(mi_page_heap(page) == heap);
   mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
 #if MI_HUGE_PAGE_ABANDON
-  mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+  mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
 #endif

   // TODO: push on full queue immediately if it is full?
@@ -274,7 +274,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
 #if !MI_HUGE_PAGE_ABANDON
   mi_assert_internal(pq != NULL);
   mi_assert_internal(mi_heap_contains_queue(heap, pq));
-  mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_OBJ_SIZE_MAX || block_size == pq->block_size);
+  mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
 #endif
   mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments);
   if (page == NULL) {
@@ -284,6 +284,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
 #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
 #endif
+  mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
   mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
   // a fresh page was found, initialize it
   const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
@@ -426,6 +427,7 @@ void _mi_page_force_abandon(mi_page_t* page) {
   }
 }

+
 // Free a page with no more free blocks
 void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   mi_assert_internal(page != NULL);
@@ -449,7 +451,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   _mi_segment_page_free(page, force, segments_tld);
 }

-#define MI_MAX_RETIRE_SIZE    MI_LARGE_OBJ_SIZE_MAX   // should be less than size for MI_BIN_HUGE
+#define MI_MAX_RETIRE_SIZE    MI_MEDIUM_OBJ_SIZE_MAX  // should be less than size for MI_BIN_HUGE
 #define MI_RETIRE_CYCLES      (16)

 // Retire a page with no more used blocks
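`MI_MAX_RETIRE_SIZE` caps which pages are worth retiring: a just-emptied page is not freed immediately but parked for up to `MI_RETIRE_CYCLES` so a malloc/free burst in the same size class can reuse it cheaply. A rough sketch of that idea (stand-in fields, simplified from the real code):

// Editor's sketch of the retire/expire idea; field names are illustrative.
typedef struct page_s { unsigned retire_expire; /* ... */ } page_t;

enum { RETIRE_CYCLES = 16 };  // mirrors MI_RETIRE_CYCLES above

static void page_retire(page_t* page) {
  page->retire_expire = RETIRE_CYCLES;   // park instead of freeing right away
}

// Called periodically; returns nonzero once the page should really be freed.
static int page_retire_expired(page_t* page) {
  if (page->retire_expire > 0 && --page->retire_expire == 0) return 1;
  return 0;
}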
@@ -623,7 +625,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 #if (MI_SECURE>0)
 #define MI_MIN_EXTEND   (8*MI_SECURE)   // extend at least by this many
 #else
-#define MI_MIN_EXTEND   (1)
+#define MI_MIN_EXTEND   (4)
 #endif

 // Extend the capacity (up to reserved) by initializing a free list
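Raising `MI_MIN_EXTEND` from 1 to 4 amortizes the slow path: each extension carves at least a small batch of blocks from the page's reserved area onto the free list, so the extend routine runs less often. A simplified standalone sketch of that carving loop (stand-in types, not mimalloc's):

// Editor's sketch: carve `extend` blocks of `bsize` bytes from `area`
// onto the singly-linked free list.
#include <stddef.h>
#include <stdint.h>

typedef struct block_s { struct block_s* next; } block_t;

static void free_list_extend(uint8_t* area, size_t bsize, size_t extend,
                             block_t** free_list) {
  for (size_t i = 0; i < extend; i++) {
    block_t* b = (block_t*)(area + i*bsize);
    b->next = *free_list;   // push on the free list
    *free_list = b;
  }
}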
@@ -632,6 +634,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
 // allocations but this did not speed up any benchmark (due to an
 // extra test in malloc? or cache effects?)
 static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
+  MI_UNUSED(tld);
   mi_assert_expensive(mi_page_is_valid_init(page));
 #if (MI_SECURE<=2)
   mi_assert(page->free == NULL);
@@ -640,9 +643,6 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
 #endif
   if (page->capacity >= page->reserved) return;

-  size_t page_size;
-  //uint8_t* page_start =
-  _mi_segment_page_start(_mi_page_segment(page), page, &page_size);
   mi_stat_counter_increase(tld->stats.pages_extended, 1);

   // calculate the extend count
@@ -688,6 +688,8 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   size_t page_size;
   page->page_start = _mi_segment_page_start(segment, page, &page_size);
+  mi_track_mem_noaccess(page->page_start,page_size);
   mi_assert_internal(mi_page_block_size(page) <= page_size);
+  mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
   mi_assert_internal(page->reserved > 0);
@@ -702,6 +704,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
     mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size));
   }
 #endif
+  mi_assert_internal(page->is_committed);
   if (block_size > 0 && _mi_is_power_of_two(block_size)) {
     page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
   }
@@ -824,7 +827,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   }

   if (page == NULL) {
-    _mi_heap_collect_retired(heap, false); // perhaps make a page available
+    _mi_heap_collect_retired(heap, false); // perhaps make a page available?
     page = mi_page_fresh(heap, pq);
     if (page == NULL && first_try) {
       // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
@@ -902,31 +905,47 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
   General allocation
 ----------------------------------------------------------- */

-// Huge pages contain just one block, and the segment contains just that page.
+// Large and huge page allocation.
+// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`).
 // Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
 // so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
-static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
+static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
-  mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
+  bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
 #if MI_HUGE_PAGE_ABANDON
-  mi_page_queue_t* pq = NULL;
+  mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
 #else
-  mi_page_queue_t* pq = mi_page_queue(heap, MI_LARGE_OBJ_SIZE_MAX+1); // always in the huge queue regardless of the block size
-  mi_assert_internal(mi_page_queue_is_huge(pq));
+  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_LARGE_OBJ_SIZE_MAX+1 : block_size);
+  mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
 #endif
   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
   if (page != NULL) {
     mi_assert_internal(mi_page_block_size(page) >= size);
     mi_assert_internal(mi_page_immediate_available(page));
-    mi_assert_internal(mi_page_is_huge(page));
-    mi_assert_internal(_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
-    mi_assert_internal(_mi_page_segment(page)->used==1);
-#if MI_HUGE_PAGE_ABANDON
-    mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
-    mi_page_set_heap(page, NULL);
-#endif
-    mi_heap_stat_increase(heap, malloc_huge, mi_page_block_size(page));
-    mi_heap_stat_counter_increase(heap, malloc_huge_count, 1);
+
+    if (is_huge) {
+      mi_assert_internal(mi_page_is_huge(page));
+      mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
+      mi_assert_internal(_mi_page_segment(page)->used==1);
+#if MI_HUGE_PAGE_ABANDON
+      mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
+      mi_page_set_heap(page, NULL);
+#endif
+    }
+    else {
+      mi_assert_internal(!mi_page_is_huge(page));
+    }
+
+    const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding
+    /*if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+      mi_heap_stat_increase(heap, malloc_large, bsize);
+      mi_heap_stat_counter_increase(heap, malloc_large_count, 1);
+    }
+    else */
+    {
+      _mi_stat_increase(&heap->tld->stats.malloc_huge, bsize);
+      _mi_stat_counter_increase(&heap->tld->stats.malloc_huge_count, 1);
+    }
   }
   return page;
 }
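From the caller's perspective nothing changes here: any request over the medium limit, or with a very large alignment, is still plain `mi_malloc`/`mi_malloc_aligned` and simply routes through `mi_large_huge_page_alloc` internally. A trivial illustration (the sizes are arbitrary examples, and which path each takes depends on the configured limits):

// Editor's sketch: all three calls below go through the large/huge path
// on typical configurations.
#include <mimalloc.h>

int main(void) {
  void* large = mi_malloc(1024 * 1024);            // likely over the medium limit
  void* huge  = mi_malloc(64 * 1024 * 1024);       // likely a single-page huge segment
  void* align = mi_malloc_aligned(4096, 1u << 22); // big alignment, likely huge path
  mi_free(align); mi_free(huge); mi_free(large);
  return 0;
}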
@@ -937,13 +956,13 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
 static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
   // huge allocation?
   const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
-  if mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
+  if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
     if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) {
       _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
       return NULL;
     }
     else {
-      return mi_huge_page_alloc(heap,size,huge_alignment);
+      return mi_large_huge_page_alloc(heap,size,huge_alignment);
     }
   }
   else {


src/prim/unix/prim.c

@@ -31,11 +31,12 @@ terms of the MIT license. A copy of the license can be found in the file

 #if defined(__linux__)
   #include <features.h>
+  #include <linux/prctl.h>  // PR_SET_VMA
   //#if defined(MI_NO_THP)
   #include <sys/prctl.h>    // THP disable
   //#endif
   #if defined(__GLIBC__)
   #include <linux/mman.h>   // linux mmap flags
   #else
   #include <sys/mman.h>
   #endif
@@ -205,14 +206,24 @@ static int unix_madvise(void* addr, size_t size, int advice) {
   return (res==0 ? 0 : errno);
 }

-static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
+static void* unix_mmap_prim(void* addr, size_t size, int protect_flags, int flags, int fd) {
+  void* p = mmap(addr, size, protect_flags, flags, fd, 0 /* offset */);
+  #if (defined(__linux__) && defined(PR_SET_VMA))
+  if (p!=MAP_FAILED && p!=NULL) {
+    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "mimalloc");
+  }
+  #endif
+  return p;
+}
+
+static void* unix_mmap_prim_aligned(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
   MI_UNUSED(try_alignment);
   void* p = NULL;
   #if defined(MAP_ALIGNED)  // BSD
   if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
     size_t n = mi_bsr(try_alignment);
     if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
-      p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
+      p = unix_mmap_prim(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd);
       if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
         int err = errno;
         _mi_trace_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
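The new `unix_mmap_prim` wrapper names every mimalloc mapping via `PR_SET_VMA_ANON_NAME` (Linux 5.17+; older kernels just return an error), which makes mimalloc memory identifiable in /proc/self/maps. A standalone sketch of the same call outside mimalloc:

// Editor's sketch: name an anonymous mapping; shows up as "[anon:example]"
// in /proc/self/maps on kernels built with CONFIG_ANON_VMA_NAME.
#include <sys/mman.h>
#include <sys/prctl.h>
#include <linux/prctl.h>   // PR_SET_VMA, PR_SET_VMA_ANON_NAME
#include <stdio.h>

int main(void) {
  const size_t size = 1 << 20;
  void* p = mmap(NULL, size, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
  if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "example") != 0) {
    perror("prctl");  // e.g. EINVAL on older kernels; the mapping still works
  }
#endif
  munmap(p, size);
  return 0;
}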
@@ -223,7 +234,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
   }
   #elif defined(MAP_ALIGN)  // Solaris
   if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
-    p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment
+    p = unix_mmap_prim((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd); // addr parameter is the required alignment
     if (p!=MAP_FAILED) return p;
     // fall back to regular mmap
   }
@@ -233,7 +244,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
   if (addr == NULL) {
     void* hint = _mi_os_get_aligned_hint(try_alignment, size);
     if (hint != NULL) {
-      p = mmap(hint, size, protect_flags, flags, fd, 0);
+      p = unix_mmap_prim(hint, size, protect_flags, flags, fd);
       if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
         #if MI_TRACK_ENABLED  // asan sometimes does not instrument errno correctly?
         int err = 0;
@@ -248,7 +259,7 @@ static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int p
   }
   #endif
   // regular mmap
-  p = mmap(addr, size, protect_flags, flags, fd, 0);
+  p = unix_mmap_prim(addr, size, protect_flags, flags, fd);
   if (p!=MAP_FAILED) return p;
   // failed to allocate
   return NULL;
@@ -319,7 +330,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
   if (large_only || lflags != flags) {
     // try large OS page allocation
     *is_large = true;
-    p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
+    p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd);
     #ifdef MAP_HUGE_1GB
     if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) {
       mi_huge_pages_available = false; // don't try huge 1GiB pages again
@@ -327,7 +338,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
       _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno);
     }
     lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
-    p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
+    p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd);
   }
   #endif
   if (large_only) return p;
@@ -340,7 +351,7 @@ static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protec
   // regular allocation
   if (p == NULL) {
     *is_large = false;
-    p = unix_mmap_prim(addr, size, try_alignment, protect_flags, flags, fd);
+    p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, flags, fd);
     if (p != NULL) {
       #if defined(MADV_HUGEPAGE)
       // Many Linux systems don't allow MAP_HUGETLB but they support instead
@@ -398,10 +409,6 @@ static void unix_mprotect_hint(int err) {
   #endif
 }

-
-
-
-
 int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
   // commit: ensure we can access the area
   // note: we may think that *is_zero can be true since the memory

22  src/random.c

@@ -143,13 +143,17 @@ void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* ctx_new) {

 uintptr_t _mi_random_next(mi_random_ctx_t* ctx) {
   mi_assert_internal(mi_random_is_initialized(ctx));
-  #if MI_INTPTR_SIZE <= 4
-    return chacha_next32(ctx);
-  #elif MI_INTPTR_SIZE == 8
-    return (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx));
-  #else
-  # error "define mi_random_next for this platform"
-  #endif
+  uintptr_t r;
+  do {
+    #if MI_INTPTR_SIZE <= 4
+      r = chacha_next32(ctx);
+    #elif MI_INTPTR_SIZE == 8
+      r = (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx));
+    #else
+    # error "define mi_random_next for this platform"
+    #endif
+  } while (r==0);
+  return r;
 }
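The retry loop guarantees `_mi_random_next` never returns zero; mimalloc uses these values as keys and cookies where zero would be degenerate (XOR-encoding with 0 is the identity). The pattern in isolation:

// Editor's sketch: draw again until nonzero. With a decent generator this
// loops essentially never (probability 2^-64 per draw on 64-bit).
#include <stdint.h>

static uint64_t next_nonzero(uint64_t (*next)(void* ctx), void* ctx) {
  uint64_t r;
  do { r = next(ctx); } while (r == 0);
  return r;
}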
@@ -163,7 +167,7 @@ uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
   x ^= _mi_prim_clock_now();
   // and do a few randomization steps
   uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
-  for (uintptr_t i = 0; i < max; i++) {
+  for (uintptr_t i = 0; i < max || x==0; i++, x++) {
     x = _mi_random_shuffle(x);
   }
   mi_assert_internal(x != 0);
@@ -179,7 +183,7 @@ static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
     if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); }
   #endif
   uintptr_t x = _mi_os_random_weak(0);
-  for (size_t i = 0; i < 8; i++) { // key is eight 32-bit words.
+  for (size_t i = 0; i < 8; i++, x++) { // key is eight 32-bit words.
     x = _mi_random_shuffle(x);
     ((uint32_t*)key)[i] = (uint32_t)x;
   }

1819  src/segment.c   (file diff suppressed because it is too large)

50  src/stats.c

|
@ -30,6 +30,7 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
|
|||
{
|
||||
// add atomically (for abandoned pages)
|
||||
int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount);
|
||||
// if (stat == &_mi_stats_main.committed) { mi_assert_internal(current + amount >= 0); };
|
||||
mi_atomic_maxi64_relaxed(&stat->peak, current + amount);
|
||||
if (amount > 0) {
|
||||
mi_atomic_addi64_relaxed(&stat->total,amount);
|
||||
|
@@ -61,6 +62,25 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
 }

+
+static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) {
+  if (amount == 0) return;
+  if mi_unlikely(mi_is_in_main(stat))
+  {
+    // adjust atomically
+    mi_atomic_addi64_relaxed(&stat->current, amount);
+    mi_atomic_addi64_relaxed(&stat->total,amount);
+  }
+  else {
+    // adjust local
+    stat->current += amount;
+    stat->total += amount;
+  }
+}
+
+void _mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) {
+  mi_stat_adjust(stat, -((int64_t)amount));
+}
+

 // must be thread safe as it is called from stats_merge
 static void mi_stat_count_add_mt(mi_stat_count_t* stat, const mi_stat_count_t* src) {
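`mi_stat_adjust` follows the same split as the other stat updates: stats living in the shared main stats object get relaxed atomic adds, while heap-local stats take plain additions. A standalone sketch using the GCC/Clang `__atomic` builtins (comparable to, but not literally, what `mi_atomic_addi64_relaxed` wraps):

// Editor's sketch of the shared-vs-local statistics update pattern.
#include <stdint.h>

typedef struct stat_count_s { int64_t total; int64_t current; } stat_count_t;

// Shared (main) stats: other threads may update concurrently.
static void stat_adjust_mt(stat_count_t* stat, int64_t amount) {
  __atomic_fetch_add(&stat->current, amount, __ATOMIC_RELAXED);
  __atomic_fetch_add(&stat->total,   amount, __ATOMIC_RELAXED);
}

// Thread-local stats: single owner, plain arithmetic suffices.
static void stat_adjust_local(stat_count_t* stat, int64_t amount) {
  stat->current += amount;
  stat->total   += amount;
}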
@@ -94,8 +114,8 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
   }
 #endif
   for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
     mi_stat_count_add_mt(&stats->page_bins[i], &src->page_bins[i]);
   }
 }

 #undef MI_STAT_COUNT
@@ -198,6 +218,13 @@ static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int
   _mi_fprintf(out, arg, "\n");
 }

+static void mi_stat_total_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) {
+  _mi_fprintf(out, arg, "%10s:", msg);
+  _mi_fprintf(out, arg, "%12s", " "); // no peak
+  mi_print_amount(stat->total, unit, out, arg);
+  _mi_fprintf(out, arg, "\n");
+}
+
 static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) {
   _mi_fprintf(out, arg, "%10s:", msg);
   mi_print_amount(stat->total, -1, out, arg);
@@ -214,7 +241,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char*


 static void mi_print_header(mi_output_fun* out, void* arg ) {
-  _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "unit ", "total# ");
+  _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "block ", "total# ");
 }

 #if MI_STAT>1
@@ -283,18 +310,20 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
   // and print using that
   mi_print_header(out,arg);
   #if MI_STAT>1
-  mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "normal",out,arg);
+  mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "bin",out,arg);
   #endif
   #if MI_STAT
-  mi_stat_print(&stats->malloc_normal, "normal", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
+  mi_stat_print(&stats->malloc_normal, "binned", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg);
   // mi_stat_print(&stats->malloc_large, "large", (stats->malloc_large_count.total == 0 ? 1 : -1), out, arg);
   mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 1 : -1), out, arg);
   mi_stat_count_t total = { 0,0,0 };
   mi_stat_count_add_mt(&total, &stats->malloc_normal);
   // mi_stat_count_add(&total, &stats->malloc_large);
   mi_stat_count_add_mt(&total, &stats->malloc_huge);
   mi_stat_print_ex(&total, "total", 1, out, arg, "");
   #endif
   #if MI_STAT>1
-  mi_stat_print_ex(&stats->malloc_requested, "malloc req", 1, out, arg, "");
+  mi_stat_total_print(&stats->malloc_requested, "malloc req", 1, out, arg);
   _mi_fprintf(out, arg, "\n");
   #endif
   mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, "");
@@ -496,7 +525,12 @@ static void mi_heap_buf_print_count_bin(mi_heap_buf_t* hbuf, const char* prefix,
   const size_t binsize = _mi_bin_size(bin);
   const size_t pagesize = (binsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_SMALL_PAGE_SIZE :
                           (binsize <= MI_MEDIUM_OBJ_SIZE_MAX ? MI_MEDIUM_PAGE_SIZE :
-                          (binsize <= MI_LARGE_OBJ_SIZE_MAX ? MI_LARGE_PAGE_SIZE : 0)));
+                          #if MI_LARGE_PAGE_SIZE
+                          (binsize <= MI_LARGE_OBJ_SIZE_MAX ? MI_LARGE_PAGE_SIZE : 0)
+                          #else
+                          0
+                          #endif
+                          ));
   char buf[128];
   _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"block_size\": %zu, \"page_size\": %zu }%s\n", prefix, stat->total, stat->peak, stat->current, binsize, pagesize, (add_comma ? "," : ""));
   buf[127] = 0;
@@ -589,7 +623,7 @@ char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept {
   for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
     mi_heap_buf_print_count_bin(&hbuf, " ", &stats->page_bins[i], i, i!=MI_BIN_HUGE);
   }
   mi_heap_buf_print(&hbuf, " ]\n");
   mi_heap_buf_print(&hbuf, "}\n");
   return hbuf.buf;
 }


test/CMakeLists.txt

@@ -16,7 +16,7 @@ if (NOT CMAKE_BUILD_TYPE)
 endif()

 # Import mimalloc (if installed)
-find_package(mimalloc 1.9 CONFIG REQUIRED)
+find_package(mimalloc 2.2 CONFIG REQUIRED)
 message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR} (${MIMALLOC_VERSION_DIR})")
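With the version bump, a consumer that configures against an installed mimalloc links the exported `mimalloc` target as usual and the C side stays unchanged. A minimal consumer sketch (the CMake wiring in the comment is assumed, not shown in the diff):

/* Editor's sketch. After:
     find_package(mimalloc 2.2 CONFIG REQUIRED)
     target_link_libraries(myapp PRIVATE mimalloc)              */
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  printf("mimalloc version: %d\n", mi_version());
  void* p = mi_malloc(42);
  mi_free(p);
  return 0;
}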

test/main-override-static.c

@@ -10,7 +10,6 @@
 #include <mimalloc.h>
 #include <mimalloc-override.h> // redefines malloc etc.

-static void mi_bins(void);

 static void double_free1();
 static void double_free2();
@@ -24,11 +23,12 @@ static void test_reserved(void);
 static void negative_stat(void);
 static void alloc_huge(void);
 static void test_heap_walk(void);
+static void test_heap_arena(void);
+static void test_align(void);
 static void test_canary_leak(void);
 static void test_manage_os_memory(void);
 // static void test_large_pages(void);


 int main() {
   mi_version();
   mi_stats_reset();
@@ -50,8 +50,10 @@ int main() {
   // negative_stat();
   // test_heap_walk();
   // alloc_huge();

+  // test_heap_walk();
+  // test_heap_arena();
+  // test_align();

   void* p1 = malloc(78);
   void* p2 = malloc(24);
   free(p1);
@@ -67,7 +69,7 @@ int main() {
   free(p1);
   free(p2);
   free(s);

   /* now test if override worked by allocating/freeing across the api's*/
   //p1 = mi_malloc(32);
   //free(p1);
@@ -82,6 +84,13 @@ int main() {
   return 0;
 }

+static void test_align() {
+  void* p = mi_malloc_aligned(256, 256);
+  if (((uintptr_t)p % 256) != 0) {
+    fprintf(stderr, "%p is not 256 aligned!\n", p);
+  }
+}
+
 static void invalid_free() {
   free((void*)0xBADBEEF);
   realloc((void*)0xBADBEEF,10);
@@ -239,6 +248,20 @@ static void test_heap_walk(void) {
   mi_heap_visit_blocks(heap, true, &test_visit, NULL);
 }

+static void test_heap_arena(void) {
+  mi_arena_id_t arena_id;
+  int err = mi_reserve_os_memory_ex(100 * 1024 * 1024, false /* commit */, false /* allow large */, true /* exclusive */, &arena_id);
+  if (err) abort();
+  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
+  for (int i = 0; i < 500000; i++) {
+    void* p = mi_heap_malloc(heap, 1024);
+    if (p == NULL) {
+      printf("out of memory after %d kb (expecting about 100_000kb)\n", i);
+      break;
+    }
+  }
+}
+
 static void test_canary_leak(void) {
   char* p = mi_mallocn_tp(char,23);
   for(int i = 0; i < 23; i++) {


test/main-override.cpp

|
@ -27,9 +27,12 @@ static void heap_late_free(); // issue #204
|
|||
static void padding_shrink(); // issue #209
|
||||
static void various_tests();
|
||||
static void test_mt_shutdown();
|
||||
static void large_alloc(void); // issue #363
|
||||
static void fail_aslr(); // issue #372
|
||||
static void tsan_numa_test(); // issue #414
|
||||
static void strdup_test(); // issue #445
|
||||
static void bench_alloc_large(void); // issue #xxx
|
||||
//static void test_large_migrate(void); // issue #691
|
||||
static void heap_thread_free_huge();
|
||||
static void test_std_string(); // issue #697
|
||||
static void test_thread_local(); // issue #944
|
||||
|
@@ -55,18 +58,20 @@ int main() {
   //test_thread_local();
+  // heap_thread_free_huge();
   /*
-  heap_thread_free_large();
-  heap_no_delete();
-  heap_late_free();
-  padding_shrink();
+  heap_thread_free_huge();
+  heap_thread_free_large();
+  heap_no_delete();
+  heap_late_free();
+  padding_shrink();
   various_tests();
   large_alloc();
-  tsan_numa_test();
-  strdup_test();
-  */
-  // test_stl_allocators();
-  // test_mt_shutdown();
-  // test_large_migrate();
+  tsan_numa_test();
+  */
+  /*
+  strdup_test();
+  test_stl_allocators();
+  test_mt_shutdown();
+  */
   //fail_aslr();
   mi_stats_print(NULL);
   return 0;
@@ -358,7 +363,7 @@ static void heap_thread_free_large_worker() {

 static void heap_thread_free_large() {
   for (int i = 0; i < 100; i++) {
-    shared_p = mi_malloc_aligned(2*1024*1024 + 1, 8);
+    shared_p = mi_malloc_aligned(2 * 1024 * 1024 + 1, 8);
     auto t1 = std::thread(heap_thread_free_large_worker);
     t1.join();
   }
@@ -369,14 +374,13 @@ static void heap_thread_free_huge_worker() {
 }

 static void heap_thread_free_huge() {
-  for (int i = 0; i < 10; i++) {
+  for (int i = 0; i < 100; i++) {
     shared_p = mi_malloc(1024 * 1024 * 1024);
     auto t1 = std::thread(heap_thread_free_huge_worker);
     t1.join();
   }
 }

-
 static void test_mt_shutdown()
 {
   const int threads = 5;
@@ -401,6 +405,18 @@ static void test_mt_shutdown()
   std::cout << "done" << std::endl;
 }

+// issue #363
+using namespace std;
+
+void large_alloc(void)
+{
+  char* a = new char[1ull << 25];
+  thread th([&] {
+    delete[] a;
+  });
+  th.join();
+}
+
 // issue #372
 static void fail_aslr() {
   size_t sz = (size_t)(4ULL << 40); // 4TiB
@@ -421,6 +437,36 @@ static void tsan_numa_test() {
   t1.join();
 }

+// issue #?
+#include <chrono>
+#include <random>
+#include <iostream>
+
+static void bench_alloc_large(void) {
+  static constexpr int kNumBuffers = 20;
+  static constexpr size_t kMinBufferSize = 5 * 1024 * 1024;
+  static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024;
+  std::unique_ptr<char[]> buffers[kNumBuffers];
+
+  std::random_device rd; (void)rd;
+  std::mt19937 gen(42); //rd());
+  std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize);
+  std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1);
+
+  static constexpr int kNumIterations = 2000;
+  const auto start = std::chrono::steady_clock::now();
+  for (int i = 0; i < kNumIterations; ++i) {
+    int buffer_idx = buf_number_distribution(gen);
+    size_t new_size = size_distribution(gen);
+    buffers[buffer_idx] = std::make_unique<char[]>(new_size);
+  }
+  const auto end = std::chrono::steady_clock::now();
+  const auto num_ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+  const auto us_per_allocation = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / kNumIterations;
+  std::cout << kNumIterations << " allocations Done in " << num_ms << "ms." << std::endl;
+  std::cout << "Avg " << us_per_allocation << " us per allocation" << std::endl;
+}
+
 class MTest
 {
@@ -447,4 +493,4 @@ void test_thread_local()
     mi_stats_print(NULL);
   }
   return;
 }


test/test-api.c

@@ -203,7 +203,11 @@ int main(void) {
   CHECK_BODY("malloc-aligned9") { // test large alignments
     bool ok = true;
     void* p[8];
-    size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };
+    size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1,
+                        #if SIZE_MAX > UINT32_MAX
+                        2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX,
+                        #endif
+                        0 };
     for (int i = 0; i < 28 && ok; i++) {
       int align = (1 << i);
       for (int j = 0; j < 8 && ok; j++) {


test/test-stress.c

@@ -320,11 +320,17 @@ int main(int argc, char** argv) {

   // Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
   srand(0x7feb352d);
-  // mi_stats_reset();
+
+  //mi_reserve_os_memory(512ULL << 20, true, true);
+
+  #if !defined(NDEBUG) && !defined(USE_STD_MALLOC)
+  mi_stats_reset();
+  #endif
+
   #ifdef STRESS
     test_stress();
   #else
     test_leak();
   #endif

 #ifndef USE_STD_MALLOC
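The `USE_STD_MALLOC` guard above reflects how the stress test can be compiled against the system allocator for A/B comparison: the test routes its allocation calls through one macro layer, roughly like this (simplified from the test's own custom_* wrappers):

// Editor's sketch of the allocator toggle; one macro switches every call site.
#ifdef USE_STD_MALLOC
  #include <stdlib.h>
  #define custom_malloc(n)   malloc(n)
  #define custom_free(p)     free(p)
#else
  #include <mimalloc.h>
  #define custom_malloc(n)   mi_malloc(n)
  #define custom_free(p)     mi_free(p)
#endif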