commit c472b8b016 by hankluo6, 2021-10-27 16:49:28 -07:00 (committed by GitHub)
4 changed files with 17 additions and 0 deletions


@@ -311,6 +311,9 @@ typedef enum mi_option_e {
mi_option_page_reset,
mi_option_abandoned_page_reset,
mi_option_segment_reset,
#if defined(__linux__)
mi_option_prefault,
#endif
mi_option_eager_commit_delay,
mi_option_reset_delay,
mi_option_use_numa_nodes,
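
Since the new enumerator is only declared under `#if defined(__linux__)`, any code that names it needs the same guard. Below is a minimal sketch (not part of this commit) of enabling the option at startup through mimalloc's existing option API; the call has the same effect as running with `MIMALLOC_PREFAULT=1`, and only mappings created after it takes effect are populated:

```c
#include <mimalloc.h>

int main(void) {
#if defined(__linux__)
  // mi_option_prefault is only declared on Linux builds, so the reference
  // must be guarded exactly like the declaration in the enum above.
  if (!mi_option_is_enabled(mi_option_prefault)) {
    mi_option_enable(mi_option_prefault);  // same effect as MIMALLOC_PREFAULT=1
  }
#endif
  void* p = mi_malloc(1 << 20);  // mappings made after this point request MAP_POPULATE
  mi_free(p);
  return 0;
}
```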


@@ -302,6 +302,10 @@ or via environment variables:
`MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB)
of a thread to not allocate in the huge OS pages; this prevents threads that are short lived
and allocate just a little to take up space in the huge OS page area (which cannot be reset).
- `MIMALLOC_PREFAULT=1`: (Linux only) enable memory prefaulting when available. This option instructs the kernel to synchronously
load the entire mapped region into memory by passing `MAP_POPULATE` to `mmap`. The kernel performs read-ahead on the mapping, so
subsequent accesses proceed without page faults; this can improve performance but may also reduce the amount of available memory.
In the `mimalloc_test_stress` benchmark it reduces page faults by roughly 95%.
Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write
for all pages in the original process including the huge OS pages. When any memory is now written in that area, the
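
The bullet above describes what `MAP_POPULATE` does at the `mmap` level. The following standalone Linux-only sketch (the 64 MiB size is an arbitrary choice for illustration) shows the same prefaulting outside of mimalloc; running it under `/usr/bin/time -v` or `perf stat -e minor-faults`, with and without the `MAP_POPULATE` flag, makes the difference in minor page faults visible:

```c
// Minimal Linux sketch of the prefaulting behavior described above:
// MAP_POPULATE makes the kernel read ahead and populate the page tables
// at mmap time, so the memset below does not take demand page faults.
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
  const size_t size = 64u * 1024 * 1024;  // 64 MiB anonymous mapping
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  memset(p, 1, size);                     // pages are already resident
  munmap(p, size);
  return 0;
}
```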


@@ -81,6 +81,9 @@ static mi_option_desc_t options[_mi_option_last] =
{ 1, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates
{ 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
#if defined(__linux__)
{ 0, UNINIT, MI_OPTION(prefault) },
#endif
#if defined(__NetBSD__)
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
#else
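
The entry above gives `prefault` a default of `0` (off), and `MI_OPTION(prefault)` ties it to the `MIMALLOC_PREFAULT` environment variable documented in the readme. A small sketch (not from the commit) to check which value is in effect when the process is launched with or without `MIMALLOC_PREFAULT=1`:

```c
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
#if defined(__linux__)
  // Prints 0 with the table default above, 1 when started with
  // MIMALLOC_PREFAULT=1 (or after mi_option_enable(mi_option_prefault)).
  printf("prefault = %ld\n", mi_option_get(mi_option_prefault));
#endif
  return 0;
}
```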


@@ -399,6 +399,10 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
#define MAP_NORESERVE 0
#endif
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
#if defined(__linux__)
if (mi_option_get(mi_option_prefault))
flags = flags | MAP_POPULATE;
#endif
int fd = -1;
#if defined(MAP_ALIGNED) // BSD
if (try_alignment > 0) {
@@ -429,6 +433,9 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
}
else {
int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux
#if defined(__linux__)
lflags = lflags & ~MAP_POPULATE; // don't use MAP_POPULATE on huge pages
#endif
int lfd = fd;
#ifdef MAP_ALIGNED_SUPER
lflags |= MAP_ALIGNED_SUPER;
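
Taken together, the two hunks above request `MAP_POPULATE` for regular mappings when the option is set and strip it again (along with `MAP_NORESERVE`) for huge-page mappings. The condensed sketch below is not the actual `mi_unix_mmap` code; the `prefault` and `use_huge_pages` parameters are hypothetical stand-ins for the option lookup and the huge-page branch:

```c
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

// Condensed illustration of the flag handling added in the hunks above.
int sketch_mmap_flags(bool prefault, bool use_huge_pages) {
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
#if defined(__linux__)
  if (prefault) flags |= MAP_POPULATE;   // prefault regular mappings
#endif
  if (use_huge_pages) {
    flags &= ~MAP_NORESERVE;             // NORESERVE seems to fail on huge pages
#if defined(__linux__)
    flags &= ~MAP_POPULATE;              // don't use MAP_POPULATE on huge pages
#endif
  }
  return flags;
}

int main(void) {
  printf("regular: %#x  huge: %#x\n",
         sketch_mmap_flags(true, false), sketch_mmap_flags(true, true));
  return 0;
}
```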