Mirror of https://github.com/microsoft/mimalloc.git
merge from dev
commit 128cdd1dfb
4 changed files with 26 additions and 16 deletions
src/memory.c (16 changed lines)
@@ -302,16 +302,19 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
       // no need to commit, but check if already fully committed
       *commit = mi_bitmap_is_claimed(&region->commit, 1, blocks, bit_idx);
     }
-    mi_assert_internal(mi_bitmap_is_claimed(&region->commit, 1, blocks, bit_idx));
+    mi_assert_internal(!*commit || mi_bitmap_is_claimed(&region->commit, 1, blocks, bit_idx));

   // unreset reset blocks
   if (mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx)) {
+    // some blocks are still reset
     mi_assert_internal(!info.is_large);
     mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit);
     mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
-    bool reset_zero;
-    _mi_mem_unreset(p, blocks * MI_SEGMENT_SIZE, &reset_zero, tld);
-    if (reset_zero) *is_zero = true;
+    if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
+      bool reset_zero = false;
+      _mi_mem_unreset(p, blocks * MI_SEGMENT_SIZE, &reset_zero, tld);
+      if (reset_zero) *is_zero = true;
+    }
   }
   mi_assert_internal(!mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx));

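The new guard means _mi_mem_unreset runs only when committed memory is requested or when mi_option_reset_decommits is off. A minimal standalone sketch of that predicate (illustrative names, not mimalloc code):

#include <stdbool.h>
#include <stdio.h>

// Mirrors: if (*commit || !mi_option_is_enabled(mi_option_reset_decommits))
// When reset decommits memory, a reset block only needs an explicit unreset
// (re-commit) if the caller asked for committed memory.
static bool needs_unreset(bool want_commit, bool reset_decommits) {
  return want_commit || !reset_decommits;
}

int main(void) {
  for (int c = 0; c <= 1; c++)
    for (int d = 0; d <= 1; d++)
      printf("want_commit=%d reset_decommits=%d -> unreset=%d\n",
             c, d, (int)needs_unreset(c != 0, d != 0));
  return 0;
}
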
@@ -409,8 +412,9 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
     }

     // reset the blocks to reduce the working set.
-    if (!info.is_large && mi_option_is_enabled(mi_option_segment_reset) &&
-        mi_option_is_enabled(mi_option_eager_commit))  // cannot reset halfway committed segments, use only `option_page_reset` instead
+    if (!info.is_large && mi_option_is_enabled(mi_option_segment_reset)
+        && (mi_option_is_enabled(mi_option_eager_commit) ||
+            mi_option_is_enabled(mi_option_reset_decommits)))  // cannot reset halfway committed segments, use only `option_page_reset` instead
     {
       bool any_unreset;
       mi_bitmap_claim(&region->reset, 1, blocks, bit_idx, &any_unreset);
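With this change, freed segment memory can be reset when either eager commit or reset-decommits is enabled, not only under eager commit. A hedged usage sketch, assuming the public option API and option names from <mimalloc.h> as listed in the table below:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Turn on segment reset and reset-decommits via the public option API
  // (presumably also settable from the environment, e.g. MIMALLOC_SEGMENT_RESET=1).
  mi_option_set(mi_option_segment_reset, 1);
  mi_option_set(mi_option_reset_decommits, 1);

  void* p = mi_malloc(8 * 1024 * 1024);
  mi_free(p);   // freed segment blocks may now be reset even without eager commit
  printf("segment_reset enabled: %d\n", (int)mi_option_is_enabled(mi_option_segment_reset));
  return 0;
}
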
src/options.c

@@ -64,7 +64,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(segment_reset) },        // reset segment memory on free (needs eager commit)
   { 1, UNINIT, MI_OPTION(reset_decommits) },      // reset decommits memory
   { 0, UNINIT, MI_OPTION(eager_commit_delay) },   // the first N segments per thread are not eagerly committed
-  { 1, UNINIT, MI_OPTION(allow_decommit) },       // decommit pages when not eager committed
+  { 0, UNINIT, MI_OPTION(allow_decommit) },       // decommit pages when not eager committed
   { 1000, UNINIT, MI_OPTION(reset_delay) },       // reset delay in milli-seconds
   { 1000, UNINIT, MI_OPTION(arena_reset_delay) }, // reset delay in milli-seconds
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },       // 0 = use available numa nodes, otherwise use at most N nodes.

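The default for allow_decommit flips from enabled to disabled here. A minimal sketch of opting back in at startup, assuming mi_option_allow_decommit is exposed in <mimalloc.h> as the option table suggests (alternatively, presumably MIMALLOC_ALLOW_DECOMMIT=1 in the environment):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Restore the previous behavior: allow decommit of pages that were not
  // eagerly committed.
  mi_option_set(mi_option_allow_decommit, 1);
  printf("allow_decommit = %ld\n", mi_option_get(mi_option_allow_decommit));
  return 0;
}
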
src/segment.c

@@ -313,7 +313,7 @@ static mi_segment_t* mi_segment_cache_pop(size_t segment_slices, mi_segments_tld

 static bool mi_segment_cache_full(mi_segments_tld_t* tld)
 {
-  if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
+  // if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
   size_t max_cache = mi_option_get(mi_option_segment_cache);
   if (tld->cache_count < max_cache
       && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))  // at least allow a 1 element cache
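With the early return for a thread's final segment commented out, whether a freed segment is cached now depends only on the two bounds in the condition above. A small standalone sketch of that bound (the value of MI_SEGMENT_CACHE_FRACTION below is an assumption for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SEGMENT_CACHE_FRACTION  8   // assumed value of MI_SEGMENT_CACHE_FRACTION

// Mirrors the condition in mi_segment_cache_full: the cache is not full while
// the count stays under both the segment_cache option and a fraction of the
// peak segment count (always allowing at least one cached segment).
static bool cache_is_full(size_t cache_count, size_t max_cache, size_t peak_count) {
  return !(cache_count < max_cache &&
           cache_count < 1 + peak_count / SEGMENT_CACHE_FRACTION);
}

int main(void) {
  // e.g. with a peak of 20 segments, at most 1 + 20/8 = 3 segments stay cached
  printf("%d\n", (int)cache_is_full(2, 16, 20));  // 0: still room
  printf("%d\n", (int)cache_is_full(3, 16, 20));  // 1: full
  return 0;
}
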
@@ -1028,6 +1028,7 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_
    set to 1 if it contains the segment meta data.
 ----------------------------------------------------------- */

+
 #if (MI_INTPTR_SIZE==8)
 #define MI_MAX_ADDRESS    ((size_t)20 << 40)  // 20TB
 #else

test/test-stress.c

@@ -5,9 +5,14 @@ terms of the MIT license.
 -----------------------------------------------------------------------------*/

 /* This is a stress test for the allocator, using multiple threads and
-   transferring objects between threads. This is not a typical workload
-   but uses a random linear size distribution. Timing can also depend on
-   (random) thread scheduling. Do not use this test as a benchmark!
+   transferring objects between threads. It tries to reflect real-world workloads:
+   - allocation size is distributed linearly in powers of two
+   - with some fraction extra large (and some extra extra large)
+   - the allocations are initialized and read again at free
+   - pointers transfer between threads
+   - threads are terminated and recreated with some objects surviving in between
+   - uses deterministic "randomness", but execution can still depend on
+     (random) thread scheduling. Do not use this test as a benchmark!
 */

 #include <stdio.h>

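The rewritten header comment describes the workload rather than disclaiming it. A minimal sketch (not the test's actual code) of the kind of size distribution it describes, using an illustrative deterministic PRNG:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint64_t xorshift(uint64_t* s) {   // small deterministic PRNG (illustrative)
  uint64_t x = *s;
  x ^= x << 13; x ^= x >> 7; x ^= x << 17;
  return (*s = x);
}

static size_t random_alloc_size(uint64_t* s) {
  int exp = (int)(xorshift(s) % 12);                                // pick a power-of-two bucket
  size_t words = (size_t)(xorshift(s) % ((uint64_t)1 << exp)) + 1;  // uniform within the bucket
  if (xorshift(s) % 100 == 0) words *= 100;                         // some fraction extra large
  return words * sizeof(uintptr_t);
}

int main(void) {
  uint64_t seed = 42;
  for (int i = 0; i < 5; i++) printf("%zu bytes\n", random_alloc_size(&seed));
  return 0;
}
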
@@ -22,13 +27,13 @@ terms of the MIT license.
 // argument defaults
 static int THREADS = 32;      // more repeatable if THREADS <= #processors
 static int SCALE   = 50;      // scaling factor
-static int ITER    = 10;      // N full iterations re-creating all threads
+static int ITER    = 10;      // N full iterations destructing and re-creating all threads

 // static int THREADS = 8;    // more repeatable if THREADS <= #processors
 // static int SCALE   = 100;  // scaling factor

 static bool allow_large_objects = true;   // allow very large objects?
-static size_t use_one_size = 0;           // use single object size of N uintptr_t?
+static size_t use_one_size = 0;           // use single object size of `N * sizeof(uintptr_t)`?


 #ifdef USE_STD_MALLOC
@@ -185,7 +190,7 @@ int main(int argc, char** argv) {
     long n = (strtol(argv[3], &end, 10));
     if (n > 0) ITER = n;
   }
-  printf("start with %d threads with a %d%% load-per-thread and %d iterations\n", THREADS, SCALE, ITER);
+  printf("Using %d threads with a %d%% load-per-thread and %d iterations\n", THREADS, SCALE, ITER);
   //int res = mi_reserve_huge_os_pages(4,1);
   //printf("(reserve huge: %i\n)", res);

@@ -204,7 +209,7 @@ int main(int argc, char** argv) {
     }
     mi_collect(false);
 #ifndef NDEBUG
-    if ((n + 1) % 10 == 0) { printf("- iterations: %3d\n", n + 1); }
+    if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - n + 1); }
 #endif
   }
