better object sizes for large objects

daan 2019-08-25 10:01:11 -07:00
parent 23812cc0ac
commit a431d80fc3
3 changed files with 13 additions and 12 deletions


@@ -91,10 +91,12 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_MEDIUM_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_MEDIUM_PAGE_SIZE)
 #define MI_LARGE_PAGES_PER_SEGMENT  (MI_SEGMENT_SIZE/MI_LARGE_PAGE_SIZE)
 
-#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)
-#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)  // 128kb on 64-bit
-#define MI_LARGE_OBJ_SIZE_MAX   (MI_LARGE_PAGE_SIZE/2)   // 2Mb on 64-bit
-#define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX>>MI_INTPTR_SHIFT)
+// The max object size are checked to not waste more than 12.5% internally over the page sizes.
+// (Except for large pages since huge objects are allocated in 4MiB chunks)
+#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)   // 16kb
+#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)  // 128kb
+#define MI_LARGE_OBJ_SIZE_MAX   (MI_LARGE_PAGE_SIZE/2)   // 2mb
+#define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
 #define MI_HUGE_OBJ_SIZE_MAX    (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE)  // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)
 
 // Minimal alignment necessary. On most platforms 16 bytes are needed
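For concreteness, here is a small standalone sketch of what the new limits evaluate to. The page and segment size constants below are assumptions (the usual 64-bit mimalloc defaults of this era, consistent with the 16kb/128kb/2mb comments in the diff), not part of the commit; only the *_OBJ_SIZE_MAX lines mirror the change.

// Sketch only: the page/segment constants are assumed, not from this diff.
#include <stdio.h>

#define KiB (1024UL)
#define MiB (1024UL*KiB)

#define MI_INTPTR_SIZE       8UL          // assumed: 64-bit
#define MI_SMALL_PAGE_SIZE   (64*KiB)     // assumed
#define MI_MEDIUM_PAGE_SIZE  (512*KiB)    // assumed
#define MI_LARGE_PAGE_SIZE   (4*MiB)      // assumed
#define MI_SEGMENT_SIZE      (4*MiB)      // assumed

// The new limits from the diff above:
#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)   // 16kb
#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)  // 128kb
#define MI_LARGE_OBJ_SIZE_MAX   (MI_LARGE_PAGE_SIZE/2)   // 2mb
#define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_HUGE_OBJ_SIZE_MAX    (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE)

int main(void) {
  printf("small  max: %6lu KiB\n", MI_SMALL_OBJ_SIZE_MAX/KiB);    // 16
  printf("medium max: %6lu KiB\n", MI_MEDIUM_OBJ_SIZE_MAX/KiB);   // 128
  printf("large  max: %6lu KiB\n", MI_LARGE_OBJ_SIZE_MAX/KiB);    // 2048
  printf("large  max: %6lu words\n", MI_LARGE_OBJ_WSIZE_MAX);     // 262144
  printf("huge   max: %6lu MiB\n", MI_HUGE_OBJ_SIZE_MAX/MiB);     // 64
  return 0;
}

Note that switching MI_LARGE_OBJ_WSIZE_MAX from MI_LARGE_OBJ_SIZE_MAX>>MI_INTPTR_SHIFT to MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE does not change the value on 64-bit (MI_INTPTR_SIZE is 8, i.e. 1<<MI_INTPTR_SHIFT); the division form just avoids depending on the shift constant.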


@@ -700,7 +700,10 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
   General allocation
 ----------------------------------------------------------- */
 
-// A huge page is allocated directly without being in a queue
+// A huge page is allocated directly without being in a queue.
+// Because huge pages contain just one block, and the segment contains
+// just that page, we always treat them as abandoned and any thread
+// that frees the block can free the whole page and segment directly.
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
   size_t block_size = _mi_wsize_from_size(size) * sizeof(uintptr_t);
   mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
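The expanded comment is worth unpacking: a huge allocation gets a dedicated page in a dedicated segment, and that page is treated as abandoned, i.e. not owned by any thread's heap, so the free path needs no cross-thread handoff. Below is a minimal sketch of that idea; the sketch_* names are hypothetical and not mimalloc's real API.

// Hypothetical sketch of the huge-page free path described by the comment.
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct sketch_page_s {
  bool   is_huge;   // exactly one block lives in this page
  size_t used;      // live block count
} sketch_page_t;

typedef struct sketch_segment_s {
  sketch_page_t page;   // a huge segment contains just this one page
} sketch_segment_t;

// hypothetical stand-in for returning the segment's memory to the OS
static void sketch_segment_os_free(sketch_segment_t* seg) {
  printf("segment %p freed directly\n", (void*)seg);
  free(seg);
}

static void sketch_block_free(sketch_segment_t* seg) {
  if (seg->page.is_huge) {
    // one block, one page, one segment: freeing the block empties all
    // three, and since no thread owns the page, whichever thread frees
    // the block can release everything on the spot
    seg->page.used = 0;
    sketch_segment_os_free(seg);
  }
  // non-huge blocks would instead go back onto the page's free list,
  // possibly via a deferred list when freed from a foreign thread
}

int main(void) {
  sketch_segment_t* seg = malloc(sizeof *seg);
  seg->page.is_huge = true;
  seg->page.used = 1;
  sketch_block_free(seg);   // any thread may do this directly
  return 0;
}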


@@ -709,17 +709,13 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
 /* -----------------------------------------------------------
    Page allocation and free
 ----------------------------------------------------------- */
-static bool mi_is_good_fit(size_t bsize, size_t size) {
-  // good fit if no more than 25% wasted
-  return (bsize > 0 && size > 0 && bsize < size && (size - (size % bsize)) < (size/4));
-}
 
 mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
   mi_page_t* page;
-  if (block_size <= MI_SMALL_OBJ_SIZE_MAX || mi_is_good_fit(block_size,MI_SMALL_PAGE_SIZE)) {
+  if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
     page = mi_segment_small_page_alloc(tld,os_tld);
   }
-  else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX || mi_is_good_fit(block_size, MI_MEDIUM_PAGE_SIZE)) {
+  else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
     page = mi_segment_medium_page_alloc(tld, os_tld);
   }
   else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
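A side note on the removed helper, as far as I can tell (this analysis is mine, not stated in the commit): the comment promises "no more than 25% wasted", but size - (size % bsize) is the used portion of the page, not the waste, and for any 0 < bsize < size the used portion is at least max(bsize, size - bsize) >= size/2, so the condition (used < size/4) could never hold. The || arms were therefore dead code, and removing them leaves the dispatch governed purely by the tightened *_OBJ_SIZE_MAX limits above. A quick standalone check:

// Standalone check that the removed predicate never returned true.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// the removed helper, verbatim
static bool mi_is_good_fit(size_t bsize, size_t size) {
  // good fit if no more than 25% wasted
  return (bsize > 0 && size > 0 && bsize < size && (size - (size % bsize)) < (size/4));
}

int main(void) {
  const size_t page = 64*1024;   // e.g. a 64KiB small page (assumed size)
  size_t hits = 0;
  for (size_t bsize = 1; bsize < page; bsize++) {
    if (mi_is_good_fit(bsize, page)) hits++;
  }
  // prints 0: the used portion size - (size % bsize) is always at least
  // max(bsize, size - bsize) >= size/2, which can never be below size/4
  printf("good fits found: %zu\n", hits);
  return 0;
}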