clean up segment slice handling

daan 2019-08-24 12:20:32 -07:00
parent cce38bc147
commit 612b2cc9b7
5 changed files with 352 additions and 335 deletions


@@ -259,7 +259,7 @@ static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
mi_segment_t* segment = _mi_ptr_segment(page);
- mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_count));
+ mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
return segment;
}
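For context, `_mi_ptr_segment` (used above but not shown in this diff) relies on segments being allocated at `MI_SEGMENT_SIZE`-aligned addresses, so the owning segment of any interior pointer can be recovered by masking. A minimal sketch of that idea, using the names and constants from this header but not claiming to be the exact mimalloc implementation:

// Sketch: recover the segment that contains `p` by rounding the address down
// to the segment alignment; this works because MI_SEGMENT_SIZE is a power of
// two and every segment starts at a MI_SEGMENT_SIZE-aligned address.
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
  return (mi_segment_t*)((uintptr_t)p & ~((uintptr_t)MI_SEGMENT_SIZE - 1));
}

A NULL pointer masks to NULL, which is why the assertion above tolerates `segment == NULL`.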
@@ -276,11 +276,11 @@ static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const
ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
mi_assert_internal(diff >= 0 && diff < (ptrdiff_t)MI_SEGMENT_SIZE);
uintptr_t idx = (uintptr_t)diff >> MI_SEGMENT_SLICE_SHIFT;
- mi_assert_internal(idx < segment->slice_count);
+ mi_assert_internal(idx < segment->slice_entries);
mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
mi_assert_internal(slice->slice_offset == 0);
- mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_count);
+ mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
return mi_slice_to_page(slice);
}
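The lookup above turns an interior pointer into a slice index (by shifting the offset within the segment) and then steps back to the first slice of the page, since a page may span several slices and only the first slice carries the page meta-data. A sketch of what `mi_slice_first` could look like, under the assumption that `slice_offset` counts slice entries (the actual unit in mimalloc may differ):

// Sketch only: every trailing slice of a page records how far back the page's
// first slice lives; the first slice itself has slice_offset == 0.
static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
  mi_slice_t* first = (mi_slice_t*)slice - slice->slice_offset;  // assumed: offset in entries
  mi_assert_internal(first->slice_offset == 0);
  return first;
}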


@@ -78,7 +78,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_SEGMENT_SHIFT (10 + MI_SEGMENT_SLICE_SHIFT) // 64mb
#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64kb
- #define MI_MEDIUM_PAGE_SHIFT ( 2 + MI_SEGMENT_SLICE_SHIFT) // 512kb
+ #define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512kb
// Derived constants
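The annotated sizes follow from taking the 64kb slice comment at face value, i.e. `MI_SEGMENT_SLICE_SHIFT == 16`: the segment is then 1 << 26 = 64mb and the small page 64kb. Note that the old medium-page definition `(2 + MI_SEGMENT_SLICE_SHIFT)` evaluates to 1 << 18 = 256kb, so the new `(3 + MI_SMALL_PAGE_SHIFT)` is what actually matches the 512kb in the comment. A small standalone check of that arithmetic (the stand-in macro names below are hypothetical, not the mimalloc ones):

#include <assert.h>
#include <stdint.h>

// Stand-ins for the mimalloc constants, assuming a 64 KiB slice (shift 16).
#define SLICE_SHIFT        16                   // 64 KiB slice
#define SEGMENT_SHIFT      (10 + SLICE_SHIFT)   // 64 MiB segment
#define MEDIUM_PAGE_SHIFT  ( 3 + SLICE_SHIFT)   // 512 KiB medium page

int main(void) {
  assert(((uint64_t)1 << SLICE_SHIFT)       ==  64u * 1024);
  assert(((uint64_t)1 << SEGMENT_SHIFT)     ==  64u * 1024 * 1024);
  assert(((uint64_t)1 << MEDIUM_PAGE_SHIFT) == 512u * 1024);
  return 0;
}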
@@ -109,6 +109,9 @@ terms of the MIT license. A copy of the license can be found in the file
#error "define more bins"
#endif
+ // Maximum slice offset (7)
+ #define MI_MAX_SLICE_OFFSET ((MI_MEDIUM_PAGE_SIZE / MI_SEGMENT_SLICE_SIZE) - 1)
typedef uintptr_t mi_encoded_t;
// free lists contain blocks
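The "(7)" in the comment follows directly from the sizes above: a 512kb medium page covers 512kb / 64kb = 8 slices, so a slice belonging to a page can sit at most 8 - 1 = 7 entries behind the page's first slice.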
@@ -206,6 +209,12 @@ typedef enum mi_segment_kind_e {
MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;
+ #define MI_COMMIT_SIZE ((size_t)2 << 20) // OS large page size
+ #if ((MI_SEGMENT_SIZE / MI_COMMIT_SIZE) > MI_INTPTR_SIZE)
+ #error "not enough commit bits to cover the segment size"
+ #endif
typedef mi_page_t mi_slice_t;
// Segments are large allocated memory blocks (2mb on 64 bit) from
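The new `commit_mask` field (added to `mi_segment_t` below) tracks which `MI_COMMIT_SIZE`-sized chunks of the segment have been committed: with the 64mb segment implied by `MI_SEGMENT_SHIFT` and 2mb chunks that is 64/2 = 32 bits, which fits in a `uintptr_t` on 64-bit platforms, and the `#if` guard above is there to fail compilation if the mask would not have enough bits. A hypothetical helper, purely to illustrate the bit-per-chunk idea (not part of this commit):

// Hypothetical sketch: mark the MI_COMMIT_SIZE chunks covered by
// [offset, offset+size) within the segment as committed in the mask.
static void mi_commit_mask_set(mi_segment_t* segment, size_t offset, size_t size) {
  size_t first = offset / MI_COMMIT_SIZE;
  size_t last  = (offset + size - 1) / MI_COMMIT_SIZE;
  for (size_t i = first; i <= last; i++) {
    segment->commit_mask |= ((uintptr_t)1 << i);
  }
}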
@@ -214,18 +223,21 @@ typedef mi_page_t mi_slice_t;
typedef struct mi_segment_s {
struct mi_segment_s* next; // the list of freed segments in the cache
volatile struct mi_segment_s* abandoned_next; // the list of abandoned segments
- size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
- size_t used; // count of pages in use
- size_t segment_size; // for huge pages this may be different from `MI_SEGMENT_SIZE`
- size_t segment_info_size; // space we are using from the first page for segment meta-data and possible guard pages.
- uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
- size_t memid; // id for the os-level memory manager
- bool all_committed;
+ size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
+ size_t used; // count of pages in use
+ uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
+ size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
+ size_t segment_info_slices; // initial slices we are using for segment info and possible guard pages.
+ bool allow_decommit;
+ uintptr_t commit_mask;
// layout like this to optimize access in `mi_free`
mi_segment_kind_t kind;
uintptr_t thread_id;
- size_t slice_count; // slices in this segment (at most MI_SLICES_PER_SEGMENT)
+ size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
mi_slice_t slices[MI_SLICES_PER_SEGMENT];
} mi_segment_t;
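The "layout like this to optimize access in `mi_free`" comment refers to the free path: on every free the segment is recovered from the pointer (by alignment masking, as sketched earlier) and `thread_id` is read immediately to choose between the thread-local fast path and the cross-thread slow path, so the fields consulted on every free sit next to each other. A heavily simplified sketch of that flow, assuming a helper like `_mi_thread_id()` that returns the current thread's id (the real `mi_free` does considerably more):

// Sketch only: illustrates why the fields read on every free are grouped.
void mi_free_sketch(void* p) {
  if (p == NULL) return;
  mi_segment_t* const segment = _mi_ptr_segment(p);
  mi_page_t* const page = _mi_segment_page_of(segment, p);
  if (segment->thread_id == _mi_thread_id()) {
    // fast path: the freeing thread owns the segment; push the block
    // onto the page's local free list.
  }
  else {
    // slow path: another thread owns the segment; push the block onto
    // the page's cross-thread (delayed) free list atomically.
  }
  (void)page; // the real implementation uses the page's free lists here
}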
@@ -371,17 +383,19 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
// Thread Local data
// ------------------------------------------------------
- // Queue of segments
- typedef struct mi_segment_queue_s {
- mi_segment_t* first;
- mi_segment_t* last;
- } mi_segment_queue_t;
+ // A "span" is an available range of slices. The span queues keep
+ // track of slice spans of at most the given `slice_count` (but more than the previous size class).
+ typedef struct mi_span_queue_s {
+ mi_slice_t* first;
+ mi_slice_t* last;
+ size_t slice_count;
+ } mi_span_queue_t;
#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
// Segments thread local data
typedef struct mi_segments_tld_s {
- mi_page_queue_t pages[MI_SEGMENT_BIN_MAX+1]; // free pages inside segments
+ mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments
size_t count; // current number of segments;
size_t peak_count; // peak number of segments
size_t current_size; // current size of all segments
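The `spans` array replaces the per-bin page queues: free spans are binned by their slice count, and per the `MI_SEGMENT_BIN_MAX` comment bin 35 corresponds to a span of `MI_SLICES_PER_SEGMENT` slices (64mb / 64kb = 1024, i.e. a completely free segment). Purely as an illustration of how such queues can be searched (this is not the actual `mi_segment_bin` or allocation code; it assumes free spans record their length in their first slice and are linked through `next`, and the `mi_segments_tld_t` typedef name is assumed):

// Hypothetical illustration: find a free span of at least `needed` slices by
// scanning the span queues from the smallest size class that can still hold
// such a span upward, first-fit within each queue.
static mi_slice_t* mi_span_queue_find(mi_segments_tld_t* tld, size_t needed) {
  for (size_t bin = 0; bin <= MI_SEGMENT_BIN_MAX; bin++) {
    mi_span_queue_t* sq = &tld->spans[bin];
    if (sq->slice_count < needed) continue;       // this size class is too small
    for (mi_slice_t* s = sq->first; s != NULL; s = s->next) {
      if (s->slice_count >= needed) return s;     // first fit within the class
    }
  }
  return NULL; // no suitable span; the caller would allocate a fresh segment
}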