fix overflow of slice count, PR #877

This commit is contained in:
Daan 2024-04-19 10:03:14 -07:00
commit 78418b3d24
5 changed files with 15 additions and 3 deletions

View file

@@ -205,6 +205,14 @@ typedef int32_t mi_ssize_t;
// Maximum slice count (255) for which we can find the page for interior pointers // Maximum slice count (255) for which we can find the page for interior pointers
#define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1) #define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
// we never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
// on 64-bit+ systems we also limit the maximum allocation size such that the slice count fits in 32-bits. (issue #877)
#if PTRDIFF_MAX >= (MI_SEGMENT_SLICE_SIZE * UINT32_MAX)
#define MI_MAX_ALLOC_SIZE (MI_SEGMENT_SLICE_SIZE * (UINT32_MAX-1))
#else
#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX
#endif
// ------------------------------------------------------ // ------------------------------------------------------
// Mimalloc pages contain allocated blocks // Mimalloc pages contain allocated blocks

View file

@@ -875,7 +875,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
// huge allocation? // huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) { if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>) if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) {
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
return NULL; return NULL;
} }

View file

@@ -42,8 +42,8 @@ terms of the MIT license. A copy of the license can be found in the file
#elif defined(__APPLE__) #elif defined(__APPLE__)
#include <AvailabilityMacros.h> #include <AvailabilityMacros.h>
#include <TargetConditionals.h> #include <TargetConditionals.h>
#if !TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR #if !defined(TARGET_OS_OSX) || TARGET_OS_OSX // see issue #879, used to be (!TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR)
#include <mach/vm_statistics.h> #include <mach/vm_statistics.h> // VM_MAKE_TAG, VM_FLAGS_SUPERPAGE_SIZE_2MB, etc.
#endif #endif
#if !defined(MAC_OS_X_VERSION_10_7) #if !defined(MAC_OS_X_VERSION_10_7)
#define MAC_OS_X_VERSION_10_7 1070 #define MAC_OS_X_VERSION_10_7 1070

View file

@@ -820,6 +820,7 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
const size_t extra = align_offset - info_size; const size_t extra = align_offset - info_size;
// recalculate due to potential guard pages // recalculate due to potential guard pages
*psegment_slices = mi_segment_calculate_slices(required + extra, ppre_size, pinfo_slices); *psegment_slices = mi_segment_calculate_slices(required + extra, ppre_size, pinfo_slices);
mi_assert_internal(*psegment_slices > 0 && *psegment_slices <= UINT32_MAX);
} }
const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE; const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
@@ -869,6 +870,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
size_t info_slices; size_t info_slices;
size_t pre_size; size_t pre_size;
size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices); size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices);
mi_assert_internal(segment_slices > 0 && segment_slices <= UINT32_MAX);
// Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little) // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little)
const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems

View file

@@ -295,11 +295,13 @@ int main(void) {
// --------------------------------------------------- // ---------------------------------------------------
// various // various
// --------------------------------------------------- // ---------------------------------------------------
#if !defined(MI_TRACK_ASAN) // realpath may leak with ASAN enabled (as the ASAN allocator intercepts it)
CHECK_BODY("realpath") { CHECK_BODY("realpath") {
char* s = mi_realpath( ".", NULL ); char* s = mi_realpath( ".", NULL );
// printf("realpath: %s\n",s); // printf("realpath: %s\n",s);
mi_free(s); mi_free(s);
}; };
#endif
CHECK("stl_allocator1", test_stl_allocator1()); CHECK("stl_allocator1", test_stl_allocator1());
CHECK("stl_allocator2", test_stl_allocator2()); CHECK("stl_allocator2", test_stl_allocator2());