Merge branch 'dev' into dev-win

daan 2019-08-19 21:23:04 -07:00
commit 05631ebfc4
6 changed files with 33 additions and 14 deletions

include/mimalloc-internal.h

@@ -152,15 +152,15 @@ bool _mi_page_is_valid(mi_page_t* page);
 // Overflow detecting multiply
 #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)
-static inline bool mi_mul_overflow(size_t size, size_t count, size_t* total) {
+static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
 #if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5
 #if (MI_INTPTR_SIZE == 4)
-  return __builtin_umul_overflow(size, count, total);
+  return __builtin_umul_overflow(count, size, total);
 #else
-  return __builtin_umull_overflow(size, count, total);
+  return __builtin_umull_overflow(count, size, total);
 #endif
 #else /* __builtin_umul_overflow is unavailable */
-  *total = size * count;
+  *total = count * size;
   return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
           && size > 0 && (SIZE_MAX / size) < count);
 #endif
 }
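
The parameter swap to (count, size) presumably aligns mi_mul_overflow with its calloc-style call sites; the overflow logic itself is unchanged. As a minimal standalone sketch of the portable fallback path (hypothetical names, not the mimalloc source):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    // If both factors are below sqrt(SIZE_MAX) the product cannot overflow;
    // otherwise verify with a division, exactly as the fallback above does.
    #define MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)

    static bool mul_overflow(size_t count, size_t size, size_t* total) {
      *total = count * size;  // unsigned multiply: wraps, never undefined
      return ((count >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW)
              && size > 0 && (SIZE_MAX / size) < count);
    }

    int main(void) {
      size_t total;
      printf("%d\n", mul_overflow(SIZE_MAX / 2, 3, &total));  // 1: overflows
      printf("%d\n", mul_overflow(1000, 1000, &total));       // 0: total == 1000000
      return 0;
    }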

src/heap.c

@@ -303,14 +303,14 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   mi_assert_internal(heap!=NULL);
   if (from==NULL || from->page_count == 0) return;

-  // unfull all full pages
-  mi_page_t* page = heap->pages[MI_BIN_FULL].first;
+  // unfull all full pages in the `from` heap
+  mi_page_t* page = from->pages[MI_BIN_FULL].first;
   while (page != NULL) {
     mi_page_t* next = page->next;
     _mi_page_unfull(page);
     page = next;
   }
-  mi_assert_internal(heap->pages[MI_BIN_FULL].first == NULL);
+  mi_assert_internal(from->pages[MI_BIN_FULL].first == NULL);

   // free outstanding thread delayed free blocks
   _mi_heap_delayed_free(from);
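
Two things matter here: the loop now walks the `from` heap's full-page queue rather than the target heap's (the actual bug, since absorbing a heap never unfulled its pages), and it saves `next` before calling _mi_page_unfull, because unfulling unlinks the page being visited. That second pattern generalizes to any traversal whose visitor may unlink the current node; a generic sketch with hypothetical types:

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct node_s {
      int value;
      struct node_s* next;
    } node_t;

    // Visit every node even when visit() may unlink or free the current one:
    // capture `next` before calling, exactly as the loop above does.
    static void for_each_destructive(node_t* head, void (*visit)(node_t*)) {
      for (node_t* n = head; n != NULL; ) {
        node_t* next = n->next;  // save before `n` may become invalid
        visit(n);
        n = next;
      }
    }

    static void free_node(node_t* n) { free(n); }

    int main(void) {
      node_t* head = NULL;
      for (int i = 0; i < 3; i++) {
        node_t* n = malloc(sizeof(node_t));
        n->value = i; n->next = head; head = n;
      }
      for_each_destructive(head, free_node);  // safe despite freeing nodes
      return 0;
    }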

src/init.c

@@ -106,14 +106,14 @@ mi_heap_t _mi_heap_main = {
   MI_SMALL_PAGES_EMPTY,
   MI_PAGE_QUEUES_EMPTY,
   NULL,
-  0,
-  0,
+  0,                // thread id
 #if MI_INTPTR_SIZE==8   // the cookie of the main heap can be fixed (unlike page cookies that need to be secure!)
   0xCDCDCDCDCDCDCDCDUL,
 #else
   0xCDCDCDCDUL,
 #endif
-  0,
+  0,                // random
+  0,                // page count
   false             // can reclaim
 };
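
The added comments track the positional initializer by hand. A common alternative is C99 designated initializers, which name each field and cannot drift when a field such as page_count is inserted later; a hedged sketch with hypothetical stand-in fields (mimalloc may well avoid this style for older-MSVC compatibility):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    // Hypothetical stand-in for the tail of mi_heap_t; field names assumed.
    typedef struct demo_heap_s {
      uintptr_t thread_id;
      uintptr_t cookie;
      uintptr_t random;
      size_t    page_count;
      bool      no_reclaim;
    } demo_heap_t;

    // Each initializer names its field, so no "// thread id" bookkeeping
    // comment is needed and reordering or inserting fields stays safe.
    static demo_heap_t demo_heap_main = {
      .thread_id  = 0,
      .cookie     = 0xCDCDCDCDUL,
      .random     = 0,
      .page_count = 0,
      .no_reclaim = false,
    };

    int main(void) {
      printf("cookie: %lx\n", (unsigned long)demo_heap_main.cookie);
      return 0;
    }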

src/memory.c

@@ -128,6 +128,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   size_t mask = mi_region_block_mask(blocks,bitidx);
   mi_assert_internal(mask != 0);
   mi_assert_internal((mask & mi_atomic_read(&region->map)) == mask);
+  mi_assert_internal(&regions[idx] == region);

   // ensure the region is reserved
   void* start = mi_atomic_read_ptr(&region->start);
@@ -142,6 +143,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
     } while (!mi_atomic_compare_exchange(&region->map, map & ~mask, map));
     return false;
   }
+  Sleep(10);

   // set the newly allocated region
   if (mi_atomic_compare_exchange_ptr(&region->start, start, NULL)) {
@@ -149,9 +151,23 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
     mi_atomic_increment(&regions_count);
   }
   else {
-    // failed, another thread allocated just before us, free our allocated memory
-    // TODO: should we keep the allocated memory and assign it to some other region?
-    _mi_os_free(start, MI_REGION_SIZE, tld->stats);
+    // failed, another thread allocated just before us!
+    // we assign it to a later slot instead (up to 4 tries).
+    // note: we don't need to increment the region count, this will happen on another allocation
+    for(size_t i = 1; i <= 4 && idx + i < MI_REGION_MAX; i++) {
+      void* s = mi_atomic_read_ptr(&regions[idx+i].start);
+      if (s == NULL) { // quick test
+        if (mi_atomic_compare_exchange_ptr(&regions[idx+i].start, start, s)) {
+          start = NULL;
+          break;
+        }
+      }
+    }
+    if (start != NULL) {
+      // free it if we didn't succeed to save it to some other region
+      _mi_os_free(start, MI_REGION_SIZE, tld->stats);
+    }
+    // and continue with the memory at our index
     start = mi_atomic_read_ptr(&region->start);
   }
 }
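
The rewritten else-branch resolves a lost install race without discarding freshly mapped memory: the loser tries to hand its MI_REGION_SIZE block to one of the next four region slots via compare-and-swap, and unmaps it only if all of them are taken. (The added Sleep(10) reads like a temporary Windows-only aid that widens the race window so this path gets exercised; it presumably should not survive into a release build.) A hedged sketch of the donate-or-free pattern, using C11 atomics in place of the mi_atomic_* wrappers:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define SLOT_MAX 8  // stand-in for MI_REGION_MAX

    // Hypothetical stand-in for the regions[] array: each slot owns one block.
    static _Atomic(void*) slot_start[SLOT_MAX];

    // We allocated `block` for slot `idx` but lost the install race.
    // Try to donate it to one of the next 4 empty slots; free it only
    // if every candidate is already taken.
    static void donate_or_free(size_t idx, void* block) {
      for (size_t i = 1; i <= 4 && idx + i < SLOT_MAX; i++) {
        if (atomic_load(&slot_start[idx + i]) != NULL) continue;  // quick test
        void* expected = NULL;
        if (atomic_compare_exchange_strong(&slot_start[idx + i], &expected, block)) {
          return;  // a later slot adopted the block; nothing to free
        }
      }
      free(block);  // no taker within 4 slots: release the memory
    }

    int main(void) {
      donate_or_free(0, malloc(64));  // lands in slot 1 (first empty candidate)
      return 0;
    }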

src/os.c

@@ -22,6 +22,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #else
 #include <sys/mman.h>   // mmap
 #include <unistd.h>     // sysconf
+#if defined(__linux__)
+#include <linux/mman.h> // linux mmap flags
+#endif
 #if defined(__APPLE__)
 #include <mach/vm_statistics.h>
 #endif
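
<linux/mman.h> exposes Linux-specific mmap flags that <sys/mman.h> may not define; MAP_HUGETLB and the MAP_HUGE_* size hints are the usual motivation. A hedged illustration of how such a flag is typically used with a fallback (illustrative only, not the code os.c gains in this commit):

    #include <stddef.h>
    #include <sys/mman.h>
    #if defined(__linux__)
    #include <linux/mman.h>   // extra mmap flags, e.g. MAP_HUGETLB
    #endif

    // Request a huge-page mapping when the flag is available, falling back
    // to a regular anonymous mapping otherwise.
    static void* alloc_pages(size_t size) {
      void* p = MAP_FAILED;
    #if defined(__linux__) && defined(MAP_HUGETLB)
      p = mmap(NULL, size, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    #endif
      if (p == MAP_FAILED) {  // huge pages unavailable or not configured
        p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      }
      return (p == MAP_FAILED) ? NULL : p;
    }

    int main(void) {
      void* p = alloc_pages(2 * 1024 * 1024);  // one typical huge page worth
      return (p == NULL);
    }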

src/page.c

@@ -745,7 +745,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
   // huge allocation?
   mi_page_t* page;
   if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) {
-    if (mi_unlikely(size >= (SIZE_MAX - MI_MAX_ALIGN_SIZE))) {
+    if (mi_unlikely(size > PTRDIFF_MAX)) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
       page = NULL;
     }
     else {
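
The new bound is far stricter than the old SIZE_MAX - MI_MAX_ALIGN_SIZE check: per the linked glibc advisory, an object larger than PTRDIFF_MAX breaks pointer subtraction, since the difference of its end and start pointers no longer fits in ptrdiff_t, so no conforming caller could use such a block anyway. A minimal illustration of the same guard around plain malloc (hypothetical wrapper):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Reject requests above PTRDIFF_MAX before allocating; see the
    // glibc advisory referenced in the diff above.
    static void* checked_malloc(size_t size) {
      if (size > (size_t)PTRDIFF_MAX) return NULL;
      return malloc(size);
    }

    int main(void) {
      void* p = checked_malloc((size_t)PTRDIFF_MAX + 1);  // rejected up front
      printf("%p\n", p);  // prints a null pointer
      return 0;
    }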