mirror of https://github.com/microsoft/mimalloc.git
synced 2025-07-06 19:38:41 +03:00

merge with dev branch

commit 8cb36c62e6
11 changed files with 118 additions and 80 deletions
src/alloc-override-osx.c
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include "mimalloc.h"
 #include "mimalloc-internal.h"
 
-#if defined(MI_MALLOC_OVERRIDE)
+#if defined(MI_MALLOC_OVERRIDE)
 
 #if !defined(__APPLE__)
 #error "this file should only be included on macOS"
@@ -21,6 +21,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 #include <AvailabilityMacros.h>
 #include <malloc/malloc.h>
+#include <string.h>  // memset
 
 #if defined(MAC_OS_X_VERSION_10_6) && \
     MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
@@ -65,7 +66,7 @@ static void zone_destroy(malloc_zone_t* zone) {
   // todo: ignore for now?
 }
 
-static size_t zone_batch_malloc(malloc_zone_t* zone, size_t size, void** ps, size_t count) {
+static unsigned zone_batch_malloc(malloc_zone_t* zone, size_t size, void** ps, unsigned count) {
   size_t i;
   for (i = 0; i < count; i++) {
     ps[i] = zone_malloc(zone, size);
@@ -74,7 +75,7 @@ static size_t zone_batch_malloc(malloc_zone_t* zone, size_t size, void** ps, siz
   return i;
 }
 
-static void zone_batch_free(malloc_zone_t* zone, void** ps, size_t count) {
+static void zone_batch_free(malloc_zone_t* zone, void** ps, unsigned count) {
   for(size_t i = 0; i < count; i++) {
     zone_free(zone, ps[i]);
     ps[i] = NULL;
@@ -149,7 +150,7 @@ static malloc_zone_t* mi_get_default_zone()
 {
   // The first returned zone is the real default
   malloc_zone_t** zones = NULL;
-  size_t count = 0;
+  unsigned count = 0;
   kern_return_t ret = malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &count);
   if (ret == KERN_SUCCESS && count > 0) {
     return zones[0];
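The `size_t` to `unsigned` changes above match the zone callback prototypes in macOS's <malloc/malloc.h>: `batch_malloc` returns `unsigned` and takes an `unsigned` count, and `malloc_get_all_zones` reports the zone count through an `unsigned*`. A minimal sketch of how such a zone is wired up (hypothetical `my_*` names, not mimalloc's code; a production zone must fill in every callback):

#include <malloc/malloc.h>
#include <stdlib.h>
#include <string.h>

static size_t my_size(malloc_zone_t* zone, const void* p) {
  (void)zone; return malloc_size(p);  // must return 0 for pointers we don't own
}
static void* my_malloc(malloc_zone_t* zone, size_t size) {
  (void)zone; return malloc(size);    // mimalloc forwards to mi_malloc here
}
static void my_free(malloc_zone_t* zone, void* p) {
  (void)zone; free(p);
}
// note the `unsigned` count and return type, as in <malloc/malloc.h>
static unsigned my_batch_malloc(malloc_zone_t* zone, size_t size, void** ps, unsigned count) {
  unsigned i;
  for (i = 0; i < count; i++) {
    ps[i] = my_malloc(zone, size);
    if (ps[i] == NULL) break;         // report how many allocations succeeded
  }
  return i;
}

static malloc_zone_t my_zone;

void my_zone_register(void) {
  memset(&my_zone, 0, sizeof(my_zone));
  my_zone.size         = my_size;
  my_zone.malloc       = my_malloc;
  my_zone.free         = my_free;
  my_zone.batch_malloc = my_batch_malloc;
  my_zone.zone_name    = "my-zone";
  malloc_zone_register(&my_zone);     // from then on the zone can serve allocations
}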
src/alloc-override.c
@@ -25,7 +25,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__MACH__)
   // use aliasing to alias the exported function to one of our `mi_` functions
-  #define MI_FORWARD(fun)      __attribute__((alias(#fun), used, visibility("default")));
+  #define MI_FORWARD(fun)      __attribute__((alias(#fun), used, visibility("default")))
   #define MI_FORWARD1(fun,x)   MI_FORWARD(fun)
   #define MI_FORWARD2(fun,x,y) MI_FORWARD(fun)
   #define MI_FORWARD0(fun,x)   MI_FORWARD(fun)
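Dropping the semicolon from `MI_FORWARD` moves it to each use site, so the forwarding declarations read as normal statements and still parse on platforms where the macro expands differently (such as the interpose path below). A small sketch of the alias mechanism under GCC/Clang on ELF targets, with a toy bump allocator standing in for the real `mi_malloc`/`mi_free`, since the alias target must be defined in the same translation unit:

#include <stddef.h>

// toy bump allocator standing in for mimalloc's real mi_malloc/mi_free
static unsigned char arena[1 << 20];
static size_t used = 0;

void* mi_malloc(size_t size) {
  size = (size + 15) & ~(size_t)15;            // keep 16-byte alignment
  if (used + size > sizeof(arena)) return NULL;
  void* p = &arena[used];
  used += size;
  return p;
}
void mi_free(void* p) { (void)p; }             // toy: never reclaims

// no trailing semicolon in the macro; each declaration below supplies its own
#define MI_FORWARD(fun) __attribute__((alias(#fun), used, visibility("default")))

void* malloc(size_t size) MI_FORWARD(mi_malloc);
void  free(void* p)       MI_FORWARD(mi_free);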
@@ -50,14 +50,20 @@ terms of the MIT license. A copy of the license can be found in the file
   MI_INTERPOSE_MI(malloc),
   MI_INTERPOSE_MI(calloc),
   MI_INTERPOSE_MI(realloc),
-  MI_INTERPOSE_MI(free)
+  MI_INTERPOSE_MI(free),
+  MI_INTERPOSE_MI(strdup),
+  MI_INTERPOSE_MI(strndup),
+  MI_INTERPOSE_MI(realpath)
 };
 #else
 // On all other systems forward to our API
-void* malloc(size_t size)              mi_attr_noexcept  MI_FORWARD1(mi_malloc, size)
-void* calloc(size_t size, size_t n)    mi_attr_noexcept  MI_FORWARD2(mi_calloc, size, n)
-void* realloc(void* p, size_t newsize) mi_attr_noexcept  MI_FORWARD2(mi_realloc, p, newsize)
-void  free(void* p)                    mi_attr_noexcept  MI_FORWARD0(mi_free, p)
+void* malloc(size_t size)              mi_attr_noexcept  MI_FORWARD1(mi_malloc, size);
+void* calloc(size_t size, size_t n)    mi_attr_noexcept  MI_FORWARD2(mi_calloc, size, n);
+void* realloc(void* p, size_t newsize) mi_attr_noexcept  MI_FORWARD2(mi_realloc, p, newsize);
+void  free(void* p)                    mi_attr_noexcept  MI_FORWARD0(mi_free, p);
+char* strdup(const char* s)            MI_FORWARD1(mi_strdup, s);
+char* strndup(const char* s, size_t n) MI_FORWARD2(mi_strndup, s, n);
+char* realpath(const char* fname, char* resolved_name) MI_FORWARD2(mi_realpath, fname, resolved_name);
 #endif
 
 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__MACH__)
@@ -75,14 +81,14 @@ terms of the MIT license. A copy of the license can be found in the file
   // see <https://en.cppreference.com/w/cpp/memory/new/operator_new>
   // ------------------------------------------------------
   #include <new>
-  void operator delete(void* p) noexcept              MI_FORWARD0(mi_free,p)
-  void operator delete[](void* p) noexcept            MI_FORWARD0(mi_free,p)
-  void* operator new(std::size_t n) noexcept(false)   MI_FORWARD1(mi_malloc,n)
-  void* operator new[](std::size_t n) noexcept(false) MI_FORWARD1(mi_malloc,n)
+  void operator delete(void* p) noexcept              MI_FORWARD0(mi_free,p);
+  void operator delete[](void* p) noexcept            MI_FORWARD0(mi_free,p);
+  void* operator new(std::size_t n) noexcept(false)   MI_FORWARD1(mi_malloc,n);
+  void* operator new[](std::size_t n) noexcept(false) MI_FORWARD1(mi_malloc,n);
 
   #if (__cplusplus >= 201703L)
-  void* operator new( std::size_t n, std::align_val_t align) noexcept(false)   MI_FORWARD2(mi_malloc_aligned,n,align)
-  void* operator new[]( std::size_t n, std::align_val_t align) noexcept(false) MI_FORWARD2(mi_malloc_aligned,n,align)
+  void* operator new( std::size_t n, std::align_val_t align) noexcept(false)   MI_FORWARD2(mi_malloc_aligned,n,align);
+  void* operator new[]( std::size_t n, std::align_val_t align) noexcept(false) MI_FORWARD2(mi_malloc_aligned,n,align);
   #endif
 #else
   // ------------------------------------------------------
@@ -91,16 +97,16 @@ terms of the MIT license. A copy of the license can be found in the file
   // used by GCC and CLang).
   // See <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling>
   // ------------------------------------------------------
-  void _ZdlPv(void* p) MI_FORWARD0(mi_free,p) // delete
-  void _ZdaPv(void* p) MI_FORWARD0(mi_free,p) // delete[]
+  void _ZdlPv(void* p) MI_FORWARD0(mi_free,p); // delete
+  void _ZdaPv(void* p) MI_FORWARD0(mi_free,p); // delete[]
   #if (MI_INTPTR_SIZE==8)
-  void* _Znwm(uint64_t n) MI_FORWARD1(mi_malloc,n) // new 64-bit
-  void* _Znam(uint64_t n) MI_FORWARD1(mi_malloc,n) // new[] 64-bit
+  void* _Znwm(uint64_t n) MI_FORWARD1(mi_malloc,n); // new 64-bit
+  void* _Znam(uint64_t n) MI_FORWARD1(mi_malloc,n); // new[] 64-bit
   void* _Znwmm(uint64_t n, uint64_t align) { return mi_malloc_aligned(n,align); } // aligned new 64-bit
   void* _Znamm(uint64_t n, uint64_t align) { return mi_malloc_aligned(n,align); } // aligned new[] 64-bit
   #elif (MI_INTPTR_SIZE==4)
-  void* _Znwj(uint32_t n) MI_FORWARD1(mi_malloc,n) // new 32-bit
-  void* _Znaj(uint32_t n) MI_FORWARD1(mi_malloc,n) // new[] 32-bit
+  void* _Znwj(uint32_t n) MI_FORWARD1(mi_malloc,n); // new 32-bit
+  void* _Znaj(uint32_t n) MI_FORWARD1(mi_malloc,n); // new[] 32-bit
   void* _Znwjj(uint32_t n, uint32_t align) { return mi_malloc_aligned(n,align); } // aligned new 32-bit
   void* _Znajj(uint32_t n, uint32_t align) { return mi_malloc_aligned(n,align); } // aligned new[] 32-bit
   #else
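The `_Znwm`/`_ZdlPv` family are the Itanium C++ ABI mangled names of `operator new` and `operator delete`, which is what lets plain C code override them when `<new>` is unavailable. A sketch of the idea on a 64-bit Itanium-ABI platform, forwarding to the system allocator purely for illustration (a real replacement must also handle the failure path that `operator new` is expected to take):

#include <stdint.h>
#include <stdlib.h>

void* _Znwm(uint64_t n) { return malloc(n); }  // operator new(size_t)
void* _Znam(uint64_t n) { return malloc(n); }  // operator new[](size_t)
void  _ZdlPv(void* p)   { free(p); }           // operator delete(void*)
void  _ZdaPv(void* p)   { free(p); }           // operator delete[](void*)

Linked into a C++ program, these C definitions satisfy calls to new/delete before the standard library's versions are found.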
@@ -126,10 +132,11 @@ extern "C" {
   #define ENOMEM 12
 #endif
 
-void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize)
-size_t malloc_size(void* p)             MI_FORWARD1(mi_usable_size,p)
-size_t malloc_usable_size(void *p)      MI_FORWARD1(mi_usable_size,p)
-void   cfree(void* p)                   MI_FORWARD0(mi_free, p)
+void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize);
+size_t malloc_size(void* p)             MI_FORWARD1(mi_usable_size,p);
+size_t malloc_usable_size(void *p)      MI_FORWARD1(mi_usable_size,p);
+void   cfree(void* p)                   MI_FORWARD0(mi_free, p);
+
 
 int posix_memalign(void** p, size_t alignment, size_t size) {
   // TODO: the spec says we should return EINVAL also if alignment is not a power of 2.
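The TODO in `posix_memalign` refers to POSIX's requirement that EINVAL be returned when the alignment is not a power of two or not a multiple of `sizeof(void*)`. A possible check (a sketch with a hypothetical helper name, not the committed code):

#include <stddef.h>

static int mi_is_valid_alignment(size_t alignment) {
  return alignment != 0
      && (alignment & (alignment - 1)) == 0   // power of two
      && (alignment % sizeof(void*)) == 0;    // multiple of sizeof(void*), per POSIX
}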
@@ -169,12 +176,12 @@ void* reallocarray( void* p, size_t count, size_t size ) { // BSD
 }
 
 #if defined(__GLIBC__) && defined(__linux__)
-  // forward __libc interface (needed for redhat linux)
-  void* __libc_malloc(size_t size)                MI_FORWARD1(mi_malloc,size)
-  void* __libc_calloc(size_t count, size_t size)  MI_FORWARD2(mi_calloc,count,size)
-  void* __libc_realloc(void* p, size_t size)      MI_FORWARD2(mi_realloc,p,size)
-  void  __libc_free(void* p)                      MI_FORWARD0(mi_free,p)
-  void  __libc_cfree(void* p)                     MI_FORWARD0(mi_free,p)
+  // forward __libc interface (needed for glibc-based Linux distributions)
+  void* __libc_malloc(size_t size)                MI_FORWARD1(mi_malloc,size);
+  void* __libc_calloc(size_t count, size_t size)  MI_FORWARD2(mi_calloc,count,size);
+  void* __libc_realloc(void* p, size_t size)      MI_FORWARD2(mi_realloc,p,size);
+  void  __libc_free(void* p)                      MI_FORWARD0(mi_free,p);
+  void  __libc_cfree(void* p)                     MI_FORWARD0(mi_free,p);
 
   void* __libc_memalign(size_t alignment, size_t size) {
     return memalign(alignment,size);
10 src/init.c
@@ -103,7 +103,7 @@ mi_heap_t _mi_heap_main = {
   NULL,
   0,
   0,
-  0,
+  0xCDCDCDCDCDCDCDL,
   0,
   false   // can reclaim
 };
@@ -356,12 +356,18 @@ static void mi_process_done(void);
 void mi_process_init(void) mi_attr_noexcept {
   // ensure we are called once
   if (_mi_process_is_initialized) return;
+  // access _mi_heap_default before setting _mi_process_is_initialized to ensure
+  // that the TLS slot is allocated without getting into recursion on macOS
+  // when using dynamic linking with interpose.
+  mi_heap_t* h = _mi_heap_default;
   _mi_process_is_initialized = true;
 
   _mi_heap_main.thread_id = _mi_thread_id();
   _mi_verbose_message("process init: 0x%zx\n", _mi_heap_main.thread_id);
-  uintptr_t random = _mi_random_init(_mi_heap_main.thread_id);
+  uintptr_t random = _mi_random_init(_mi_heap_main.thread_id) ^ (uintptr_t)h;
+  #ifndef __APPLE__
   _mi_heap_main.cookie = (uintptr_t)&_mi_heap_main ^ random;
+  #endif
   _mi_heap_main.random = _mi_random_shuffle(random);
   #if (MI_DEBUG)
   _mi_verbose_message("debug level : %d\n", MI_DEBUG);
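XOR-ing the address of the default heap into the seed folds some ASLR entropy into the random state, and reading `_mi_heap_default` first also forces the TLS slot to be allocated before `_mi_process_is_initialized` is set. A sketch of that seed mixing, with a splitmix64-style finalizer as a stand-in for mimalloc's own `_mi_random_shuffle`:

#include <stdint.h>

// splitmix64 finalizer: a stand-in mixer, not mimalloc's _mi_random_shuffle
static uint64_t mix64(uint64_t x) {
  x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
  x ^= x >> 27; x *= 0x94d049bb133111ebULL;
  x ^= x >> 31;
  return x;
}

uint64_t seed_with_heap_address(uint64_t seed, void* heap_default) {
  // folding in a per-process address adds ASLR entropy to the seed
  return mix64(seed ^ (uint64_t)(uintptr_t)heap_default);
}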
11 src/os.c
@@ -199,6 +199,11 @@ static void* mi_os_mem_alloc(void* addr, size_t size, bool commit, int extra_fla
     flags |= MAP_FIXED;
     #endif
   }
+  int pflags = (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE);
+  #if defined(PROT_MAX)
+  pflags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
+  #endif
+
   if (large_os_page_size > 0 && use_large_os_page(size, 0) && ((uintptr_t)addr % large_os_page_size) == 0) {
     int lflags = flags;
     #ifdef MAP_ALIGNED_SUPER
@@ -212,12 +217,12 @@ static void* mi_os_mem_alloc(void* addr, size_t size, bool commit, int extra_fla
     #endif
     if (lflags != flags) {
       // try large page allocation
-      p = mmap(addr, size, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE), lflags, -1, 0);
-      if (p == MAP_FAILED) p = NULL;
+      p = mmap(addr, size, pflags, lflags, -1, 0);
+      if (p == MAP_FAILED) p = NULL; // fall back to regular mmap if large is exhausted or no permission
     }
   }
   if (p == NULL) {
-    p = mmap(addr, size, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE), flags, -1, 0);
+    p = mmap(addr, size, pflags, flags, -1, 0);
     if (p == MAP_FAILED) p = NULL;
   }
   if (addr != NULL && p != addr) {
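`PROT_MAX` is a FreeBSD `mmap` extension that caps the protections a mapping may ever be raised to with `mprotect`; guarding it with `#if defined(PROT_MAX)` keeps the call portable, and hoisting the protection flags into `pflags` avoids repeating the `commit` conditional at each call site. The pattern in isolation (a hypothetical helper, assuming a POSIX `mmap` with MAP_ANONYMOUS):

#include <stddef.h>
#include <sys/mman.h>

void* reserve_memory(size_t size, int commit) {
  int pflags = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
#if defined(PROT_MAX)
  pflags |= PROT_MAX(PROT_READ | PROT_WRITE);  // FreeBSD: never allow PROT_EXEC later
#endif
  void* p = mmap(NULL, size, pflags, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return (p == MAP_FAILED) ? NULL : p;
}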
42 src/page.c
@@ -73,10 +73,10 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(page->block_size > 0);
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);
 
   mi_segment_t* segment = _mi_page_segment(page);
   uint8_t* start = _mi_page_start(segment,page,NULL);
-  mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
+  mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL));
   //mi_assert_internal(start + page->capacity*page->block_size == page->top);
 
   mi_assert_internal(mi_page_list_is_valid(page,page->free));
@@ -98,7 +98,7 @@ bool _mi_page_is_valid(mi_page_t* page) {
   mi_assert_internal(page->cookie != 0);
   if (page->heap!=NULL) {
     mi_segment_t* segment = _mi_page_segment(page);
-    mi_assert_internal(segment->thread_id == page->heap->thread_id);
+    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == page->heap->thread_id);
     mi_page_queue_t* pq = mi_page_queue_of(page);
     mi_assert_internal(mi_page_queue_contains(pq, page));
     mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_SIZE_MAX || page->flags.in_full);
@@ -172,19 +172,19 @@ void _mi_page_free_collect(mi_page_t* page) {
 
   // free the local free list
   if (page->local_free != NULL) {
-    if (mi_likely(page->free == NULL)) {
+    if (mi_likely(page->free == NULL)) {
       // usual caes
       page->free = page->local_free;
     }
     else {
       mi_block_t* tail = page->free;
       mi_block_t* next;
-      while ((next = mi_block_next(page, tail)) != NULL) {
-        tail = next;
+      while ((next = mi_block_next(page, tail)) != NULL) {
+        tail = next;
       }
       mi_block_set_next(page, tail, page->local_free);
     }
-    page->local_free = NULL;
+    page->local_free = NULL;
   }
   // and the thread free list
   if (page->thread_free.head != 0) { // quick test to avoid an atomic operation
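The whitespace-only hunk above covers the collect logic: the local free list is appended to the regular free list, with a fast path when the latter is empty. The same logic on a toy singly-linked list (a sketch, not mimalloc's block encoding):

#include <stddef.h>

typedef struct block_s { struct block_s* next; } block_t;

static void free_collect(block_t** free_list, block_t** local_free) {
  if (*local_free == NULL) return;
  if (*free_list == NULL) {
    *free_list = *local_free;       // usual case: just move the list over
  }
  else {
    block_t* tail = *free_list;     // otherwise walk to the tail and append
    while (tail->next != NULL) tail = tail->next;
    tail->next = *local_free;
  }
  *local_free = NULL;
}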
@@ -380,7 +380,7 @@ void _mi_page_retire(mi_page_t* page) {
 
 /* -----------------------------------------------------------
   Initialize the initial free list in a page.
-  In secure mode we initialize a randomized list by
+  In secure mode we initialize a randomized list by
   alternating between slices.
 ----------------------------------------------------------- */
 
@@ -393,7 +393,7 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
   UNUSED(stats);
   void* page_area = _mi_page_start(_mi_page_segment(page), page, NULL );
   size_t bsize = page->block_size;
-  mi_block_t* start = mi_page_block_at(page, page_area, page->capacity);
+  mi_block_t* start = mi_page_block_at(page, page_area, page->capacity);
   if (extend < MI_MIN_SLICES || !mi_option_is_enabled(mi_option_secure)) {
     // initialize a sequential free list
     mi_block_t* end = mi_page_block_at(page, page_area, page->capacity + extend - 1);
@@ -411,7 +411,7 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
   // set up `slice_count` slices to alternate between
   size_t shift = MI_MAX_SLICE_SHIFT;
   while ((extend >> shift) == 0) {
-    shift--;
+    shift--;
   }
   size_t slice_count = (size_t)1U << shift;
   size_t slice_extend = extend / slice_count;
@@ -419,12 +419,12 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
   mi_block_t* blocks[MI_MAX_SLICES];   // current start of the slice
   size_t      counts[MI_MAX_SLICES];   // available objects in the slice
   for (size_t i = 0; i < slice_count; i++) {
-    blocks[i] = mi_page_block_at(page, page_area, page->capacity + i*slice_extend);
+    blocks[i] = mi_page_block_at(page, page_area, page->capacity + i*slice_extend);
     counts[i] = slice_extend;
   }
   counts[slice_count-1] += (extend % slice_count);  // final slice holds the modulus too (todo: distribute evenly?)
 
-  // and initialize the free list by randomly threading through them
+  // and initialize the free list by randomly threading through them
   // set up first element
   size_t current = _mi_heap_random(heap) % slice_count;
   counts[current]--;
@@ -436,16 +436,16 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
     size_t round = i%MI_INTPTR_SIZE;
     if (round == 0) rnd = _mi_random_shuffle(rnd);
     // select a random next slice index
-    size_t next = ((rnd >> 8*round) & (slice_count-1));
+    size_t next = ((rnd >> 8*round) & (slice_count-1));
     while (counts[next]==0) {                            // ensure it still has space
       next++;
       if (next==slice_count) next = 0;
     }
     // and link the current block to it
-    counts[next]--;
+    counts[next]--;
     mi_block_t* block = blocks[current];
     blocks[current] = (mi_block_t*)((uint8_t*)block + bsize);  // bump to the following block
-    mi_block_set_next(page, block, blocks[next]);   // and set next; note: we may have `current == next`
+    mi_block_set_next(page, block, blocks[next]);   // and set next; note: we may have `current == next`
     current = next;
   }
   mi_block_set_next( page, blocks[current], NULL);  // end of the list
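In secure mode the free list is built by splitting the `extend` new blocks into a power-of-two number of slices and repeatedly hopping to a randomly chosen slice, linking one block at a time. A standalone sketch with block indices instead of block pointers and `rand()` as a toy RNG (not mimalloc's code):

#include <stdlib.h>

#define MAX_SLICES 64

// next_idx[b] receives the successor of block b; returns the head block.
// Requires: slice_count is a power of two, <= MAX_SLICES, and
// extend >= slice_count so every slice starts non-empty.
static size_t thread_randomized(size_t extend, size_t slice_count, size_t next_idx[]) {
  size_t blocks[MAX_SLICES];                 // current block index per slice
  size_t counts[MAX_SLICES];                 // blocks still unlinked per slice
  size_t slice_extend = extend / slice_count;
  for (size_t i = 0; i < slice_count; i++) {
    blocks[i] = i * slice_extend;
    counts[i] = slice_extend;
  }
  counts[slice_count-1] += extend % slice_count;   // last slice takes the remainder

  size_t current = (size_t)rand() % slice_count;   // first element
  counts[current]--;
  size_t head = blocks[current];
  for (size_t i = 1; i < extend; i++) {
    size_t next = (size_t)rand() & (slice_count - 1);  // random slice
    while (counts[next] == 0) {              // skip exhausted slices
      next++;
      if (next == slice_count) next = 0;
    }
    counts[next]--;
    size_t block = blocks[current];
    blocks[current] = block + 1;             // bump within the current slice
    next_idx[block] = blocks[next];          // link current block to the next slice
    current = next;                          // works even when current == next
  }
  next_idx[blocks[current]] = (size_t)-1;    // terminate the list
  return head;
}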
@@ -462,7 +462,7 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
 
 #define MI_MAX_EXTEND_SIZE    (4*1024)      // heuristic, one OS page seems to work well.
 #if MI_SECURE
-#define MI_MIN_EXTEND         (8*MI_SECURE) // extend at least by this many
+#define MI_MIN_EXTEND         (8*MI_SECURE) // extend at least by this many
 #else
 #define MI_MIN_EXTEND         (1)
 #endif
@@ -490,10 +490,10 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
   mi_stat_increase( stats->pages_extended, 1);
 
   // calculate the extend count
-  size_t extend = page->reserved - page->capacity;
+  size_t extend = page->reserved - page->capacity;
   size_t max_extend = MI_MAX_EXTEND_SIZE/page->block_size;
   if (max_extend < MI_MIN_EXTEND) max_extend = MI_MIN_EXTEND;
 
   if (extend > max_extend) {
     // ensure we don't touch memory beyond the page to reduce page commit.
     // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
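The extend count caps how many fresh blocks are appended to the free list per call, trading allocation speed against touched (committed) memory; for example, with 16-byte blocks at most 4096/16 = 256 blocks are added at once. The computation in isolation (a sketch with hypothetical parameter names):

#include <stddef.h>

static size_t calc_extend(size_t reserved, size_t capacity, size_t block_size,
                          size_t max_extend_size /* MI_MAX_EXTEND_SIZE */,
                          size_t min_extend      /* MI_MIN_EXTEND */) {
  size_t extend = reserved - capacity;           // blocks not yet made available
  size_t max_extend = max_extend_size / block_size;
  if (max_extend < min_extend) max_extend = min_extend;
  if (extend > max_extend) extend = max_extend;  // don't touch memory beyond this
  return extend;
}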
@@ -514,14 +514,14 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert(page != NULL);
   mi_segment_t* segment = _mi_page_segment(page);
   mi_assert(segment != NULL);
   mi_assert_internal(block_size > 0);
   // set fields
   size_t page_size;
-  _mi_segment_page_start(segment, page, &page_size);
+  _mi_segment_page_start(segment, page, block_size, &page_size);
   page->block_size = block_size;
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
-  page->cookie = _mi_heap_random(heap) | 1;
+  page->cookie = _mi_heap_random(heap) | 1;
 
   mi_assert_internal(page->capacity == 0);
   mi_assert_internal(page->free == NULL);
src/segment.c
@@ -108,16 +108,25 @@ static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment)
   }
 }
 
-// Start of the page available memory
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
+// Start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size)
 {
   size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
   uint8_t* p = (uint8_t*)segment + page->segment_idx*psize;
 
   if (page->segment_idx == 0) {
-    // the first page starts after the segment info (and possible guard page)
-    p += segment->segment_info_size;
-    psize -= segment->segment_info_size;
+    // the first page starts after the segment info (and possible guard page)
+    p += segment->segment_info_size;
+    psize -= segment->segment_info_size;
+    // for small objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
+    if (block_size > 0 && segment->page_kind == MI_PAGE_SMALL) {
+      size_t adjust = block_size - ((uintptr_t)p % block_size);
+      if (adjust < block_size) {
+        p += adjust;
+        psize -= adjust;
+      }
+      mi_assert_internal((uintptr_t)p % block_size == 0);
+    }
   }
   long secure = mi_option_get(mi_option_secure);
   if (secure > 1 || (secure == 1 && page->segment_idx == segment->capacity - 1)) {
@@ -125,7 +134,7 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
     // secure > 1: every page has an os guard page
     psize -= _mi_os_page_size();
   }
-
+
   if (page_size != NULL) *page_size = psize;
   mi_assert_internal(_mi_ptr_page(p) == page);
   mi_assert_internal(_mi_ptr_segment(p) == segment);
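The new alignment code in `_mi_segment_page_start` nudges the first page's start forward so that the first block is `block_size`-aligned (the page begins after the segment info, which is generally not a multiple of the block size). The adjustment in isolation (a sketch mirroring the committed logic):

#include <assert.h>
#include <stdint.h>

static uint8_t* align_page_start(uint8_t* p, size_t* psize, size_t block_size) {
  size_t adjust = block_size - ((uintptr_t)p % block_size);
  if (adjust < block_size) {      // adjust == block_size means already aligned
    p      += adjust;
    *psize -= adjust;
  }
  assert((uintptr_t)p % block_size == 0);
  return p;
}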
@@ -338,7 +347,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
 // Available memory in a page
 static size_t mi_page_size(const mi_page_t* page) {
   size_t psize;
-  _mi_segment_page_start(_mi_page_segment(page), page, &psize);
+  _mi_page_start(_mi_page_segment(page), page, &psize);
   return psize;
 }
 #endif
@@ -422,7 +431,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
   // reset the page memory to reduce memory pressure?
   if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
     size_t psize;
-    uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+    uint8_t* start = _mi_page_start(segment, page, &psize);
     page->is_reset = true;
     if (inuse > 0) {
       _mi_mem_reset(start, psize, stats); // TODO: just `inuse`?