mirror of https://github.com/microsoft/mimalloc.git

commit 632421da3a (parent 03020fbf81)

    fix typos

12 changed files with 59 additions and 62 deletions
@@ -284,7 +284,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   return p;
 }
 
-// allocate in a speficic arena
+// allocate in a specific arena
 static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
                                       bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
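mi_arena_try_alloc_at_id is internal, but the same mechanism is reachable through mimalloc's public arena API: reserving OS memory yields an mi_arena_id_t, and a heap can be pinned to it so its allocations come from that arena only. A minimal usage sketch, assuming the v2.x public API in mimalloc.h (error handling elided):

    #include <mimalloc.h>

    int main(void) {
      // Reserve 64 MiB as an exclusive arena: only heaps created against
      // this arena id will allocate from it.
      mi_arena_id_t arena_id;
      if (mi_reserve_os_memory_ex(64 * 1024 * 1024, true /*commit*/,
                                  false /*allow_large*/, true /*exclusive*/,
                                  &arena_id) != 0) return 1;
      mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
      void* p = mi_heap_malloc(heap, 1024);   // served from the reserved arena
      mi_free(p);
      mi_heap_delete(heap);
      return 0;
    }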
@@ -984,5 +984,3 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv
   if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
   return err;
 }
-
-
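mi_reserve_huge_os_pages is part of the public API (newer code generally prefers mi_reserve_huge_os_pages_interleave or _at). A small usage sketch based on the signature visible in the hunk header above:

    #include <stdio.h>
    #include <mimalloc.h>

    int main(void) {
      // Try to reserve 4 huge OS pages (1 GiB each), waiting at most 2 seconds.
      size_t reserved = 0;
      int err = mi_reserve_huge_os_pages(4, 2.0, &reserved);
      printf("reserved %zu huge page(s), err=%d\n", reserved, err);
      return 0;
    }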
@@ -94,10 +94,10 @@ static mi_option_desc_t options[_mi_option_last] =
   { 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
   { 0, UNINIT, MI_OPTION(disallow_arena_alloc) },     // 1 = do not use arena's for allocation (except if using specific arena id's)
   { 400, UNINIT, MI_OPTION(retry_on_oom) },           // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
 #if defined(MI_VISIT_ABANDONED)
-  { 1, INITIALIZED, MI_OPTION(visit_abandoned) },     // allow visiting heap blocks in abandonded segments; requires taking locks during reclaim.
+  { 1, INITIALIZED, MI_OPTION(visit_abandoned) },     // allow visiting heap blocks in abandoned segments; requires taking locks during reclaim.
 #else
   { 0, UNINIT, MI_OPTION(visit_abandoned) },
 #endif
 };
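Each MI_OPTION entry can be overridden either through the environment (MIMALLOC_ plus the upper-cased option name) or programmatically. A sketch, assuming the enum names generated by MI_OPTION are exposed in mimalloc.h as usual:

    #include <mimalloc.h>

    int main(void) {
      // Equivalent to running with MIMALLOC_RETRY_ON_OOM=0 in the environment:
      mi_option_set(mi_option_retry_on_oom, 0);   // disable the Windows OOM retry
      return (mi_option_get(mi_option_retry_on_oom) == 0) ? 0 : 1;
    }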
@@ -286,7 +286,7 @@ static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop
 // (recursively) invoke malloc again to allocate space for the thread local
 // variables on demand. This is why we use a _mi_preloading test on such
 // platforms. However, C code generator may move the initial thread local address
-// load before the `if` and we therefore split it out in a separate funcion.
+// load before the `if` and we therefore split it out in a separate function.
 static mi_decl_thread bool recurse = false;
 
 static mi_decl_noinline bool mi_recurse_enter_prim(void) {
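The comment describes a re-entrancy guard: message output may itself allocate, which could recurse back into the allocator, so a thread-local flag gates entry, and the flag access lives in a noinline function so the compiler cannot hoist the thread-local address load above the _mi_preloading check. A generic re-creation of the pattern with hypothetical names (C11 _Thread_local):

    #include <stdbool.h>
    #include <stdio.h>

    static _Thread_local bool recurse = false;

    // Isolating the TLS access in its own (ideally noinline) function keeps
    // the compiler from moving the thread-local load before a guarding `if`.
    static bool recurse_enter(void) {
      if (recurse) return false;   // already inside: refuse re-entry
      recurse = true;
      return true;
    }

    static void recurse_exit(void) { recurse = false; }

    static void emit_message(const char* msg) {
      if (!recurse_enter()) return;  // drop the message rather than recurse
      fputs(msg, stderr);            // may itself allocate on some platforms
      recurse_exit();
    }

    int main(void) { emit_message("hello\n"); return 0; }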
@@ -491,7 +491,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
   char* end = buf;
   long value = strtol(buf, &end, 10);
   if (mi_option_has_size_in_kib(desc->option)) {
-    // this option is interpreted in KiB to prevent overflow of `long` for large allocations
+    // this option is interpreted in KiB to prevent overflow of `long` for large allocations
     // (long is 32-bit on 64-bit windows, which allows for 4TiB max.)
     size_t size = (value < 0 ? 0 : (size_t)value);
     bool overflow = false;
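The KiB convention above keeps user-supplied sizes from overflowing `long` (which is 32-bit on 64-bit Windows). A standalone sketch of the same idea, using a hypothetical helper rather than the mimalloc code:

    #include <stdint.h>
    #include <stdlib.h>

    // Parse a decimal value given in KiB and return it in bytes,
    // clamping to SIZE_MAX instead of overflowing.
    static size_t parse_size_in_kib(const char* buf) {
      char* end = NULL;
      long value = strtol(buf, &end, 10);
      size_t size = (value < 0 ? 0 : (size_t)value);   // KiB
      if (size > SIZE_MAX / 1024) return SIZE_MAX;     // would overflow: clamp
      return size * 1024;                              // KiB -> bytes
    }

    int main(void) {
      return (parse_size_in_kib("1024") == 1024 * 1024) ? 0 : 1;
    }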
@@ -763,7 +763,7 @@ bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
 #include <CommonCrypto/CommonRandom.h>
 
 bool _mi_prim_random_buf(void* buf, size_t buf_len) {
-  // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf
+  // We prefer CCRandomGenerateBytes as it returns an error code while arc4random_buf
   // may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
   return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
 }
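The preference for CCRandomGenerateBytes is about error reporting. A sketch of the same choice with an explicit fallback (macOS only; not mimalloc's code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>                       // arc4random_buf
    #include <CommonCrypto/CommonRandom.h>

    static bool random_buf(void* buf, size_t len) {
      // Preferred: reports failure through its return code.
      if (CCRandomGenerateBytes(buf, len) == kCCSuccess) return true;
      // Fallback: arc4random_buf gives no error indication at all.
      arc4random_buf(buf, len);
      return true;
    }

    int main(void) {
      unsigned char key[32];
      return random_buf(key, sizeof(key)) ? 0 : 1;
    }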
@@ -50,7 +50,7 @@ typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*
 static PVirtualAlloc2 pVirtualAlloc2 = NULL;
 static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
 
-// Similarly, GetNumaProcesorNodeEx is only supported since Windows 7
+// Similarly, GetNumaProcessorNodeEx is only supported since Windows 7
 typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER;
 
 typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber);
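The NULL-initialized function pointers above follow the usual pattern for APIs that may be absent on older Windows versions: resolve them once at runtime and check before calling. A sketch of that pattern, reusing the typedef from the diff:

    #include <windows.h>

    typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER;
    typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber);
    static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL;

    static void resolve_optional_apis(void) {
      HMODULE hKernel = GetModuleHandleA("kernel32.dll");
      if (hKernel != NULL) {
        // Stays NULL on pre-Windows 7 systems; callers must check before use.
        pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)
          GetProcAddress(hKernel, "GetCurrentProcessorNumberEx");
      }
    }

    int main(void) {
      resolve_optional_apis();
      MI_PROCESSOR_NUMBER pn = {0};
      if (pGetCurrentProcessorNumberEx != NULL) pGetCurrentProcessorNumberEx(&pn);
      return pn.Number;
    }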
@@ -658,4 +658,3 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
 }
 
 #endif
-
@@ -32,7 +32,7 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_
   (i.e. we are careful to not touch the memory until we actually allocate a block there)
 
   If a thread ends, it "abandons" pages that still contain live blocks.
-  Such segments are abondoned and these can be reclaimed by still running threads,
+  Such segments are abandoned and these can be reclaimed by still running threads,
   (much like work-stealing).
 -------------------------------------------------------------------------------- */
@@ -276,7 +276,7 @@ static bool mi_page_ensure_committed(mi_segment_t* segment, mi_page_t* page, mi_
 
 // we re-use the `free` field for the expiration counter. Since this is a
 // a pointer size field while the clock is always 64-bit we need to guard
-// against overflow, we use substraction to check for expiry which works
+// against overflow, we use subtraction to check for expiry which works
 // as long as the reset delay is under (2^30 - 1) milliseconds (~12 days)
 static uint32_t mi_page_get_expire( mi_page_t* page ) {
   return (uint32_t)((uintptr_t)page->free);
@@ -294,7 +294,7 @@ static void mi_page_purge_set_expire(mi_page_t* page) {
 
 // we re-use the `free` field for the expiration counter. Since this is a
 // a pointer size field while the clock is always 64-bit we need to guard
-// against overflow, we use substraction to check for expiry which work
+// against overflow, we use subtraction to check for expiry which work
 // as long as the reset delay is under (2^30 - 1) milliseconds (~12 days)
 static bool mi_page_purge_is_expired(mi_page_t* page, mi_msecs_t now) {
   int32_t expire = (int32_t)mi_page_get_expire(page);
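Both comments describe the same wrap-around-safe expiry test: the 64-bit clock is truncated to the 32 bits that fit in the `free` field, so expiry is decided from the signed difference rather than an absolute comparison. A minimal illustration of the trick:

    #include <stdbool.h>
    #include <stdint.h>

    // Expiry by signed subtraction: correct even when the 32-bit counter
    // wraps, as long as |now - expire| stays below 2^31 milliseconds.
    static bool is_expired(uint32_t expire, int64_t now_msecs) {
      int32_t diff = (int32_t)((uint32_t)now_msecs - expire);
      return diff >= 0;
    }

    int main(void) {
      // near the 32-bit wrap: expire just before wrap, "now" just after it
      uint32_t expire = 0xFFFFFFF0u;
      int64_t  now    = 0x100000010LL;        // truncates to 0x10
      return is_expired(expire, now) ? 0 : 1; // expired: diff == 0x20 > 0
    }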
@@ -778,7 +778,7 @@ When a block is freed in an abandoned segment, the segment
 is reclaimed into that thread.
 
 Moreover, if threads are looking for a fresh segment, they
-will first consider abondoned segments -- these can be found
+will first consider abandoned segments -- these can be found
 by scanning the arena memory
 (segments outside arena memoryare only reclaimed by a free).
 ----------------------------------------------------------- */
@@ -995,7 +995,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
 {
   mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process
   segment->abandoned_visits++;
-  // todo: should we respect numa affinity for abondoned reclaim? perhaps only for the first visit?
+  // todo: should we respect numa affinity for abandoned reclaim? perhaps only for the first visit?
   // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries
   // Perhaps we can skip non-suitable ones in a better way?
   bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);