Merge branch 'dev' into dev2

Author: Daan
Date: 2025-05-13 16:07:04 -07:00
Commit: d87bcd0027
9 changed files with 63 additions and 35 deletions

@@ -1,6 +1,6 @@
# install from an image
# download first an appropriate tar.gz image into the current directory
-# from: <https://github.com/alpinelinux/docker-alpine/tree/edge/armv7>
+# from <https://github.com/alpinelinux/docker-alpine/tree/edge/armv7>
FROM scratch
# Substitute the image name that was downloaded

@@ -0,0 +1,28 @@
# install from an image
# download first an appropriate tar.gz image into the current directory
# from <https://github.com/alpinelinux/docker-alpine/tree/edge/x86>
FROM scratch
# Substitute the image name that was downloaded
ADD alpine-minirootfs-20250108-x86.tar.gz /
# Install tools
RUN apk add build-base make cmake
RUN apk add git
RUN apk add vim
RUN mkdir -p /home/dev
WORKDIR /home/dev
# Get mimalloc
RUN git clone https://github.com/microsoft/mimalloc -b dev2
RUN mkdir -p mimalloc/out/release
RUN mkdir -p mimalloc/out/debug
# Build mimalloc debug
WORKDIR /home/dev/mimalloc/out/debug
RUN cmake ../.. -DMI_DEBUG_FULL=ON
# RUN make -j
# RUN make test
CMD ["/bin/sh"]

@@ -119,7 +119,7 @@ void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
#define MI_WIN_USE_FIXED_TLS 1
//-------------------------------------------------------------------
// Access to TLS (thread local storage) slots.

@@ -571,7 +571,6 @@ struct mi_heap_s {
size_t guarded_size_min; // minimal size for guarded objects
size_t guarded_size_max; // maximal size for guarded objects
size_t guarded_sample_rate; // sample rate (set to 0 to disable guarded pages)
-size_t guarded_sample_seed; // starting sample count
size_t guarded_sample_count; // current sample count (counting down to 0)
#endif
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.

@@ -44,7 +44,7 @@ typedef struct mi_arena_s {
mi_lock_t abandoned_visit_lock; // lock is only used when abandoned segments are being visited
_Atomic(size_t) search_idx; // optimization to start the search for free blocks
_Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be purged from `blocks_purge`.
mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
@@ -369,7 +369,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t *arena_id)
{
if (_mi_preloading()) return false; // use OS only while pre loading
const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
if (arena_count > (MI_MAX_ARENAS - 4)) return false;
@@ -411,7 +411,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
// try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) { // is arena allocation allowed?
-if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
+if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
{
void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
if (p != NULL) return p;
@@ -491,7 +491,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks)
// we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
mi_assert_internal(already_committed < blocks);
mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
-needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
+needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
}
// clear the purged blocks
@@ -560,7 +560,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
{
// check pre-conditions
if (arena->memid.is_pinned) return false;
// expired yet?
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
if (!force && (expire == 0 || expire > now)) return false;
@@ -615,7 +615,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
return any_purged;
}
-static void mi_arenas_try_purge( bool force, bool visit_all )
+static void mi_arenas_try_purge( bool force, bool visit_all )
{
if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
@@ -632,7 +632,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all )
mi_atomic_guard(&purge_guard)
{
// increase global expire: at most one purge per delay cycle
-mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
+mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
size_t max_purge_count = (visit_all ? max_arena : 2);
bool all_visited = true;
for (size_t i = 0; i < max_arena; i++) {
@@ -951,7 +951,7 @@ void mi_debug_show_arenas(void) mi_attr_noexcept {
for (size_t i = 0; i < max_arenas; i++) {
mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
if (arena == NULL) break;
-_mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
+_mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, (size_t)(MI_ARENA_BLOCK_SIZE / MI_MiB), arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
if (show_inuse) {
inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
}
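
The purge hunks above keep mimalloc's delayed-decommit design: a purge is first scheduled by storing an expiration time, and a later call only does the work once that time has passed (or when forced). A standalone sketch of the pattern, with illustrative names (purge_expire, purge_delay, schedule_purge, and try_purge are not the mimalloc API):

#include <stdatomic.h>
#include <stdbool.h>

typedef long long msecs_t;

static _Atomic(msecs_t) purge_expire = 0;   // 0 means nothing is scheduled
static const msecs_t    purge_delay  = 10;  // illustrative delay in msecs

static void schedule_purge(msecs_t now) {
  msecs_t expected = 0;
  // set an expiration only if none is pending: at most one purge per delay cycle
  atomic_compare_exchange_strong(&purge_expire, &expected, now + purge_delay);
}

static bool try_purge(msecs_t now, bool force) {
  msecs_t expire = atomic_load_explicit(&purge_expire, memory_order_relaxed);
  if (!force && (expire == 0 || expire > now)) return false;  // not expired yet
  atomic_store(&purge_expire, 0);  // claim the pending purge
  // ... decommit or reset the expired blocks here ...
  return true;
}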

@@ -123,7 +123,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
false, // can reclaim
0, // tag
#if MI_GUARDED
-0, 0, 0, 0, 1, // count is 1 so we never write to it (see `internal.h:mi_heap_malloc_use_guarded`)
+0, 0, 0, 1, // count is 1 so we never write to it (see `internal.h:mi_heap_malloc_use_guarded`)
#endif
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY
@@ -172,7 +172,7 @@ mi_decl_cache_align mi_heap_t _mi_heap_main = {
false, // can reclaim
0, // tag
#if MI_GUARDED
-0, 0, 0, 0, 0,
+0, 0, 0, 0,
#endif
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY
@@ -184,15 +184,14 @@ mi_stats_t _mi_stats_main = { MI_STAT_VERSION, MI_STATS_NULL };
#if MI_GUARDED
mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) {
-heap->guarded_sample_seed = seed;
-if (heap->guarded_sample_seed == 0) {
-heap->guarded_sample_seed = _mi_heap_random_next(heap);
-}
heap->guarded_sample_rate = sample_rate;
-if (heap->guarded_sample_rate >= 1) {
-heap->guarded_sample_seed = heap->guarded_sample_seed % heap->guarded_sample_rate;
+heap->guarded_sample_count = sample_rate; // count down samples
+if (heap->guarded_sample_rate > 1) {
+if (seed == 0) {
+seed = _mi_heap_random_next(heap);
+}
+heap->guarded_sample_count = (seed % heap->guarded_sample_rate) + 1; // start at random count between 1 and `sample_rate`
}
-heap->guarded_sample_count = heap->guarded_sample_seed; // count down samples
}
mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) {
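
The rewrite above removes the stored guarded_sample_seed: instead of remembering the seed, the heap now just starts its countdown at a random point between 1 and sample_rate, so heaps do not all guard allocations at the same indices. A minimal standalone sketch of the countdown scheme (illustrative names, not the mimalloc API):

#include <stdio.h>
#include <stdlib.h>

static size_t sample_rate  = 0;  // 0 disables guarded allocations
static size_t sample_count = 0;  // counts down to the next guarded allocation

static void set_sample_rate(size_t rate, size_t seed) {
  sample_rate  = rate;
  sample_count = rate;                     // count down samples
  if (rate > 1) {
    if (seed == 0) seed = (size_t)rand();  // stand-in for _mi_heap_random_next
    sample_count = (seed % rate) + 1;      // random start between 1 and rate
  }
}

static int use_guarded(void) {
  if (sample_rate == 0) return 0;          // disabled
  if (--sample_count == 0) {
    sample_count = sample_rate;            // reset for the next window
    return 1;                              // guard this allocation
  }
  return 0;
}

int main(void) {
  set_sample_rate(4, 7);                   // (7 % 4) + 1 == 4: first guard on the 4th allocation
  for (int i = 0; i < 12; i++) printf("%d", use_guarded());
  printf("\n");                            // prints 000100010001
  return 0;
}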

@@ -32,7 +32,7 @@ terms of the MIT license. A copy of the license can be found in the file
#if defined(__linux__)
#include <features.h>
#include <sys/prctl.h> // THP disable, PR_SET_VMA
-#if !defined(PR_SET_VMA)
+#if defined(__GLIBC__) && !defined(PR_SET_VMA)
#include <linux/prctl.h>
#endif
#if defined(__GLIBC__)
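
The stricter guard above only falls back to <linux/prctl.h> on glibc; on non-glibc C libraries such as musl (e.g. Alpine, as used in the Dockerfiles above), pulling in the kernel header alongside <sys/prctl.h> can cause redefinition conflicts. For context, a hedged sketch of how PR_SET_VMA is typically used to name an anonymous mapping (illustrative helper, not the mimalloc code):

#include <stddef.h>
#include <sys/prctl.h>

// Best-effort: names a mapping in /proc/self/maps; silently ignored on
// kernels built without CONFIG_ANON_VMA_NAME.
static void name_mapping(void* p, size_t size, const char* name) {
#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
  (void)prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)p, size, (unsigned long)name);
#else
  (void)p; (void)size; (void)name;
#endif
}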

@@ -628,18 +628,16 @@ bool _mi_prim_random_buf(void* buf, size_t buf_len) {
//----------------------------------------------------------------
#if MI_WIN_USE_FIXED_TLS==1
-mi_decl_cache_align size_t _mi_win_tls_offset = sizeof(void*); // use 2nd slot by default
+mi_decl_cache_align size_t _mi_win_tls_offset = 0;
#endif
-static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) {
-MI_UNUSED(reserved);
-MI_UNUSED(module);
+static void mi_win_tls_init(DWORD reason) {
#if MI_HAS_TLS_SLOT >= 2 // we must initialize the TLS slot before any allocation
#if MI_WIN_USE_FIXED_TLS==1
-if (reason==DLL_PROCESS_ATTACH) {
-const DWORD tls_slot = TlsAlloc();
-if (tls_slot == TLS_OUT_OF_INDEXES) {
-_mi_error_message(EFAULT, "unable to allocate the a TLS slot (rebuild without MI_WIN_USE_FIXED_TLS?)\n");
+if (reason==DLL_PROCESS_ATTACH && _mi_win_tls_offset == 0) {
+const DWORD tls_slot = TlsAlloc(); // usually returns slot 1
+if (tls_slot == TLS_OUT_OF_INDEXES) {
+_mi_error_message(EFAULT, "unable to allocate the a TLS slot (rebuild without MI_WIN_USE_FIXED_TLS?)\n");
}
_mi_win_tls_offset = (size_t)tls_slot * sizeof(void*);
}
@@ -653,7 +651,15 @@ static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) {
mi_assert_internal(p == (void*)&_mi_heap_empty);
#endif
}
+#else
+MI_UNUSED(reason);
+#endif
+}
+static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) {
+MI_UNUSED(reserved);
+MI_UNUSED(module);
+mi_win_tls_init(reason);
if (reason==DLL_PROCESS_ATTACH) {
_mi_process_load();
}
@@ -815,11 +821,7 @@ static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) {
#endif
mi_decl_export void _mi_redirect_entry(DWORD reason) {
// called on redirection; careful as this may be called before DllMain
-#if MI_HAS_TLS_SLOT >= 2 // we must initialize the TLS slot before any allocation
-if ((reason==DLL_PROCESS_ATTACH || reason==DLL_THREAD_ATTACH) && mi_prim_get_default_heap() == NULL) {
-_mi_heap_set_default_direct((mi_heap_t*)&_mi_heap_empty);
-}
-#endif
+mi_win_tls_init(reason);
if (reason == DLL_PROCESS_ATTACH) {
mi_redirected = true;
}
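
The restructuring above factors the slot setup into mi_win_tls_init so the same code runs from both mi_win_main and _mi_redirect_entry, which can be called before DllMain. A minimal sketch of the idea under the same fixed-TLS assumption (everything except the Win32 calls is illustrative):

#include <windows.h>
#include <stdio.h>

static size_t win_tls_offset = 0;  // 0 means the slot is not yet allocated

static void win_tls_init(DWORD reason) {
  if (reason == DLL_PROCESS_ATTACH && win_tls_offset == 0) {
    const DWORD slot = TlsAlloc();  // slot 0 is usually taken, so this tends to return 1
    if (slot == TLS_OUT_OF_INDEXES) {
      fprintf(stderr, "unable to allocate a TLS slot\n");
    }
    else {
      win_tls_offset = (size_t)slot * sizeof(void*);  // byte offset into the TEB TLS array
    }
  }
}

BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
  (void)inst; (void)reserved;
  win_tls_init(reason);  // idempotent, so an earlier redirection call is harmless
  return TRUE;
}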

@@ -43,7 +43,7 @@ int main() {
// corrupt_free();
// block_overflow1();
// block_overflow2();
-// test_canary_leak();
+test_canary_leak();
// test_aslr();
// invalid_free();
// test_reserved();