tune free-ing and abandoning

daanx 2024-12-05 17:00:23 -08:00
parent 0616ee151e
commit 7443ee317e
10 changed files with 125 additions and 93 deletions

View file

@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
#define MI_MALLOC_VERSION 188 // major + 2 digits minor
#define MI_MALLOC_VERSION 300 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@@ -369,7 +369,6 @@ typedef enum mi_option_e {
mi_option_arena_reserve, // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`)
mi_option_arena_purge_mult, // multiplier for `purge_delay` for the purging delay for arenas (=10)
mi_option_purge_extend_delay,
mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1)
mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's)
mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0)
@@ -379,7 +378,9 @@ typedef enum mi_option_e {
mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000)
mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0)
mi_option_target_segments_per_thread, // experimental (=0)
mi_option_full_page_retain, // retain N full pages per size class (=4, lower it to reduce memory footprint in multi-thread applications)
mi_option_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1)
mi_option_full_page_retain, // retain N full pages per size class (=2)
mi_option_max_page_candidates, // max candidate pages to consider for allocation (=4)
_mi_option_last,
// legacy option names
mi_option_large_os_pages = mi_option_allow_large_os_pages,
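
The option changes above rename abandoned_reclaim_on_free to reclaim_on_free, lower the full_page_retain default to 2, and add max_page_candidates. A minimal sketch of tuning them through the public option API, assuming the usual mi_option_set interface and the MIMALLOC_<OPTION> environment-variable mapping (values shown are the defaults from the comments above):

#include <mimalloc.h>

int main(void) {
  mi_option_set(mi_option_reclaim_on_free, 1);      // reclaim an abandoned page on free (=1)
  mi_option_set(mi_option_full_page_retain, 2);     // retain N full pages per size class (=2)
  mi_option_set(mi_option_max_page_candidates, 4);  // max candidate pages per allocation (=4)
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}

The same values can also be set from the environment, e.g. MIMALLOC_FULL_PAGE_RETAIN=2, assuming the standard uppercase mapping of option names.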

View file

@@ -27,6 +27,8 @@ terms of the MIT license. A copy of the license can be found in the file
#if defined(_MSC_VER)
#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
#pragma warning(disable:26812) // unscoped enum warning
#pragma warning(disable:28159) // don't use GetVersion
#pragma warning(disable:4996) // don't use GetVersion
#define mi_decl_noinline __declspec(noinline)
#define mi_decl_thread __declspec(thread)
#define mi_decl_align(a) __declspec(align(a))
@@ -169,6 +171,7 @@ void _mi_arena_field_cursor_done(mi_arena_field_cursor_t* current);
*/
// "page-map.c"
bool _mi_page_map_init(void);
void _mi_page_map_register(mi_page_t* page);
void _mi_page_map_unregister(mi_page_t* page);
@@ -638,7 +641,7 @@ static inline bool mi_page_is_mostly_used(const mi_page_t* page) {
return (page->reserved - page->used <= frac);
}
// is less than 1/n'th of a page free?
// is more than (n-1)/n'th of a page in use?
static inline bool mi_page_is_used_at_frac(const mi_page_t* page, uint16_t n) {
if (page==NULL) return true;
uint16_t frac = page->reserved / n;
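
The new comment matches how the helper is used in the free path below: mi_page_is_used_at_frac(page, n) reports whether at most a 1/n fraction of the page's blocks is still free. A standalone sketch of the arithmetic, assuming the body (cut off by the hunk) mirrors mi_page_is_mostly_used above; the names here are stand-ins, not the real mimalloc types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// stand-in for the check above: "used at 1/n" means at most
// reserved/n blocks are still free
static bool used_at_frac(uint16_t reserved, uint16_t used, uint16_t n) {
  const uint16_t frac = reserved / n;              // e.g. 128/8 = 16
  return (uint16_t)(reserved - used) <= frac;
}

int main(void) {
  // a page with 128 reserved blocks counts as "7/8 used" once
  // no more than 16 blocks remain free
  printf("%d\n", used_at_frac(128, 110, 8));       // 0: 18 blocks still free
  printf("%d\n", used_at_frac(128, 115, 8));       // 1: only 13 blocks free
  return 0;
}

This is why the collect path below reclaims only when !mi_page_is_used_at_frac(page, 8) (less than 7/8 in use) and reabandons only when !mi_page_is_used_at_frac(page, 4) (less than 3/4 in use).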

View file

@@ -12,10 +12,8 @@ terms of the MIT license. A copy of the license can be found in the file
// This file contains the main type definitions for mimalloc:
// mi_heap_t : all data for a thread-local heap, contains
// lists of all managed heap pages.
// mi_segment_t : a larger chunk of memory (32GiB) from where pages
// are allocated.
// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from
// where objects are allocated.
// where objects of a single size are allocated.
// Note: we write "OS page" for OS memory pages while
// using plain "page" for mimalloc pages (`mi_page_t`).
// --------------------------------------------------------------------------
@@ -417,7 +415,7 @@ struct mi_heap_s {
size_t page_retired_max; // largest retired index into the `pages` array.
mi_heap_t* next; // list of heaps per thread
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
bool allow_page_abandon; // `true` if this heap can abandon pages to reduce memory footprint
bool allow_page_abandon; // `true` if this heap can abandon pages to reduce memory footprint
uint8_t tag; // custom tag, can be used for separating heaps based on the object types
#if MI_GUARDED
size_t guarded_size_min; // minimal size for guarded objects

View file

@@ -861,10 +861,10 @@ size_t mi_bitmap_init(mi_bitmap_t* bitmap, size_t bit_count, bool already_zero)
if (!already_zero) {
_mi_memzero_aligned(bitmap, size);
}
bitmap->chunk_map_count = _mi_divide_up(chunk_count, MI_CHUNKMAP_BITS);
mi_assert_internal(bitmap->chunk_map_count <= MI_BITMAP_MAX_CHUNKMAPS);
bitmap->chunk_count = chunk_count;
mi_assert_internal(bitmap->chunk_map_count <= MI_BITMAP_MAX_CHUNK_COUNT);
mi_atomic_store_release(&bitmap->chunk_map_count, _mi_divide_up(chunk_count, MI_CHUNKMAP_BITS));
mi_assert_internal(mi_atomic_load_relaxed(&bitmap->chunk_map_count) <= MI_BITMAP_MAX_CHUNKMAPS);
mi_atomic_store_release(&bitmap->chunk_count, chunk_count);
mi_assert_internal(mi_atomic_load_relaxed(&bitmap->chunk_count) <= MI_BITMAP_MAX_CHUNK_COUNT);
return size;
}
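
The counters are now published with mi_atomic_store_release and read back with acquire/relaxed loads, so a concurrent reader that observes the new chunk_count also observes the initialized bitmap memory. A minimal sketch of that release/acquire pairing in plain C11 atomics, illustrative only (mimalloc uses its own mi_atomic_* wrappers):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct bitmap_s {
  _Atomic(size_t) chunk_count;
  size_t          chunks[64];    // payload written before publishing
} bitmap_t;

static bitmap_t bm;

int main(void) {
  // writer: initialize the payload with plain stores, then publish
  // the count with a release store
  for (size_t i = 0; i < 8; i++) { bm.chunks[i] = 0; }
  atomic_store_explicit(&bm.chunk_count, (size_t)8, memory_order_release);

  // reader (possibly another thread): the acquire load pairs with the
  // release store, so the initialized chunks are guaranteed visible
  size_t n = atomic_load_explicit(&bm.chunk_count, memory_order_acquire);
  printf("%zu chunks visible\n", n);
  return 0;
}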

View file

@@ -23,9 +23,6 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block);
// Free
// ------------------------------------------------------
// forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_block_t* block);
// regular free of a (thread local) block pointer
// fast path written carefully to prevent spilling on the stack
static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full)
@@ -50,6 +47,40 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
}
}
// Forward declaration for multi-threaded collect
static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page);
// Free a block multi-threaded
static inline void mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// adjust stats (after padding check and potentially recursive `mi_free` above)
mi_stat_free(page, block); // stat_free may access the padding
mi_track_free_size(block, mi_page_usable_size_of(page, block));
// _mi_padding_shrink(page, block, sizeof(mi_block_t));
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
size_t dbgsize = mi_usable_size(block);
if (dbgsize > MI_MiB) { dbgsize = MI_MiB; }
_mi_memset_aligned(block, MI_DEBUG_FREED, dbgsize);
#endif
// push atomically on the page thread free list
mi_thread_free_t tf_new;
mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free);
do {
mi_block_set_next(page, block, mi_tf_block(tf_old));
tf_new = mi_tf_create(block, true /* always owned: try to claim it if abandoned */);
} while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_old, tf_new));
// and atomically try to collect the page if it was abandoned
const bool is_owned_now = !mi_tf_is_owned(tf_old);
if (is_owned_now) {
mi_assert_internal(mi_page_is_abandoned(page));
mi_free_try_collect_mt(page);
}
}
// Adjust a block that was allocated aligned, to the actual start of the block in the page.
// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
// `page_start` and `block_size` fields; however these are constant and the page won't be
@@ -81,6 +112,7 @@ static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, vo
}
#endif
// free a local pointer (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, void* p) mi_attr_noexcept {
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
@@ -101,6 +133,7 @@ void mi_decl_noinline _mi_free_generic(mi_page_t* page, bool is_local, void* p)
else mi_free_generic_mt(page,p);
}
// Get the segment data belonging to a pointer
// This is just a single `and` in release mode but does further checks in debug mode
// (and secure mode) to see if this was a valid pointer.
@@ -142,8 +175,16 @@ void mi_free(void* p) mi_attr_noexcept
}
}
else {
// not thread-local; use generic path
mi_free_generic_mt(page, p);
// free-ing in a page owned by a heap in another thread, or on abandoned page (not belonging to a heap)
if mi_likely(page->flags.full_aligned == 0) {
// blocks are aligned (and not a full page)
mi_block_t* const block = (mi_block_t*)p;
mi_free_block_mt(page,block);
}
else {
// page is full or contains (inner) aligned blocks; use generic multi-thread path
mi_free_generic_mt(page, p);
}
}
}
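
The new branch lets a cross-thread free push the block straight onto the page's thread free list whenever the page is neither full nor contains interior-aligned blocks; both conditions are folded into the single flags.full_aligned field, so the fast path costs one compare. A sketch of that packed-flag test; the field layout follows earlier mimalloc releases and is only illustrative here:

#include <stdbool.h>
#include <stdint.h>

// illustrative packing of the two flags tested above; the real layout
// lives in mimalloc's types header and may differ in this version
typedef union page_flags_s {
  uint8_t full_aligned;          // zero iff both bits below are clear
  struct {
    uint8_t in_full     : 1;     // page is on the full queue
    uint8_t has_aligned : 1;     // page contains interior-aligned blocks
  } x;
} page_flags_t;

static bool can_take_mt_fast_path(page_flags_t flags) {
  // one compare covers both conditions: when it is zero the block
  // pointer can be pushed onto the thread free list as-is
  return (flags.full_aligned == 0);
}

int main(void) {
  page_flags_t f = { .full_aligned = 0 };
  return can_take_mt_fast_path(f) ? 0 : 1;
}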
@@ -152,40 +193,11 @@ void mi_free(void* p) mi_attr_noexcept
// Multi-threaded Free (`_mt`)
// ------------------------------------------------------
static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page);
// Push a block that is owned by another thread (or abandoned) on its page-local thread free list.
static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// adjust stats (after padding check and potentially recursive `mi_free` above)
mi_stat_free(page, block); // stat_free may access the padding
mi_track_free_size(block, mi_page_usable_size_of(page, block));
// _mi_padding_shrink(page, block, sizeof(mi_block_t));
#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
size_t dbgsize = mi_usable_size(block);
if (dbgsize > MI_MiB) { dbgsize = MI_MiB; }
_mi_memset_aligned(block, MI_DEBUG_FREED, dbgsize);
#endif
// push atomically on the page thread free list
mi_thread_free_t tf_new;
mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free);
do {
mi_block_set_next(page, block, mi_tf_block(tf_old));
tf_new = mi_tf_create(block, true /* always owned: try to claim it if abandoned */);
} while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_old, tf_new));
// and atomically reclaim the page if it was abandoned
bool reclaimed = !mi_tf_is_owned(tf_old);
if (reclaimed) {
mi_free_try_reclaim_mt(page);
}
}
static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page) {
mi_assert_internal(mi_page_is_owned(page));
mi_assert_internal(mi_page_is_abandoned(page));
// we own the page now..
// safe to collect the thread atomic free list
_mi_page_free_collect(page, false); // update `used` count
@@ -202,16 +214,10 @@ static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
_mi_arena_page_free(page);
return;
}
// 2. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
else if (!mi_page_is_mostly_used(page) && // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
!mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
_mi_arena_page_try_reabandon_to_mapped(page))
{
return;
}
// 3. if the page is not too full, we can try to reclaim it for ourselves
else if (_mi_option_get_fast(mi_option_abandoned_reclaim_on_free) != 0 &&
!mi_page_is_mostly_used(page))
// 2. if the page is not too full, we can try to reclaim it for ourselves
if (_mi_option_get_fast(mi_option_reclaim_on_free) != 0 &&
!mi_page_is_used_at_frac(page,8))
{
// the page has still some blocks in use (but not too many)
// reclaim in our heap if compatible, or otherwise abandon again
@@ -222,20 +228,32 @@ static void mi_decl_noinline mi_free_try_reclaim_mt(mi_page_t* page) {
if (heap != (mi_heap_t*)&_mi_heap_empty) // we did not already terminate our thread (can this happen?
{
mi_heap_t* const tagheap = _mi_heap_by_tag(heap, page->heap_tag);
if ((tagheap != NULL) && // don't reclaim across heap object types
if ((tagheap != NULL) && // don't reclaim across heap object types
(!tagheap->no_reclaim) && // we are allowed to reclaim abandoned pages
(page->subproc == tagheap->tld->subproc) && // don't reclaim across sub-processes; todo: make this check faster (integrate with _mi_heap_by_tag ? )
(_mi_arena_memid_is_suitable(page->memid, tagheap->arena_id)) // don't reclaim across unsuitable arena's; todo: inline arena_is_suitable (?)
)
{
// first remove it from the abandoned pages in the arena -- this waits for any readers to finish
_mi_arena_page_unabandon(page);
_mi_heap_page_reclaim(tagheap, page);
_mi_stat_counter_increase(&_mi_stats_main.pages_reclaim_on_free, 1);
return;
{
if (mi_page_queue(tagheap, page->block_size)->first != NULL) { // don't reclaim for an block_size we don't use
// first remove it from the abandoned pages in the arena -- this waits for any readers to finish
_mi_arena_page_unabandon(page);
_mi_heap_page_reclaim(tagheap, page);
_mi_stat_counter_increase(&_mi_stats_main.pages_reclaim_on_free, 1);
return;
}
}
}
}
// 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations
if (!mi_page_is_used_at_frac(page, 4) && // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page
!mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA &&
_mi_arena_page_try_reabandon_to_mapped(page))
{
return;
}
// not reclaimed or free'd, unown again
_mi_page_unown(page);
}
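
With this reordering, a free on an abandoned page now tries, in order: (1) free the page back to the arena if no blocks remain in use, (2) reclaim it into a compatible heap of the current thread when it is less than 7/8 used and reclaim_on_free is enabled, and only then (3) reabandon it as mapped when it is less than 3/4 used; otherwise it is simply unowned again. A condensed sketch of that decision order (hypothetical helper; thresholds taken from the code above):

typedef enum { FREE_PAGE, RECLAIM, REABANDON, UNOWN } collect_action_t;

// condensed decision order of mi_free_try_collect_mt above; the int
// parameters stand in for the real option, heap, and arena checks
static collect_action_t collect_action(int used, int reserved,
                                       int reclaim_on_free,
                                       int heap_compatible,
                                       int can_reabandon) {
  if (used == 0) return FREE_PAGE;                       // 1. all blocks are free
  const int free_blocks = reserved - used;
  if (reclaim_on_free && free_blocks > reserved / 8 && heap_compatible)
    return RECLAIM;                                      // 2. less than 7/8 in use
  if (free_blocks > reserved / 4 && can_reabandon)
    return REABANDON;                                    // 3. less than 3/4 in use
  return UNOWN;                                          // otherwise just unown
}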

View file

@@ -208,6 +208,20 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool
heap->no_reclaim = noreclaim;
heap->allow_page_abandon = (!noreclaim && mi_option_get(mi_option_full_page_retain) >= 0);
heap->tag = tag;
#if defined(WIN32) && (MI_ARCH_X64 || MI_ARCH_X86)
// disallow reclaim for threads running in the windows threadpool
const DWORD winVersion = GetVersion();
const DWORD winMajorVersion = (DWORD)(LOBYTE(LOWORD(winVersion)));
if (winMajorVersion >= 6) {
_TEB* const teb = NtCurrentTeb();
void* const poolData = *((void**)((uint8_t*)teb + (MI_SIZE_BITS == 32 ? 0x0F90 : 0x1778)));
if (poolData != NULL) {
heap->no_reclaim = true;
}
}
#endif
if (heap == tld->heap_backing) {
_mi_random_init(&heap->random);
}

View file

@@ -400,7 +400,7 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
tld->heap_backing = bheap;
tld->heaps = NULL;
tld->subproc = &mi_subproc_default;
tld->tseq = mi_atomic_add_acq_rel(&mi_tcount, 1);
tld->tseq = 0; // mi_atomic_add_acq_rel(&mi_tcount, 1);
tld->os.stats = &tld->stats;
}
@@ -619,6 +619,7 @@ void mi_process_init(void) mi_attr_noexcept {
mi_detect_cpu_features();
_mi_os_init();
_mi_page_map_init();
_mi_arena_init();
mi_heap_main_init();
#if MI_DEBUG

View file

@@ -143,7 +143,6 @@ static mi_option_desc_t options[_mi_option_last] =
{ MI_DEFAULT_ARENA_RESERVE, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
{ 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's
{ 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
{ 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
{ MI_DEFAULT_DISALLOW_ARENA_ALLOC, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's)
{ 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
#if defined(MI_VISIT_ABANDONED)
@@ -158,7 +157,9 @@ static mi_option_desc_t options[_mi_option_last] =
UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000)
{ 0, UNINIT, MI_OPTION(guarded_sample_seed)},
{ 0, UNINIT, MI_OPTION(target_segments_per_thread) }, // abandon segments beyond this point, or 0 to disable.
{ 1, UNINIT, MI_OPTION_LEGACY(reclaim_on_free, abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free
{ 2, UNINIT, MI_OPTION(full_page_retain) },
{ 4, UNINIT, MI_OPTION(max_page_candidates) },
};
static void mi_option_init(mi_option_desc_t* desc);
@@ -189,7 +190,7 @@ void _mi_options_init(void) {
}
}
_mi_verbose_message("guarded build: %s\n", mi_option_get(mi_option_guarded_sample_rate) != 0 ? "enabled" : "disabled");
#endif
#endif
}
long _mi_option_get_fast(mi_option_t option) {

View file

@@ -13,9 +13,9 @@ mi_decl_cache_align uint8_t* _mi_page_map = NULL;
static bool mi_page_map_all_committed = false;
static size_t mi_page_map_entries_per_commit_bit = MI_ARENA_SLICE_SIZE;
static mi_memid_t mi_page_map_memid;
static mi_bitmap_t mi_page_map_commit;
static mi_bitmap_t mi_page_map_commit = { 1, MI_BITMAP_MIN_CHUNK_COUNT };
static bool mi_page_map_init(void) {
bool _mi_page_map_init(void) {
size_t vbits = _mi_os_virtual_address_bits();
if (vbits >= 48) vbits = 47;
// 1 byte per block = 2 GiB for 128 TiB address space (48 bit = 256 TiB address space)
@@ -23,7 +23,7 @@ static bool mi_page_map_init(void) {
const size_t page_map_size = (MI_ZU(1) << (vbits - MI_ARENA_SLICE_SHIFT));
mi_page_map_entries_per_commit_bit = _mi_divide_up(page_map_size, MI_BITMAP_MIN_BIT_COUNT);
mi_bitmap_init(&mi_page_map_commit, MI_BITMAP_MIN_BIT_COUNT, true);
// mi_bitmap_init(&mi_page_map_commit, MI_BITMAP_MIN_BIT_COUNT, true);
mi_page_map_all_committed = false; // _mi_os_has_overcommit(); // commit on-access on Linux systems?
_mi_page_map = (uint8_t*)_mi_os_alloc_aligned(page_map_size, 1, mi_page_map_all_committed, true, &mi_page_map_memid, NULL);
@@ -57,11 +57,15 @@ static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) {
bool is_zero;
uint8_t* const start = _mi_page_map + (i*mi_page_map_entries_per_commit_bit);
const size_t size = mi_page_map_entries_per_commit_bit;
_mi_os_commit(start, size, &is_zero, NULL);
_mi_os_commit(start, size, &is_zero, NULL);
if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start,size); }
mi_bitmap_xsetN(MI_BIT_SET, &mi_page_map_commit, i, 1, NULL);
}
}
#if MI_DEBUG > 0
_mi_page_map[idx] = 0;
_mi_page_map[idx+slice_count-1] = 0;
#endif
}
}
@@ -78,8 +82,9 @@ static size_t mi_page_map_get_idx(mi_page_t* page, uint8_t** page_start, size_t*
void _mi_page_map_register(mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_internal(_mi_is_aligned(page,MI_PAGE_ALIGN));
mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access!
if mi_unlikely(_mi_page_map == NULL) {
if (!mi_page_map_init()) return;
if (!_mi_page_map_init()) return;
}
mi_assert(_mi_page_map!=NULL);
uint8_t* page_start;

View file

@@ -758,11 +758,6 @@ void _mi_page_init(mi_heap_t* heap, mi_page_t* page) {
Find pages with free blocks
-------------------------------------------------------------*/
// search for a best next page to use for at most N pages (often cut short if immediate blocks are available)
#define MI_MAX_CANDIDATE_SEARCH (8)
#define MI_MAX_FULL_PAGES_PER_QUEUE (4)
// Find a page with free blocks of `page->block_size`.
static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
@@ -770,10 +765,8 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
#if MI_STAT
size_t count = 0;
#endif
#if MI_MAX_CANDIDATE_SEARCH > 1
size_t candidate_count = 0; // we reset this on the first candidate to limit the search
#endif
size_t full_page_count = 0;
long candidate_limit = 0; // we reset this on the first candidate to limit the search
long full_page_retain = _mi_option_get_fast(mi_option_full_page_retain);
mi_page_t* page_candidate = NULL; // a page with free space
mi_page_t* page = pq->first;
@@ -783,14 +776,11 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
#if MI_STAT
count++;
#endif
#if MI_MAX_CANDIDATE_SEARCH > 1
candidate_count++;
#endif
candidate_limit--;
// collect freed blocks by us and other threads
_mi_page_free_collect(page, false);
#if MI_MAX_CANDIDATE_SEARCH > 1
// search up to N pages for a best candidate
// is the local free list non-empty?
@@ -799,8 +789,8 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
// if the page is completely full, move it to the `mi_pages_full`
// queue so we don't visit long-lived pages too often.
if (!immediate_available && !mi_page_is_expandable(page)) {
full_page_count++;
if (full_page_count > MI_MAX_FULL_PAGES_PER_QUEUE) {
full_page_retain--;
if (full_page_retain < 0) {
mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
mi_page_to_full(page, pq);
}
@@ -810,7 +800,7 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
// we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages)
if (page_candidate == NULL) {
page_candidate = page;
candidate_count = 0;
candidate_limit = _mi_option_get_fast(mi_option_max_page_candidates);
}
else if (mi_page_all_free(page_candidate)) {
_mi_page_free(page_candidate, pq);
@@ -820,13 +810,14 @@ static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, m
page_candidate = page;
}
// if we find a non-expandable candidate, or searched for N pages, return with the best candidate
if (immediate_available || candidate_count > MI_MAX_CANDIDATE_SEARCH) {
if (immediate_available || candidate_limit <= 0) {
mi_assert_internal(page_candidate!=NULL);
break;
}
}
#else
// first-fit algorithm
#if 0
// first-fit algorithm without candidates
// If the page contains free blocks, we are done
if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
break; // pick this one