mirror of https://github.com/microsoft/mimalloc.git (synced 2025-05-04 14:39:31 +03:00)
make mimalloc compile with the highest warning level on msvc
parent b84b11b6a9
commit 6b7356a10a
3 changed files with 13 additions and 12 deletions
@@ -111,7 +111,7 @@
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
     <ClCompile>
-      <WarningLevel>Level2</WarningLevel>
+      <WarningLevel>Level4</WarningLevel>
       <Optimization>Disabled</Optimization>
       <SDLCheck>true</SDLCheck>
       <ConformanceMode>true</ConformanceMode>
@@ -21,6 +21,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif

 #if defined(_MSC_VER)
+#pragma warning(disable:4127)   // constant conditional due to MI_SECURE paths
 #define mi_decl_noinline __declspec(noinline)
 #define mi_attr_noreturn
 #elif defined(__GNUC__) || defined(__clang__)
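For context on the new pragma: MSVC warning C4127 ("conditional expression is constant") is a level-4 warning raised whenever an `if` tests a compile-time constant, which is exactly what happens on the MI_SECURE-dependent paths the comment refers to. A minimal, hypothetical illustration, not taken from the mimalloc sources:

// Hypothetical sketch: MI_SECURE is a mimalloc build-time option, but this
// snippet is illustrative only and not part of the commit.
#define MI_SECURE 0

static int example_secure_path(void) {
  if (MI_SECURE >= 1) {  // would trigger C4127 at /W4 without the pragma
    return 1;            // hardened path
  }
  return 0;              // default path
}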
src/memory.c (22 changed lines)
@@ -80,7 +80,7 @@ typedef union mi_region_info_u {
     bool  valid;
     bool  is_large;
     short numa_node;
-  };
+  } x;
 } mi_region_info_t;


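The remaining src/memory.c hunks all follow from the change above: the previously anonymous struct inside `mi_region_info_u` becomes a named member `x` (MSVC flags nameless struct/union members as a nonstandard extension, C4201, at level 4), so every field access gains an `.x.` step. A sketch of the resulting union, assuming `value` is the `uintptr_t` that the atomic reads and writes below operate on:

#include <stdint.h>
#include <stdbool.h>

// Sketch only: member comments and the exact type of `value` are assumptions.
typedef union mi_region_info_u {
  uintptr_t value;        // whole union, read/written atomically as info.value
  struct {
    bool  valid;          // region initialized?
    bool  is_large;       // allocated in large OS pages?
    short numa_node;      // associated NUMA node
  } x;                    // named member: fields are now info.x.valid, info.x.is_large, ...
} mi_region_info_t;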
@@ -204,9 +204,9 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,

   // and share it
   mi_region_info_t info;
-  info.valid = true;
-  info.is_large = region_large;
-  info.numa_node = _mi_os_numa_node(tld);
+  info.x.valid = true;
+  info.x.is_large = region_large;
+  info.x.numa_node = (short)_mi_os_numa_node(tld);
   mi_atomic_write(&r->info, info.value); // now make it available to others
   *region = r;
   return true;
@@ -224,12 +224,12 @@ static bool mi_region_is_suitable(const mem_region_t* region, int numa_node, boo

   // numa correct
   if (numa_node >= 0) { // use negative numa node to always succeed
-    int rnode = info.numa_node;
+    int rnode = info.x.numa_node;
     if (rnode >= 0 && rnode != numa_node) return false;
   }

   // check allow-large
-  if (!allow_large && info.is_large) return false;
+  if (!allow_large && info.x.is_large) return false;

   return true;
 }
@@ -278,11 +278,11 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
   mi_region_info_t info;
   info.value = mi_atomic_read(&region->info);
   void* start = mi_atomic_read_ptr(&region->start);
-  mi_assert_internal(!(info.is_large && !*is_large));
+  mi_assert_internal(!(info.x.is_large && !*is_large));
   mi_assert_internal(start != NULL);

   *is_zero = mi_bitmap_unclaim(&region->dirty, 1, blocks, bit_idx);
-  *is_large = info.is_large;
+  *is_large = info.x.is_large;
   *memid = mi_memid_create(region, bit_idx);
   void* p = (uint8_t*)start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE);

@@ -292,7 +292,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
     bool any_uncommitted;
     mi_bitmap_claim(&region->commit, 1, blocks, bit_idx, &any_uncommitted);
     if (any_uncommitted) {
-      mi_assert_internal(!info.is_large);
+      mi_assert_internal(!info.x.is_large);
       bool commit_zero;
       _mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld);
       if (commit_zero) *is_zero = true;
@@ -307,7 +307,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
   // unreset reset blocks
   if (mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx)) {
     // some blocks are still reset
-    mi_assert_internal(!info.is_large);
+    mi_assert_internal(!info.x.is_large);
     mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
     mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
     if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
@@ -412,7 +412,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
   }

   // reset the blocks to reduce the working set.
-  if (!info.is_large && mi_option_is_enabled(mi_option_segment_reset)
+  if (!info.x.is_large && mi_option_is_enabled(mi_option_segment_reset)
       && (mi_option_is_enabled(mi_option_eager_commit) ||
           mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead
   {