reduce type casts in atomic operations

daan 2020-01-22 19:44:54 -08:00
parent 0f14f431c5
commit e3b16fe4ef
9 changed files with 93 additions and 92 deletions
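
At the call sites the change reads like this (a sketch assembled from the hunks below, not a line of the diff itself): the old pointer atomics took a `volatile _Atomic(void*)*` obtained through `mi_atomic_cast`, so every use needed a cast going in and another cast on the result, while the new macros take the pointee type as their first argument and do both casts internally.

// before: cast to _Atomic(void*) on the way in, cast the result back out
mi_segment_t* segment = (mi_segment_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &abandoned));
// after: the macro takes the type and yields a typed pointer
mi_segment_t* segment = mi_atomic_read_ptr_relaxed(mi_segment_t, &abandoned);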


@@ -23,18 +23,16 @@ terms of the MIT license. A copy of the license can be found in the file
#include <stdatomic.h>
#endif
#define mi_atomic_cast(tp,x) (volatile _Atomic(tp)*)(x)
// ------------------------------------------------------
// Atomic operations specialized for mimalloc
// ------------------------------------------------------
// Atomically add a 64-bit value; returns the previous value.
// Note: not using _Atomic(int64_t) as it is only used for statistics.
static inline void mi_atomic_add64(volatile int64_t* p, int64_t add);
static inline void mi_atomic_addi64(volatile int64_t* p, int64_t add);
// Atomically add a value; returns the previous value. Memory ordering is relaxed.
static inline intptr_t mi_atomic_add(volatile _Atomic(intptr_t)* p, intptr_t add);
static inline uintptr_t mi_atomic_add(volatile _Atomic(uintptr_t)* p, uintptr_t add);
// Atomically "and" a value; returns the previous value. Memory ordering is relaxed.
static inline uintptr_t mi_atomic_and(volatile _Atomic(uintptr_t)* p, uintptr_t x);
@@ -42,7 +40,6 @@ static inline uintptr_t mi_atomic_and(volatile _Atomic(uintptr_t)* p, uintptr_t
// Atomically "or" a value; returns the previous value. Memory ordering is relaxed.
static inline uintptr_t mi_atomic_or(volatile _Atomic(uintptr_t)* p, uintptr_t x);
// Atomically compare and exchange a value; returns `true` if successful.
// May fail spuriously. Memory ordering as release on success, and relaxed on failure.
// (Note: expected and desired are in opposite order from atomic_compare_exchange)
@@ -69,57 +66,57 @@ static inline void mi_atomic_write(volatile _Atomic(uintptr_t)* p, uintptr_t x);
static inline void mi_atomic_yield(void);
// Atomically add a value; returns the previous value.
static inline uintptr_t mi_atomic_addu(volatile _Atomic(uintptr_t)* p, uintptr_t add) {
return (uintptr_t)mi_atomic_add((volatile _Atomic(intptr_t)*)p, (intptr_t)add);
}
// Atomically subtract a value; returns the previous value.
static inline uintptr_t mi_atomic_subu(volatile _Atomic(uintptr_t)* p, uintptr_t sub) {
return (uintptr_t)mi_atomic_add((volatile _Atomic(intptr_t)*)p, -((intptr_t)sub));
static inline uintptr_t mi_atomic_sub(volatile _Atomic(uintptr_t)* p, uintptr_t sub) {
return mi_atomic_add(p, (uintptr_t)(-((intptr_t)sub)));
}
// Atomically increment a value; returns the previous value.
static inline uintptr_t mi_atomic_increment(volatile _Atomic(uintptr_t)* p) {
return mi_atomic_addu(p, 1);
return mi_atomic_add(p, 1);
}
// Atomically decrement a value; returns the previous value.
static inline uintptr_t mi_atomic_decrement(volatile _Atomic(uintptr_t)* p) {
return mi_atomic_subu(p, 1);
return mi_atomic_sub(p, 1);
}
// Atomically read a pointer; Memory order is relaxed.
static inline void* mi_atomic_read_ptr_relaxed(volatile _Atomic(void*) const * p) {
return (void*)mi_atomic_read_relaxed((const volatile _Atomic(uintptr_t)*)p);
// Atomically add a signed value; returns the previous value.
static inline intptr_t mi_atomic_addi(volatile _Atomic(intptr_t)* p, intptr_t add) {
return (intptr_t)mi_atomic_add((volatile _Atomic(uintptr_t)*)p, (uintptr_t)add);
}
// Atomically subtract a signed value; returns the previous value.
static inline intptr_t mi_atomic_subi(volatile _Atomic(intptr_t)* p, intptr_t sub) {
return (intptr_t)mi_atomic_addi(p,-sub);
}
// Atomically read a pointer; Memory order is relaxed (i.e. no fence, only atomic).
#define mi_atomic_read_ptr_relaxed(T,p) \
(T*)(mi_atomic_read_relaxed((const volatile _Atomic(uintptr_t)*)(p)))
// Atomically read a pointer; Memory order is acquire.
static inline void* mi_atomic_read_ptr(volatile _Atomic(void*) const * p) {
return (void*)mi_atomic_read((const volatile _Atomic(uintptr_t)*)p);
}
#define mi_atomic_read_ptr(T,p) \
(T*)(mi_atomic_read((const volatile _Atomic(uintptr_t)*)(p)))
// Atomically write a pointer
static inline void mi_atomic_write_ptr(volatile _Atomic(void*)* p, void* x) {
mi_atomic_write((volatile _Atomic(uintptr_t)*)p, (uintptr_t)x );
}
// Atomically write a pointer; Memory order is release.
#define mi_atomic_write_ptr(T,p,x) \
mi_atomic_write((volatile _Atomic(uintptr_t)*)(p), (uintptr_t)((T*)x))
// Atomically compare and exchange a pointer; returns `true` if successful. May fail spuriously.
// Memory order is release. (like a write)
// (Note: expected and desired are in opposite order from atomic_compare_exchange)
static inline bool mi_atomic_cas_ptr_weak(volatile _Atomic(void*)* p, void* desired, void* expected) {
return mi_atomic_cas_weak((volatile _Atomic(uintptr_t)*)p, (uintptr_t)desired, (uintptr_t)expected);
}
#define mi_atomic_cas_ptr_weak(T,p,desired,expected) \
mi_atomic_cas_weak((volatile _Atomic(uintptr_t)*)(p), (uintptr_t)((T*)(desired)), (uintptr_t)((T*)(expected)))
// Atomically compare and exchange a pointer; returns `true` if successful.
// Atomically compare and exchange a pointer; returns `true` if successful. Memory order is acquire_release.
// (Note: expected and desired are in opposite order from atomic_compare_exchange)
static inline bool mi_atomic_cas_ptr_strong(volatile _Atomic(void*)* p, void* desired, void* expected) {
return mi_atomic_cas_strong((volatile _Atomic(uintptr_t)*)p, (uintptr_t)desired, (uintptr_t)expected);
}
#define mi_atomic_cas_ptr_strong(T,p,desired,expected) \
mi_atomic_cas_strong((volatile _Atomic(uintptr_t)*)(p),(uintptr_t)((T*)(desired)), (uintptr_t)((T*)(expected)))
// Atomically exchange a pointer value.
static inline void* mi_atomic_exchange_ptr(volatile _Atomic(void*)* p, void* exchange) {
return (void*)mi_atomic_exchange((volatile _Atomic(uintptr_t)*)p, (uintptr_t)exchange);
}
#define mi_atomic_exchange_ptr(T,p,exchange) \
(T*)mi_atomic_exchange((volatile _Atomic(uintptr_t)*)(p), (uintptr_t)((T*)exchange))
#ifdef _MSC_VER
@@ -133,8 +130,8 @@ typedef LONG64 msc_intptr_t;
typedef LONG msc_intptr_t;
#define MI_64(f) f
#endif
static inline intptr_t mi_atomic_add(volatile _Atomic(intptr_t)* p, intptr_t add) {
return (intptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
static inline uintptr_t mi_atomic_add(volatile _Atomic(uintptr_t)* p, uintptr_t add) {
return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
}
static inline uintptr_t mi_atomic_and(volatile _Atomic(uintptr_t)* p, uintptr_t x) {
return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
@@ -155,17 +152,21 @@ static inline uintptr_t mi_atomic_read(volatile _Atomic(uintptr_t) const* p) {
return *p;
}
static inline uintptr_t mi_atomic_read_relaxed(volatile _Atomic(uintptr_t) const* p) {
return mi_atomic_read(p);
return *p;
}
static inline void mi_atomic_write(volatile _Atomic(uintptr_t)* p, uintptr_t x) {
#if defined(_M_IX86) || defined(_M_X64)
*p = x;
#else
mi_atomic_exchange(p,x);
#endif
}
static inline void mi_atomic_yield(void) {
YieldProcessor();
}
static inline void mi_atomic_add64(volatile _Atomic(int64_t)* p, int64_t add) {
static inline void mi_atomic_addi64(volatile _Atomic(int64_t)* p, int64_t add) {
#ifdef _WIN64
mi_atomic_add(p,add);
mi_atomic_addi(p,add);
#else
int64_t current;
int64_t sum;
@@ -182,11 +183,11 @@ static inline void mi_atomic_add64(volatile _Atomic(int64_t)* p, int64_t add) {
#else
#define MI_USING_STD
#endif
static inline void mi_atomic_add64(volatile int64_t* p, int64_t add) {
static inline void mi_atomic_addi64(volatile int64_t* p, int64_t add) {
MI_USING_STD
atomic_fetch_add_explicit((volatile _Atomic(int64_t)*)p, add, memory_order_relaxed);
}
static inline intptr_t mi_atomic_add(volatile _Atomic(intptr_t)* p, intptr_t add) {
static inline uintptr_t mi_atomic_add(volatile _Atomic(uintptr_t)* p, uintptr_t add) {
MI_USING_STD
return atomic_fetch_add_explicit(p, add, memory_order_relaxed);
}
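
A side note on why `mi_atomic_sub` and `mi_atomic_addi` above can funnel everything through the single unsigned `mi_atomic_add`: in C, conversion to an unsigned type and unsigned addition are both defined modulo 2^N, so adding `(uintptr_t)(-((intptr_t)sub))` gives the same result as subtracting `sub`. A tiny standalone illustration (hypothetical test program, not part of this commit):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t counter = 100;
  uintptr_t sub = 7;
  // the same trick mi_atomic_sub uses: add the two's-complement negation
  counter = counter + (uintptr_t)(-((intptr_t)sub));
  printf("%ju\n", (uintmax_t)counter);  // prints 93
  return 0;
}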


@@ -239,9 +239,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
mi_block_t* dfree;
do {
dfree = (mi_block_t*)heap->thread_delayed_free;
dfree = mi_atomic_read_ptr_relaxed(mi_block_t,&heap->thread_delayed_free);
mi_block_set_nextx(heap,block,dfree, heap->key[0], heap->key[1]);
} while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), block, dfree));
} while (!mi_atomic_cas_ptr_weak(mi_block_t,&heap->thread_delayed_free, block, dfree));
}
// and reset the MI_DELAYED_FREEING flag
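
For reference, with the macro definitions from the atomics header above, the new compare-and-swap call expands to roughly the following; the casts are still there after preprocessing, they have just moved out of the call site:

mi_atomic_cas_weak((volatile _Atomic(uintptr_t)*)(&heap->thread_delayed_free),
                   (uintptr_t)((mi_block_t*)(block)),
                   (uintptr_t)((mi_block_t*)(dfree)))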


@@ -55,7 +55,7 @@ bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
// A memory arena descriptor
typedef struct mi_arena_s {
uint8_t* start; // the start of the memory area
_Atomic(uint8_t*) start; // the start of the memory area
size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
int numa_node; // associated NUMA node
@@ -173,7 +173,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment,
mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);
// try numa affine allocation
for (size_t i = 0; i < MI_MAX_ARENAS; i++) {
mi_arena_t* arena = (mi_arena_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &mi_arenas[i]));
mi_arena_t* arena = mi_atomic_read_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
if (arena==NULL) break; // end reached
if ((arena->numa_node<0 || arena->numa_node==numa_node) && // numa local?
(*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
@@ -185,7 +185,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment,
}
// try from another numa node instead..
for (size_t i = 0; i < MI_MAX_ARENAS; i++) {
mi_arena_t* arena = (mi_arena_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &mi_arenas[i]));
mi_arena_t* arena = mi_atomic_read_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
if (arena==NULL) break; // end reached
if ((arena->numa_node>=0 && arena->numa_node!=numa_node) && // not numa local!
(*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
@@ -226,7 +226,7 @@ void _mi_arena_free(void* p, size_t size, size_t memid, mi_stats_t* stats) {
size_t bitmap_idx;
mi_arena_id_indices(memid, &arena_idx, &bitmap_idx);
mi_assert_internal(arena_idx < MI_MAX_ARENAS);
mi_arena_t* arena = (mi_arena_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &mi_arenas[arena_idx]));
mi_arena_t* arena = mi_atomic_read_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]);
mi_assert_internal(arena != NULL);
if (arena == NULL) {
_mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
@@ -252,15 +252,15 @@ void _mi_arena_free(void* p, size_t size, size_t memid, mi_stats_t* stats) {
static bool mi_arena_add(mi_arena_t* arena) {
mi_assert_internal(arena != NULL);
mi_assert_internal((uintptr_t)arena->start % MI_SEGMENT_ALIGN == 0);
mi_assert_internal((uintptr_t)mi_atomic_read_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
mi_assert_internal(arena->block_count > 0);
uintptr_t i = mi_atomic_addu(&mi_arena_count,1);
uintptr_t i = mi_atomic_increment(&mi_arena_count);
if (i >= MI_MAX_ARENAS) {
mi_atomic_subu(&mi_arena_count, 1);
mi_atomic_decrement(&mi_arena_count);
return false;
}
mi_atomic_write_ptr(mi_atomic_cast(void*,&mi_arenas[i]), arena);
mi_atomic_write_ptr(mi_arena_t,&mi_arenas[i], arena);
return true;
}
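
The `mi_arena_add` logic above is a small reusable pattern: reserve a slot index with an atomic increment, and hand it back with a decrement when the index falls past the end of the fixed-size table. A standalone sketch of the same idea in plain C11 atomics (hypothetical names, not mimalloc API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define MAX_SLOTS 64
static _Atomic(size_t) slot_count;        // number of claimed slots
static _Atomic(void*)  slots[MAX_SLOTS];  // the table itself

// claim the next free slot; returns false when the table is full
static bool slot_add(void* item) {
  size_t i = atomic_fetch_add_explicit(&slot_count, 1, memory_order_relaxed);
  if (i >= MAX_SLOTS) {
    atomic_fetch_sub_explicit(&slot_count, 1, memory_order_relaxed);  // undo the claim
    return false;
  }
  atomic_store(&slots[i], item);  // publish the entry
  return true;
}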


@@ -125,7 +125,7 @@ bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
if (p==NULL) return false;
size_t count = mi_atomic_read_relaxed(&regions_count);
for (size_t i = 0; i < count; i++) {
uint8_t* start = (uint8_t*)mi_atomic_read_ptr_relaxed(&regions[i].start);
uint8_t* start = mi_atomic_read_ptr_relaxed(uint8_t,&regions[i].start);
if (start != NULL && (uint8_t*)p >= start && (uint8_t*)p < start + MI_REGION_SIZE) return true;
}
return false;
@@ -133,9 +133,9 @@ bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
static void* mi_region_blocks_start(const mem_region_t* region, mi_bitmap_index_t bit_idx) {
void* start = mi_atomic_read_ptr(&region->start);
uint8_t* start = mi_atomic_read_ptr(uint8_t,&region->start);
mi_assert_internal(start != NULL);
return ((uint8_t*)start + (bit_idx * MI_SEGMENT_SIZE));
return (start + (bit_idx * MI_SEGMENT_SIZE));
}
static size_t mi_memid_create(mem_region_t* region, mi_bitmap_index_t bit_idx) {
@@ -200,7 +200,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
mi_atomic_write(&r->reset, 0);
*bit_idx = 0;
mi_bitmap_claim(&r->in_use, 1, blocks, *bit_idx, NULL);
mi_atomic_write_ptr(&r->start, start);
mi_atomic_write_ptr(uint8_t*,&r->start, start);
// and share it
mi_region_info_t info;
@@ -277,14 +277,14 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
mi_region_info_t info;
info.value = mi_atomic_read(&region->info);
void* start = mi_atomic_read_ptr(&region->start);
uint8_t* start = mi_atomic_read_ptr(uint8_t,&region->start);
mi_assert_internal(!(info.x.is_large && !*is_large));
mi_assert_internal(start != NULL);
*is_zero = mi_bitmap_unclaim(&region->dirty, 1, blocks, bit_idx);
*is_large = info.x.is_large;
*memid = mi_memid_create(region, bit_idx);
void* p = (uint8_t*)start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE);
void* p = start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE);
// commit
if (*commit) {
@@ -446,7 +446,7 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
} while(m == 0 && !mi_atomic_cas_weak(&region->in_use, MI_BITMAP_FIELD_FULL, 0 ));
if (m == 0) {
// on success, free the whole region
void* start = mi_atomic_read_ptr(&regions[i].start);
uint8_t* start = mi_atomic_read_ptr(uint8_t,&regions[i].start);
size_t arena_memid = mi_atomic_read_relaxed(&regions[i].arena_memid);
memset(&regions[i], 0, sizeof(mem_region_t));
// and release the whole region
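
To make the address arithmetic here concrete: with the 4MiB `MI_SEGMENT_SIZE` that the os.c hunks further down also mention, a region whose `start` is S hands out block index k at S + k*4MiB, so bit 5 of a bitmap field corresponds to the 4MiB block at S + 20MiB. Because `start` is now read back as a typed `uint8_t*`, `mi_region_blocks_start` and `mi_region_try_alloc` can do that addition without the extra `(uint8_t*)` cast the old lines carried.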


@@ -169,7 +169,7 @@ static void mi_out_buf(const char* msg, void* arg) {
size_t n = strlen(msg);
if (n==0) return;
// claim space
uintptr_t start = mi_atomic_addu(&out_len, n);
uintptr_t start = mi_atomic_add(&out_len, n);
if (start >= MI_MAX_DELAY_OUTPUT) return;
// check bound
if (start+n >= MI_MAX_DELAY_OUTPUT) {
@@ -181,7 +181,7 @@ static void mi_out_buf(const char* msg, void* arg) {
static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) {
if (out==NULL) return;
// claim (if `no_more_buf == true`, no more output will be added after this point)
size_t count = mi_atomic_addu(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1));
size_t count = mi_atomic_add(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1));
// and output the current contents
if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT;
out_buf[count] = 0;
@@ -212,14 +212,14 @@ static mi_output_fun* volatile mi_out_default; // = NULL
static volatile _Atomic(void*) mi_out_arg; // = NULL
static mi_output_fun* mi_out_get_default(void** parg) {
if (parg != NULL) { *parg = mi_atomic_read_ptr(&mi_out_arg); }
if (parg != NULL) { *parg = mi_atomic_read_ptr(void,&mi_out_arg); }
mi_output_fun* out = mi_out_default;
return (out == NULL ? &mi_out_buf : out);
}
void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept {
mi_out_default = (out == NULL ? &mi_out_stderr : out); // stop using the delayed output buffer
mi_atomic_write_ptr(&mi_out_arg, arg);
mi_atomic_write_ptr(void,&mi_out_arg, arg);
if (out!=NULL) mi_out_buf_flush(out,true,arg); // output all the delayed output now
}
@@ -328,7 +328,7 @@ static void mi_error_default(int err) {
void mi_register_error(mi_error_fun* fun, void* arg) {
mi_error_handler = fun; // can be NULL
mi_atomic_write_ptr(&mi_error_arg, arg);
mi_atomic_write_ptr(void,&mi_error_arg, arg);
}
void _mi_error_message(int err, const char* fmt, ...) {
@@ -339,7 +339,7 @@ void _mi_error_message(int err, const char* fmt, ...) {
va_end(args);
// and call the error handler which may abort (or return normally)
if (mi_error_handler != NULL) {
mi_error_handler(err, mi_atomic_read_ptr(&mi_error_arg));
mi_error_handler(err, mi_atomic_read_ptr(void,&mi_error_arg));
}
else {
mi_error_default(err);
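
Earlier in this file, `mi_out_buf` shows the other common use of the unsigned add: `mi_atomic_add(&out_len, n)` reserves n bytes and returns the old length, which becomes this caller's private write offset, and anything past `MI_MAX_DELAY_OUTPUT` is simply dropped. A minimal sketch of that reservation scheme in C11 atomics (hypothetical names; the truncation detail of the real code is simplified):

#include <stdatomic.h>
#include <string.h>

#define BUF_MAX (16*1024)
static char buf[BUF_MAX+1];
static _Atomic(size_t) buf_len;

// append msg; concurrent callers receive disjoint byte ranges
static void buf_append(const char* msg, size_t n) {
  size_t start = atomic_fetch_add_explicit(&buf_len, n, memory_order_relaxed);
  if (start >= BUF_MAX) return;                   // buffer already full
  if (start + n > BUF_MAX) n = BUF_MAX - start;   // clip the tail
  memcpy(&buf[start], msg, n);
}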


@@ -397,20 +397,20 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
// On 64-bit systems, we can do efficient aligned allocation by using
// the 4TiB to 30TiB area to allocate them.
#if (MI_INTPTR_SIZE >= 8) && (defined(_WIN32) || (defined(MI_OS_USE_MMAP) && !defined(MAP_ALIGNED)))
static volatile _Atomic(intptr_t) aligned_base;
static volatile _Atomic(uintptr_t) aligned_base;
// Return a 4MiB aligned address that is probably available
static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
if (try_alignment == 0 || try_alignment > MI_SEGMENT_SIZE) return NULL;
if ((size%MI_SEGMENT_SIZE) != 0) return NULL;
intptr_t hint = mi_atomic_add(&aligned_base, size);
uintptr_t hint = mi_atomic_add(&aligned_base, size);
if (hint == 0 || hint > ((intptr_t)30<<40)) { // try to wrap around after 30TiB (area after 32TiB is used for huge OS pages)
intptr_t init = ((intptr_t)4 << 40); // start at 4TiB area
uintptr_t init = ((uintptr_t)4 << 40); // start at 4TiB area
#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode
uintptr_t r = _mi_heap_random_next(mi_get_default_heap());
init = init + (MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)); // (randomly 20 bits)*4MiB == 0 to 4TiB
#endif
mi_atomic_cas_strong(mi_atomic_cast(uintptr_t, &aligned_base), init, hint + size);
mi_atomic_cas_strong(&aligned_base, init, hint + size);
hint = mi_atomic_add(&aligned_base, size); // this may still give 0 or > 30TiB but that is ok, it is a hint after all
}
if (hint%try_alignment != 0) return NULL;
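
Worked through, the randomized start in `mi_os_get_aligned_hint` stays inside the intended window:

(r >> 17) & 0xFFFFF             ->  20 random bits, i.e. 0 .. 2^20-1
offset = bits * 4MiB (2^22 B)   ->  0 .. just under 4TiB
init   = 4TiB + offset          ->  4TiB .. just under 8TiB

which is well below the 30TiB threshold at which the hint wraps back around.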


@@ -278,11 +278,11 @@ static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
(put there by other threads if they deallocated in a full page)
----------------------------------------------------------- */
void _mi_heap_delayed_free(mi_heap_t* heap) {
// take over the list
// take over the list (note: no atomic exchange as it is often NULL)
mi_block_t* block;
do {
block = (mi_block_t*)heap->thread_delayed_free;
} while (block != NULL && !mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), NULL, block));
block = mi_atomic_read_ptr_relaxed(mi_block_t,&heap->thread_delayed_free);
} while (block != NULL && !mi_atomic_cas_ptr_weak(mi_block_t,&heap->thread_delayed_free, NULL, block));
// and free them all
while(block != NULL) {
@@ -293,9 +293,9 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
// reset the delayed_freeing flag; in that case delay it further by reinserting.
mi_block_t* dfree;
do {
dfree = (mi_block_t*)heap->thread_delayed_free;
dfree = mi_atomic_read_ptr_relaxed(mi_block_t,&heap->thread_delayed_free);
mi_block_set_nextx(heap, block, dfree, heap->key[0], heap->key[1]);
} while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), block, dfree));
} while (!mi_atomic_cas_ptr_weak(mi_block_t,&heap->thread_delayed_free, block, dfree));
}
block = next;
}
@@ -728,14 +728,14 @@ void _mi_deferred_free(mi_heap_t* heap, bool force) {
heap->tld->heartbeat++;
if (deferred_free != NULL && !heap->tld->recurse) {
heap->tld->recurse = true;
deferred_free(force, heap->tld->heartbeat, mi_atomic_read_ptr_relaxed(&deferred_arg));
deferred_free(force, heap->tld->heartbeat, mi_atomic_read_ptr_relaxed(void,&deferred_arg));
heap->tld->recurse = false;
}
}
void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept {
deferred_free = fn;
mi_atomic_write_ptr(&deferred_arg, arg);
mi_atomic_write_ptr(void,&deferred_arg, arg);
}
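
The loop at the top of `_mi_heap_delayed_free` is the usual lock-free take-the-whole-list idiom: read the head with a relaxed load and, only if it is non-NULL, try to swap in NULL with a weak CAS. The point of the comment about skipping an atomic exchange is that the list is empty most of the time, and a plain load is much cheaper than an unconditional read-modify-write. A generic sketch of the idiom in C11 atomics (hypothetical `node_t`, not mimalloc code):

#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s { struct node_s* next; } node_t;
static _Atomic(node_t*) list_head;

// detach the entire list; returns its old head (possibly NULL)
static node_t* list_take_all(void) {
  node_t* head;
  do {
    head = atomic_load_explicit(&list_head, memory_order_relaxed);
    if (head == NULL) return NULL;   // nothing to take, no RMW needed
  } while (!atomic_compare_exchange_weak(&list_head, &head, NULL));
  return head;
}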


@@ -822,7 +822,7 @@ static void mi_segments_prepend_abandoned(mi_segment_t* first) {
if (first == NULL) return;
// first try if the abandoned list happens to be NULL
if (mi_atomic_cas_ptr_weak(mi_atomic_cast(void*, &abandoned), first, NULL)) return;
if (mi_atomic_cas_ptr_weak(mi_segment_t, &abandoned, first, NULL)) return;
// if not, find the end of the list
mi_segment_t* last = first;
@@ -833,9 +833,9 @@ static void mi_segments_prepend_abandoned(mi_segment_t* first) {
// and atomically prepend
mi_segment_t* next;
do {
next = (mi_segment_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &abandoned));
next = mi_atomic_read_ptr_relaxed(mi_segment_t,&abandoned);
last->abandoned_next = next;
} while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*, &abandoned), first, next));
} while (!mi_atomic_cas_ptr_weak(mi_segment_t, &abandoned, first, next));
}
static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
@@ -877,9 +877,9 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld) {
// To avoid the A-B-A problem, grab the entire list atomically
mi_segment_t* segment = (mi_segment_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &abandoned)); // pre-read to avoid expensive atomic operations
mi_segment_t* segment = mi_atomic_read_ptr_relaxed(mi_segment_t,&abandoned); // pre-read to avoid expensive atomic operations
if (segment == NULL) return false;
segment = (mi_segment_t*)mi_atomic_exchange_ptr(mi_atomic_cast(void*, &abandoned), NULL);
segment = mi_atomic_exchange_ptr(mi_segment_t, &abandoned, NULL);
if (segment == NULL) return false;
// we got a non-empty list


@@ -26,13 +26,13 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
if (mi_is_in_main(stat))
{
// add atomically (for abandoned pages)
mi_atomic_add64(&stat->current,amount);
mi_atomic_addi64(&stat->current,amount);
if (stat->current > stat->peak) stat->peak = stat->current; // racing.. it's ok
if (amount > 0) {
mi_atomic_add64(&stat->allocated,amount);
mi_atomic_addi64(&stat->allocated,amount);
}
else {
mi_atomic_add64(&stat->freed, -amount);
mi_atomic_addi64(&stat->freed, -amount);
}
}
else {
@@ -50,8 +50,8 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
if (mi_is_in_main(stat)) {
mi_atomic_add64( &stat->count, 1 );
mi_atomic_add64( &stat->total, (int64_t)amount );
mi_atomic_addi64( &stat->count, 1 );
mi_atomic_addi64( &stat->total, (int64_t)amount );
}
else {
stat->count++;
@@ -70,17 +70,17 @@ void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) {
// must be thread safe as it is called from stats_merge
static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) {
if (stat==src) return;
mi_atomic_add64( &stat->allocated, src->allocated * unit);
mi_atomic_add64( &stat->current, src->current * unit);
mi_atomic_add64( &stat->freed, src->freed * unit);
mi_atomic_addi64( &stat->allocated, src->allocated * unit);
mi_atomic_addi64( &stat->current, src->current * unit);
mi_atomic_addi64( &stat->freed, src->freed * unit);
// peak scores do not work across threads..
mi_atomic_add64( &stat->peak, src->peak * unit);
mi_atomic_addi64( &stat->peak, src->peak * unit);
}
static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t* src, int64_t unit) {
if (stat==src) return;
mi_atomic_add64( &stat->total, src->total * unit);
mi_atomic_add64( &stat->count, src->count * unit);
mi_atomic_addi64( &stat->total, src->total * unit);
mi_atomic_addi64( &stat->count, src->count * unit);
}
// must be thread safe as it is called from stats_merge
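
As an aside on the "racing.. it's ok" comment in `mi_stat_update`: the peak value may miss an update when two threads race, which is acceptable for statistics. For contrast, a race-free peak would need a compare-and-swap maximum loop along these lines (a sketch of the alternative, deliberately not what this code does, since the extra RMW traffic is not worth it for stats):

#include <stdatomic.h>
#include <stdint.h>

// atomically raise *peak to at least `value`
static void atomic_max_i64(_Atomic(int64_t)* peak, int64_t value) {
  int64_t cur = atomic_load_explicit(peak, memory_order_relaxed);
  while (cur < value &&
         !atomic_compare_exchange_weak_explicit(peak, &cur, value,
                                                memory_order_relaxed, memory_order_relaxed)) {
    // on failure the CAS refreshed cur; keep trying while it is still smaller
  }
}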