- #define ASM_DMB_ISH "dmb ish\n\t"
- #if defined(__ARM_ARCH_7S__)
- // dmb ishst is sufficient on Apple's "Swift" (ARMv7s) CPU cores
- # define ASM_REL "dmb ishst\n\t"
- #else
- # define ASM_REL "dmb ish\n\t"
- #endif
- static inline void atomic_thread_fence(memory_order_relaxed_t)
- {
- }
- static inline void atomic_thread_fence(memory_order_acquire_t)
- {
- __asm__ __volatile__ ("dmb ld\n\t" : : : "memory");
- }
- static inline void atomic_thread_fence(memory_order_release_t)
- {
- __asm__ __volatile__ (ASM_REL : : : "memory");
- }
- static inline void atomic_thread_fence(memory_order_acq_rel_t)
- {
- __asm__ __volatile__ (ASM_DMB_ISH : : : "memory");
- }
- static inline void atomic_thread_fence(int /* memory_order_seq_cst_t */)
- {
- __asm__ __volatile__ (ASM_DMB_ISH : : : "memory");
- }
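- // Illustrative sketch, not part of the original header: standalone fences are what you pair
- // with plain (non-atomic-tagged) accesses. The fence_*_example names are hypothetical; the
- // memory_order_release/memory_order_acquire tag constants are assumed to be declared by the
- // surrounding headers (they are also used by atomic_retain/atomic_release further below).
- static inline void fence_publish_example(int* payload, volatile int* ready, int value)
- {
-     *payload = value;                             // plain store of the payload
-     atomic_thread_fence(memory_order_release);    // ASM_REL: payload ordered before the flag
-     *ready = 1;                                   // flag store
- }
- static inline int fence_consume_example(const int* payload, const volatile int* ready)
- {
-     while (*ready == 0) {}                        // spin until the flag is observed
-     atomic_thread_fence(memory_order_acquire);    // dmb ld: flag ordered before the payload read
-     return *payload;
- }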
- #define ATOMIC_LOAD(opc) \
- atomic_word res; \
- __asm__ __volatile__ \
- ( \
- opc " %0, %1\n\t" \
- : "=r" (res) \
- : "m" (*p) \
- ); \
- return res;
- /*
- * int support
- */
- static inline int atomic_load_explicit(const volatile int* p, memory_order_relaxed_t)
- {
- int res;
- __asm__ __volatile__
- (
- "ldr %w0, %1\n\t"
- : "=r" (res)
- : "m" (*p)
- );
- return res;
- }
- static inline int atomic_load_explicit(const volatile int* p, memory_order_acquire_t)
- {
- int res;
- __asm__ __volatile__
- (
- "ldar %w0, %1\n\t"
- : "=r" (res)
- : "m" (*p)
- );
- return res;
- }
- static inline int atomic_load_explicit(const volatile int* p, int /* memory_order_seq_cst_t */)
- {
- int res;
- __asm__ __volatile__
- (
- "ldar %w0, %1\n\t"
- : "=r" (res)
- : "m" (*p)
- );
- return res;
- }
- /*
- * native word support
- */
- static inline atomic_word atomic_load_explicit(const volatile atomic_word* p, memory_order_relaxed_t)
- {
- ATOMIC_LOAD("ldr")
- }
- static inline atomic_word atomic_load_explicit(const volatile atomic_word* p, memory_order_acquire_t)
- {
- ATOMIC_LOAD("ldar")
- }
- static inline atomic_word atomic_load_explicit(const volatile atomic_word* p, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_LOAD("ldar")
- }
- #define ATOMIC_STORE(opc) \
- __asm__ __volatile__ \
- ( \
- opc " %1, %0\n\t" \
- : "=m" (*p) \
- : "r" (v) \
- : "memory" \
- );
- /*
- * int support
- */
- static inline void atomic_store_explicit(volatile int* p, int v, memory_order_relaxed_t)
- {
- __asm__ __volatile__
- (
- "str %w1, %0\n\t"
- : "=m" (*p)
- : "r" (v)
- : "memory"
- );
- }
- static inline void atomic_store_explicit(volatile int* p, int v, memory_order_release_t)
- {
- __asm__ __volatile__
- (
- "stlr %w1, %0\n\t"
- : "=m" (*p)
- : "r" (v)
- : "memory"
- );
- }
- static inline void atomic_store_explicit(volatile int* p, int v, int /* memory_order_seq_cst_t */)
- {
- __asm__ __volatile__
- (
- "stlr %w1, %0\n\t"
- : "=m" (*p)
- : "r" (v)
- : "memory"
- );
- }
- /*
- * native word support
- */
- static inline void atomic_store_explicit(volatile atomic_word* p, atomic_word v, memory_order_relaxed_t)
- {
- ATOMIC_STORE("str")
- }
- static inline void atomic_store_explicit(volatile atomic_word* p, atomic_word v, memory_order_release_t)
- {
- ATOMIC_STORE("stlr")
- }
- static inline void atomic_store_explicit(volatile atomic_word* p, atomic_word v, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_STORE("stlr")
- }
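- // Note on the catch-all int overloads above: the seq_cst load/store reuse the same ldar/stlr as
- // acquire/release because ARMv8 keeps an ldar ordered after every program-earlier stlr, which is
- // exactly the extra store->load ordering sequential consistency needs.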
- #define ATOMIC_PFIX_int "%w"
- #define ATOMIC_PFIX_atomic_word "%"
- #define ATOMIC_PFIX(WORD) ATOMIC_PFIX_##WORD
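- // ATOMIC_PFIX picks the register form used inside the exclusive-access templates below: "%w"
- // selects the 32-bit W view of the operand register for int, while "%" keeps the 64-bit X view
- // for atomic_word. Illustrative expansion: ATOMIC_XCHG(int, "ldxr", "stxr") emits
- // "ldxr %w2, [%4]" / "stxr %w0, %w3, [%4]", whereas the atomic_word form emits
- // "ldxr %2, [%4]" / "stxr %w0, %3, [%4]" (X-register data, W-register status flag).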
- #define ATOMIC_XCHG(WORD, LD, ST) \
- atomic_word res; \
- atomic_word success; \
- __asm__ __volatile__ \
- ( \
- "0:\n\t" \
- LD " " ATOMIC_PFIX(WORD) "2, [%4]\n\t" \
- ST " %w0, " ATOMIC_PFIX(WORD) "3, [%4]\n\t" \
- "cbnz %w0, 0b\n\t" \
- : "=&r" (success), "+m" (*p), "=&r" (res) \
- : "r" (v), "r" (p) \
- : "memory" \
- ); \
- return res;
- /*
- * int support
- */
- static inline int atomic_exchange_explicit(volatile int* p, int v, memory_order_relaxed_t)
- {
- ATOMIC_XCHG(int, "ldxr", "stxr")
- }
- static inline int atomic_exchange_explicit(volatile int* p, int v, memory_order_acquire_t)
- {
- ATOMIC_XCHG(int, "ldaxr", "stxr")
- }
- static inline int atomic_exchange_explicit(volatile int* p, int v, memory_order_release_t)
- {
- ATOMIC_XCHG(int, "ldxr", "stlxr")
- }
- static inline int atomic_exchange_explicit(volatile int* p, int v, memory_order_acq_rel_t)
- {
- ATOMIC_XCHG(int, "ldaxr", "stlxr")
- }
- static inline int atomic_exchange_explicit(volatile int* p, int v, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_XCHG(int, "ldaxr", "stlxr")
- }
- /*
- * native word support
- */
- static inline atomic_word atomic_exchange_explicit(volatile atomic_word* p, atomic_word v, memory_order_relaxed_t)
- {
- ATOMIC_XCHG(atomic_word, "ldxr", "stxr")
- }
- static inline atomic_word atomic_exchange_explicit(volatile atomic_word* p, atomic_word v, memory_order_acquire_t)
- {
- ATOMIC_XCHG(atomic_word, "ldaxr", "stxr")
- }
- static inline atomic_word atomic_exchange_explicit(volatile atomic_word* p, atomic_word v, memory_order_release_t)
- {
- ATOMIC_XCHG(atomic_word, "ldxr", "stlxr")
- }
- static inline atomic_word atomic_exchange_explicit(volatile atomic_word* p, atomic_word v, memory_order_acq_rel_t)
- {
- ATOMIC_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- static inline atomic_word atomic_exchange_explicit(volatile atomic_word* p, atomic_word v, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_XCHG(atomic_word, "ldaxr", "stlxr")
- }
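- // Illustrative sketch, not part of the original header: a minimal test-and-set spinlock built
- // from the int overloads above. The spinlock_*_example names are hypothetical; the
- // memory_order_acquire/memory_order_release tag constants are assumed to be declared by the
- // surrounding headers.
- static inline void spinlock_lock_example(volatile int* lock)
- {
-     // ldaxr/stxr exchange: acquire ordering on the iteration that actually takes the lock
-     while (atomic_exchange_explicit(lock, 1, memory_order_acquire) != 0)
-     {
-     }
- }
- static inline void spinlock_unlock_example(volatile int* lock)
- {
-     // stlr store: release ordering so the critical section is visible before the unlock
-     atomic_store_explicit(lock, 0, memory_order_release);
- }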
- // atomic_compare_exchange_weak_explicit: may fail spuriously, i.e. return false even when
- // *p == *oldval (for example when the exclusive reservation is lost)
- #undef ATOMIC_CMP_XCHG
- #define ATOMIC_CMP_XCHG(WORD, LD, ST) \
- atomic_word res; \
- atomic_word failure = 1; \
- __asm__ __volatile__ \
- ( \
- LD " " ATOMIC_PFIX(WORD) "2, [%4] \n\t" \
- "cmp " ATOMIC_PFIX(WORD) "2, " ATOMIC_PFIX(WORD) "5 \n\t" \
- "b.ne 1f \n\t" \
- ST " %w0, " ATOMIC_PFIX(WORD) "3, [%4] \n\t" \
- "1: \n\t" \
- "clrex \n\t" \
- : "+&r" (failure), "+m" (*p), "=&r" (res) \
- : "r" (newval), "r" (p), "r" (*oldval) \
- : "cc", "memory" \
- ); \
- *oldval = res; \
- return (failure == 0);
- /*
- * int support
- */
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, memory_order_relaxed_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldxr", "stxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, memory_order_acquire_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, memory_order_release_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, memory_order_acq_rel_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, int /* memory_order_seq_cst_t */, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, memory_order_acquire_t, memory_order_acquire_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, memory_order_release_t, memory_order_release_t)
- {
- ATOMIC_CMP_XCHG(int, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, memory_order_acq_rel_t, memory_order_acq_rel_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile int* p, int *oldval, int newval, int /* memory_order_seq_cst_t */, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- /*
- * native word support
- */
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_relaxed_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldxr", "stxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acquire_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_release_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acq_rel_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, int /* memory_order_seq_cst_t */, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acquire_t, memory_order_acquire_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_release_t, memory_order_release_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acq_rel_t, memory_order_acq_rel_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, int /* memory_order_seq_cst_t */, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
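- // Illustrative sketch, not part of the original header: the canonical retry loop around the
- // weak compare-exchange. atomic_fetch_or_example is hypothetical; a spurious failure simply
- // refreshes oldval and retries, which is why the weak form is sufficient inside a loop.
- static inline int atomic_fetch_or_example(volatile int* p, int bits)
- {
-     int oldval = atomic_load_explicit(p, memory_order_relaxed);
-     // on failure oldval is reloaded with the current value of *p, so the loop makes progress
-     while (!atomic_compare_exchange_weak_explicit(p, &oldval, oldval | bits,
-                                                   memory_order_relaxed, memory_order_relaxed))
-     {
-     }
-     return oldval;
- }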
- // atomic_compare_exchange_strong_explicit: retries the store-exclusive in a loop, so it
- // returns false only when *p != *oldval (no spurious failures)
- #undef ATOMIC_CMP_XCHG
- #define ATOMIC_CMP_XCHG(WORD, LD, ST) \
- atomic_word res; \
- atomic_word failure = 1; \
- __asm__ __volatile__ \
- ( \
- "0: \n\t" \
- "mov %w0, #1 \n\t" /* reset failure each loop */ \
- LD " " ATOMIC_PFIX(WORD) "2, [%4] \n\t" \
- "cmp " ATOMIC_PFIX(WORD) "2, " ATOMIC_PFIX(WORD) "5 \n\t" \
- "b.ne 1f \n\t" \
- ST " %w0, " ATOMIC_PFIX(WORD) "3, [%4] \n\t" \
- "cbnz %w0, 0b \n\t" \
- "1: \n\t" \
- "clrex \n\t" \
- : "+&r" (failure), "+m" (*p), "=&r" (res) \
- : "r" (newval), "r" (p), "r" (*oldval) \
- : "cc", "memory" \
- ); \
- *oldval = res; \
- return (failure == 0);
- /*
- * int support
- */
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, memory_order_relaxed_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldxr", "stxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, memory_order_acquire_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, memory_order_release_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, memory_order_acq_rel_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, int /* memory_order_seq_cst_t */, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, memory_order_acquire_t, memory_order_acquire_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, memory_order_release_t, memory_order_release_t)
- {
- ATOMIC_CMP_XCHG(int, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, memory_order_acq_rel_t, memory_order_acq_rel_t)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile int* p, int *oldval, int newval, int /* memory_order_seq_cst_t */, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_CMP_XCHG(int, "ldaxr", "stlxr")
- }
- /*
- * native word support
- */
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_relaxed_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldxr", "stxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acquire_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_release_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acq_rel_t, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, int /* memory_order_seq_cst_t */, memory_order_relaxed_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acquire_t, memory_order_acquire_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_release_t, memory_order_release_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, memory_order_acq_rel_t, memory_order_acq_rel_t)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word* p, atomic_word *oldval, atomic_word newval, int /* memory_order_seq_cst_t */, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_CMP_XCHG(atomic_word, "ldaxr", "stlxr")
- }
- #define ATOMIC_OP(WORD, LD, ST, OP) \
- long long res, tmp; \
- int success; \
- __asm__ __volatile__ \
- ( \
- "0: \n\t" \
- LD " " ATOMIC_PFIX(WORD) "2, [%5] \n\t" \
- OP " " ATOMIC_PFIX(WORD) "3, " ATOMIC_PFIX(WORD) "2, " ATOMIC_PFIX(WORD) "4 \n\t" \
- ST " %w0, " ATOMIC_PFIX(WORD) "3, [%5] \n\t" \
- "cbnz %w0, 0b \n\t" \
- : "=&r" (success), "+m" (*p), "=&r" (res), "=&r" (tmp) \
- : "Ir" ((long long) v), "r" (p) \
- : "cc", "memory" \
- ); \
- return (WORD) res;
- /*
- * int support
- */
- static inline int atomic_fetch_add_explicit(volatile int* p, int v, memory_order_relaxed_t)
- {
- ATOMIC_OP(int, "ldxr", "stxr", "add")
- }
- static inline int atomic_fetch_add_explicit(volatile int* p, int v, memory_order_acquire_t)
- {
- ATOMIC_OP(int, "ldaxr", "stxr", "add")
- }
- static inline int atomic_fetch_add_explicit(volatile int* p, int v, memory_order_release_t)
- {
- ATOMIC_OP(int, "ldxr", "stlxr", "add")
- }
- static inline int atomic_fetch_add_explicit(volatile int* p, int v, memory_order_acq_rel_t)
- {
- ATOMIC_OP(int, "ldaxr", "stlxr", "add")
- }
- static inline int atomic_fetch_add_explicit(volatile int* p, int v, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_OP(int, "ldaxr", "stlxr", "add")
- }
- /*
- * native word support
- */
- static inline atomic_word atomic_fetch_add_explicit(volatile atomic_word* p, atomic_word v, memory_order_relaxed_t)
- {
- ATOMIC_OP(atomic_word, "ldxr", "stxr", "add")
- }
- static inline atomic_word atomic_fetch_add_explicit(volatile atomic_word* p, atomic_word v, memory_order_acquire_t)
- {
- ATOMIC_OP(atomic_word, "ldaxr", "stxr", "add")
- }
- static inline atomic_word atomic_fetch_add_explicit(volatile atomic_word* p, atomic_word v, memory_order_release_t)
- {
- ATOMIC_OP(atomic_word, "ldxr", "stlxr", "add")
- }
- static inline atomic_word atomic_fetch_add_explicit(volatile atomic_word* p, atomic_word v, memory_order_acq_rel_t)
- {
- ATOMIC_OP(atomic_word, "ldaxr", "stlxr", "add")
- }
- static inline atomic_word atomic_fetch_add_explicit(volatile atomic_word* p, atomic_word v, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_OP(atomic_word, "ldaxr", "stlxr", "add")
- }
- /*
- * int support
- */
- static inline int atomic_fetch_sub_explicit(volatile int* p, int v, memory_order_relaxed_t)
- {
- ATOMIC_OP(int, "ldxr", "stxr", "sub")
- }
- static inline int atomic_fetch_sub_explicit(volatile int* p, int v, memory_order_acquire_t)
- {
- ATOMIC_OP(int, "ldaxr", "stxr", "sub")
- }
- static inline int atomic_fetch_sub_explicit(volatile int* p, int v, memory_order_release_t)
- {
- ATOMIC_OP(int, "ldxr", "stlxr", "sub")
- }
- static inline int atomic_fetch_sub_explicit(volatile int* p, int v, memory_order_acq_rel_t)
- {
- ATOMIC_OP(int, "ldaxr", "stlxr", "sub")
- }
- static inline int atomic_fetch_sub_explicit(volatile int* p, int v, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_OP(int, "ldaxr", "stlxr", "sub")
- }
- /*
- * native word support
- */
- static inline atomic_word atomic_fetch_sub_explicit(volatile atomic_word* p, atomic_word v, memory_order_relaxed_t)
- {
- ATOMIC_OP(atomic_word, "ldxr", "stxr", "sub")
- }
- static inline atomic_word atomic_fetch_sub_explicit(volatile atomic_word* p, atomic_word v, memory_order_acquire_t)
- {
- ATOMIC_OP(atomic_word, "ldaxr", "stxr", "sub")
- }
- static inline atomic_word atomic_fetch_sub_explicit(volatile atomic_word* p, atomic_word v, memory_order_release_t)
- {
- ATOMIC_OP(atomic_word, "ldxr", "stlxr", "sub")
- }
- static inline atomic_word atomic_fetch_sub_explicit(volatile atomic_word* p, atomic_word v, memory_order_acq_rel_t)
- {
- ATOMIC_OP(atomic_word, "ldaxr", "stlxr", "sub")
- }
- static inline atomic_word atomic_fetch_sub_explicit(volatile atomic_word* p, atomic_word v, int /* memory_order_seq_cst_t */)
- {
- ATOMIC_OP(atomic_word, "ldaxr", "stlxr", "sub")
- }
- /*
- * extensions
- */
- static inline void atomic_retain(volatile int* p)
- {
- atomic_fetch_add_explicit(p, 1, memory_order_relaxed);
- }
- static inline bool atomic_release(volatile int* p)
- {
- bool res = atomic_fetch_sub_explicit(p, 1, memory_order_release) == 1;
- if (res)
- {
- atomic_thread_fence(memory_order_acquire);
- }
- return res;
- }
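- // The ordering split in atomic_release is the usual reference-count protocol: the release
- // decrement makes every earlier write to the object visible to whichever thread drops the last
- // reference, and that thread's acquire fence makes those writes visible before it destroys the
- // object. Illustrative usage, not part of the original header (obj and destroy are hypothetical):
- //
- //     atomic_retain(&obj->refcount);          // take a reference
- //     ...
- //     if (atomic_release(&obj->refcount))     // true only when dropping the last reference
- //         destroy(obj);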
- /*
- * double word
- */
- // Note: a lone ldxp (or stxp) is not guaranteed to be single-copy atomic on ARM64, so the only
- // way to get an atomic 128-bit access is a full ldxp/stxp confirmation loop, even for plain
- // loads and stores
- static inline atomic_word2 atomic_load_explicit(const volatile atomic_word2* p, memory_order_relaxed_t)
- {
- atomic_word2 v;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldxp\t%1, %2, [%3]\n\t"
- "stxp\t%w0, %1, %2, [%3]\n\t"
- "cbnz\t%w0, 0b\n\t"
- : "=&r" (success), "=&r" (v.lo), "=&r" (v.hi)
- : "r" (p)
- );
- return v;
- }
- static inline atomic_word2 atomic_load_explicit(const volatile atomic_word2* p, memory_order_acquire_t)
- {
- atomic_word2 v;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldaxp\t%1, %2, [%3]\n\t"
- "stxp\t%w0, %1, %2, [%3]\n\t"
- "cbnz\t%w0, 0b\n\t"
- : "=&r" (success), "=&r" (v.lo), "=&r" (v.hi)
- : "r" (p)
- );
- return v;
- }
- static inline void atomic_store_explicit(volatile atomic_word2* p, atomic_word2 v, memory_order_relaxed_t)
- {
- atomic_word lo;
- atomic_word hi;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldxp\t%2, %3, [%6]\n\t"
- "stxp\t%w0, %4, %5, [%6]\n\t"
- "cbnz\t%w0, 0b\n\t"
- : "=&r" (success), "=m" (*p), "=&r" (lo), "=&r" (hi)
- : "r" (v.lo), "r" (v.hi), "r" (p)
- : "memory"
- );
- }
- static inline void atomic_store_explicit(volatile atomic_word2* p, atomic_word2 v, memory_order_release_t)
- {
- atomic_word lo;
- atomic_word hi;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldxp\t%2, %3, [%6]\n\t"
- "stlxp\t%w0, %4, %5, [%6]\n\t"
- "cbnz\t%w0, 0b\n\t"
- : "=&r" (success), "=m" (*p), "=&r" (lo), "=&r" (hi)
- : "r" (v.lo), "r" (v.hi), "r" (p)
- : "memory"
- );
- }
- static inline atomic_word2 atomic_exchange_explicit(volatile atomic_word2* p, atomic_word2 val, memory_order_acq_rel_t)
- {
- atomic_word2 oldval;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldaxp\t%2, %3, [%6]\n\t"
- "stlxp\t%w0, %5, %4, [%6]\n\t"
- "cbnz\t%w0, 0b\n\t"
- : "=&r" (success), "+m" (*p), "=&r" (oldval.lo), "=&r" (oldval.hi)
- : "r" (val.hi), "r" (val.lo), "r" (p)
- : "memory"
- );
- return oldval;
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word2* p, atomic_word2* oldval, atomic_word2 newval, memory_order_acquire_t, memory_order_relaxed_t)
- {
- atomic_word lo = oldval->lo;
- atomic_word hi = oldval->hi;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldaxp\t%2, %3, [%8]\n\t"
- "cmp\t%3, %5\n\t"
- "b.ne\t1f\n\t"
- "cmp\t%2, %4\n\t"
- "b.ne\t1f\n\t"
- "stxp\t%w0, %6, %7, [%8]\n\t"
- "cbnz\t%w0, 0b\n\t"
- "1:\n\t"
- "clrex\n\t"
- : "=&r" (success), "+m" (*p), "=&r" (oldval->lo), "=&r" (oldval->hi)
- : "r" (lo), "r" (hi), "r" (newval.lo), "r" (newval.hi), "r" (p), "0" (1)
- : "cc", "memory"
- );
- return success == 0;
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word2* p, atomic_word2* oldval, atomic_word2 newval, memory_order_release_t, memory_order_relaxed_t)
- {
- atomic_word lo = oldval->lo;
- atomic_word hi = oldval->hi;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldxp\t%2, %3, [%8]\n\t"
- "cmp\t%3, %5\n\t"
- "b.ne\t1f\n\t"
- "cmp\t%2, %4\n\t"
- "b.ne\t1f\n\t"
- "stlxp\t%w0, %6, %7, [%8]\n\t"
- "cbnz\t%w0, 0b\n\t"
- "1:\n\t"
- "clrex\n\t"
- : "=&r" (success), "+m" (*p), "=&r" (oldval->lo), "=&r" (oldval->hi)
- : "r" (lo), "r" (hi), "r" (newval.lo), "r" (newval.hi), "r" (p), "0" (1)
- : "cc", "memory"
- );
- return success == 0;
- }
- static inline bool atomic_compare_exchange_strong_explicit(volatile atomic_word2* p, atomic_word2* oldval, atomic_word2 newval, int /*memory_order_acq_rel_t*/, memory_order_relaxed_t)
- {
- atomic_word lo = oldval->lo;
- atomic_word hi = oldval->hi;
- atomic_word success;
- __asm__ __volatile__
- (
- "0:\n\t"
- "ldaxp\t%2, %3, [%8]\n\t"
- "cmp\t%3, %5\n\t"
- "b.ne\t1f\n\t"
- "cmp\t%2, %4\n\t"
- "b.ne\t1f\n\t"
- "stlxp\t%w0, %6, %7, [%8]\n\t"
- "cbnz\t%w0, 0b\n\t"
- "1:\n\t"
- "clrex\n\t"
- : "=&r" (success), "+m" (*p), "=&r" (oldval->lo), "=&r" (oldval->hi)
- : "r" (lo), "r" (hi), "r" (newval.lo), "r" (newval.hi), "r" (p), "0" (1)
- : "cc", "memory"
- );
- return success == 0;
- }
- template<class SuccOrder, class FailOrder>
- static inline bool atomic_compare_exchange_weak_explicit(volatile atomic_word2* p, atomic_word2* oldval, atomic_word2 newval, SuccOrder o1, FailOrder o2)
- {
- // TODO: implement proper weak compare exchange
- return atomic_compare_exchange_strong_explicit(p, oldval, newval, o1, o2);
- }
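- // Illustrative sketch, not part of the original header: the double-word compare-exchange is
- // what a lock-free list head uses to pair a pointer with an ABA/version counter. The
- // push_head_example name and the lo=pointer / hi=counter layout are hypothetical; a real push
- // would relink and retry when the attempt returns false.
- static inline bool push_head_example(volatile atomic_word2* head, atomic_word node)
- {
-     atomic_word2 oldhead = atomic_load_explicit(head, memory_order_acquire);
-     atomic_word2 newhead;
-     newhead.lo = node;               // new head pointer
-     newhead.hi = oldhead.hi + 1;     // bump the version counter to defeat ABA
-     return atomic_compare_exchange_strong_explicit(head, &oldhead, newhead,
-                                                    memory_order_release, memory_order_relaxed);
- }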
|