lib: atomics: fix AMO test macros

The "RISC-V C API" [1] defines architecture extension test macros.
The naming rule for these test macros is __riscv_<ext_name>, where
<ext_name> is all lower-case.

Three extensions deal with the atomics implementation:
  "zaamo" consists of the AMO instructions,
  "zalrsc" consists of the LR/SC instructions,
  the "a" extension means both "zaamo" and "zalrsc".
Built-in test macros are __riscv_a, __riscv_zaamo and __riscv_zalrsc.
Alternative to the __riscv_a macro name, __riscv_atomic, is deprecated.

Use correct test macro __riscv_zaamo for the AMO variant of atomics.
It used to be __riscv_atomic, which is both deprecated and incorrect
because it tests for the "a" extension, i.e. both "zaamo" and "zalrsc".
If the ISA enables only zaamo but not zalrsc, the code as it was would
not compile.

Older toolchains may define neither __riscv_zaamo nor __riscv_zalrsc, so
also query __riscv_atomic - it should be treated as implying both
__riscv_zaamo and __riscv_zalrsc. In all present cases __riscv_zaamo is
the more favorable choice, so take __riscv_atomic as an alternative for
__riscv_zaamo.

[1] https://github.com/riscv-non-isa/riscv-c-api-doc

Signed-off-by: Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20251228073321.1533844-1-vladimir.kondratiev@mobileye.com
Signed-off-by: Anup Patel <anup@brainfault.org>
This commit is contained in:
Vladimir Kondratiev
2025-12-28 09:33:21 +02:00
committed by Anup Patel
parent 4c1c77e085
commit f6fa62bd16
5 changed files with 19 additions and 19 deletions

View File

@@ -59,10 +59,10 @@ _try_lottery:
/* Jump to relocation wait loop if we don't get relocation lottery */ /* Jump to relocation wait loop if we don't get relocation lottery */
lla a6, _boot_lottery lla a6, _boot_lottery
li a7, BOOT_LOTTERY_ACQUIRED li a7, BOOT_LOTTERY_ACQUIRED
#ifdef __riscv_atomic #if defined(__riscv_atomic) || defined(__riscv_zaamo)
amoswap.w a6, a7, (a6) amoswap.w a6, a7, (a6)
bnez a6, _wait_for_boot_hart bnez a6, _wait_for_boot_hart
#elif __riscv_zalrsc #elif defined(__riscv_zalrsc)
_sc_fail: _sc_fail:
lr.w t0, (a6) lr.w t0, (a6)
sc.w t1, a7, (a6) sc.w t1, a7, (a6)

View File

@@ -30,9 +30,9 @@ _start:
/* Pick one hart to run the main boot sequence */ /* Pick one hart to run the main boot sequence */
lla a3, _hart_lottery lla a3, _hart_lottery
li a2, 1 li a2, 1
#ifdef __riscv_atomic #if defined(__riscv_atomic) || defined(__riscv_zaamo)
amoadd.w a3, a2, (a3) amoadd.w a3, a2, (a3)
#elif __riscv_zalrsc #elif defined(__riscv_zalrsc)
_sc_fail: _sc_fail:
lr.w t0, (a3) lr.w t0, (a3)
addw t1, t0, a2 addw t1, t0, a2

View File

@@ -12,8 +12,8 @@
#include <sbi/riscv_atomic.h> #include <sbi/riscv_atomic.h>
#include <sbi/riscv_barrier.h> #include <sbi/riscv_barrier.h>
#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc) #if !defined(__riscv_atomic) && !defined(__riscv_zaamo) && !defined(__riscv_zalrsc)
#error "opensbi strongly relies on the A extension of RISC-V" #error "opensbi strongly relies on the Zaamo or Zalrsc extensions of RISC-V"
#endif #endif
long atomic_read(atomic_t *atom) long atomic_read(atomic_t *atom)
@@ -31,7 +31,7 @@ void atomic_write(atomic_t *atom, long value)
long atomic_add_return(atomic_t *atom, long value) long atomic_add_return(atomic_t *atom, long value)
{ {
#ifdef __riscv_atomic #if defined(__riscv_atomic) || defined(__riscv_zaamo)
long ret; long ret;
#if __SIZEOF_LONG__ == 4 #if __SIZEOF_LONG__ == 4
__asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0" __asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
@@ -44,7 +44,7 @@ long atomic_add_return(atomic_t *atom, long value)
: "r"(value) : "r"(value)
: "memory"); : "memory");
#endif #endif
#elif __riscv_zalrsc #elif defined(__riscv_zalrsc)
long ret, temp; long ret, temp;
#if __SIZEOF_LONG__ == 4 #if __SIZEOF_LONG__ == 4
__asm__ __volatile__("1:lr.w.aqrl %1,%0\n" __asm__ __volatile__("1:lr.w.aqrl %1,%0\n"
@@ -64,7 +64,7 @@ long atomic_add_return(atomic_t *atom, long value)
: "memory"); : "memory");
#endif #endif
#else #else
#error "need a or zalrsc" #error "need A or Zaamo or Zalrsc"
#endif #endif
return ret + value; return ret + value;
@@ -75,7 +75,7 @@ long atomic_sub_return(atomic_t *atom, long value)
return atomic_add_return(atom, -value); return atomic_add_return(atom, -value);
} }
#ifdef __riscv_atomic #if defined(__riscv_atomic) || defined(__riscv_zaamo)
#define __axchg(ptr, new, size) \ #define __axchg(ptr, new, size) \
({ \ ({ \
__typeof__(ptr) __ptr = (ptr); \ __typeof__(ptr) __ptr = (ptr); \
@@ -101,7 +101,7 @@ long atomic_sub_return(atomic_t *atom, long value)
} \ } \
__ret; \ __ret; \
}) })
#elif __riscv_zalrsc #elif defined(__riscv_zalrsc)
#define __axchg(ptr, new, size) \ #define __axchg(ptr, new, size) \
({ \ ({ \
__typeof__(ptr) __ptr = (ptr); \ __typeof__(ptr) __ptr = (ptr); \
@@ -132,7 +132,7 @@ long atomic_sub_return(atomic_t *atom, long value)
__ret; \ __ret; \
}) })
#else #else
#error "need a or zalrsc" #error "need A or Zaamo or Zalrsc"
#endif #endif
#define axchg(ptr, x) \ #define axchg(ptr, x) \

View File

@@ -53,15 +53,15 @@ void spin_lock(spinlock_t *lock)
__asm__ __volatile__( __asm__ __volatile__(
/* Atomically increment the next ticket. */ /* Atomically increment the next ticket. */
#ifdef __riscv_atomic #if defined(__riscv_atomic) || defined(__riscv_zaamo)
" amoadd.w.aqrl %0, %4, %3\n" " amoadd.w.aqrl %0, %4, %3\n"
#elif __riscv_zalrsc #elif defined(__riscv_zalrsc)
"3: lr.w.aqrl %0, %3\n" "3: lr.w.aqrl %0, %3\n"
" addw %1, %0, %4\n" " addw %1, %0, %4\n"
" sc.w.aqrl %1, %1, %3\n" " sc.w.aqrl %1, %1, %3\n"
" bnez %1, 3b\n" " bnez %1, 3b\n"
#else #else
#error "need a or zalrsc" #error "need A or Zaamo or Zalrsc"
#endif #endif
/* Did we get the lock? */ /* Did we get the lock? */

View File

@@ -11,18 +11,18 @@
#include <sbi/sbi_illegal_atomic.h> #include <sbi/sbi_illegal_atomic.h>
#include <sbi/sbi_illegal_insn.h> #include <sbi/sbi_illegal_insn.h>
#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc) #if !defined(__riscv_atomic) && !defined(__riscv_zaamo) && !defined(__riscv_zalrsc)
#error "opensbi strongly relies on the A extension of RISC-V" #error "opensbi strongly relies on the Zaamo or Zalrsc extension of RISC-V"
#endif #endif
#ifdef __riscv_atomic #if defined(__riscv_atomic) || defined(__riscv_zaamo)
int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs) int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs)
{ {
return truly_illegal_insn(insn, regs); return truly_illegal_insn(insn, regs);
} }
#elif __riscv_zalrsc #elif defined(__riscv_zalrsc)
#define DEFINE_UNPRIVILEGED_LR_FUNCTION(type, aqrl, insn) \ #define DEFINE_UNPRIVILEGED_LR_FUNCTION(type, aqrl, insn) \
static type lr_##type##aqrl(const type *addr, \ static type lr_##type##aqrl(const type *addr, \