From f6fa62bd169e55f479b137da13ca83b27bb304a9 Mon Sep 17 00:00:00 2001
From: Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
Date: Sun, 28 Dec 2025 09:33:21 +0200
Subject: [PATCH] lib: atomics: fix AMO test macros

The "RISC-V C API" [1] defines architecture extension test macros. The
naming rule for these macros is __riscv_<extension>, where <extension>
is all lower-case.

Three extensions deal with the atomics implementation:
- "zaamo" consists of the AMO instructions
- "zalrsc" covers LR/SC
- the "a" extension means both "zaamo" and "zalrsc"

The built-in test macros are __riscv_a, __riscv_zaamo and
__riscv_zalrsc. The alternative name for the __riscv_a macro,
__riscv_atomic, is deprecated.

Use the correct test macro, __riscv_zaamo, for the AMO variant of the
atomics. The code used to test __riscv_atomic, which is both deprecated
and incorrect because it tests for the "a" extension, i.e. for both
"zaamo" and "zalrsc". If the ISA enables only zaamo but not zalrsc, the
code as it was would not compile.

Older toolchains may define neither __riscv_zaamo nor __riscv_zalrsc,
so also query __riscv_atomic: it should be treated as implying both
__riscv_zaamo and __riscv_zalrsc. In all present cases the AMO path is
preferable, so accept __riscv_atomic as an alternative to
__riscv_zaamo.

[1] https://github.com/riscv-non-isa/riscv-c-api-doc

Signed-off-by: Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
Reviewed-by: Anup Patel
Link: https://lore.kernel.org/r/20251228073321.1533844-1-vladimir.kondratiev@mobileye.com
Signed-off-by: Anup Patel
---
 firmware/fw_base.S            |  4 ++--
 firmware/payloads/test_head.S |  4 ++--
 lib/sbi/riscv_atomic.c        | 16 ++++++++--------
 lib/sbi/riscv_locks.c         |  6 +++---
 lib/sbi/sbi_illegal_atomic.c  |  8 ++++----
 5 files changed, 19 insertions(+), 19 deletions(-)
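
To make the macro-selection logic above concrete, it boils down to the
following minimal, standalone sketch. It is illustrative only and not
part of the patch; the ATOMIC_IMPL macro and the printed string are
invented for this example:

/*
 * Illustrative sketch only: pick the atomics implementation the same
 * way the patched sources do. The deprecated __riscv_atomic macro is
 * accepted as an alias for __riscv_zaamo so that older toolchains
 * still select the AMO path; otherwise fall back to Zalrsc, and fail
 * the build if neither is available.
 */
#include <stdio.h>

#if defined(__riscv_atomic) || defined(__riscv_zaamo)
#define ATOMIC_IMPL "AMO instructions (Zaamo, or legacy __riscv_atomic)"
#elif defined(__riscv_zalrsc)
#define ATOMIC_IMPL "LR/SC sequences (Zalrsc)"
#else
#error "need A or Zaamo or Zalrsc"
#endif

int main(void)
{
	printf("atomics implemented via: %s\n", ATOMIC_IMPL);
	return 0;
}

Checking __riscv_atomic alongside __riscv_zaamo is what keeps the AMO
path selected on toolchains that only define the deprecated macro.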
diff --git a/firmware/fw_base.S b/firmware/fw_base.S
index 5300ecf2..bce9e226 100644
--- a/firmware/fw_base.S
+++ b/firmware/fw_base.S
@@ -59,10 +59,10 @@ _try_lottery:
 	/* Jump to relocation wait loop if we don't get relocation lottery */
 	lla	a6, _boot_lottery
 	li	a7, BOOT_LOTTERY_ACQUIRED
-#ifdef __riscv_atomic
+#if defined(__riscv_atomic) || defined(__riscv_zaamo)
 	amoswap.w a6, a7, (a6)
 	bnez	a6, _wait_for_boot_hart
-#elif __riscv_zalrsc
+#elif defined(__riscv_zalrsc)
 _sc_fail:
 	lr.w	t0, (a6)
 	sc.w	t1, a7, (a6)
diff --git a/firmware/payloads/test_head.S b/firmware/payloads/test_head.S
index 070ce8aa..9a87e56f 100644
--- a/firmware/payloads/test_head.S
+++ b/firmware/payloads/test_head.S
@@ -30,9 +30,9 @@ _start:
 	/* Pick one hart to run the main boot sequence */
 	lla	a3, _hart_lottery
 	li	a2, 1
-#ifdef __riscv_atomic
+#if defined(__riscv_atomic) || defined(__riscv_zaamo)
 	amoadd.w a3, a2, (a3)
-#elif __riscv_zalrsc
+#elif defined(__riscv_zalrsc)
_sc_fail:
 	lr.w	t0, (a3)
 	addw	t1, t0, a2
diff --git a/lib/sbi/riscv_atomic.c b/lib/sbi/riscv_atomic.c
index df16a2eb..fcf07f27 100644
--- a/lib/sbi/riscv_atomic.c
+++ b/lib/sbi/riscv_atomic.c
@@ -12,8 +12,8 @@
 #include
 #include
 
-#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc)
-#error "opensbi strongly relies on the A extension of RISC-V"
+#if !defined(__riscv_atomic) && !defined(__riscv_zaamo) && !defined(__riscv_zalrsc)
+#error "opensbi strongly relies on the Zaamo or Zalrsc extensions of RISC-V"
 #endif
 
 long atomic_read(atomic_t *atom)
@@ -31,7 +31,7 @@ void atomic_write(atomic_t *atom, long value)
 
 long atomic_add_return(atomic_t *atom, long value)
 {
-#ifdef __riscv_atomic
+#if defined(__riscv_atomic) || defined(__riscv_zaamo)
 	long ret;
 #if __SIZEOF_LONG__ == 4
 	__asm__ __volatile__("	amoadd.w.aqrl	%1, %2, %0"
@@ -44,7 +44,7 @@ long atomic_add_return(atomic_t *atom, long value)
 			     : "r"(value)
 			     : "memory");
 #endif
-#elif __riscv_zalrsc
+#elif defined(__riscv_zalrsc)
 	long ret, temp;
 #if __SIZEOF_LONG__ == 4
 	__asm__ __volatile__("1:lr.w.aqrl	%1,%0\n"
@@ -64,7 +64,7 @@ long atomic_add_return(atomic_t *atom, long value)
 			     : "memory");
 #endif
 #else
-#error "need a or zalrsc"
+#error "need A or Zaamo or Zalrsc"
 #endif
 
 	return ret + value;
@@ -75,7 +75,7 @@ long atomic_sub_return(atomic_t *atom, long value)
 	return atomic_add_return(atom, -value);
 }
 
-#ifdef __riscv_atomic
+#if defined(__riscv_atomic) || defined(__riscv_zaamo)
 #define __axchg(ptr, new, size)						\
 	({								\
 		__typeof__(ptr) __ptr = (ptr);				\
@@ -101,7 +101,7 @@ long atomic_sub_return(atomic_t *atom, long value)
 		}							\
 		__ret;							\
 	})
-#elif __riscv_zalrsc
+#elif defined(__riscv_zalrsc)
 #define __axchg(ptr, new, size)						\
 	({								\
 		__typeof__(ptr) __ptr = (ptr);				\
@@ -132,7 +132,7 @@ long atomic_sub_return(atomic_t *atom, long value)
 		__ret;							\
 	})
 #else
-#error "need a or zalrsc"
+#error "need A or Zaamo or Zalrsc"
 #endif
 
 #define axchg(ptr, x)							\
diff --git a/lib/sbi/riscv_locks.c b/lib/sbi/riscv_locks.c
index 41e8fabd..c29a9659 100644
--- a/lib/sbi/riscv_locks.c
+++ b/lib/sbi/riscv_locks.c
@@ -53,15 +53,15 @@ void spin_lock(spinlock_t *lock)
 
 	__asm__ __volatile__(
 		/* Atomically increment the next ticket. */
-#ifdef __riscv_atomic
+#if defined(__riscv_atomic) || defined(__riscv_zaamo)
 		"	amoadd.w.aqrl	%0, %4, %3\n"
-#elif __riscv_zalrsc
+#elif defined(__riscv_zalrsc)
 		"3:	lr.w.aqrl	%0, %3\n"
 		"	addw	%1, %0, %4\n"
 		"	sc.w.aqrl	%1, %1, %3\n"
 		"	bnez	%1, 3b\n"
 #else
-#error "need a or zalrsc"
+#error "need A or Zaamo or Zalrsc"
 #endif
 
 		/* Did we get the lock? */
diff --git a/lib/sbi/sbi_illegal_atomic.c b/lib/sbi/sbi_illegal_atomic.c
index 8fd6c557..977a9ad0 100644
--- a/lib/sbi/sbi_illegal_atomic.c
+++ b/lib/sbi/sbi_illegal_atomic.c
@@ -11,18 +11,18 @@
 #include
 #include
 
-#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc)
-#error "opensbi strongly relies on the A extension of RISC-V"
+#if !defined(__riscv_atomic) && !defined(__riscv_zaamo) && !defined(__riscv_zalrsc)
+#error "opensbi strongly relies on the Zaamo or Zalrsc extension of RISC-V"
 #endif
 
-#ifdef __riscv_atomic
+#if defined(__riscv_atomic) || defined(__riscv_zaamo)
 
 int sbi_illegal_atomic(ulong insn, struct sbi_trap_regs *regs)
 {
 	return truly_illegal_insn(insn, regs);
 }
 
-#elif __riscv_zalrsc
+#elif defined(__riscv_zalrsc)
 
 #define DEFINE_UNPRIVILEGED_LR_FUNCTION(type, aqrl, insn)		\
 	static type lr_##type##aqrl(const type *addr,			\