Mirror of https://github.com/riscv-software-src/opensbi.git (synced 2025-08-24 15:31:22 +01:00)
lib: Emit lr and sc instructions based on -march flags
When -march=rv64im_zalrsc_zicsr is used, provide atomic operations and
locks using lr and sc instructions only.

Signed-off-by: Chao-ying Fu <cfu@mips.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20250226014727.19710-1-cfu@mips.com
Signed-off-by: Anup Patel <anup@brainfault.org>
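Which of the two code paths in the hunks below gets compiled is decided purely by the macros the RISC-V toolchain predefines for the selected -march string: __riscv_atomic when the A extension is enabled, __riscv_zalrsc when lr/sc are available. A minimal sketch of that dispatch, assuming a GCC/Clang toolchain that predefines these macros (the variable below is illustrative, not OpenSBI code):

/* Illustrative only -- not part of this patch. */
#if defined(__riscv_atomic)
/* e.g. -march=rv64ima_zicsr: full AMO instructions are available */
static const char *atomics_impl = "amo";
#elif defined(__riscv_zalrsc)
/* e.g. -march=rv64im_zalrsc_zicsr: only lr/sc are available */
static const char *atomics_impl = "lr/sc";
#else
#error "need a or zalrsc"
#endif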
@@ -19,6 +19,10 @@ Base Platform Requirements
 The base RISC-V platform requirements for OpenSBI are as follows:
 
 1. At least rv32ima_zicsr or rv64ima_zicsr required on all HARTs
+
+   * Users may restrict the usage of atomic instructions to lr/sc
+     via rv32im_zalrsc_zicsr or rv64im_zalrsc_zicsr if preferred
+
 2. At least one HART should have S-mode support because:
 
    * SBI calls are meant for RISC-V S-mode (Supervisor mode)
@@ -59,8 +59,18 @@ _try_lottery:
 	/* Jump to relocation wait loop if we don't get relocation lottery */
 	lla	a6, _boot_lottery
 	li	a7, BOOT_LOTTERY_ACQUIRED
+#ifdef __riscv_atomic
 	amoswap.w	a6, a7, (a6)
 	bnez	a6, _wait_for_boot_hart
+#elif __riscv_zalrsc
+_sc_fail:
+	lr.w	t0, (a6)
+	sc.w	t1, a7, (a6)
+	bnez	t1, _sc_fail
+	bnez	t0, _wait_for_boot_hart
+#else
+#error "need a or zalrsc"
+#endif
 
 	/* relocate the global table content */
 	li	t0, FW_TEXT_START	/* link start */
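For readers skimming the assembly: the boot lottery is an atomic exchange, and the new #elif branch emulates amoswap.w with an lr.w/sc.w retry loop (t0 receives the old lottery value, t1 the sc.w status; the loop repeats until sc.w succeeds, and the old value then decides whether to branch to _wait_for_boot_hart). A hedged C-level sketch of the same logic, with illustrative names (OpenSBI keeps this in hand-written assembly because it runs before any C environment exists):

#include <stdint.h>

#define BOOT_LOTTERY_ACQUIRED	1	/* illustrative value */

static uint32_t boot_lottery;

/* Returns non-zero only for the single hart that wins the lottery. */
static int try_boot_lottery(void)
{
	/* With the A extension this typically compiles to amoswap.w;
	 * with a Zalrsc-aware compiler it becomes an lr.w/sc.w retry
	 * loop, matching the hand-written fallback above. */
	return __atomic_exchange_n(&boot_lottery, BOOT_LOTTERY_ACQUIRED,
				   __ATOMIC_ACQ_REL) == 0;
}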
@@ -30,7 +30,18 @@ _start:
 	/* Pick one hart to run the main boot sequence */
 	lla	a3, _hart_lottery
 	li	a2, 1
+#ifdef __riscv_atomic
 	amoadd.w	a3, a2, (a3)
+#elif __riscv_zalrsc
+_sc_fail:
+	lr.w	t0, (a3)
+	addw	t1, t0, a2
+	sc.w	t1, t1, (a3)
+	bnez	t1, _sc_fail
+	move	a3, t0
+#else
+#error "need a or zalrsc"
+#endif
 	bnez	a3, _start_hang
 
 	/* Save a0 and a1 */
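The hart lottery uses fetch-and-add rather than exchange: every hart increments _hart_lottery once, and only the hart that observes the old value 0 proceeds. The lr/sc fallback therefore keeps the value loaded by lr.w in t0 and copies it to a3 afterwards, preserving amoadd.w's "return the old value" behaviour for the following bnez. A hedged C equivalent with illustrative names:

static unsigned int hart_lottery;

/* Only the hart that sees the counter at 0 runs the main boot sequence. */
static int won_hart_lottery(void)
{
	return __atomic_fetch_add(&hart_lottery, 1, __ATOMIC_ACQ_REL) == 0;
}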
@@ -12,7 +12,7 @@
 #include <sbi/riscv_atomic.h>
 #include <sbi/riscv_barrier.h>
 
-#ifndef __riscv_atomic
+#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc)
 #error "opensbi strongly relies on the A extension of RISC-V"
 #endif
 
@@ -31,6 +31,7 @@ void atomic_write(atomic_t *atom, long value)
 
 long atomic_add_return(atomic_t *atom, long value)
 {
+#ifdef __riscv_atomic
 	long ret;
 #if __SIZEOF_LONG__ == 4
 	__asm__ __volatile__("	amoadd.w.aqrl	%1, %2, %0"
@@ -43,6 +44,29 @@ long atomic_add_return(atomic_t *atom, long value)
 			     : "r"(value)
 			     : "memory");
 #endif
+#elif __riscv_zalrsc
+	long ret, temp;
+#if __SIZEOF_LONG__ == 4
+	__asm__ __volatile__("1:lr.w.aqrl	%1,%0\n"
+			     "	addw	%2,%1,%3\n"
+			     "	sc.w.aqrl	%2,%2,%0\n"
+			     "	bnez	%2,1b"
+			     : "+A"(atom->counter), "=&r"(ret), "=&r"(temp)
+			     : "r"(value)
+			     : "memory");
+#elif __SIZEOF_LONG__ == 8
+	__asm__ __volatile__("1:lr.d.aqrl	%1,%0\n"
+			     "	add	%2,%1,%3\n"
+			     "	sc.d.aqrl	%2,%2,%0\n"
+			     "	bnez	%2,1b"
+			     : "+A"(atom->counter), "=&r"(ret), "=&r"(temp)
+			     : "r"(value)
+			     : "memory");
+#endif
+#else
+#error "need a or zalrsc"
+#endif
 
 	return ret + value;
 }
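In the lr/sc variant above, %1 (ret) holds the value loaded by lr, while %2 (temp) first holds the new value and then the sc status (zero on success); both use early-clobber "=&r" constraints so the register allocator does not overlap them with the inputs. The function still returns the updated counter, so a portable equivalent would be roughly the following (illustration only; OpenSBI deliberately keeps hand-written asm here):

static inline long atomic_add_return_sketch(volatile long *counter, long value)
{
	/* __atomic_add_fetch returns the updated value (old + value),
	 * matching "return ret + value;" above. */
	return __atomic_add_fetch(counter, value, __ATOMIC_ACQ_REL);
}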
@@ -51,6 +75,7 @@ long atomic_sub_return(atomic_t *atom, long value)
 	return atomic_add_return(atom, -value);
 }
 
+#ifdef __riscv_atomic
 #define __axchg(ptr, new, size)					\
 ({								\
 	__typeof__(ptr) __ptr = (ptr);				\
@@ -76,6 +101,39 @@ long atomic_sub_return(atomic_t *atom, long value)
 	}							\
 	__ret;							\
 })
+#elif __riscv_zalrsc
+#define __axchg(ptr, new, size)					\
+({								\
+	__typeof__(ptr) __ptr = (ptr);				\
+	__typeof__(new) __new = (new);				\
+	__typeof__(*(ptr)) __ret, __temp;			\
+	switch (size) {						\
+	case 4:							\
+		__asm__ __volatile__ (				\
+			"1:	lr.w.aqrl %0, %1\n"		\
+			"	sc.w.aqrl %2, %3, %1\n"		\
+			"	bnez %2, 1b\n"			\
+			: "=&r" (__ret), "+A" (*__ptr), "=&r" (__temp) \
+			: "r" (__new)				\
+			: "memory");				\
+		break;						\
+	case 8:							\
+		__asm__ __volatile__ (				\
+			"1:	lr.d.aqrl %0, %1\n"		\
+			"	sc.d.aqrl %2, %3, %1\n"		\
+			"	bnez %2, 1b\n"			\
+			: "=&r" (__ret), "+A" (*__ptr), "=&r" (__temp) \
+			: "r" (__new)				\
+			: "memory");				\
+		break;						\
+	default:						\
+		break;						\
+	}							\
+	__ret;							\
+})
+#else
+#error "need a or zalrsc"
+#endif
 
 #define axchg(ptr, x)						\
 ({								\
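The Zalrsc variant of __axchg keeps the AMO version's contract: for 4- and 8-byte objects it performs an atomic exchange via an lr/sc retry loop and evaluates to the previous value, so a caller such as `old = axchg(&word, new_val);` (names illustrative) behaves the same under either ISA configuration.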
@@ -53,7 +53,16 @@ void spin_lock(spinlock_t *lock)
 
 	__asm__ __volatile__(
 		/* Atomically increment the next ticket. */
+#ifdef __riscv_atomic
 		"	amoadd.w.aqrl	%0, %4, %3\n"
+#elif __riscv_zalrsc
+		"3:	lr.w.aqrl	%0, %3\n"
+		"	addw	%1, %0, %4\n"
+		"	sc.w.aqrl	%1, %1, %3\n"
+		"	bnez	%1, 3b\n"
+#else
+#error "need a or zalrsc"
+#endif
 
 		/* Did we get the lock? */
 		"	srli	%1, %0, %6\n"
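spin_lock() is a ticket lock: the asm atomically adds the next-ticket increment (%4) to the combined lock word, then uses the old value to recover the ticket it was handed (the srli after the #endif) and spins until the owner field catches up. A hedged C-level sketch of that acquire protocol; the field layout and names are illustrative, not OpenSBI's actual spinlock_t, which packs both counters into a single 32-bit word so one AMO (or one lr/sc loop) hands out a ticket:

#include <stdint.h>

struct ticket_lock {
	volatile uint32_t owner;	/* ticket currently being served */
	volatile uint32_t next;		/* next ticket to hand out */
};

static void ticket_lock_acquire(struct ticket_lock *lock)
{
	/* Take a ticket: amoadd.w.aqrl with the A extension, or the
	 * lr.w/sc.w retry loop shown above with Zalrsc only. */
	uint32_t ticket = __atomic_fetch_add(&lock->next, 1,
					     __ATOMIC_ACQUIRE);

	/* Spin until it is our turn. */
	while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != ticket)
		;
}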