diff --git a/docs/platform_requirements.md b/docs/platform_requirements.md
index a843febf..dfda4f48 100644
--- a/docs/platform_requirements.md
+++ b/docs/platform_requirements.md
@@ -19,6 +19,10 @@ Base Platform Requirements
 The base RISC-V platform requirements for OpenSBI are as follows:
 
 1. At least rv32ima_zicsr or rv64ima_zicsr required on all HARTs
+
+   * Users may restrict the usage of atomic instructions to lr/sc
+     via rv32im_zalrsc_zicsr or rv64im_zalrsc_zicsr if preferred
+
 2. At least one HART should have S-mode support because:
 
    * SBI calls are meant for RISC-V S-mode (Supervisor mode)
diff --git a/firmware/fw_base.S b/firmware/fw_base.S
index 536bcd26..2498797c 100644
--- a/firmware/fw_base.S
+++ b/firmware/fw_base.S
@@ -59,8 +59,18 @@ _try_lottery:
 	/* Jump to relocation wait loop if we don't get relocation lottery */
 	lla	a6, _boot_lottery
 	li	a7, BOOT_LOTTERY_ACQUIRED
+#ifdef __riscv_atomic
 	amoswap.w	a6, a7, (a6)
 	bnez	a6, _wait_for_boot_hart
+#elif __riscv_zalrsc
+_sc_fail:
+	lr.w	t0, (a6)
+	sc.w	t1, a7, (a6)
+	bnez	t1, _sc_fail
+	bnez	t0, _wait_for_boot_hart
+#else
+#error "need a or zalrsc"
+#endif
 
 	/* relocate the global table content */
 	li	t0, FW_TEXT_START	/* link start */
diff --git a/firmware/payloads/test_head.S b/firmware/payloads/test_head.S
index 7a2ac126..7d25e07d 100644
--- a/firmware/payloads/test_head.S
+++ b/firmware/payloads/test_head.S
@@ -30,7 +30,18 @@ _start:
 	/* Pick one hart to run the main boot sequence */
 	lla	a3, _hart_lottery
 	li	a2, 1
+#ifdef __riscv_atomic
 	amoadd.w	a3, a2, (a3)
+#elif __riscv_zalrsc
+_sc_fail:
+	lr.w	t0, (a3)
+	add	t1, t0, a2
+	sc.w	t1, t1, (a3)
+	bnez	t1, _sc_fail
+	move	a3, t0
+#else
+#error "need a or zalrsc"
+#endif
 	bnez	a3, _start_hang
 
 	/* Save a0 and a1 */
diff --git a/lib/sbi/riscv_atomic.c b/lib/sbi/riscv_atomic.c
index 32cf3f03..df16a2eb 100644
--- a/lib/sbi/riscv_atomic.c
+++ b/lib/sbi/riscv_atomic.c
@@ -12,7 +12,7 @@
 #include <sbi/riscv_barrier.h>
 #include <sbi/riscv_atomic.h>
 
-#ifndef __riscv_atomic
+#if !defined(__riscv_atomic) && !defined(__riscv_zalrsc)
 #error "opensbi strongly relies on the A extension of RISC-V"
 #endif
 
@@ -31,6 +31,7 @@ void atomic_write(atomic_t *atom, long value)
 
 long atomic_add_return(atomic_t *atom, long value)
 {
+#ifdef __riscv_atomic
 	long ret;
 #if __SIZEOF_LONG__ == 4
 	__asm__ __volatile__("	amoadd.w.aqrl	%1, %2, %0"
@@ -43,6 +44,29 @@ long atomic_add_return(atomic_t *atom, long value)
 			     : "r"(value)
 			     : "memory");
 #endif
+#elif __riscv_zalrsc
+	long ret, temp;
+#if __SIZEOF_LONG__ == 4
+	__asm__ __volatile__("1:lr.w.aqrl %1,%0\n"
+			     "  add %2,%1,%3\n"
+			     "  sc.w.aqrl %2,%2,%0\n"
+			     "  bnez %2,1b"
+			     : "+A"(atom->counter), "=&r"(ret), "=&r"(temp)
+			     : "r"(value)
+			     : "memory");
+#elif __SIZEOF_LONG__ == 8
+	__asm__ __volatile__("1:lr.d.aqrl %1,%0\n"
+			     "  add %2,%1,%3\n"
+			     "  sc.d.aqrl %2,%2,%0\n"
+			     "  bnez %2,1b"
+			     : "+A"(atom->counter), "=&r"(ret), "=&r"(temp)
+			     : "r"(value)
+			     : "memory");
+#endif
+#else
+#error "need a or zalrsc"
+#endif
+
 	return ret + value;
 }
 
@@ -51,6 +75,7 @@ long atomic_sub_return(atomic_t *atom, long value)
 {
 	return atomic_add_return(atom, -value);
 }
+#ifdef __riscv_atomic
 #define __axchg(ptr, new, size)					\
 	({								\
 		__typeof__(ptr) __ptr = (ptr);				\
@@ -76,6 +101,39 @@ long atomic_sub_return(atomic_t *atom, long value)
 		}							\
 		__ret;							\
 	})
+#elif __riscv_zalrsc
+#define __axchg(ptr, new, size)					\
+	({								\
+		__typeof__(ptr) __ptr = (ptr);				\
+		__typeof__(new) __new = (new);				\
+		__typeof__(*(ptr)) __ret, __temp;			\
+		switch (size) {						\
+		case 4:							\
+			__asm__ __volatile__ (				\
+				"1: lr.w.aqrl %0, %1\n"			\
+				"   sc.w.aqrl %2, %3, %1\n"		\
+				"   bnez %2, 1b\n"			\
: "=&r" (__ret), "+A" (*__ptr), "=&r" (__temp) \ + : "r" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + "1: lr.d.aqrl %0, %1\n" \ + " sc.d.aqrl %2, %3, %1\n" \ + " bnez %2, 1b\n" \ + : "=&r" (__ret), "+A" (*__ptr), "=&r" (__temp) \ + : "r" (__new) \ + : "memory"); \ + break; \ + default: \ + break; \ + } \ + __ret; \ + }) +#else +#error "need a or zalrsc" +#endif #define axchg(ptr, x) \ ({ \ diff --git a/lib/sbi/riscv_locks.c b/lib/sbi/riscv_locks.c index acab7769..41e8fabd 100644 --- a/lib/sbi/riscv_locks.c +++ b/lib/sbi/riscv_locks.c @@ -53,7 +53,16 @@ void spin_lock(spinlock_t *lock) __asm__ __volatile__( /* Atomically increment the next ticket. */ +#ifdef __riscv_atomic " amoadd.w.aqrl %0, %4, %3\n" +#elif __riscv_zalrsc + "3: lr.w.aqrl %0, %3\n" + " addw %1, %0, %4\n" + " sc.w.aqrl %1, %1, %3\n" + " bnez %1, 3b\n" +#else +#error "need a or zalrsc" +#endif /* Did we get the lock? */ " srli %1, %0, %6\n"