Files
opensbi/lib/sbi/riscv_locks.c
Vladimir Kondratiev f6fa62bd16 lib: atomics: fix AMO test macros
The "RISC-V C API" [1] defines architecture extension test macros
says naming rule for the test macros is __riscv_<ext_name>, where
<ext_name> is all lower-case.

Three extensions deal with the atomics implementation:
  "zaamo" consists of the AMO instructions,
  "zalrsc" consists of the LR/SC instructions,
  "a" means both "zaamo" and "zalrsc".
The built-in test macros are __riscv_a, __riscv_zaamo and __riscv_zalrsc.
__riscv_atomic, the alternative name for __riscv_a, is deprecated.
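
As a quick illustration (not part of this patch), here is a small,
hypothetical probe program; built with a RISC-V toolchain and different
-march strings, it reports which of these test macros that toolchain
defines:

/*
 * Illustrative probe, not part of OpenSBI: reports which of the
 * atomics-related test macros the current toolchain defines.
 */
#include <stdio.h>

int main(void)
{
#if defined(__riscv_a)
	puts("__riscv_a      defined: full A extension (Zaamo + Zalrsc)");
#endif
#if defined(__riscv_zaamo)
	puts("__riscv_zaamo  defined: AMO instructions available");
#endif
#if defined(__riscv_zalrsc)
	puts("__riscv_zalrsc defined: LR/SC instructions available");
#endif
#if defined(__riscv_atomic)
	puts("__riscv_atomic defined: deprecated alias for __riscv_a");
#endif
	return 0;
}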

Use the correct test macro, __riscv_zaamo, for the AMO variant of the
atomics. The code used __riscv_atomic, which is both deprecated and
incorrect, because it tests for the "a" extension, i.e. for both
"zaamo" and "zalrsc". If the ISA enables only Zaamo but not Zalrsc,
the code as it was would not compile.

Older toolchains may define neither __riscv_zaamo nor __riscv_zalrsc,
so also query __riscv_atomic: it should be treated as implying both
__riscv_zaamo and __riscv_zalrsc, and since in all present cases the
AMO path is preferable, take it as an alternative to __riscv_zaamo
(see the sketch below).
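
A minimal sketch of the resulting selection logic (the USE_* macro
names are illustrative, not OpenSBI's; the real guard sits inside
spin_lock() in the file below):

/* Prefer the AMO path when either the new or the legacy macro is set. */
#if defined(__riscv_atomic) || defined(__riscv_zaamo)
#define USE_AMO_PATH	1	/* amoadd.w based ticket grab */
#elif defined(__riscv_zalrsc)
#define USE_LRSC_PATH	1	/* lr.w/sc.w retry loop instead */
#else
#error "need the A, Zaamo or Zalrsc extension"
#endif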

[1] https://github.com/riscv-non-isa/riscv-c-api-doc

Signed-off-by: Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20251228073321.1533844-1-vladimir.kondratiev@mobileye.com
Signed-off-by: Anup Patel <anup@brainfault.org>
2025-12-28 20:44:03 +05:30


/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2021 Christoph Müllner <cmuellner@linux.com>
 */

#include <sbi/riscv_barrier.h>
#include <sbi/riscv_locks.h>

static inline bool spin_lock_unlocked(spinlock_t lock)
{
	return lock.owner == lock.next;
}

bool spin_lock_check(spinlock_t *lock)
{
	RISCV_FENCE(r, rw);
	return !spin_lock_unlocked(*lock);
}

bool spin_trylock(spinlock_t *lock)
{
	unsigned long inc = 1u << TICKET_SHIFT;
	unsigned long mask = 0xffffu << TICKET_SHIFT;
	u32 l0, tmp1, tmp2;

	__asm__ __volatile__(
		/* Get the current lock counters. */
		"1: lr.w.aq %0, %3\n"
		"   slli %2, %0, %6\n"
		"   and %2, %2, %5\n"
		"   and %1, %0, %5\n"
		/* Is the lock free right now? */
		"   bne %1, %2, 2f\n"
		"   add %0, %0, %4\n"
		/* Acquire the lock. */
		"   sc.w.rl %0, %0, %3\n"
		"   bnez %0, 1b\n"
		"2:"
		: "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
		: "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
		: "memory");

	return l0 == 0;
}

void spin_lock(spinlock_t *lock)
{
	unsigned long inc = 1u << TICKET_SHIFT;
	unsigned long mask = 0xffffu;
	u32 l0, tmp1, tmp2;

	__asm__ __volatile__(
		/* Atomically increment the next ticket. */
#if defined(__riscv_atomic) || defined(__riscv_zaamo)
		"   amoadd.w.aqrl %0, %4, %3\n"
#elif defined(__riscv_zalrsc)
		"3: lr.w.aqrl %0, %3\n"
		"   addw %1, %0, %4\n"
		"   sc.w.aqrl %1, %1, %3\n"
		"   bnez %1, 3b\n"
#else
#error "need A or Zaamo or Zalrsc"
#endif
		/* Did we get the lock? */
		"   srli %1, %0, %6\n"
		"   and %1, %1, %5\n"
		"1: and %2, %0, %5\n"
		"   beq %1, %2, 2f\n"
		/* If not, then spin on the lock. */
		"   lw %0, %3\n"
		RISCV_ACQUIRE_BARRIER
		"   j 1b\n"
		"2:"
		: "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
		: "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
		: "memory");
}

void spin_unlock(spinlock_t *lock)
{
	__smp_store_release(&lock->owner, lock->owner + 1);
}