lib: sbi: Set the scratch allocation alignment to cacheline size

Set the scratch allocation alignment to the cacheline size specified by
riscv,cbom-block-size in the device tree to avoid two atomic variables
from the same cache line causing livelock on some platforms. If the
cacheline size is not specified, fall back to a default value.

Signed-off-by: Raj Vishwanathan <Raj.Vishwanathan@gmail.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20250423225045.267983-1-Raj.Vishwanathan@gmail.com
Signed-off-by: Anup Patel <anup@brainfault.org>
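
The rationale above can be illustrated with a small standalone C program; it is not part of the patch, and the 64-byte cache-line size it assumes merely stands in for whatever riscv,cbom-block-size reports on a real platform. With only pointer-sized spacing two separately placed atomics can share a cache line, while cache-line alignment keeps them apart.

/* Illustration only (not OpenSBI code). Assumes a 64-byte cache line. */
#include <stdalign.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64 /* assumed cbom block size for the demo */

int main(void)
{
	/* Pointer-sized spacing: both atomics live in one cache line. */
	alignas(CACHE_LINE) struct { atomic_ulong a; atomic_ulong b; } packed;

	/* Cache-line spacing: each atomic gets its own line. */
	struct {
		alignas(CACHE_LINE) atomic_ulong a;
		alignas(CACHE_LINE) atomic_ulong b;
	} separated;

	printf("packed:    same line? %s\n",
	       ((uintptr_t)&packed.a / CACHE_LINE ==
		(uintptr_t)&packed.b / CACHE_LINE) ? "yes" : "no");
	printf("separated: same line? %s\n",
	       ((uintptr_t)&separated.a / CACHE_LINE ==
		(uintptr_t)&separated.b / CACHE_LINE) ? "yes" : "no");
	return 0;
}

On a typical build this prints "yes" for the packed layout and "no" for the separated one, which is exactly the sharing pattern the patch avoids for scratch-space allocations.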

@@ -14,6 +14,8 @@
 #include <sbi/sbi_scratch.h>
 #include <sbi/sbi_string.h>
 
+#define DEFAULT_SCRATCH_ALLOC_ALIGN __SIZEOF_POINTER__
+
 u32 sbi_scratch_hart_count;
 u32 hartindex_to_hartid_table[SBI_HARTMASK_MAX_BITS] = { [0 ... SBI_HARTMASK_MAX_BITS-1] = -1U };
 struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS];
@@ -21,6 +23,19 @@ struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS];
 static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;
 static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;
 
+/*
+ * Get the alignment size.
+ * Return riscv,cbom-block-size if available, else DEFAULT_SCRATCH_ALLOC_ALIGN
+ */
+static unsigned long sbi_get_scratch_alloc_align(void)
+{
+	const struct sbi_platform *plat = sbi_platform_thishart_ptr();
+
+	if (!plat || !plat->cbom_block_size)
+		return DEFAULT_SCRATCH_ALLOC_ALIGN;
+	return plat->cbom_block_size;
+}
+
 u32 sbi_hartid_to_hartindex(u32 hartid)
 {
 	sbi_for_each_hartindex(i)
@@ -57,6 +72,7 @@ unsigned long sbi_scratch_alloc_offset(unsigned long size)
 	void *ptr;
 	unsigned long ret = 0;
 	struct sbi_scratch *rscratch;
+	unsigned long scratch_alloc_align = 0;
 
 	/*
 	 * We have a simple brain-dead allocator which never expects
@@ -70,8 +86,14 @@ unsigned long sbi_scratch_alloc_offset(unsigned long size)
 	if (!size)
 		return 0;
 
-	size += __SIZEOF_POINTER__ - 1;
-	size &= ~((unsigned long)__SIZEOF_POINTER__ - 1);
+	scratch_alloc_align = sbi_get_scratch_alloc_align();
+
+	/*
+	 * We let the allocation align to cacheline bytes to avoid livelock on
+	 * certain platforms due to atomic variables from the same cache line.
+	 */
+	size += scratch_alloc_align - 1;
+	size &= ~(scratch_alloc_align - 1);
 
 	spin_lock(&extra_lock);
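
The rounding in the hunk above is the usual power-of-two align-up idiom, so it relies on the reported cbom block size being a power of two (the DEFAULT_SCRATCH_ALLOC_ALIGN fallback, __SIZEOF_POINTER__, always is). A quick standalone sanity check of the arithmetic, with illustrative numbers:

#include <assert.h>

/* Round size up to the next multiple of align; align must be a power of two. */
static unsigned long round_up_pow2(unsigned long size, unsigned long align)
{
	size += align - 1;
	size &= ~(align - 1);
	return size;
}

int main(void)
{
	assert(round_up_pow2(24, 64) == 64);   /* grows to a full 64-byte line      */
	assert(round_up_pow2(200, 64) == 256); /* next 64-byte boundary after 200   */
	assert(round_up_pow2(64, 64) == 64);   /* already aligned sizes unchanged   */
	assert(round_up_pow2(24, 8) == 24);    /* old pointer-size behaviour (RV64) */
	return 0;
}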