mirror of
https://github.com/riscv-software-src/opensbi.git
synced 2025-08-24 15:31:22 +01:00
lib: sbi: Set the scratch allocation alignment to cacheline size
Set the scratch allocation alignment to the cacheline size specified by riscv,cbom-block-size in the DTS file to avoid two atomic variables from the same cache line causing livelock on some platforms. If the cacheline size is not specified, we set it to a default value. Signed-off-by: Raj Vishwanathan <Raj.Vishwanathan@gmail.com> Reviewed-by: Anup Patel <anup@brainfault.org> Reviewed-by: Samuel Holland <samuel.holland@sifive.com> Link: https://lore.kernel.org/r/20250423225045.267983-1-Raj.Vishwanathan@gmail.com Signed-off-by: Anup Patel <anup@brainfault.org>
This commit is contained in:

committed by
Anup Patel

parent
4d0128ec58
commit
99aabc6b84
@@ -14,6 +14,8 @@
|
||||
#include <sbi/sbi_scratch.h>
|
||||
#include <sbi/sbi_string.h>
|
||||
|
||||
#define DEFAULT_SCRATCH_ALLOC_ALIGN __SIZEOF_POINTER__
|
||||
|
||||
u32 sbi_scratch_hart_count;
|
||||
u32 hartindex_to_hartid_table[SBI_HARTMASK_MAX_BITS] = { [0 ... SBI_HARTMASK_MAX_BITS-1] = -1U };
|
||||
struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS];
|
||||
@@ -21,6 +23,19 @@ struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS];
|
||||
static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;
|
||||
static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;
|
||||
|
||||
/*
|
||||
* Get the alignment size.
|
||||
* Return DEFAULT_SCRATCH_ALLOC_ALIGNMENT or riscv,cbom_block_size
|
||||
*/
|
||||
static unsigned long sbi_get_scratch_alloc_align(void)
|
||||
{
|
||||
const struct sbi_platform *plat = sbi_platform_thishart_ptr();
|
||||
|
||||
if (!plat || !plat->cbom_block_size)
|
||||
return DEFAULT_SCRATCH_ALLOC_ALIGN;
|
||||
return plat->cbom_block_size;
|
||||
}
|
||||
|
||||
u32 sbi_hartid_to_hartindex(u32 hartid)
|
||||
{
|
||||
sbi_for_each_hartindex(i)
|
||||
@@ -57,6 +72,7 @@ unsigned long sbi_scratch_alloc_offset(unsigned long size)
|
||||
void *ptr;
|
||||
unsigned long ret = 0;
|
||||
struct sbi_scratch *rscratch;
|
||||
unsigned long scratch_alloc_align = 0;
|
||||
|
||||
/*
|
||||
* We have a simple brain-dead allocator which never expects
|
||||
@@ -70,8 +86,14 @@ unsigned long sbi_scratch_alloc_offset(unsigned long size)
|
||||
if (!size)
|
||||
return 0;
|
||||
|
||||
size += __SIZEOF_POINTER__ - 1;
|
||||
size &= ~((unsigned long)__SIZEOF_POINTER__ - 1);
|
||||
scratch_alloc_align = sbi_get_scratch_alloc_align();
|
||||
|
||||
/*
|
||||
* We let the allocation align to cacheline bytes to avoid livelock on
|
||||
* certain platforms due to atomic variables from the same cache line.
|
||||
*/
|
||||
size += scratch_alloc_align - 1;
|
||||
size &= ~(scratch_alloc_align - 1);
|
||||
|
||||
spin_lock(&extra_lock);
|
||||
|
||||
|
@@ -246,6 +246,30 @@ int fdt_parse_hart_id(const void *fdt, int cpu_offset, u32 *hartid)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int fdt_parse_cbom_block_size(const void *fdt, int cpu_offset, unsigned long *cbom_block_size)
|
||||
{
|
||||
int len;
|
||||
const void *prop;
|
||||
const fdt32_t *val;
|
||||
|
||||
if (!fdt || cpu_offset < 0)
|
||||
return SBI_EINVAL;
|
||||
|
||||
prop = fdt_getprop(fdt, cpu_offset, "device_type", &len);
|
||||
if (!prop || !len)
|
||||
return SBI_EINVAL;
|
||||
if (strncmp (prop, "cpu", strlen ("cpu")))
|
||||
return SBI_EINVAL;
|
||||
|
||||
val = fdt_getprop(fdt, cpu_offset, "riscv,cbom-block-size", &len);
|
||||
if (!val || len < sizeof(fdt32_t))
|
||||
return SBI_EINVAL;
|
||||
|
||||
if (cbom_block_size)
|
||||
*cbom_block_size = fdt32_to_cpu(*val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int fdt_parse_max_enabled_hart_id(const void *fdt, u32 *max_hartid)
|
||||
{
|
||||
u32 hartid;
|
||||
|
Reference in New Issue
Block a user