lib: sbi_hart: Detect number of supported PMP regions

It is not mandatory for a RISC-V system to implement all PMP
regions, so we have to check all PMPADDRx CSRs to determine the exact
number of supported PMP regions.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Author:    Anup Patel
Date:      2020-05-18 12:04:18 +05:30
Committer: Anup Patel
Commit:    a38bea9341 (parent 2966510eed)

4 changed files with 103 additions and 52 deletions
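
The whole detection boils down to one probe: read a PMPADDRx CSR and write the
same value back under a trap-catching helper, and count that CSR as an
implemented PMP region only if neither access traps. A condensed sketch of that
logic (it mirrors the __detect_pmp() macro added to hart_detect_features()
further down; csr_read_allowed(), csr_write_allowed() and struct sbi_trap_info
are the existing OpenSBI helpers the diff itself uses):

    struct sbi_trap_info trap = {0};
    unsigned long val;
    unsigned int pmp_count = 0;

    /* Probe one PMPADDRx CSR; repeated for CSR_PMPADDR0..CSR_PMPADDR15. */
    val = csr_read_allowed(CSR_PMPADDR0, (unsigned long)&trap);
    if (!trap.cause) {
            /* Write the same value back so the probe is non-destructive.
             * A trap on either access means the CSR, and hence this PMP
             * region, is not implemented. */
            csr_write_allowed(CSR_PMPADDR0, (unsigned long)&trap, val);
            if (!trap.cause)
                    pmp_count++;
    }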


@@ -38,6 +38,10 @@ static inline ulong sbi_hart_expected_trap_addr(void)
 }
 
 void sbi_hart_delegation_dump(struct sbi_scratch *scratch);
+unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch);
+int sbi_hart_pmp_get(struct sbi_scratch *scratch, unsigned int n,
+		     unsigned long *prot_out, unsigned long *addr_out,
+		     unsigned long *size);
 void sbi_hart_pmp_dump(struct sbi_scratch *scratch);
 int sbi_hart_pmp_check_addr(struct sbi_scratch *scratch, unsigned long daddr,
 			    unsigned long attr);
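
For callers, the new accessors replace open-coded loops over the fixed
PMP_COUNT limit. A minimal usage sketch (the same pattern the
fdt_reserved_memory_fixup() hunk below switches to), iterating only the
regions this hart actually implements:

    unsigned long prot, addr, size;
    unsigned int i, count = sbi_hart_pmp_count(scratch);

    for (i = 0; i < count; i++) {
            /* sbi_hart_pmp_get() returns SBI_EINVAL for out-of-range indexes. */
            if (sbi_hart_pmp_get(scratch, i, &prot, &addr, &size))
                    continue;
            if (!(prot & PMP_A))    /* entry is disabled */
                    continue;
            /* ... use prot/addr/size ... */
    }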


@@ -24,6 +24,11 @@ extern void __sbi_expected_trap(void);
 extern void __sbi_expected_trap_hext(void);
 
 void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;
 
+struct hart_features {
+	unsigned long features;
+	unsigned int pmp_count;
+};
+
 static unsigned long hart_features_offset;
 
 static void mstatus_init(struct sbi_scratch *scratch, u32 hartid)
@@ -125,15 +130,34 @@ void sbi_hart_delegation_dump(struct sbi_scratch *scratch)
 #endif
 }
 
+unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
+{
+	struct hart_features *hfeatures =
+			sbi_scratch_offset_ptr(scratch, hart_features_offset);
+
+	return hfeatures->pmp_count;
+}
+
+int sbi_hart_pmp_get(struct sbi_scratch *scratch, unsigned int n,
+		     unsigned long *prot_out, unsigned long *addr_out,
+		     unsigned long *size)
+{
+	if (sbi_hart_pmp_count(scratch) <= n)
+		return SBI_EINVAL;
+
+	return pmp_get(n, prot_out, addr_out, size);
+}
+
 void sbi_hart_pmp_dump(struct sbi_scratch *scratch)
 {
 	unsigned long prot, addr, size;
-	unsigned int i;
+	unsigned int i, pmp_count;
 
 	if (!sbi_hart_has_feature(scratch, SBI_HART_HAS_PMP))
 		return;
 
-	for (i = 0; i < PMP_COUNT; i++) {
+	pmp_count = sbi_hart_pmp_count(scratch);
+	for (i = 0; i < pmp_count; i++) {
 		pmp_get(i, &prot, &addr, &size);
 		if (!(prot & PMP_A))
 			continue;
@@ -158,12 +182,14 @@ void sbi_hart_pmp_dump(struct sbi_scratch *scratch)
 int sbi_hart_pmp_check_addr(struct sbi_scratch *scratch, unsigned long addr,
 			    unsigned long attr)
 {
-	unsigned long prot, size, i, tempaddr;
+	unsigned long prot, size, tempaddr;
+	unsigned int i, pmp_count;
 
 	if (!sbi_hart_has_feature(scratch, SBI_HART_HAS_PMP))
 		return SBI_OK;
 
-	for (i = 0; i < PMP_COUNT; i++) {
+	pmp_count = sbi_hart_pmp_count(scratch);
+	for (i = 0; i < pmp_count; i++) {
 		pmp_get(i, &prot, &tempaddr, &size);
 		if (!(prot & PMP_A))
 			continue;
@@ -177,7 +203,7 @@ int sbi_hart_pmp_check_addr(struct sbi_scratch *scratch, unsigned long addr,
 static int pmp_init(struct sbi_scratch *scratch, u32 hartid)
 {
-	u32 i, pmp_idx = 0, count;
+	u32 i, pmp_idx = 0, pmp_count, count;
 	unsigned long fw_start, fw_size_log2;
 	ulong prot, addr, log2size;
 	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
@@ -192,7 +218,8 @@ static int pmp_init(struct sbi_scratch *scratch, u32 hartid)
 	/* Platform specific PMP regions */
 	count = sbi_platform_pmp_region_count(plat, hartid);
-	for (i = 0; i < count && pmp_idx < (PMP_COUNT - 1); i++) {
+	pmp_count = sbi_hart_pmp_count(scratch);
+	for (i = 0; i < count && pmp_idx < (pmp_count - 1); i++) {
 		if (sbi_platform_pmp_region_info(plat, hartid, i, &prot, &addr,
 						 &log2size))
 			continue;
@@ -219,11 +246,10 @@ static int pmp_init(struct sbi_scratch *scratch, u32 hartid)
  */
 bool sbi_hart_has_feature(struct sbi_scratch *scratch, unsigned long feature)
 {
-	unsigned long *hart_features;
-
-	hart_features = sbi_scratch_offset_ptr(scratch, hart_features_offset);
+	struct hart_features *hfeatures =
+			sbi_scratch_offset_ptr(scratch, hart_features_offset);
 
-	if (*hart_features & feature)
+	if (hfeatures->features & feature)
 		return true;
 	else
 		return false;
@@ -231,11 +257,10 @@ bool sbi_hart_has_feature(struct sbi_scratch *scratch, unsigned long feature)
 static unsigned long hart_get_features(struct sbi_scratch *scratch)
 {
-	unsigned long *hart_features;
-
-	hart_features = sbi_scratch_offset_ptr(scratch, hart_features_offset);
+	struct hart_features *hfeatures =
+			sbi_scratch_offset_ptr(scratch, hart_features_offset);
 
-	return *hart_features;
+	return hfeatures->features;
 }
 
 static inline char *sbi_hart_feature_id2string(unsigned long feature)
@@ -311,65 +336,84 @@ done:
 static void hart_detect_features(struct sbi_scratch *scratch)
 {
 	struct sbi_trap_info trap = {0};
-	unsigned long *hart_features;
-	unsigned long csr_val;
+	struct hart_features *hfeatures;
+	unsigned long val;
 
-	hart_features = sbi_scratch_offset_ptr(scratch, hart_features_offset);
+	/* Reset hart features */
+	hfeatures = sbi_scratch_offset_ptr(scratch, hart_features_offset);
+	hfeatures->features = 0;
+	hfeatures->pmp_count = 0;
 
 	/* Detect if hart supports PMP feature */
-	csr_val = csr_read_allowed(CSR_PMPCFG0, (unsigned long)&trap);
-	if (!trap.cause) {
-		csr_write_allowed(CSR_PMPCFG0, (unsigned long)&trap, csr_val);
-		if (!trap.cause)
-			*hart_features |= SBI_HART_HAS_PMP;
+#define __detect_pmp(__pmp_csr)					\
+	val = csr_read_allowed(__pmp_csr, (ulong)&trap);	\
+	if (!trap.cause) {					\
+		csr_write_allowed(__pmp_csr, (ulong)&trap, val);\
+		if (!trap.cause)				\
+			hfeatures->pmp_count++;			\
 	}
+	__detect_pmp(CSR_PMPADDR0);
+	__detect_pmp(CSR_PMPADDR1);
+	__detect_pmp(CSR_PMPADDR2);
+	__detect_pmp(CSR_PMPADDR3);
+	__detect_pmp(CSR_PMPADDR4);
+	__detect_pmp(CSR_PMPADDR5);
+	__detect_pmp(CSR_PMPADDR6);
+	__detect_pmp(CSR_PMPADDR7);
+	__detect_pmp(CSR_PMPADDR8);
+	__detect_pmp(CSR_PMPADDR9);
+	__detect_pmp(CSR_PMPADDR10);
+	__detect_pmp(CSR_PMPADDR11);
+	__detect_pmp(CSR_PMPADDR12);
+	__detect_pmp(CSR_PMPADDR13);
+	__detect_pmp(CSR_PMPADDR14);
+	__detect_pmp(CSR_PMPADDR15);
+#undef __detect_pmp
+
+	/* Set hart PMP feature if we have at least one PMP region */
+	if (hfeatures->pmp_count)
+		hfeatures->features |= SBI_HART_HAS_PMP;
 
 	/* Detect if hart supports SCOUNTEREN feature */
 	trap.cause = 0;
-	csr_val = csr_read_allowed(CSR_SCOUNTEREN, (unsigned long)&trap);
+	val = csr_read_allowed(CSR_SCOUNTEREN, (unsigned long)&trap);
 	if (!trap.cause) {
-		csr_write_allowed(CSR_SCOUNTEREN, (unsigned long)&trap,
-				  csr_val);
+		csr_write_allowed(CSR_SCOUNTEREN, (unsigned long)&trap, val);
 		if (!trap.cause)
-			*hart_features |= SBI_HART_HAS_SCOUNTEREN;
+			hfeatures->features |= SBI_HART_HAS_SCOUNTEREN;
 	}
 
 	/* Detect if hart supports MCOUNTEREN feature */
 	trap.cause = 0;
-	csr_val = csr_read_allowed(CSR_MCOUNTEREN, (unsigned long)&trap);
+	val = csr_read_allowed(CSR_MCOUNTEREN, (unsigned long)&trap);
 	if (!trap.cause) {
-		csr_write_allowed(CSR_MCOUNTEREN, (unsigned long)&trap,
-				  csr_val);
+		csr_write_allowed(CSR_MCOUNTEREN, (unsigned long)&trap, val);
 		if (!trap.cause)
-			*hart_features |= SBI_HART_HAS_MCOUNTEREN;
+			hfeatures->features |= SBI_HART_HAS_MCOUNTEREN;
 	}
 
 	/* Detect if hart supports time CSR */
 	trap.cause = 0;
 	csr_read_allowed(CSR_TIME, (unsigned long)&trap);
 	if (!trap.cause)
-		*hart_features |= SBI_HART_HAS_TIME;
+		hfeatures->features |= SBI_HART_HAS_TIME;
 }
 
 int sbi_hart_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
 {
 	int rc;
-	unsigned long *hart_features;
 
 	if (cold_boot) {
 		if (misa_extension('H'))
 			sbi_hart_expected_trap = &__sbi_expected_trap_hext;
 
 		hart_features_offset = sbi_scratch_alloc_offset(
-						sizeof(*hart_features),
+						sizeof(struct hart_features),
 						"HART_FEATURES");
 		if (!hart_features_offset)
 			return SBI_ENOMEM;
 	}
 
-	hart_features = sbi_scratch_offset_ptr(scratch, hart_features_offset);
-	*hart_features = 0;
 	hart_detect_features(scratch);
 
 	mstatus_init(scratch, hartid);
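
Worth noting in the hunk above: the per-hart feature data now lives in a struct
inside the scratch area rather than a bare unsigned long. The pattern is the
usual OpenSBI scratch-offset one, shown here as a condensed sketch rather than
a drop-in excerpt: the cold-boot hart allocates the offset once, and every hart
later resolves its own copy through sbi_scratch_offset_ptr():

    /* Cold boot only: reserve per-hart space for struct hart_features. */
    hart_features_offset = sbi_scratch_alloc_offset(sizeof(struct hart_features),
                                                    "HART_FEATURES");
    if (!hart_features_offset)
            return SBI_ENOMEM;

    /* Any hart, later: resolve its private copy and reset it before detection. */
    struct hart_features *hfeatures =
            sbi_scratch_offset_ptr(scratch, hart_features_offset);
    hfeatures->features = 0;
    hfeatures->pmp_count = 0;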


@@ -68,6 +68,7 @@ static void sbi_boot_prints(struct sbi_scratch *scratch, u32 hartid)
sbi_printf("Boot HART ISA : %s\n", str);
sbi_hart_get_features_str(scratch, str, sizeof(str));
sbi_printf("BOOT HART Features : %s\n", str);
sbi_printf("BOOT HART PMP Count : %d\n", sbi_hart_pmp_count(scratch));
/* Firmware details */
sbi_printf("Firmware Base : 0x%lx\n", scratch->fw_start);


@@ -8,7 +8,6 @@
  */
 
 #include <libfdt.h>
-#include <sbi/riscv_asm.h>
 #include <sbi/sbi_console.h>
 #include <sbi/sbi_math.h>
 #include <sbi/sbi_hart.h>
@@ -183,6 +182,15 @@ int fdt_reserved_memory_fixup(void *fdt)
 		return err;
 	}
 
+	/*
+	 * We assume the given device tree does not contain any memory region
+	 * child node protected by PMP. Normally PMP programming happens at
+	 * M-mode firmware. The memory space used by OpenSBI is protected.
+	 * Some additional memory spaces may be protected by platform codes.
+	 *
+	 * With above assumption, we create child nodes directly.
+	 */
+
 	if (!sbi_hart_has_feature(scratch, SBI_HART_HAS_PMP)) {
 		/*
 		 * Update the DT with firmware start & size even if PMP is not
@@ -193,24 +201,18 @@ int fdt_reserved_memory_fixup(void *fdt)
 		size = (1UL << log2roundup(scratch->fw_size));
 		return fdt_resv_memory_update_node(fdt, addr, size, 0, parent);
 	}
 
-	/*
-	 * We assume the given device tree does not contain any memory region
-	 * child node protected by PMP. Normally PMP programming happens at
-	 * M-mode firmware. The memory space used by OpenSBI is protected.
-	 * Some additional memory spaces may be protected by platform codes.
-	 *
-	 * With above assumption, we create child nodes directly.
-	 */
-	for (i = 0, j = 0; i < PMP_COUNT; i++) {
-		pmp_get(i, &prot, &addr, &size);
+	for (i = 0, j = 0; i < sbi_hart_pmp_count(scratch); i++) {
+		err = sbi_hart_pmp_get(scratch, i, &prot, &addr, &size);
+		if (err)
+			continue;
 		if (!(prot & PMP_A))
 			continue;
-		if (!(prot & (PMP_R | PMP_W | PMP_X))) {
-			return fdt_resv_memory_update_node(fdt, addr, size,
-							   j, parent);
-			j++;
-		}
+		if (prot & (PMP_R | PMP_W | PMP_X))
+			continue;
+		fdt_resv_memory_update_node(fdt, addr, size, j, parent);
+		j++;
 	}
 
 	return 0;