lib: sbi: Factor-out PMP programming into separate sources

The PMP programming code is a significant part of sbi_hart.c, so
factor it out into separate sources (sbi_hart_pmp.c and
sbi_hart_pmp.h) for better maintainability.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Link: https://lore.kernel.org/r/20251209135235.423391-6-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
6 changed files with 382 additions and 351 deletions


diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
--- a/lib/sbi/sbi_hart.c
+++ b/lib/sbi/sbi_hart.c
@@ -13,26 +13,21 @@
 #include <sbi/riscv_fp.h>
-#include <sbi/sbi_bitops.h>
 #include <sbi/sbi_console.h>
-#include <sbi/sbi_domain.h>
 #include <sbi/sbi_csr_detect.h>
 #include <sbi/sbi_error.h>
 #include <sbi/sbi_hart.h>
 #include <sbi/sbi_hart_protection.h>
-#include <sbi/sbi_math.h>
+#include <sbi/sbi_hart_pmp.h>
 #include <sbi/sbi_platform.h>
 #include <sbi/sbi_pmu.h>
 #include <sbi/sbi_string.h>
 #include <sbi/sbi_trap.h>
-#include <sbi/sbi_hfence.h>
 
 extern void __sbi_expected_trap(void);
 extern void __sbi_expected_trap_hext(void);
 
 void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;
 
-static unsigned long hart_features_offset;
-static DECLARE_BITMAP(fw_smepmp_ids, PMP_COUNT);
-static bool fw_smepmp_ids_inited;
+unsigned long hart_features_offset;
 
 static void mstatus_init(struct sbi_scratch *scratch)
 {
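Note on the hunk above: hart_features_offset loses its static qualifier because the PMP accessors removed in the hunks below move to the new sbi_hart_pmp.c yet still resolve the hart features through this scratch offset. For that to link, the new header presumably carries a matching extern declaration, along these lines (assumed, not shown in this excerpt):

	/* include/sbi/sbi_hart_pmp.h (assumed declaration) */
	extern unsigned long hart_features_offset;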
@@ -277,30 +272,6 @@ unsigned int sbi_hart_mhpm_mask(struct sbi_scratch *scratch)
 	return hfeatures->mhpm_mask;
 }
 
-unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
-{
-	struct sbi_hart_features *hfeatures =
-			sbi_scratch_offset_ptr(scratch, hart_features_offset);
-
-	return hfeatures->pmp_count;
-}
-
-unsigned int sbi_hart_pmp_log2gran(struct sbi_scratch *scratch)
-{
-	struct sbi_hart_features *hfeatures =
-			sbi_scratch_offset_ptr(scratch, hart_features_offset);
-
-	return hfeatures->pmp_log2gran;
-}
-
-unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
-{
-	struct sbi_hart_features *hfeatures =
-			sbi_scratch_offset_ptr(scratch, hart_features_offset);
-
-	return hfeatures->pmp_addr_bits;
-}
-
 unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
 {
 	struct sbi_hart_features *hfeatures =
@@ -309,307 +280,6 @@ unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
 	return hfeatures->mhpm_bits;
 }
 
-bool sbi_hart_smepmp_is_fw_region(unsigned int pmp_idx)
-{
-	if (!fw_smepmp_ids_inited)
-		return false;
-
-	return bitmap_test(fw_smepmp_ids, pmp_idx) ? true : false;
-}
-
-static void sbi_hart_pmp_fence(void)
-{
-	/*
-	 * As per section 3.7.2 of privileged specification v1.12,
-	 * virtual address translations can be speculatively performed
-	 * (even before actual access). These, along with PMP translations,
-	 * can be cached. This can pose a problem with CPU hotplug
-	 * and non-retentive suspend scenarios because PMP states are
-	 * not preserved.
-	 * It is advisable to flush the caching structures under such
-	 * conditions.
-	 */
-	if (misa_extension('S')) {
-		__asm__ __volatile__("sfence.vma");
-
-		/*
-		 * If hypervisor mode is supported, flush caching
-		 * structures in guest mode too.
-		 */
-		if (misa_extension('H'))
-			__sbi_hfence_gvma_all();
-	}
-}
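Both configure paths removed below end with a single call to this helper, so the flush happens once per reprogramming pass rather than once per entry. Schematically (a usage sketch mirroring the callers in this file, not new code from the commit):

	sbi_domain_for_each_memregion(dom, reg) {
		/* ... compute pmp_flags for reg ... */
		pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
	}
	sbi_hart_pmp_fence();	/* one fence after all entries are written */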
-static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
-				struct sbi_domain *dom,
-				struct sbi_domain_memregion *reg,
-				unsigned int pmp_idx,
-				unsigned int pmp_flags,
-				unsigned int pmp_log2gran,
-				unsigned long pmp_addr_max)
-{
-	unsigned long pmp_addr = reg->base >> PMP_SHIFT;
-
-	if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
-		sbi_platform_pmp_set(sbi_platform_ptr(scratch),
-				     pmp_idx, reg->flags, pmp_flags,
-				     reg->base, reg->order);
-		pmp_set(pmp_idx, pmp_flags, reg->base, reg->order);
-	} else {
-		sbi_printf("Can not configure pmp for domain %s because"
-			   " memory region address 0x%lx or size 0x%lx "
-			   "is not in range.\n", dom->name, reg->base,
-			   reg->order);
-	}
-}
-static bool is_valid_pmp_idx(unsigned int pmp_count, unsigned int pmp_idx)
-{
-	if (pmp_count > pmp_idx)
-		return true;
-
-	sbi_printf("error: insufficient PMP entries\n");
-	return false;
-}
-static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch)
-{
-	struct sbi_domain_memregion *reg;
-	struct sbi_domain *dom = sbi_domain_thishart_ptr();
-	unsigned int pmp_log2gran, pmp_bits;
-	unsigned int pmp_idx, pmp_count;
-	unsigned long pmp_addr_max;
-	unsigned int pmp_flags;
-
-	pmp_count = sbi_hart_pmp_count(scratch);
-	pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
-	pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
-	pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
-
-	/*
-	 * Set the RLB so that we can write to PMP entries without
-	 * enforcement even if some entries are locked.
-	 */
-	csr_set(CSR_MSECCFG, MSECCFG_RLB);
-
-	/* Disable the reserved entry */
-	pmp_disable(SBI_SMEPMP_RESV_ENTRY);
-
-	/* Program M-only regions when MML is not set. */
-	pmp_idx = 0;
-	sbi_domain_for_each_memregion(dom, reg) {
-		/* Skip reserved entry */
-		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
-			pmp_idx++;
-		if (!is_valid_pmp_idx(pmp_count, pmp_idx))
-			return SBI_EFAIL;
-
-		/* Skip shared and SU-only regions */
-		if (!SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
-			pmp_idx++;
-			continue;
-		}
-
-		/*
-		 * Track firmware PMP entries to preserve them during
-		 * domain switches. Under Smepmp, M-mode requires
-		 * explicit PMP entries to access firmware code/data.
-		 * These entries must remain enabled across domain
-		 * context switches to prevent M-mode access faults.
-		 */
-		if (SBI_DOMAIN_MEMREGION_IS_FIRMWARE(reg->flags)) {
-			if (fw_smepmp_ids_inited) {
-				/* Check inconsistent firmware region */
-				if (!sbi_hart_smepmp_is_fw_region(pmp_idx))
-					return SBI_EINVAL;
-			} else {
-				bitmap_set(fw_smepmp_ids, pmp_idx, 1);
-			}
-		}
-
-		pmp_flags = sbi_domain_get_smepmp_flags(reg);
-		sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
-				    pmp_log2gran, pmp_addr_max);
-	}
-	fw_smepmp_ids_inited = true;
-
-	/* Set the MML to enforce new encoding */
-	csr_set(CSR_MSECCFG, MSECCFG_MML);
-
-	/* Program shared and SU-only regions */
-	pmp_idx = 0;
-	sbi_domain_for_each_memregion(dom, reg) {
-		/* Skip reserved entry */
-		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
-			pmp_idx++;
-		if (!is_valid_pmp_idx(pmp_count, pmp_idx))
-			return SBI_EFAIL;
-
-		/* Skip M-only regions */
-		if (SBI_DOMAIN_MEMREGION_M_ONLY_ACCESS(reg->flags)) {
-			pmp_idx++;
-			continue;
-		}
-
-		pmp_flags = sbi_domain_get_smepmp_flags(reg);
-		sbi_hart_smepmp_set(scratch, dom, reg, pmp_idx++, pmp_flags,
-				    pmp_log2gran, pmp_addr_max);
-	}
-
-	/*
-	 * All entries are programmed.
-	 * Keep the RLB bit so that dynamic mappings can be done.
-	 */
-
-	sbi_hart_pmp_fence();
-
-	return 0;
-}
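The pmp_addr_max bound used above is easier to follow with concrete numbers. A worked example (illustrative values, not from the commit), assuming sbi_hart_pmp_addrbits() reports 54 implemented pmpaddr bits on RV64:

	/*
	 * pmp_bits     = 54 - 1 = 53
	 * pmp_addr_max = (1UL << 53) | ((1UL << 53) - 1)
	 *              = 2^54 - 1
	 *
	 * i.e. the largest value a 54-bit pmpaddr CSR can hold. Splitting
	 * the shift in two also avoids the undefined "1UL << 64" that a
	 * direct (1UL << pmp_addr_bits) would hit when pmp_addr_bits
	 * equals __riscv_xlen. A region is programmable when its
	 * base >> PMP_SHIFT falls below this bound and its order is at
	 * least the PMP granularity.
	 */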
-static int sbi_hart_smepmp_map_range(struct sbi_scratch *scratch,
-				     unsigned long addr, unsigned long size)
-{
-	/* shared R/W access for M and S/U mode */
-	unsigned int pmp_flags = (PMP_W | PMP_X);
-	unsigned long order, base = 0;
-
-	if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
-		return SBI_ENOSPC;
-
-	for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
-	     order <= __riscv_xlen; order++) {
-		if (order < __riscv_xlen) {
-			base = addr & ~((1UL << order) - 1UL);
-			if ((base <= addr) &&
-			    (addr < (base + (1UL << order))) &&
-			    (base <= (addr + size - 1UL)) &&
-			    ((addr + size - 1UL) < (base + (1UL << order))))
-				break;
-		} else {
-			return SBI_EFAIL;
-		}
-	}
-
-	sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
-			     SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
-			     pmp_flags, base, order);
-	pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
-
-	return SBI_OK;
-}
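The loop above searches for the smallest naturally aligned power-of-two (NAPOT) region that fully covers [addr, addr + size). A worked example with hypothetical inputs (not from the commit):

	/*
	 * addr = 0x80042000, size = 0x3000, pmp_log2gran = 12:
	 *
	 * log2roundup(0x3000) = 14, so the search starts at order 14.
	 *   order 14: base = 0x80040000, region ends at 0x80044000,
	 *             but addr + size - 1 = 0x80044fff -> no fit
	 *   order 15: base = 0x80040000, region ends at 0x80048000,
	 *             0x80044fff < 0x80048000 -> fit, break
	 *
	 * The reserved entry is then programmed as a 2^15-byte NAPOT
	 * region at 0x80040000.
	 */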
-static int sbi_hart_smepmp_unmap_range(struct sbi_scratch *scratch,
-				       unsigned long addr, unsigned long size)
-{
-	sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
-
-	return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
-}
-static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch)
-{
-	struct sbi_domain_memregion *reg;
-	struct sbi_domain *dom = sbi_domain_thishart_ptr();
-	unsigned long pmp_addr, pmp_addr_max;
-	unsigned int pmp_log2gran, pmp_bits;
-	unsigned int pmp_idx, pmp_count;
-	unsigned int pmp_flags;
-
-	pmp_count = sbi_hart_pmp_count(scratch);
-	pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
-	pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
-	pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
-
-	pmp_idx = 0;
-	sbi_domain_for_each_memregion(dom, reg) {
-		if (!is_valid_pmp_idx(pmp_count, pmp_idx))
-			return SBI_EFAIL;
-
-		pmp_flags = 0;
-
-		/*
-		 * If permissions are to be enforced for all modes on
-		 * this region, the lock bit should be set.
-		 */
-		if (reg->flags & SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS)
-			pmp_flags |= PMP_L;
-
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
-			pmp_flags |= PMP_R;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
-			pmp_flags |= PMP_W;
-		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
-			pmp_flags |= PMP_X;
-
-		pmp_addr = reg->base >> PMP_SHIFT;
-		if (pmp_log2gran <= reg->order && pmp_addr < pmp_addr_max) {
-			sbi_platform_pmp_set(sbi_platform_ptr(scratch),
-					     pmp_idx, reg->flags, pmp_flags,
-					     reg->base, reg->order);
-			pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
-		} else {
-			sbi_printf("Can not configure pmp for domain %s because"
-				   " memory region address 0x%lx or size 0x%lx "
-				   "is not in range.\n", dom->name, reg->base,
-				   reg->order);
-		}
-	}
-
-	sbi_hart_pmp_fence();
-
-	return 0;
-}
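To make the flag translation concrete, consider two hypothetical regions (illustrative, not from the commit):

	/*
	 * - RAM region with SU_READABLE | SU_WRITABLE | SU_EXECUTABLE and
	 *   no ENF_PERMISSIONS: pmp_flags = PMP_R | PMP_W | PMP_X with
	 *   PMP_L clear, so S/U-mode gets full access while the unlocked
	 *   entry leaves M-mode unconstrained.
	 * - Firmware region with no SU_* flags: pmp_flags = 0, so the
	 *   entry matches S/U accesses and denies them all, yet M-mode
	 *   access stays intact because the entry is not locked.
	 */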
-static void sbi_hart_pmp_unconfigure(struct sbi_scratch *scratch)
-{
-	int i, pmp_count = sbi_hart_pmp_count(scratch);
-
-	for (i = 0; i < pmp_count; i++) {
-		/* Don't revoke firmware access permissions */
-		if (sbi_hart_smepmp_is_fw_region(i))
-			continue;
-
-		sbi_platform_pmp_disable(sbi_platform_ptr(scratch), i);
-		pmp_disable(i);
-	}
-}
-static struct sbi_hart_protection pmp_protection = {
-	.name = "pmp",
-	.rating = 100,
-	.configure = sbi_hart_oldpmp_configure,
-	.unconfigure = sbi_hart_pmp_unconfigure,
-};
-
-static struct sbi_hart_protection epmp_protection = {
-	.name = "epmp",
-	.rating = 200,
-	.configure = sbi_hart_smepmp_configure,
-	.unconfigure = sbi_hart_pmp_unconfigure,
-	.map_range = sbi_hart_smepmp_map_range,
-	.unmap_range = sbi_hart_smepmp_unmap_range,
-};
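Only one of these two is ever registered by the init function below, but the rating fields suggest the hart protection framework picks the highest-rated registration when several are present. Assuming that convention, a platform with its own protection hardware could out-rate both; a hypothetical sketch (names and callbacks invented for illustration):

	static struct sbi_hart_protection my_iopmp_protection = {
		.name = "my-iopmp",			/* hypothetical */
		.rating = 300,				/* preferred over epmp (200) */
		.configure = my_iopmp_configure,	/* hypothetical callbacks */
		.unconfigure = my_iopmp_unconfigure,
	};

	sbi_hart_protection_register(&my_iopmp_protection);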
-static int sbi_hart_pmp_init(struct sbi_scratch *scratch)
-{
-	int rc;
-
-	if (sbi_hart_pmp_count(scratch)) {
-		if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) {
-			rc = sbi_hart_protection_register(&epmp_protection);
-			if (rc)
-				return rc;
-		} else {
-			rc = sbi_hart_protection_register(&pmp_protection);
-			if (rc)
-				return rc;
-		}
-	}
-
-	return 0;
-}
-
 int sbi_hart_priv_version(struct sbi_scratch *scratch)
 {
 	struct sbi_hart_features *hfeatures =