lib: sbi: give platform choice of using single memregion to cover OpenSBI

By default the OpenSBI itself is covered by 2 memregions for RX/RW
sections. This is required by platforms with Smepmp to enforce
proper permissions in M mode. Note: M-mode only regions can't
have RWX permissions with Smepmp. Platforms with traditional PMPs
won't be able to benefit from it, as both regions are effectively
RWX in M mode, but usually it's harmless to do so. Now we provide
these platforms with an option to disable this logic. It saves 1
PMP entry. For platforms really in short of PMPs, it does make a
difference.

Note: Platform requesting single OpenSBI memregion must be using
      traditional (old) PMP. We expect the platform code to do
      the right thing.

Signed-off-by: Bo Gan <ganboing@gmail.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20251218104243.562667-5-ganboing@gmail.com
Signed-off-by: Anup Patel <anup@brainfault.org>
This commit is contained in:
Bo Gan
2025-12-18 02:42:40 -08:00
committed by Anup Patel
parent ed1deab09e
commit 878c2676e6
2 changed files with 44 additions and 11 deletions

View File

@@ -76,6 +76,9 @@ struct sbi_platform_operations {
/* Check if specified HART is allowed to do cold boot */
bool (*cold_boot_allowed)(u32 hartid);
/* Check if platform requires single firmware region */
bool (*single_fw_region)(void);
/* Platform nascent initialization */
int (*nascent_init)(void);
@@ -347,6 +350,24 @@ static inline bool sbi_platform_cold_boot_allowed(
	return true;
}
/**
 * Check whether the platform requires a single firmware memory region
 *
 * Note: A single firmware region only works with legacy PMP because with
 * Smepmp M-mode only regions can't have RWX permissions. Platforms that
 * do not provide the single_fw_region() callback get the default of two
 * regions (separate RX and RW), which is what Smepmp platforms need.
 *
 * @param plat pointer to struct sbi_platform
 *
 * @return true if single firmware region required and false otherwise
 */
static inline bool sbi_platform_single_fw_region(
const struct sbi_platform *plat)
{
if (plat && sbi_platform_ops(plat)->single_fw_region)
return sbi_platform_ops(plat)->single_fw_region();
return false;
}
/**
 * Nascent (very early) initialization for current HART
 *

View File

@@ -923,18 +923,30 @@ int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
root.possible_harts = root_hmask;

/* Root domain firmware memory region */
if (sbi_platform_single_fw_region(sbi_platform_ptr(scratch))) {
	sbi_domain_memregion_init(scratch->fw_start, scratch->fw_size,
				  (SBI_DOMAIN_MEMREGION_M_READABLE |
				   SBI_DOMAIN_MEMREGION_M_WRITABLE |
				   SBI_DOMAIN_MEMREGION_M_EXECUTABLE |
				   SBI_DOMAIN_MEMREGION_FW),
				  &root_memregs[root_memregs_count++]);
} else {
	sbi_domain_memregion_init(scratch->fw_start,
				  scratch->fw_rw_offset,
				  (SBI_DOMAIN_MEMREGION_M_READABLE |
				   SBI_DOMAIN_MEMREGION_M_EXECUTABLE |
				   SBI_DOMAIN_MEMREGION_FW),
				  &root_memregs[root_memregs_count++]);
	sbi_domain_memregion_init((scratch->fw_start +
				   scratch->fw_rw_offset),
				  (scratch->fw_size -
				   scratch->fw_rw_offset),
				  (SBI_DOMAIN_MEMREGION_M_READABLE |
				   SBI_DOMAIN_MEMREGION_M_WRITABLE |
				   SBI_DOMAIN_MEMREGION_FW),
				  &root_memregs[root_memregs_count++]);
}
root.fw_region_inited = true;