lib: sbi_pmu: Ensure PMU init is done before use

If a trap is taken before sbi_pmu_init() has completed, some paths may call
sbi_pmu_ctr_incr_fw(), which then goes wrong:
1. if phs_ptr_offset is zero, a bogus PMU state pointer is read from scratch
2. if phs_ptr_offset is valid but pmu_set_hart_state_ptr() has not been
   called yet, the pointer is still NULL and gets dereferenced

This situation does not occur at present, but it is reasonable to check the
pointer before using it.
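
As a rough illustration (not part of the patch itself), both failure modes
collapse into a NULL result that every caller can test. pmu_state_or_null()
below is a hypothetical helper mirroring what the guarded
pmu_get_hart_state_ptr() macro expands to; phs_ptr_offset and
sbi_scratch_read_type() are the existing symbols from sbi_pmu.c and
<sbi/sbi_scratch.h>:

	/* Hypothetical helper, for illustration only. */
	static void *pmu_state_or_null(struct sbi_scratch *scratch)
	{
		/* Case 1: sbi_pmu_init() has not allocated the PMU scratch
		 * space yet, so the offset is still zero and reading through
		 * it would yield a wrong pointer.
		 */
		if (!phs_ptr_offset)
			return NULL;

		/* Case 2: the offset is valid, but pmu_set_hart_state_ptr()
		 * has not run for this hart, so the slot still holds NULL
		 * and must not be dereferenced.
		 */
		return sbi_scratch_read_type(scratch, void *, phs_ptr_offset);
	}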

Signed-off-by: yang.zhang <yang.zhang@hexintek.com>
Reviewed-by: Anup Patel <anup@brainfault.org>

@@ -74,7 +74,7 @@ struct sbi_pmu_hart_state {
 static unsigned long phs_ptr_offset;
 
 #define pmu_get_hart_state_ptr(__scratch) \
-	sbi_scratch_read_type((__scratch), void *, phs_ptr_offset)
+	phs_ptr_offset ? sbi_scratch_read_type((__scratch), void *, phs_ptr_offset) : NULL
 
 #define pmu_thishart_state_ptr() \
 	pmu_get_hart_state_ptr(sbi_scratch_thishart_ptr())
@@ -207,6 +207,9 @@ int sbi_pmu_ctr_fw_read(uint32_t cidx, uint64_t *cval)
 	uint32_t event_code;
 	struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
 
+	if (unlikely(!phs))
+		return SBI_EINVAL;
+
 	event_idx_type = pmu_ctr_validate(phs, cidx, &event_code);
 	if (event_idx_type != SBI_PMU_EVENT_TYPE_FW)
 		return SBI_EINVAL;
@@ -432,6 +435,10 @@ int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
 		      unsigned long flags, uint64_t ival)
 {
 	struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
+
+	if (unlikely(!phs))
+		return SBI_EINVAL;
+
 	int event_idx_type;
 	uint32_t event_code;
 	int ret = SBI_EINVAL;
@@ -535,6 +542,10 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
 		     unsigned long flag)
 {
 	struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
+
+	if (unlikely(!phs))
+		return SBI_EINVAL;
+
 	int ret = SBI_EINVAL;
 	int event_idx_type;
 	uint32_t event_code;
@@ -794,6 +805,10 @@ int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
 			  uint64_t event_data)
 {
 	struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
+
+	if (unlikely(!phs))
+		return SBI_EINVAL;
+
 	int ret, event_type, ctr_idx = SBI_ENOTSUPP;
 	u32 event_code;
@@ -869,6 +884,9 @@ int sbi_pmu_ctr_incr_fw(enum sbi_pmu_fw_event_code_id fw_id)
 	uint64_t *fcounter = NULL;
 	struct sbi_pmu_hart_state *phs = pmu_thishart_state_ptr();
 
+	if (unlikely(!phs))
+		return 0;
+
 	if (likely(!phs->fw_counters_started))
 		return 0;
@@ -961,13 +979,18 @@ void sbi_pmu_set_device(const struct sbi_pmu_device *dev)
 void sbi_pmu_exit(struct sbi_scratch *scratch)
 {
+	struct sbi_pmu_hart_state *phs = pmu_get_hart_state_ptr(scratch);
+
 	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11)
 		csr_write(CSR_MCOUNTINHIBIT, 0xFFFFFFF8);
 
 	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
 		csr_write(CSR_MCOUNTEREN, -1);
 
-	pmu_reset_event_map(pmu_get_hart_state_ptr(scratch));
+	if (unlikely(!phs))
+		return;
+
+	pmu_reset_event_map(phs);
 }
 
 int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)