lib: sbi: Allow forceful queueing of data in sbi_fifo_enqueue()

Extend sbi_fifo_enqueue() to allow forceful queueing by dropping
data from the tail.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Himanshu Chauhan <hchauhan@ventanamicro.com>
This commit is contained in:
Anup Patel
2024-07-04 22:37:44 +05:30
committed by Anup Patel
parent 9a275fc153
commit 94c3c53a56
4 changed files with 45 additions and 31 deletions

View File

@@ -30,7 +30,7 @@ enum sbi_fifo_inplace_update_types {
};
int sbi_fifo_dequeue(struct sbi_fifo *fifo, void *data);
int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data);
int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data, bool force);
void sbi_fifo_init(struct sbi_fifo *fifo, void *queue_mem, u16 entries,
u16 entry_size);
int sbi_fifo_is_empty(struct sbi_fifo *fifo);

View File

@@ -90,6 +90,39 @@ static inline void __sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
fifo->avail++;
}
/* Note: must be called with fifo->qlock held */
static inline void __sbi_fifo_dequeue(struct sbi_fifo *fifo, void *data)
{
	/*
	 * Pop the entry at the FIFO tail.
	 *
	 * If @data is NULL the entry is silently discarded — this is how
	 * sbi_fifo_enqueue(..., force=true) makes room in a full FIFO by
	 * dropping the oldest entry. Otherwise the entry is copied out to
	 * @data before the tail is advanced.
	 */
	if (!data)
		goto skip_data_copy;

	/*
	 * Fast paths for common power-of-two entry sizes avoid a memcpy
	 * call; the default case handles arbitrary entry sizes.
	 */
	switch (fifo->entry_size) {
	case 1:
		*(char *)data = *(char *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
		break;
	case 2:
		*(u16 *)data = *(u16 *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
		break;
	case 4:
		*(u32 *)data = *(u32 *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
		break;
#if __riscv_xlen > 32
	/* 8-byte single-load copy is only available on RV64 and wider. */
	case 8:
		*(u64 *)data = *(u64 *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
		break;
#endif
	default:
		sbi_memcpy(data, fifo->queue + (u32)fifo->tail * fifo->entry_size,
			   fifo->entry_size);
		break;
	}
skip_data_copy:
	/* Consume the entry and wrap the tail index at the ring boundary. */
	fifo->avail--;
	fifo->tail++;
	if (fifo->tail >= fifo->num_entries)
		fifo->tail = 0;
}
/* Note: must be called with fifo->qlock held */
static inline bool __sbi_fifo_is_empty(struct sbi_fifo *fifo)
@@ -173,7 +206,7 @@ int sbi_fifo_inplace_update(struct sbi_fifo *fifo, void *in,
return ret;
}
int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data, bool force)
{
if (!fifo || !data)
return SBI_EINVAL;
@@ -181,9 +214,13 @@ int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
spin_lock(&fifo->qlock);
if (__sbi_fifo_is_full(fifo)) {
spin_unlock(&fifo->qlock);
return SBI_ENOSPC;
if (!force) {
spin_unlock(&fifo->qlock);
return SBI_ENOSPC;
}
__sbi_fifo_dequeue(fifo, NULL);
}
__sbi_fifo_enqueue(fifo, data);
spin_unlock(&fifo->qlock);
@@ -203,31 +240,7 @@ int sbi_fifo_dequeue(struct sbi_fifo *fifo, void *data)
return SBI_ENOENT;
}
switch (fifo->entry_size) {
case 1:
*(char *)data = *(char *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
break;
case 2:
*(u16 *)data = *(u16 *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
break;
case 4:
*(u32 *)data = *(u32 *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
break;
#if __riscv_xlen > 32
case 8:
*(u64 *)data = *(u64 *)(fifo->queue + (u32)fifo->tail * fifo->entry_size);
break;
#endif
default:
sbi_memcpy(data, fifo->queue + (u32)fifo->tail * fifo->entry_size,
fifo->entry_size);
break;
}
fifo->avail--;
fifo->tail++;
if (fifo->tail >= fifo->num_entries)
fifo->tail = 0;
__sbi_fifo_dequeue(fifo, data);
spin_unlock(&fifo->qlock);

View File

@@ -667,7 +667,7 @@ static int sse_ipi_inject_send(unsigned long hartid, uint32_t event_id)
sse_inject_fifo_r =
sbi_scratch_offset_ptr(remote_scratch, sse_inject_fifo_off);
ret = sbi_fifo_enqueue(sse_inject_fifo_r, &evt);
ret = sbi_fifo_enqueue(sse_inject_fifo_r, &evt, false);
if (ret)
return SBI_EFAIL;

View File

@@ -351,7 +351,8 @@ static int tlb_update(struct sbi_scratch *scratch,
ret = sbi_fifo_inplace_update(tlb_fifo_r, data, tlb_update_cb);
if (ret == SBI_FIFO_UNCHANGED && sbi_fifo_enqueue(tlb_fifo_r, data) < 0) {
if (ret == SBI_FIFO_UNCHANGED &&
sbi_fifo_enqueue(tlb_fifo_r, data, false) < 0) {
/**
* For now, Busy loop until there is space in the fifo.
* There may be case where target hart is also