corrects vector_functions

Eyck-Alexander Jentzsch 2025-02-09 17:49:56 +01:00
parent 2b85748279
commit c1f9328528
2 changed files with 19 additions and 14 deletions

View File

@@ -92,15 +92,18 @@ vreg_view read_vmask(uint8_t* V, uint16_t VLEN, uint16_t elem_count, uint8_t reg
 }
 uint64_t vector_load_store(void* core, std::function<bool(void*, uint64_t, uint64_t, uint8_t*)> load_store_fn, uint8_t* V, uint16_t VLEN,
 uint8_t addressed_register, uint64_t base_addr, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm,
-uint8_t elem_size_byte, uint64_t elem_count, int8_t EMUL_pow, uint8_t segment_size, int64_t stride) {
+uint8_t elem_size_byte, uint64_t elem_count, int8_t EMUL_pow, uint8_t segment_size, int64_t stride,
+bool use_stride) {
 // eew = elem_size_byte * 8
 assert(pow(2, EMUL_pow) * segment_size <= 8);
 assert(segment_size > 0);
-assert((elem_count & (elem_count - 1)) == 0); // check that elem_count is power of 2
+// assert((elem_count & (elem_count - 1)) == 0); // check that elem_count is power of 2, this check does not hold for vlm.v and vsm.v
 assert(elem_count <= VLEN * RFS / 8);
 unsigned emul_stride = EMUL_pow <= 0 ? 1 : pow(2, EMUL_pow);
 assert(emul_stride * segment_size <= 8);
 assert(!(addressed_register % emul_stride));
+if(!use_stride)
+stride = elem_size_byte * segment_size;
 vreg_view mask_view = read_vmask(V, VLEN, elem_count, 0);
 // elements w/ index smaller than vstart are in the prestart and get skipped
 // body is from vstart to min(elem_count, vl)
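Note: the power-of-two check is commented out rather than deleted because whole-register mask accesses break it. vlm.v/vsm.v operate on ceil(vl/8) mask bytes, and that byte count need not be a power of two. A minimal illustration, with an assumed vl (not taken from this commit):

#include <cassert>
#include <cstdint>

int main() {
    uint64_t vl = 20;                             // assumed example vector length
    uint64_t elem_count = (vl + 7) / 8;           // vlm.v/vsm.v touch ceil(vl/8) = 3 bytes
    assert((elem_count & (elem_count - 1)) != 0); // 3 is not a power of two
}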
@@ -113,7 +116,7 @@ uint64_t vector_load_store(void* core, std::function<bool(void*, uint64_t, uint6
 // base + selected register + current_elem + current_segment
 uint8_t* addressed_elem = V + (addressed_register * VLEN / 8) + (elem_size_byte * idx) + (VLEN / 8 * s_idx * emul_stride);
 assert(addressed_elem <= V + VLEN * RFS / 8);
-uint64_t addr = base_addr + (elem_size_byte) * (idx * segment_size + s_idx) * stride;
+uint64_t addr = base_addr + stride * idx + s_idx * elem_size_byte;
 if(!load_store_fn(core, addr, elem_size_byte, addressed_elem))
 return trap_idx;
 }
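The old address expression scaled both the element size and the stride into one product, so strided segment accesses landed on the wrong bytes. The corrected form matches the RVV strided-segment layout: segment idx starts at base_addr + idx * stride, and field s_idx lies s_idx * elem_size_byte bytes into that segment. A worked example with assumed operands (EEW=32, two fields per segment, 32-byte stride):

#include <cassert>
#include <cstdint>

int main() {
    uint64_t base_addr = 0x1000;
    int64_t stride = 32;        // byte distance between segment starts
    uint8_t elem_size_byte = 4; // EEW = 32
    // segment idx = 1, field s_idx = 1:
    uint64_t addr = base_addr + stride * 1 + 1 * elem_size_byte;
    assert(addr == 0x1024);     // old formula gave base + 4 * (1 * 2 + 1) * 32 = base + 0x180
}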
@@ -129,7 +132,8 @@ uint64_t vector_load_store(void* core, std::function<bool(void*, uint64_t, uint6
 }
 // elements w/ index larger than elem_count are in the tail (fractional LMUL)
 // elements w/ index larger than vl are in the tail
-for(unsigned idx = std::min(elem_count, vl); idx < VLEN / 8; idx++) {
+unsigned maximum_elems = VLEN * vtype.lmul() / (elem_size_byte * 8);
+for(unsigned idx = std::min(elem_count, vl); idx < maximum_elems; idx++) {
 for(unsigned s_idx = 0; s_idx < segment_size; s_idx++) {
 // base + selected register + current_elem + current_segment
 uint8_t* addressed_elem = V + (addressed_register * VLEN / 8) + (elem_size_byte * idx) + (VLEN / 8 * s_idx * emul_stride);
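The previous tail bound of VLEN / 8 is the byte count of a single register, which only equals the element capacity when SEW=8 and LMUL=1. The new bound VLEN * LMUL / SEW counts the elements the whole register group actually holds. A sketch of the difference, with assumed parameters and vtype.lmul() modelled as a double since fractional LMUL is possible:

#include <cassert>

int main() {
    unsigned VLEN = 128, SEW = 32;
    double lmul = 1.0;                                    // stand-in for vtype.lmul()
    unsigned maximum_elems = unsigned(VLEN * lmul / SEW);
    assert(maximum_elems == 4); // the group holds 4 32-bit elements
    assert(VLEN / 8 == 16);     // old bound: 16 iterations, 4x past the register's end
}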
@@ -140,16 +144,16 @@ uint64_t vector_load_store(void* core, std::function<bool(void*, uint64_t, uint6
 }
 return 0;
 }
-int64_t read_n_bits(uint8_t* V, unsigned n) {
+uint64_t read_n_bits(uint8_t* V, unsigned n) {
 switch(n) {
 case 8:
-return static_cast<int64_t>(*reinterpret_cast<int8_t*>(V));
+return *reinterpret_cast<uint8_t*>(V);
 case 16:
-return static_cast<int64_t>(*reinterpret_cast<int16_t*>(V));
+return *reinterpret_cast<uint16_t*>(V);
 case 32:
-return static_cast<int64_t>(*reinterpret_cast<int32_t*>(V));
+return *reinterpret_cast<uint32_t*>(V);
 case 64:
-return static_cast<int64_t>(*reinterpret_cast<int64_t*>(V));
+return *reinterpret_cast<uint64_t*>(V);
 default:
 throw new std::invalid_argument("Invalid arg in read_n_bits");
 }
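Switching read_n_bits from sign extension to zero extension matters for the indexed accesses below: the RVV indexed addressing modes treat index elements as unsigned byte offsets. A sketch of the difference for an 8-bit index of 0xFF (example addresses assumed):

#include <cassert>
#include <cstdint>

int main() {
    uint8_t raw = 0xFF;                                   // one 8-bit index element
    int64_t old_val = *reinterpret_cast<int8_t*>(&raw);   // sign-extended: -1
    uint64_t new_val = *reinterpret_cast<uint8_t*>(&raw); // zero-extended: 255
    uint64_t base_addr = 0x1000;
    assert(base_addr + uint64_t(old_val) == 0x0FFF); // wrapped to one byte below base
    assert(base_addr + new_val == 0x10FF);           // base + 255, as intended
}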
@@ -165,10 +169,9 @@ uint64_t vector_load_store_index(void* core, std::function<bool(void*, uint64_t,
 assert(segment_size > 0);
 assert((elem_count & (elem_count - 1)) == 0); // check that elem_count is power of 2
 assert(elem_count <= VLEN * RFS / 8);
-unsigned data_emul_stride = vtype.lmul() < 0 ? 0 : vtype.lmul();
+unsigned data_emul_stride = vtype.lmul() < 1 ? 1 : vtype.lmul();
 assert(data_emul_stride * segment_size <= 8);
 unsigned data_elem_size_byte = vtype.sew() / 8;
-assert(!(addressed_register % data_emul_stride));
 vreg_view mask_view = read_vmask(V, VLEN, elem_count, 0);
 // elements w/ index smaller than vstart are in the prestart and get skipped
 // body is from vstart to min(elem_count, vl)
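Clamping at 1 instead of 0 fixes fractional LMUL: any vtype.lmul() below 1 previously yielded data_emul_stride == 0, and the (now removed) addressed_register % data_emul_stride assertion then divided by zero. A register group always spans at least one whole register, as this sketch (with vtype.lmul() again modelled as a double) shows:

#include <cassert>

int main() {
    double lmul = 0.5; // fractional LMUL, e.g. mf2
    // old: lmul < 0 ? 0 : lmul  ->  stride 0, and `x % 0` is undefined behaviour
    unsigned data_emul_stride = lmul < 1 ? 1 : unsigned(lmul);
    assert(data_emul_stride == 1);
}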
@@ -180,7 +183,7 @@ uint64_t vector_load_store_index(void* core, std::function<bool(void*, uint64_t,
 uint8_t* offset_elem = V + (index_register * VLEN / 8) + (index_elem_size_byte * idx);
 assert(offset_elem <= (V + VLEN * RFS / 8 - index_elem_size_byte)); // ensure reading index_elem_size_bytes is legal
 // read sew bits from offset_elem truncate / extend to XLEN bits
-int64_t offset_val = read_n_bits(offset_elem, index_elem_size_byte * 8);
+uint64_t offset_val = read_n_bits(offset_elem, index_elem_size_byte * 8);
 assert(XLEN == 64 | XLEN == 32);
 uint64_t mask = XLEN == 64 ? std::numeric_limits<uint64_t>::max() : std::numeric_limits<uint32_t>::max();
 unsigned index_offset = offset_val & mask;
@@ -207,7 +210,8 @@ uint64_t vector_load_store_index(void* core, std::function<bool(void*, uint64_t,
 }
 // elements w/ index larger than elem_count are in the tail (fractional LMUL)
 // elements w/ index larger than vl are in the tail
-for(unsigned idx = std::min(elem_count, vl); idx < VLEN / 8; idx++) {
+unsigned maximum_elems = VLEN * vtype.lmul() / (data_elem_size_byte * 8);
+for(unsigned idx = std::min(elem_count, vl); idx < maximum_elems; idx++) {
 for(unsigned s_idx = 0; s_idx < segment_size; s_idx++) {
 // base + selected register + current_elem + current_segment
 uint8_t* addressed_elem =

View File

@@ -56,7 +56,8 @@ bool softvec_read(void* core, uint64_t addr, uint64_t length, uint8_t* data);
 bool softvec_write(void* core, uint64_t addr, uint64_t length, uint8_t* data);
 uint64_t vector_load_store(void* core, std::function<bool(void*, uint64_t, uint64_t, uint8_t*)> load_store_fn, uint8_t* V, uint16_t VLEN,
 uint8_t addressed_register, uint64_t base_addr, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm,
-uint8_t elem_size_byte, uint64_t elem_count, int8_t EMUL_pow, uint8_t segment_size = 1, int64_t stride = 1);
+uint8_t elem_size_byte, uint64_t elem_count, int8_t EMUL_pow, uint8_t segment_size = 1, int64_t stride = 0,
+bool use_stride = false);
 uint64_t vector_load_store_index(void* core, std::function<bool(void*, uint64_t, uint64_t, uint8_t*)> load_store_fn, uint8_t* V,
 uint16_t VLEN, uint8_t XLEN, uint8_t addressed_register, uint8_t index_register, uint64_t base_addr,
 uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, uint8_t elem_size_byte, uint64_t elem_count,
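The new defaults keep existing unit-stride call sites source compatible: stride defaults to 0 and use_stride to false, so the stride is derived internally, while strided instructions pass the rs2 byte stride and set use_stride. A runnable stand-in with the real parameter list abbreviated (names here are hypothetical):

#include <cstdint>
#include <iostream>

void vector_load_store_stub(uint8_t elem_size_byte, uint8_t segment_size = 1, int64_t stride = 0, bool use_stride = false) {
    if(!use_stride)
        stride = int64_t(elem_size_byte) * segment_size; // same rule as the patched function
    std::cout << "effective stride: " << stride << " bytes\n";
}

int main() {
    vector_load_store_stub(4);              // unit-stride vle32.v: prints 4
    vector_load_store_stub(4, 2);           // unit-stride segment load, 2 fields: prints 8
    vector_load_store_stub(4, 1, 32, true); // strided vlse32.v with rs2 = 32: prints 32
}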