////////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2025, MINRES Technologies GmbH
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
//    may be used to endorse or promote products derived from this software
//    without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Contributors:
//       alex@minres.com - initial API and implementation
////////////////////////////////////////////////////////////////////////////////

#include "vector_functions.h"
|
|
#include "iss/vm_types.h"
|
|
#include <algorithm>
|
|
#include <cassert>
|
|
#include <cstddef>
|
|
#include <cstdint>
|
|
#include <functional>
|
|
#include <limits>
|
|
#include <math.h>
|
|
#include <nonstd/variant.hpp>
|
|
|
|
namespace softvector {
|
|
|
|
// read `length` bytes of physical memory at `addr` into `data` via the ISS core interface
bool softvec_read(void* core, uint64_t addr, uint64_t length, uint8_t* data) {
    iss::status status = static_cast<iss::arch_if*>(core)->read(iss::address_type::PHYSICAL, iss::access_type::READ,
                                                                 0 /*traits<ARCH>::MEM*/, addr, length, data);
    return status == iss::Ok;
}
// write `length` bytes from `data` to physical memory at `addr` via the ISS core interface
bool softvec_write(void* core, uint64_t addr, uint64_t length, uint8_t* data) {
    iss::status status = static_cast<iss::arch_if*>(core)->write(iss::address_type::PHYSICAL, iss::access_type::WRITE,
                                                                  0 /*traits<ARCH>::MEM*/, addr, length, data);
    return status == iss::Ok;
}

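// Note: both helpers match the std::function<bool(void*, uint64_t, uint64_t, uint8_t*)> callback
// type taken by vector_load_store below, so they can be passed directly as its load_store_fn
// argument (softvec_read for vector loads, softvec_write for vector stores).
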
using vlen_t = uint64_t;
// non-owning view onto a byte range of the vector register file
struct vreg_view {
    uint8_t* start;
    size_t size;
    // typed access to element `idx` within the viewed range
    template <typename T> T& get(size_t idx = 0) {
        assert((idx * sizeof(T)) <= size);
        return *(reinterpret_cast<T*>(start) + idx);
    }
};

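// Illustrative use of vreg_view (assuming, as the code below does, a register file V of 32
// registers, each VLEN/8 bytes wide):
//   vreg_view row{V + reg_idx * VLEN / 8, VLEN / 8u};
//   uint32_t third = row.get<uint32_t>(2); // third 32-bit element of register reg_idx
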
vtype_t::vtype_t(uint32_t vtype_val) {
    // RV32 layout: vill lives in bit 31; move it to bit 63 of the 64-bit internal representation
    underlying = (static_cast<uint64_t>(vtype_val) & 0x80000000ULL) << 32 | (vtype_val & ~0x80000000U);
}
vtype_t::vtype_t(uint64_t vtype_val) { underlying = vtype_val; }
bool vtype_t::vill() { return underlying >> 63; }
bool vtype_t::vma() { return (underlying >> 7) & 1; }
bool vtype_t::vta() { return (underlying >> 6) & 1; }
unsigned vtype_t::sew() {
    uint8_t vsew = (underlying >> 3) & 0b111;
    // SEW = 2^(3 + vsew), i.e. 8, 16, 32 or 64 bits
    return 1 << (3 + vsew);
}
double vtype_t::lmul() {
    uint8_t vlmul = underlying & 0b111;
    assert(vlmul != 0b100); // reserved encoding
    // sign-extend the 3-bit vlmul field; LMUL = 2^vlmul, fractional for negative values
    int8_t signed_vlmul = (vlmul >> 2) ? 0b11111000 | vlmul : vlmul;
    return pow(2, signed_vlmul);
}

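// Worked example (illustrative): vtype_val = 0xD1 = 0b1101'0001 decodes to vlmul = 001, vsew = 010,
// vta = 1, vma = 1, vill = 0, so vtype_t{uint32_t{0xD1}}.sew() == 32 and .lmul() == 2.0.
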
// return a view onto mask register `reg_idx` covering `num_elem` mask bits (one bit per element)
vreg_view read_vmask(uint8_t* V, uint8_t VLEN, uint16_t num_elem, uint8_t reg_idx) {
    uint8_t* mask_start = V + VLEN / 8 * reg_idx;
    return {mask_start, num_elem / 8u}; // this can return size == 0 as num_elem can be as low as 1, probably not wanted
}
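// Illustrative mask lookup (this is how vector_load_store below consumes the view):
//   vreg_view m = read_vmask(V, VLEN, num_elem, 0); // masks conventionally live in v0
//   bool active = m.get<uint8_t>(idx / 8) & (1 << (idx % 8));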
// emulate a (possibly segmented/strided) vector load or store; returns the element index at which
// a memory access failed, or 0 if all accesses succeeded
uint64_t vector_load_store(void* core, std::function<bool(void*, uint64_t, uint64_t, uint8_t*)> load_store_fn, uint8_t* V, uint8_t VLEN,
                           uint8_t vd, uint64_t base_addr, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, uint8_t elem_byte_size,
                           int8_t EMUL_pow, uint8_t segment_size, int64_t stride) {
    assert(pow(2, EMUL_pow) * segment_size <= 8);
    uint64_t num_elem = VLEN * vtype.lmul() / vtype.sew();
    assert((num_elem & (num_elem - 1)) == 0); // check that num_elem is a power of 2
    unsigned eew = elem_byte_size * 8;
    unsigned emul_stride = EMUL_pow <= 0 ? 1 : pow(2, EMUL_pow);
    assert(emul_stride * segment_size <= 8);
    assert(!(vd % emul_stride));
    vreg_view mask_view = read_vmask(V, VLEN, num_elem, 0);
    // elements w/ index smaller than vstart are in the prestart and get skipped
    // body is from vstart to min(num_elem, vl)
    for(unsigned idx = vstart; idx < std::min(num_elem, vl); idx++) {
        vstart = idx;
        // vm decides whether the body element is active
        uint8_t current_mask_byte = mask_view.get<uint8_t>(idx / 8);
        bool mask_active = vm ? 1 : current_mask_byte & (1 << idx % 8);
        for(unsigned s_idx = 0; s_idx < segment_size; s_idx++) {
            // base + selected vd + current_elem + current_segment
            uint8_t* dest_elem = V + (vd * VLEN / 8) + (eew / 8 * idx) + (VLEN / 8 * s_idx * emul_stride);
            assert(dest_elem <= V + VLEN * 32 / 8);
            if(mask_active) {
                uint64_t addr = base_addr + (eew / 8) * (idx * segment_size + s_idx) * stride;
                if(!load_store_fn(core, addr, eew / 8, dest_elem))
                    return vstart;
            } else {
                // masked-off body element: left undisturbed, which satisfies both the mask-undisturbed
                // and mask-agnostic policies; note that an overwrite here would only touch the first
                // 8 bits, so eew > 8 would not work correctly
                *dest_elem = vtype.vma() ? *dest_elem : *dest_elem;
            }
        }
    }
    // elements w/ index larger than num_elem are in the tail (fractional LMUL)
    // elements w/ index larger than vl are in the tail
    for(unsigned idx = std::min(num_elem, vl); idx < VLEN / 8; idx++) {
        vstart = idx;
        for(unsigned s_idx = 0; s_idx < segment_size; s_idx++) {
            // base + selected vd + current_elem + current_segment
            uint8_t* dest_elem = V + (vd * VLEN / 8) + (eew / 8 * idx) + (VLEN / 8 * s_idx * emul_stride);
            assert(dest_elem <= V + VLEN * 32 / 8);
            // tail element: left undisturbed, which satisfies both the tail-undisturbed and
            // tail-agnostic policies; note that an overwrite here would only touch the first
            // 8 bits, so eew > 8 would not work correctly
            *dest_elem = vtype.vta() ? *dest_elem : *dest_elem;
        }
    }
    return 0;
}
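// Illustrative call for an unmasked, unit-stride byte load (the concrete values are assumptions
// chosen for the example, not taken from generated ISS code); note that `stride` is applied in
// element-size units by the address computation above:
//   uint64_t faulting_elem = vector_load_store(core, softvec_read, V, VLEN,
//                                              /*vd*/ 1, base_addr, vl, /*vstart*/ 0, vtype,
//                                              /*vm*/ true, /*elem_byte_size*/ 1,
//                                              /*EMUL_pow*/ 0, /*segment_size*/ 1, /*stride*/ 1);
//   // faulting_elem == 0 means the whole load completed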
} // namespace softvector