Adds integer-extension and add/subtract-with-carry vector instructions

This commit is contained in:
Eyck-Alexander Jentzsch 2025-02-13 19:42:12 +01:00
parent b1ceac2c2a
commit af3e76cc98
3 changed files with 237 additions and 54 deletions

View File

@ -188,38 +188,30 @@ if(vector != null) {%>
uint64_t vsxseg(uint8_t* V, uint8_t vs3, uint8_t vs2, uint64_t rs1_val, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t elem_byte_size, uint16_t elem_count, uint8_t segment_size, bool ordered){
return softvector::vector_load_store_index(this->get_arch(), softvector::softvec_write, V, traits::VLEN, traits::XLEN, vs3, vs2, rs1_val, vl, vstart, vtype, vm, elem_byte_size, elem_count, segment_size, ordered);
}
void vector_vector_op(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, uint8_t vs1, uint8_t sew_val){
void vector_vector_op(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, uint8_t vs1, uint8_t sew_val, int8_t carry = 0){
switch(sew_val){
case 0b000:
softvector::vector_vector_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
break;
return softvector::vector_vector_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1, static_cast<softvector::carry_t>(carry));
case 0b001:
softvector::vector_vector_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
break;
return softvector::vector_vector_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1, static_cast<softvector::carry_t>(carry));
case 0b010:
softvector::vector_vector_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
break;
return softvector::vector_vector_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1, static_cast<softvector::carry_t>(carry));
case 0b011:
softvector::vector_vector_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
break;
return softvector::vector_vector_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1, static_cast<softvector::carry_t>(carry));
default:
throw new std::runtime_error("Unsupported sew bit value");
}
}
void vector_imm_op(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, int64_t imm, uint8_t sew_val){
void vector_imm_op(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, int64_t imm, uint8_t sew_val, int8_t carry = 0){
switch(sew_val){
case 0b000:
softvector::vector_imm_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm, static_cast<softvector::carry_t>(carry));
case 0b001:
softvector::vector_imm_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm, static_cast<softvector::carry_t>(carry));
case 0b010:
softvector::vector_imm_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm, static_cast<softvector::carry_t>(carry));
case 0b011:
softvector::vector_imm_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm, static_cast<softvector::carry_t>(carry));
default:
throw new std::runtime_error("Unsupported sew bit value");
}
@ -227,14 +219,11 @@ if(vector != null) {%>
void vector_vector_wv(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, uint8_t vs1, uint8_t sew_val){
switch(sew_val){
case 0b000:
softvector::vector_vector_op<${vlen}, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1 );
break;
return softvector::vector_vector_op<${vlen}, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1 );
case 0b001:
softvector::vector_vector_op<${vlen}, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1 );
break;
return softvector::vector_vector_op<${vlen}, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1 );
case 0b010:
softvector::vector_vector_op<${vlen}, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1 );
break;
return softvector::vector_vector_op<${vlen}, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1 );
case 0b011: // would widen to 128 bits
default:
throw new std::runtime_error("Unsupported sew bit value");
@ -243,14 +232,11 @@ if(vector != null) {%>
void vector_imm_wv(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, int64_t imm, uint8_t sew_val){
switch(sew_val){
case 0b000:
softvector::vector_imm_op<${vlen}, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
case 0b001:
softvector::vector_imm_op<${vlen}, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
case 0b010:
softvector::vector_imm_op<${vlen}, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
case 0b011: // would widen to 128 bits
default:
throw new std::runtime_error("Unsupported sew bit value");
@ -259,14 +245,11 @@ if(vector != null) {%>
void vector_vector_ww(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, uint8_t vs1, uint8_t sew_val){
switch(sew_val){
case 0b000:
softvector::vector_vector_op<${vlen}, uint16_t, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
break;
return softvector::vector_vector_op<${vlen}, uint16_t, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
case 0b001:
softvector::vector_vector_op<${vlen}, uint32_t, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
break;
return softvector::vector_vector_op<${vlen}, uint32_t, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
case 0b010:
softvector::vector_vector_op<${vlen}, uint64_t, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
break;
return softvector::vector_vector_op<${vlen}, uint64_t, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
case 0b011: // would widen to 128 bits
default:
throw new std::runtime_error("Unsupported sew bit value");
@ -275,19 +258,80 @@ if(vector != null) {%>
void vector_imm_ww(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, int64_t imm, uint8_t sew_val){
switch(sew_val){
case 0b000:
softvector::vector_imm_op<${vlen}, uint16_t, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint16_t, uint16_t, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
case 0b001:
softvector::vector_imm_op<${vlen}, uint32_t, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint32_t, uint32_t, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
case 0b010:
softvector::vector_imm_op<${vlen}, uint64_t, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
break;
return softvector::vector_imm_op<${vlen}, uint64_t, uint64_t, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
case 0b011: // would widen to 128 bits
default:
throw new std::runtime_error("Unsupported sew bit value");
}
}
void vector_extend(uint8_t* V, uint8_t unary_op, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, uint8_t vd, uint8_t vs2, uint8_t target_sew_pow, uint8_t frac_pow){
    // Dispatch a vector integer extension (vzext.vf*/vsext.vf*) to the unary-op
    // instantiation with the matching destination/source element types.
    // target_sew_pow: log2 of the destination element width in bits (4 -> 16, 5 -> 32, 6 -> 64).
    // frac_pow: log2 of the narrowing fraction (1 -> vf2, 2 -> vf4, 3 -> vf8); the source
    // element is the destination width divided by 2^frac_pow.
    // Throws std::runtime_error (by value, not via `new`, so nothing leaks and
    // `catch(const std::exception&)` handlers see it) for unsupported combinations.
    switch(target_sew_pow){
    case 4: // 16-bit destination elements; only vf2 (8 -> 16) is possible
        if(frac_pow != 1) throw std::runtime_error("Unsupported frac_pow");
        return softvector::vector_unary_op<${vlen}, uint16_t, uint8_t>(V, unary_op, vl, vstart, vtype, vm, vd, vs2);
    case 5: // 32-bit destination elements
        switch(frac_pow){
        case 1:
            return softvector::vector_unary_op<${vlen}, uint32_t, uint16_t>(V, unary_op, vl, vstart, vtype, vm, vd, vs2);
        case 2:
            return softvector::vector_unary_op<${vlen}, uint32_t, uint8_t>(V, unary_op, vl, vstart, vtype, vm, vd, vs2);
        default:
            throw std::runtime_error("Unsupported frac_pow");
        }
    case 6: // 64-bit destination elements
        switch(frac_pow){
        case 1:
            return softvector::vector_unary_op<${vlen}, uint64_t, uint32_t>(V, unary_op, vl, vstart, vtype, vm, vd, vs2);
        case 2:
            return softvector::vector_unary_op<${vlen}, uint64_t, uint16_t>(V, unary_op, vl, vstart, vtype, vm, vd, vs2);
        case 3:
            return softvector::vector_unary_op<${vlen}, uint64_t, uint8_t>(V, unary_op, vl, vstart, vtype, vm, vd, vs2);
        default:
            throw std::runtime_error("Unsupported frac_pow");
        }
    default:
        throw std::runtime_error("Unsupported target_sew_pow");
    }
}
void vector_vector_m(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, uint8_t vd, uint8_t vs2, uint8_t vs1, uint8_t sew_val, int8_t carry){
    // Carry/borrow vector-vector form: forwards to the generic dispatcher with the
    // mask flag forced off (these encodings carry no vm bit of their own).
    constexpr bool unmasked = false;
    vector_vector_op(V, funct, vl, vstart, vtype, unmasked, vd, vs2, vs1, sew_val, carry);
}
void vector_imm_m(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, uint8_t vd, uint8_t vs2, int64_t imm, uint8_t sew_val, int8_t carry){
    // Carry/borrow vector-immediate form: forwards to the generic dispatcher with
    // the mask flag forced off (these encodings carry no vm bit of their own).
    constexpr bool unmasked = false;
    vector_imm_op(V, funct, vl, vstart, vtype, unmasked, vd, vs2, imm, sew_val, carry);
}
void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, unsigned vd, unsigned vs2, unsigned vs1, uint8_t sew_val){
    // Dispatch a mask-producing vector-vector operation (vmadc/vmsbc style) to the
    // element-type instantiation selected by the SEW encoding
    // (0b000 -> 8 bit, 0b001 -> 16 bit, 0b010 -> 32 bit, 0b011 -> 64 bit).
    // Throws std::runtime_error (by value, not via `new`) on reserved SEW encodings.
    switch(sew_val){
    case 0b000:
        return softvector::mask_vector_vector_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
    case 0b001:
        return softvector::mask_vector_vector_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
    case 0b010:
        return softvector::mask_vector_vector_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
    case 0b011:
        return softvector::mask_vector_vector_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
    default:
        throw std::runtime_error("Unsupported sew bit value");
    }
}
void mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, unsigned vd, unsigned vs2, int64_t imm, uint8_t sew_val){
    // Dispatch a mask-producing vector-immediate operation (vmadc.vi/vmsbc style)
    // to the element-type instantiation selected by the SEW encoding
    // (0b000 -> 8 bit, 0b001 -> 16 bit, 0b010 -> 32 bit, 0b011 -> 64 bit).
    // Throws std::runtime_error (by value, not via `new`) on reserved SEW encodings.
    switch(sew_val){
    case 0b000:
        return softvector::mask_vector_imm_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
    case 0b001:
        return softvector::mask_vector_imm_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
    case 0b010:
        return softvector::mask_vector_imm_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
    case 0b011:
        return softvector::mask_vector_imm_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
    default:
        throw std::runtime_error("Unsupported sew bit value");
    }
}
<%}%>
uint64_t fetch_count{0};
uint64_t tval{0};

View File

@ -56,8 +56,9 @@ struct vtype_t {
struct vmask_view {
uint8_t* start;
size_t elem_count;
bool operator[](size_t);
bool operator[](size_t) const;
};
enum class carry_t { NO_CARRY = 0, ADD_CARRY = 1, SUB_CARRY = 2 };
vmask_view read_vmask(uint8_t* V, uint16_t VLEN, uint16_t elem_count, uint8_t reg_idx = 0);
template <unsigned VLEN> vmask_view read_vmask(uint8_t* V, uint16_t elem_count, uint8_t reg_idx = 0);
@ -73,10 +74,18 @@ uint64_t vector_load_store_index(void* core, std::function<bool(void*, uint64_t,
uint8_t segment_size, bool ordered);
template <unsigned VLEN, typename dest_elem_t, typename src2_elem_t = dest_elem_t, typename src1_elem_t = src2_elem_t>
void vector_vector_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
unsigned vs1);
unsigned vs1, carry_t carry = carry_t::NO_CARRY);
template <unsigned VLEN, typename dest_elem_t, typename src2_elem_t = dest_elem_t, typename src1_elem_t = src2_elem_t>
void vector_imm_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
typename std::make_signed<src1_elem_t>::type imm);
typename std::make_signed<src1_elem_t>::type imm, carry_t carry = carry_t::NO_CARRY);
template <unsigned VLEN, typename dest_elem_t, typename src2_elem_t = dest_elem_t>
void vector_unary_op(uint8_t* V, unsigned unary_op, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2);
template <unsigned VLEN, typename elem_t>
void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
unsigned vs1);
template <unsigned VLEN, typename elem_t>
void mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
typename std::make_signed<elem_t>::type imm);
} // namespace softvector
#include "vm/vector_functions.hpp"
#endif /* _VM_VECTOR_FUNCTIONS_H_ */

View File

@ -34,6 +34,7 @@
#pragma once
#include "vm/vector_functions.h"
#include <functional>
#include <limits>
#include <stdexcept>
#include <type_traits>
#ifndef _VM_VECTOR_FUNCTIONS_H_
@ -68,6 +69,7 @@ template <typename dest_elem_t, typename src2_elem_t = dest_elem_t, typename src
std::function<dest_elem_t(src2_elem_t, src1_elem_t)> get_funct(unsigned funct) {
switch(funct) {
case 0b000000: // VADD
case 0b010000: // VADC
case 0b110000: // VWADDU
case 0b110100: // VWADDU.W
return [](src2_elem_t vs2, src1_elem_t vs1) { return vs2 + vs1; };
@ -89,6 +91,7 @@ std::function<dest_elem_t(src2_elem_t, src1_elem_t)> get_funct(unsigned funct) {
return static_cast<typename std::make_signed_t<dest_elem_t>>(static_cast<typename std::make_signed_t<src2_elem_t>>(vs2) +
static_cast<typename std::make_signed_t<src1_elem_t>>(vs1));
};
case 0b010010: // VSBC
case 0b110011: // VWSUB
case 0b110111: // VWSUB.W
return [](src2_elem_t vs2, src1_elem_t vs1) {
@ -101,7 +104,7 @@ std::function<dest_elem_t(src2_elem_t, src1_elem_t)> get_funct(unsigned funct) {
}
template <unsigned VLEN, typename dest_elem_t, typename src2_elem_t, typename src1_elem_t>
void vector_vector_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
unsigned vs1) {
unsigned vs1, carry_t carry) {
uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
auto vs1_view = get_vreg<VLEN, src1_elem_t>(V, vs1, elem_count);
@ -110,12 +113,22 @@ void vector_vector_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart,
auto fn = get_funct<dest_elem_t, src2_elem_t, src1_elem_t>(funct6);
// elements w/ index smaller than vstart are in the prestart and get skipped
// body is from vstart to min(elem_count, vl)
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
bool mask_active = vm ? 1 : mask_reg[idx];
if(mask_active) {
vd_view[idx] = fn(vs2_view[idx], vs1_view[idx]);
} else {
vd_view[idx] = vtype.vma() ? vd_view[idx] : vd_view[idx];
if(carry == carry_t::NO_CARRY) {
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
bool mask_active = vm ? 1 : mask_reg[idx];
if(mask_active) {
vd_view[idx] = fn(vs2_view[idx], vs1_view[idx]);
} else {
vd_view[idx] = vtype.vma() ? vd_view[idx] : vd_view[idx];
}
}
} else if(carry == carry_t::SUB_CARRY) {
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
vd_view[idx] = fn(vs2_view[idx], vs1_view[idx]) - mask_reg[idx];
}
} else {
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
vd_view[idx] = fn(vs2_view[idx], vs1_view[idx]) + mask_reg[idx];
}
}
// elements w/ index larger than elem_count are in the tail (fractional LMUL)
@ -128,7 +141,7 @@ void vector_vector_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart,
}
template <unsigned VLEN, typename dest_elem_t, typename src2_elem_t, typename src1_elem_t>
void vector_imm_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
typename std::make_signed<src1_elem_t>::type imm) {
typename std::make_signed<src1_elem_t>::type imm, carry_t carry) {
uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
auto vs2_view = get_vreg<VLEN, src2_elem_t>(V, vs2, elem_count);
@ -136,10 +149,127 @@ void vector_imm_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart, vt
auto fn = get_funct<dest_elem_t, src2_elem_t, src1_elem_t>(funct6);
// elements w/ index smaller than vstart are in the prestart and get skipped
// body is from vstart to min(elem_count, vl)
if(carry == carry_t::NO_CARRY) {
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
bool mask_active = vm ? 1 : mask_reg[idx];
if(mask_active) {
vd_view[idx] = fn(vs2_view[idx], imm);
} else {
vd_view[idx] = vtype.vma() ? vd_view[idx] : vd_view[idx];
}
}
} else if(carry == carry_t::SUB_CARRY) {
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
auto val1 = fn(vs2_view[idx], imm);
auto val2 = static_cast<std::make_signed_t<dest_elem_t>>(mask_reg[idx]);
auto diff = val1 - val2;
vd_view[idx] = fn(vs2_view[idx], imm) - mask_reg[idx];
}
} else {
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
vd_view[idx] = fn(vs2_view[idx], imm) + mask_reg[idx];
}
}
// elements w/ index larger than elem_count are in the tail (fractional LMUL)
// elements w/ index larger than vl are in the tail
unsigned maximum_elems = VLEN * vtype.lmul() / (sizeof(dest_elem_t) * 8);
for(unsigned idx = std::min(elem_count, vl); idx < maximum_elems; idx++) {
vd_view[idx] = vtype.vta() ? vd_view[idx] : vd_view[idx];
}
return;
}
template <typename elem_t> std::function<bool(elem_t, elem_t, elem_t)> get_mask_funct(unsigned funct) {
    // Returns the per-element predicate for mask-producing carry instructions.
    // The predicate receives (vs2, vs1, carry-in) and yields the carry/borrow-out bit.
    // Throws std::runtime_error (by value, not via `new`) for unknown encodings.
    switch(funct) {
    case 0b010001: // VMADC: carry-out of the unsigned addition vs2 + vs1 + carry
        return [](elem_t vs2, elem_t vs1, elem_t carry) {
            // Unsigned overflow occurred iff the wrapped sum (with or without the
            // carry-in) is smaller than the larger operand.
            return static_cast<elem_t>(vs2 + vs1 + carry) < std::max(vs1, vs2) || static_cast<elem_t>(vs2 + vs1) < std::max(vs1, vs2);
        };
    case 0b010011: // VMSBC: borrow-out of the unsigned subtraction vs2 - (vs1 + carry)
        return [](elem_t vs2, elem_t vs1, elem_t carry) {
            // Borrow iff vs2 < vs1 + carry; the second term covers vs1 + carry
            // wrapping to zero (vs1 == max with carry-in always borrows).
            return vs2 < static_cast<elem_t>(vs1 + carry) || (vs1 == std::numeric_limits<elem_t>::max() && carry);
        };
    default:
        throw std::runtime_error("Unknown funct in get_mask_funct");
    }
}
template <unsigned VLEN, typename elem_t>
void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
                           unsigned vs1) {
    // Executes a mask-producing vector-vector instruction (vmadc/vmsbc): for each
    // body element the predicate from get_mask_funct computes one bit that is
    // written into mask register vd.
    uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
    vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
    auto vs1_view = get_vreg<VLEN, elem_t>(V, vs1, elem_count);
    auto vs2_view = get_vreg<VLEN, elem_t>(V, vs2, elem_count);
    vmask_view vd_mask_view = read_vmask<VLEN>(V, elem_count, vd);
    auto fn = get_mask_funct<elem_t>(funct);
    // elements w/ index smaller than vstart are in the prestart and get skipped;
    // the body runs from vstart to min(elem_count, vl)
    for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
        // vm set means no carry-in is consumed; otherwise v0.mask[idx] supplies it
        elem_t carry = vm ? 0 : mask_reg[idx];
        bool new_bit_value = fn(vs2_view[idx], vs1_view[idx], carry);
        uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
        unsigned cur_bit = idx % 8;
        // clear the destination bit, then OR in the freshly computed value
        *cur_mask_byte_addr = (*cur_mask_byte_addr & ~(1U << cur_bit)) | (static_cast<unsigned>(new_bit_value) << cur_bit);
    }
    // tail elements (idx >= min(elem_count, vl)) of a mask destination are always
    // treated tail-agnostic, so they are deliberately left untouched
}
template <unsigned VLEN, typename elem_t>
void mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
                        typename std::make_signed<elem_t>::type imm) {
    // Executes a mask-producing vector-immediate instruction (vmadc.vi style): for
    // each body element the predicate from get_mask_funct computes one bit that is
    // written into mask register vd.
    uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
    vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
    auto vs2_view = get_vreg<VLEN, elem_t>(V, vs2, elem_count);
    vmask_view vd_mask_view = read_vmask<VLEN>(V, elem_count, vd);
    auto fn = get_mask_funct<elem_t>(funct);
    // elements w/ index smaller than vstart are in the prestart and get skipped;
    // the body runs from vstart to min(elem_count, vl)
    for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
        // vm set means no carry-in is consumed; otherwise v0.mask[idx] supplies it
        elem_t carry = vm ? 0 : mask_reg[idx];
        bool new_bit_value = fn(vs2_view[idx], imm, carry);
        uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
        unsigned cur_bit = idx % 8;
        // clear the destination bit, then OR in the freshly computed value
        *cur_mask_byte_addr = (*cur_mask_byte_addr & ~(1U << cur_bit)) | (static_cast<unsigned>(new_bit_value) << cur_bit);
    }
    // tail elements (idx >= min(elem_count, vl)) of a mask destination are always
    // treated tail-agnostic, so they are deliberately left untouched
}
template <typename dest_elem_t, typename src2_elem_t = dest_elem_t>
std::function<dest_elem_t(src2_elem_t)> get_unary_fn(unsigned unary_op) {
    // Returns the per-element function for vector unary integer extension.
    // The actual widening happens when the returned value converts to dest_elem_t;
    // this function only selects signed vs. unsigned source interpretation.
    // Throws std::runtime_error (by value, not via `new`) for unknown encodings.
    switch(unary_op) {
    case 0b00111: // vsext.vf2
    case 0b00101: // vsext.vf4
    case 0b00011: // vsext.vf8
        // cast to the signed source type so conversion to dest_elem_t sign-extends
        return [](src2_elem_t vs2) { return static_cast<typename std::make_signed_t<src2_elem_t>>(vs2); };
    case 0b00110: // vzext.vf2
    case 0b00100: // vzext.vf4
    case 0b00010: // vzext.vf8
        // unsigned source value zero-extends on conversion to dest_elem_t
        return [](src2_elem_t vs2) { return vs2; };
    default:
        throw std::runtime_error("Unknown funct in get_unary_fn");
    }
}
template <unsigned VLEN, typename dest_elem_t, typename src2_elem_t>
void vector_unary_op(uint8_t* V, unsigned unary_op, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2) {
uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
auto vs2_view = get_vreg<VLEN, src2_elem_t>(V, vs2, elem_count);
auto vd_view = get_vreg<VLEN, dest_elem_t>(V, vd, elem_count);
auto fn = get_unary_fn<dest_elem_t, src2_elem_t>(unary_op);
// elements w/ index smaller than vstart are in the prestart and get skipped
// body is from vstart to min(elem_count, vl)
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
bool mask_active = vm ? 1 : mask_reg[idx];
if(mask_active) {
vd_view[idx] = fn(vs2_view[idx], imm);
vd_view[idx] = fn(vs2_view[idx]);
} else {
vd_view[idx] = vtype.vma() ? vd_view[idx] : vd_view[idx];
}