Renames mask-producing operations to distinguish them from vector integer compare instructions
This commit is contained in:
parent
feaff8c4a5
commit
0027946f90
@ -303,6 +303,34 @@ if(vector != null) {%>
|
||||
// Thin wrapper: forwards an immediate-operand vector instruction to
// vector_imm_op with the argument after vtype fixed to 0.
// NOTE(review): the literal 0 occupies the position that is `vm` in the
// sibling carry_mask_* signatures — presumably this forces the masked
// (vm=0) form; confirm against vector_imm_op's declaration.
void vector_imm_m(uint8_t* V, uint8_t funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, uint8_t vd, uint8_t vs2, int64_t imm, uint8_t sew_val, int8_t carry){
    vector_imm_op(V, funct, vl, vstart, vtype, 0, vd, vs2, imm, sew_val, carry);
}
|
||||
void carry_mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, unsigned vd, unsigned vs2, unsigned vs1, uint8_t sew_val){
|
||||
switch(sew_val){
|
||||
case 0b000:
|
||||
return softvector::carry_mask_vector_vector_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
|
||||
case 0b001:
|
||||
return softvector::carry_mask_vector_vector_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
|
||||
case 0b010:
|
||||
return softvector::carry_mask_vector_vector_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
|
||||
case 0b011:
|
||||
return softvector::carry_mask_vector_vector_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, vs1);
|
||||
default:
|
||||
throw new std::runtime_error("Unsupported sew bit value");
|
||||
}
|
||||
}
|
||||
void carry_mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, unsigned vd, unsigned vs2, int64_t imm, uint8_t sew_val){
|
||||
switch(sew_val){
|
||||
case 0b000:
|
||||
return softvector::carry_mask_vector_imm_op<${vlen}, uint8_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
|
||||
case 0b001:
|
||||
return softvector::carry_mask_vector_imm_op<${vlen}, uint16_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
|
||||
case 0b010:
|
||||
return softvector::carry_mask_vector_imm_op<${vlen}, uint32_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
|
||||
case 0b011:
|
||||
return softvector::carry_mask_vector_imm_op<${vlen}, uint64_t>(V, funct, vl, vstart, vtype, vm, vd, vs2, imm);
|
||||
default:
|
||||
throw new std::runtime_error("Unsupported sew bit value");
|
||||
}
|
||||
}
|
||||
void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, softvector::vtype_t vtype, bool vm, unsigned vd, unsigned vs2, unsigned vs1, uint8_t sew_val){
|
||||
switch(sew_val){
|
||||
case 0b000:
|
||||
|
@ -81,6 +81,11 @@ void vector_imm_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart, vt
|
||||
template <unsigned VLEN, typename dest_elem_t, typename src2_elem_t = dest_elem_t>
|
||||
void vector_unary_op(uint8_t* V, unsigned unary_op, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2);
|
||||
template <unsigned VLEN, typename elem_t>
|
||||
void carry_mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd,
|
||||
unsigned vs2, unsigned vs1);
|
||||
template <unsigned VLEN, typename elem_t>
|
||||
void carry_mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
|
||||
typename std::make_signed<elem_t>::type imm);
|
||||
void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
|
||||
unsigned vs1);
|
||||
template <unsigned VLEN, typename elem_t>
|
||||
|
@ -194,7 +194,7 @@ void vector_imm_op(uint8_t* V, unsigned funct6, uint64_t vl, uint64_t vstart, vt
|
||||
}
|
||||
return;
|
||||
}
|
||||
template <typename elem_t> std::function<bool(elem_t, elem_t, elem_t)> get_mask_funct(unsigned funct) {
|
||||
template <typename elem_t> std::function<bool(elem_t, elem_t, elem_t)> get_carry_mask_funct(unsigned funct) {
|
||||
switch(funct) {
|
||||
case 0b010001: // VMADC
|
||||
return [](elem_t vs2, elem_t vs1, elem_t carry) {
|
||||
@ -205,18 +205,18 @@ template <typename elem_t> std::function<bool(elem_t, elem_t, elem_t)> get_mask_
|
||||
return vs2 < static_cast<elem_t>(vs1 + carry) || (vs1 == std::numeric_limits<elem_t>::max() && carry);
|
||||
};
|
||||
default:
|
||||
throw new std::runtime_error("Uknown funct in get_mask_funct");
|
||||
throw new std::runtime_error("Uknown funct in get_carry_mask_funct");
|
||||
}
|
||||
}
|
||||
template <unsigned VLEN, typename elem_t>
|
||||
void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
|
||||
unsigned vs1) {
|
||||
void carry_mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd,
|
||||
unsigned vs2, unsigned vs1) {
|
||||
uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
|
||||
vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
|
||||
auto vs1_view = get_vreg<VLEN, elem_t>(V, vs1, elem_count);
|
||||
auto vs2_view = get_vreg<VLEN, elem_t>(V, vs2, elem_count);
|
||||
vmask_view vd_mask_view = read_vmask<VLEN>(V, elem_count, vd);
|
||||
auto fn = get_mask_funct<elem_t>(funct);
|
||||
auto fn = get_carry_mask_funct<elem_t>(funct);
|
||||
// elements w/ index smaller than vstart are in the prestart and get skipped
|
||||
// body is from vstart to min(elem_count, vl)
|
||||
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
|
||||
@ -234,13 +234,13 @@ void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vst
|
||||
return;
|
||||
}
|
||||
template <unsigned VLEN, typename elem_t>
|
||||
void mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
|
||||
typename std::make_signed<elem_t>::type imm) {
|
||||
void carry_mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
|
||||
typename std::make_signed<elem_t>::type imm) {
|
||||
uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
|
||||
vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
|
||||
auto vs2_view = get_vreg<VLEN, elem_t>(V, vs2, elem_count);
|
||||
vmask_view vd_mask_view = read_vmask<VLEN>(V, elem_count, vd);
|
||||
auto fn = get_mask_funct<elem_t>(funct);
|
||||
auto fn = get_carry_mask_funct<elem_t>(funct);
|
||||
// elements w/ index smaller than vstart are in the prestart and get skipped
|
||||
// body is from vstart to min(elem_count, vl)
|
||||
for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
|
||||
@ -257,6 +257,100 @@ void mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart
|
||||
}
|
||||
return;
|
||||
}
|
||||
// Returns the predicate implementing the vector integer compare selected by
// funct6 (VMSEQ..VMSGT). elem_t is the unsigned element type; the signed
// variants (VMSLT/VMSLE/VMSGT) reinterpret both operands as signed before
// comparing.
//
// @param funct  funct6 field of the compare instruction
// @return       bool(vs2_elem, vs1_elem) predicate for one element pair
// @throws std::runtime_error if funct is not a recognized compare encoding
template <typename elem_t> std::function<bool(elem_t, elem_t)> get_mask_funct(unsigned funct) {
    switch(funct) {
    case 0b011000: // VMSEQ
        return [](elem_t vs2, elem_t vs1) { return vs2 == vs1; };
    case 0b011001: // VMSNE
        return [](elem_t vs2, elem_t vs1) { return vs2 != vs1; };
    case 0b011010: // VMSLTU
        return [](elem_t vs2, elem_t vs1) { return vs2 < vs1; };
    case 0b011011: // VMSLT
        return [](elem_t vs2, elem_t vs1) {
            return static_cast<std::make_signed_t<elem_t>>(vs2) < static_cast<std::make_signed_t<elem_t>>(vs1);
        };
    case 0b011100: // VMSLEU
        return [](elem_t vs2, elem_t vs1) { return vs2 <= vs1; };
    case 0b011101: // VMSLE
        return [](elem_t vs2, elem_t vs1) {
            return static_cast<std::make_signed_t<elem_t>>(vs2) <= static_cast<std::make_signed_t<elem_t>>(vs1);
        };
    case 0b011110: // VMSGTU
        return [](elem_t vs2, elem_t vs1) { return vs2 > vs1; };
    case 0b011111: // VMSGT
        return [](elem_t vs2, elem_t vs1) {
            return static_cast<std::make_signed_t<elem_t>>(vs2) > static_cast<std::make_signed_t<elem_t>>(vs1);
        };
    default:
        // Throw by value, not `throw new ...`: throwing a pointer leaks the
        // exception object and is not caught by catch(const std::exception&).
        // Also fixes the "Uknown" typo in the message.
        throw std::runtime_error("Unknown funct in get_mask_funct");
    }
}
|
||||
// Element-wise mask-producing compare of two vector registers:
// for each active body element i, vd.mask[i] = fn(vs2[i], vs1[i]), where fn
// is the integer compare (VMSEQ..VMSGT) selected by funct via get_mask_funct.
//
// @param V      pointer to the vector register file backing store
// @param funct  funct6 value selecting the compare operation
// @param vl     vector length: number of body elements to process
// @param vstart first body element; elements below it are prestart and untouched
// @param vtype  current vtype (supplies sew/lmul and the vta/vma policies)
// @param vm     true = unmasked operation (every body element is active)
// @param vd     destination mask register index
// @param vs2    first source vector register index
// @param vs1    second source vector register index
template <unsigned VLEN, typename elem_t>
void mask_vector_vector_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
                           unsigned vs1) {
    // number of elements addressable under the current sew/lmul setting
    uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
    vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
    auto vs1_view = get_vreg<VLEN, elem_t>(V, vs1, elem_count);
    auto vs2_view = get_vreg<VLEN, elem_t>(V, vs2, elem_count);
    vmask_view vd_mask_view = read_vmask<VLEN>(V, elem_count, vd);
    auto fn = get_mask_funct<elem_t>(funct);
    // elements w/ index smaller than vstart are in the prestart and get skipped
    // body is from vstart to min(elem_count, vl)
    for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
        bool mask_active = vm ? 1 : mask_reg[idx];
        if(mask_active) {
            bool new_bit_value = fn(vs2_view[idx], vs1_view[idx]);
            uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
            unsigned cur_bit = idx % 8;
            // clear the destination bit, then OR in the compare result
            *cur_mask_byte_addr = *cur_mask_byte_addr & ~(1U << cur_bit) | static_cast<unsigned>(new_bit_value) << cur_bit;
        } else {
            uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
            unsigned cur_bit = idx % 8;
            // NOTE(review): both arms of the ternary are identical, so
            // masked-off elements are always left undisturbed; a mask-agnostic
            // (vma) policy would also permit writing 1s here — confirm the
            // no-op is intentional.
            *cur_mask_byte_addr = vtype.vma() ? *cur_mask_byte_addr : *cur_mask_byte_addr;
        }
    }
    // elements w/ index larger than elem_count are in the tail (fractional LMUL)
    // elements w/ index larger than vl are in the tail
    for(unsigned idx = std::min(elem_count, vl); idx < VLEN; idx++) {
        uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
        unsigned cur_bit = idx % 8;
        // NOTE(review): no-op either way — tail bits stay undisturbed
        // regardless of vta; tail-agnostic could write 1s — confirm.
        *cur_mask_byte_addr = vtype.vta() ? *cur_mask_byte_addr : *cur_mask_byte_addr;
    }
    return;
}
|
||||
// Element-wise mask-producing compare of a vector register against a
// sign-extended immediate: for each active body element i,
// vd.mask[i] = fn(vs2[i], imm), with fn selected by funct via get_mask_funct.
//
// @param V      pointer to the vector register file backing store
// @param funct  funct6 value selecting the compare operation
// @param vl     vector length: number of body elements to process
// @param vstart first body element; elements below it are prestart and untouched
// @param vtype  current vtype (supplies sew/lmul and the vta/vma policies)
// @param vm     true = unmasked operation (every body element is active)
// @param vd     destination mask register index
// @param vs2    source vector register index
// @param imm    immediate operand, as the signed counterpart of elem_t
template <unsigned VLEN, typename elem_t>
void mask_vector_imm_op(uint8_t* V, unsigned funct, uint64_t vl, uint64_t vstart, vtype_t vtype, bool vm, unsigned vd, unsigned vs2,
                        typename std::make_signed<elem_t>::type imm) {
    // number of elements addressable under the current sew/lmul setting
    uint64_t elem_count = VLEN * vtype.lmul() / vtype.sew();
    vmask_view mask_reg = read_vmask<VLEN>(V, elem_count);
    auto vs2_view = get_vreg<VLEN, elem_t>(V, vs2, elem_count);
    vmask_view vd_mask_view = read_vmask<VLEN>(V, elem_count, vd);
    auto fn = get_mask_funct<elem_t>(funct);
    // elements w/ index smaller than vstart are in the prestart and get skipped
    // body is from vstart to min(elem_count, vl)
    for(unsigned idx = vstart; idx < std::min(elem_count, vl); idx++) {
        bool mask_active = vm ? 1 : mask_reg[idx];
        if(mask_active) {
            // imm converts to elem_t at the call — the predicate compares in
            // the element's (unsigned or signed-cast) domain
            bool new_bit_value = fn(vs2_view[idx], imm);
            uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
            unsigned cur_bit = idx % 8;
            // clear the destination bit, then OR in the compare result
            *cur_mask_byte_addr = *cur_mask_byte_addr & ~(1U << cur_bit) | static_cast<unsigned>(new_bit_value) << cur_bit;
        } else {
            uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
            unsigned cur_bit = idx % 8;
            // NOTE(review): both arms of the ternary are identical, so
            // masked-off elements are always left undisturbed; a mask-agnostic
            // (vma) policy would also permit writing 1s here — confirm the
            // no-op is intentional.
            *cur_mask_byte_addr = vtype.vma() ? *cur_mask_byte_addr : *cur_mask_byte_addr;
        }
    }
    // elements w/ index larger than elem_count are in the tail (fractional LMUL)
    // elements w/ index larger than vl are in the tail
    for(unsigned idx = std::min(elem_count, vl); idx < VLEN; idx++) {
        uint8_t* cur_mask_byte_addr = vd_mask_view.start + idx / 8;
        unsigned cur_bit = idx % 8;
        // NOTE(review): no-op either way — tail bits stay undisturbed
        // regardless of vta; tail-agnostic could write 1s — confirm.
        *cur_mask_byte_addr = vtype.vta() ? *cur_mask_byte_addr : *cur_mask_byte_addr;
    }
    return;
}
|
||||
template <typename dest_elem_t, typename src2_elem_t = dest_elem_t>
|
||||
std::function<dest_elem_t(src2_elem_t)> get_unary_fn(unsigned unary_op) {
|
||||
switch(unary_op) {
|
||||
|
Loading…
x
Reference in New Issue
Block a user