wip checkin
This commit is contained in:
parent 9fdbc3ff38, commit ad79a28705
@ -79,21 +79,36 @@ public:
}

protected:
using vm_base<ARCH>::get_reg_ptr;
using super::get_ptr_for;
using super::get_reg;
using super::get_reg_for;
using super::load_reg_from_mem;
using super::write_reg_to_mem;
using super::gen_ext;
using super::gen_read_mem;
using super::gen_write_mem;
using super::gen_wait;
using super::gen_leave;
using super::gen_operation;

using this_class = vm_impl<ARCH>;
using compile_func = continuation_e (this_class::*)(virt_addr_t&, code_word_t, jit_holder&);

continuation_e gen_single_inst_behavior(virt_addr_t&, unsigned int &, jit_holder&) override;
void gen_block_prologue(jit_holder& jh) override;
void gen_block_epilogue(jit_holder& jh) override;
inline const char *name(size_t index){return traits::reg_aliases.at(index);}

void gen_instr_prologue(jit_holder& jh);
void gen_instr_epilogue(jit_holder& jh);
inline void gen_raise(jit_holder& jh, uint16_t trap_id, uint16_t cause);

template<unsigned W, typename U, typename S = typename std::make_signed<U>::type>
inline S sext(U from) {
auto mask = (1ULL<<W) - 1;
auto sign_mask = 1ULL<<(W-1);
return (from & mask) | ((from & sign_mask) ? ~mask : 0);
}
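The sext<W> helper above keeps the low W bits of the operand and replicates bit W-1 into the upper bits. A minimal standalone sketch of the same arithmetic, assuming W is smaller than 64; the names sext_demo and main are illustrative only and not part of this commit:

#include <cassert>
#include <cstdint>
#include <type_traits>

// Same arithmetic as sext<W> above: mask to the low W bits, then or in the sign bits.
template <unsigned W, typename U, typename S = typename std::make_signed<U>::type>
S sext_demo(U from) {
    auto mask = (1ULL << W) - 1;
    auto sign_mask = 1ULL << (W - 1);
    return (from & mask) | ((from & sign_mask) ? ~mask : 0);
}

int main() {
    assert((sext_demo<12, uint32_t>(0xFFFu) == -1));    // all-ones 12-bit immediate
    assert((sext_demo<12, uint32_t>(0x7FFu) == 2047));  // largest positive 12-bit value
    assert((sext_demo<12, uint32_t>(0x800u) == -2048)); // most negative 12-bit value
    return 0;
}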
#include <vm/asmjit/helper_func.h>

private:
/****************************************************************************
* start opcode definitions
@ -139,12 +154,14 @@ private:

}
x86::Compiler& cc = jh.cc;
cc.comment(fmt::format("\\n${instr.name}_{:#x}:",pc.val).c_str());
cc.comment(fmt::format("${instr.name}_{:#x}:",pc.val).c_str());
this->gen_sync(jh, PRE_SYNC, ${idx});
pc=pc+ ${instr.length/8};

gen_instr_prologue(jh, pc.val);
cc.comment("\\n//behavior:");
cc.mov(jh.pc, pc.val);
pc = pc+${instr.length/8};
cc.mov(jh.next_pc, pc.val);

gen_instr_prologue(jh);
cc.comment("//behavior:");
/*generate behavior*/
<%instr.behavior.eachLine{%>${it}
<%}%>
@ -214,11 +231,6 @@ private:
}
};

template <typename CODE_WORD> void debug_fn(CODE_WORD instr) {
volatile CODE_WORD x = instr;
instr = 2 * x;
}

template <typename ARCH> vm_impl<ARCH>::vm_impl() { this(new ARCH()); }

template <typename ARCH>
@ -232,8 +244,7 @@ vm_impl<ARCH>::vm_impl(ARCH &core, unsigned core_id, unsigned cluster_id)
}

template <typename ARCH>
continuation_e
vm_impl<ARCH>::gen_single_inst_behavior(virt_addr_t &pc, unsigned int &inst_cnt, jit_holder& jh) {
continuation_e vm_impl<ARCH>::gen_single_inst_behavior(virt_addr_t &pc, unsigned int &inst_cnt, jit_holder& jh) {
enum {TRAP_ID=1<<16};
code_word_t instr = 0;
phys_addr_t paddr(pc);
@ -251,10 +262,90 @@ vm_impl<ARCH>::gen_single_inst_behavior(virt_addr_t &pc, unsigned int &inst_cnt,
f = &this_class::illegal_intruction;
return (this->*f)(pc, instr, jh);
}
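gen_single_inst_behavior resolves the fetched code word to a compile_func (a pointer to member function, see the typedef in the class body above) and dispatches through (this->*f)(...). A simplified sketch of that dispatch pattern with stand-in types; the decoder struct, its handlers, and the enum values are illustrative, not this project's API:

#include <cstdint>
#include <iostream>

enum continuation_e { CONT, ILLEGAL };

struct decoder {
    // pointer-to-member type, analogous to compile_func above
    using compile_func = continuation_e (decoder::*)(uint64_t pc, uint32_t instr);

    continuation_e gen_addi(uint64_t pc, uint32_t) { std::cout << "addi @ 0x" << std::hex << pc << "\n"; return CONT; }
    continuation_e illegal_instruction(uint64_t, uint32_t) { return ILLEGAL; }

    continuation_e dispatch(uint64_t pc, uint32_t instr) {
        // a real decoder consults a generated decode table here
        compile_func f = (instr != 0) ? &decoder::gen_addi : &decoder::illegal_instruction;
        return (this->*f)(pc, instr); // same call form as (this->*f)(pc, instr, jh)
    }
};

int main() {
    decoder d;
    d.dispatch(0x80000000u, 0x00000013u);              // handled instruction
    return d.dispatch(0x80000004u, 0) == ILLEGAL ? 0 : 1;
}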
template <typename ARCH>
void vm_impl<ARCH>::gen_instr_prologue(jit_holder& jh) {
auto& cc = jh.cc;

cc.comment("//(*icount)++;");
cc.inc(get_ptr_for(jh, traits::ICOUNT));

cc.comment("//*trap_state=*pending_trap;");
x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(current_trap_state, get_ptr_for(jh, traits::TRAP_STATE));
cc.mov(get_ptr_for(jh, traits::PENDING_TRAP), current_trap_state);

} // namespace ${coreDef.name.toLowerCase()}
}
template <typename ARCH>
void vm_impl<ARCH>::gen_instr_epilogue(jit_holder& jh) {
auto& cc = jh.cc;

cc.comment("//if(*trap_state!=0) goto trap_entry;");
x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(current_trap_state, get_ptr_for(jh, traits::TRAP_STATE));
cc.cmp(current_trap_state, 0);
cc.jne(jh.trap_entry);

// TODO: Does not need to be done for every instruction, only when needed (by plugin)
cc.comment("//write back regs to mem");
write_reg_to_mem(jh, jh.pc, traits::PC);
write_reg_to_mem(jh, jh.next_pc, traits::NEXT_PC);
}
template <typename ARCH>
void vm_impl<ARCH>::gen_block_prologue(jit_holder& jh){

jh.pc = load_reg_from_mem(jh, traits::PC);
jh.next_pc = load_reg_from_mem(jh, traits::NEXT_PC);
}
template <typename ARCH>
void vm_impl<ARCH>::gen_block_epilogue(jit_holder& jh){
x86::Compiler& cc = jh.cc;
cc.comment("//return *next_pc;");
cc.ret(jh.next_pc);

cc.bind(jh.trap_entry);
cc.comment("//Prepare for enter_trap;");
// Make sure cached values are written back
cc.comment("//write back regs to mem");
write_reg_to_mem(jh, jh.pc, traits::PC);
write_reg_to_mem(jh, jh.next_pc, traits::NEXT_PC);
this->gen_sync(jh, POST_SYNC, -1);

x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(current_trap_state, get_ptr_for(jh, traits::TRAP_STATE));

x86::Gp current_pc = get_reg_for(jh, traits::PC);
cc.mov(current_pc, get_ptr_for(jh, traits::PC));

x86::Gp instr = cc.newInt32("instr");
cc.mov(instr, 0); // this is not correct
cc.comment("//enter trap call;");
InvokeNode* call_enter_trap;
cc.invoke(&call_enter_trap, &enter_trap, FuncSignatureT<uint64_t, void*, uint64_t, uint64_t, uint64_t>());
call_enter_trap->setArg(0, jh.arch_if_ptr);
call_enter_trap->setArg(1, current_trap_state);
call_enter_trap->setArg(2, current_pc);
call_enter_trap->setArg(3, instr);

x86::Gp current_next_pc = get_reg_for(jh, traits::NEXT_PC);
cc.mov(current_next_pc, get_ptr_for(jh, traits::NEXT_PC));
cc.mov(jh.next_pc, current_next_pc);

cc.comment("//*last_branch = std::numeric_limits<uint32_t>::max();");
cc.mov(get_ptr_for(jh, traits::LAST_BRANCH), std::numeric_limits<uint32_t>::max());
cc.comment("//return *next_pc;");
cc.ret(jh.next_pc);
}
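The block epilogue above calls back into the host through enter_trap using FuncSignatureT<uint64_t, void*, uint64_t, uint64_t, uint64_t>. A hedged sketch of a host-side callback with that shape; the struct, the stub body, and the name enter_trap_stub are placeholders, not the project's actual implementation:

#include <cstdint>

// Shape implied by the FuncSignatureT above: the JITed block passes the arch
// interface pointer, the pending trap state, the current pc and the raw
// instruction word, and receives a 64-bit result back.
struct arch_if_stub { uint64_t next_pc; };

extern "C" uint64_t enter_trap_stub(void* arch_if, uint64_t trap_state, uint64_t pc, uint64_t instr) {
    auto* core = static_cast<arch_if_stub*>(arch_if);
    (void)trap_state; (void)instr;
    core->next_pc = pc; // a real handler would vector to the trap handler address
    return core->next_pc;
}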
template <typename ARCH>
inline void vm_impl<ARCH>:: gen_raise(jit_holder& jh, uint16_t trap_id, uint16_t cause) {
auto& cc = jh.cc;
cc.comment("//gen_raise");
auto tmp1 = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(tmp1, 0x80ULL << 24 | (cause << 16) | trap_id);
cc.mov(get_ptr_for(jh, traits::TRAP_STATE), tmp1);
cc.mov(jh.next_pc, std::numeric_limits<uint32_t>::max());
}
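gen_raise packs the trap id and cause into a single trap_state word with 0x80ULL << 24 | (cause << 16) | trap_id (the commented-out raise() in the removed helper file further down uses the same expression). A small sketch that packs and unpacks that layout, assuming cause fits in 8 bits and trap_id in 16 bits; pack_trap_state is an illustrative name:

#include <cassert>
#include <cstdint>

// bits 31..24: 0x80 marker, bits 23..16: cause, bits 15..0: trap id
uint32_t pack_trap_state(uint16_t trap_id, uint8_t cause) {
    return (0x80u << 24) | (uint32_t(cause) << 16) | trap_id;
}

int main() {
    uint32_t ts = pack_trap_state(/*trap_id*/ 1, /*cause*/ 2);
    assert((ts >> 24) == 0x80u);        // trap-pending marker
    assert(((ts >> 16) & 0xFFu) == 2u); // cause
    assert((ts & 0xFFFFu) == 1u);       // trap id
    return 0;
}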

} // namespace tgc5c

template <>
std::unique_ptr<vm_if> create<arch::${coreDef.name.toLowerCase()}>(arch::${coreDef.name.toLowerCase()} *core, unsigned short port, bool dump) {
@ -265,9 +356,9 @@ std::unique_ptr<vm_if> create<arch::${coreDef.name.toLowerCase()}>(arch::${coreD
} // namespace asmjit
} // namespace iss

#include <iss/factory.h>
#include <iss/arch/riscv_hart_m_p.h>
#include <iss/arch/riscv_hart_mu_p.h>
#include <iss/factory.h>
namespace iss {
namespace {
volatile std::array<bool, 2> dummy = {
@ -75,7 +75,7 @@ int main(int argc, char* argv[]) {
("elf,f", po::value<std::vector<std::string>>(), "ELF file(s) to load")
("mem,m", po::value<std::string>(), "the memory input file")
("plugin,p", po::value<std::vector<std::string>>(), "plugin to activate")
("backend", po::value<std::string>()->default_value("interp"), "the ISS backend to use, options are: interp, tcc")
("backend", po::value<std::string>()->default_value("interp"), "the ISS backend to use, options are: interp, llvm, tcc, asmjit")
("isa", po::value<std::string>()->default_value("tgc5c"), "core or isa name to use for simulation, use '?' to get list");
// clang-format on
auto parsed = po::command_line_parser(argc, argv).options(desc).allow_unregistered().run();
@ -1,539 +0,0 @@
#include <asmjit/asmjit.h>
#include <iss/asmjit/jit_helper.h>

x86::Mem get_reg_ptr(jit_holder& jh, unsigned idx) {

x86::Gp tmp_ptr = jh.cc.newUIntPtr("tmp_ptr");
jh.cc.mov(tmp_ptr, jh.regs_base_ptr);
jh.cc.add(tmp_ptr, traits::reg_byte_offsets[idx]);
switch(traits::reg_bit_widths[idx]) {
case 8:
return x86::ptr_8(tmp_ptr);
case 16:
return x86::ptr_16(tmp_ptr);
case 32:
return x86::ptr_32(tmp_ptr);
case 64:
return x86::ptr_64(tmp_ptr);
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
}
x86::Gp get_reg_for(jit_holder& jh, unsigned idx) {
// TODO can check for regs in jh and return them instead of creating new ones
switch(traits::reg_bit_widths[idx]) {
case 8:
return jh.cc.newInt8();
case 16:
return jh.cc.newInt16();
case 32:
return jh.cc.newInt32();
case 64:
return jh.cc.newInt64();
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
}
x86::Gp get_reg_for(jit_holder& jh, unsigned size, bool is_signed) {
if(is_signed)
switch(size) {
case 8:
return jh.cc.newInt8();
case 16:
return jh.cc.newInt16();
case 32:
return jh.cc.newInt32();
case 64:
return jh.cc.newInt64();
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
else
switch(size) {
case 8:
return jh.cc.newUInt8();
case 16:
return jh.cc.newUInt16();
case 32:
return jh.cc.newUInt32();
case 64:
return jh.cc.newUInt64();
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
}
inline x86::Gp load_reg_from_mem(jit_holder& jh, unsigned idx) {
auto ptr = get_reg_ptr(jh, idx);
auto reg = get_reg_for(jh, idx);
jh.cc.mov(reg, ptr);
return reg;
}
inline void write_reg_to_mem(jit_holder& jh, x86::Gp reg, unsigned idx) {
auto ptr = get_reg_ptr(jh, idx);
jh.cc.mov(ptr, reg);
}

void gen_instr_prologue(jit_holder& jh, addr_t pc) {
auto& cc = jh.cc;
cc.mov(jh.pc, pc);

cc.comment("\n//(*icount)++;");
cc.inc(get_reg_ptr(jh, traits::ICOUNT));

cc.comment("\n//*pc=*next_pc;");
cc.mov(get_reg_ptr(jh, traits::PC), jh.next_pc);

cc.comment("\n//*trap_state=*pending_trap;");
x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(current_trap_state, get_reg_ptr(jh, traits::TRAP_STATE));
cc.mov(get_reg_ptr(jh, traits::PENDING_TRAP), current_trap_state);

cc.comment("\n//increment *next_pc");
cc.mov(jh.next_pc, pc);
}
void gen_instr_epilogue(jit_holder& jh) {
auto& cc = jh.cc;

cc.comment("\n//if(*trap_state!=0) goto trap_entry;");
x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(current_trap_state, get_reg_ptr(jh, traits::TRAP_STATE));
cc.cmp(current_trap_state, 0);
cc.jne(jh.trap_entry);

// TODO: Does not need to be done for every instruction, only when needed
cc.comment("\n//write back regs to mem");
write_reg_to_mem(jh, jh.pc, traits::PC);
write_reg_to_mem(jh, jh.next_pc, traits::NEXT_PC);
}
void gen_block_prologue(jit_holder& jh) override {

jh.pc = load_reg_from_mem(jh, traits::PC);
jh.next_pc = load_reg_from_mem(jh, traits::NEXT_PC);
}
void gen_block_epilogue(jit_holder& jh) override {
x86::Compiler& cc = jh.cc;
cc.comment("\n//return *next_pc;");
cc.ret(jh.next_pc);

cc.bind(jh.trap_entry);
cc.comment("\n//Prepare for enter_trap;");
// Make sure cached values are written back
cc.comment("\n//write back regs to mem");
write_reg_to_mem(jh, jh.pc, traits::PC);
write_reg_to_mem(jh, jh.next_pc, traits::NEXT_PC);
this->gen_sync(jh, POST_SYNC, -1);

x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(current_trap_state, get_reg_ptr(jh, traits::TRAP_STATE));

x86::Gp current_pc = get_reg_for(jh, traits::PC);
cc.mov(current_pc, get_reg_ptr(jh, traits::PC));

x86::Gp instr = cc.newInt32("instr");
cc.mov(instr, 0); // this is not correct
cc.comment("\n//enter trap call;");
InvokeNode* call_enter_trap;
cc.invoke(&call_enter_trap, &enter_trap, FuncSignatureT<uint64_t, void*, uint64_t, uint64_t, uint64_t>());
call_enter_trap->setArg(0, jh.arch_if_ptr);
call_enter_trap->setArg(1, current_trap_state);
call_enter_trap->setArg(2, current_pc);
call_enter_trap->setArg(3, instr);

x86::Gp current_next_pc = get_reg_for(jh, traits::NEXT_PC);
cc.mov(current_next_pc, get_reg_ptr(jh, traits::NEXT_PC));
cc.mov(jh.next_pc, current_next_pc);

cc.comment("\n//*last_branch = std::numeric_limits<uint32_t>::max();");
cc.mov(get_reg_ptr(jh, traits::LAST_BRANCH), std::numeric_limits<uint32_t>::max());
cc.comment("\n//return *next_pc;");
cc.ret(jh.next_pc);
}
/*
inline void raise(uint16_t trap_id, uint16_t cause){
auto trap_val = 0x80ULL << 24 | (cause << 16) | trap_id;
this->core.reg.trap_state = trap_val;
this->template get_reg<uint32_t>(traits::NEXT_PC) = std::numeric_limits<uint32_t>::max();
}
*/
inline void gen_raise(jit_holder& jh, uint16_t trap_id, uint16_t cause) {
auto& cc = jh.cc;
cc.comment("//gen_raise");
auto tmp1 = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(tmp1, 0x80ULL << 24 | (cause << 16) | trap_id);
cc.mov(get_reg_ptr(jh, traits::TRAP_STATE), tmp1);
auto tmp2 = get_reg_for(jh, traits::NEXT_PC);
cc.mov(tmp2, std::numeric_limits<uint32_t>::max());
cc.mov(get_reg_ptr(jh, traits::NEXT_PC), tmp2);
}
inline void gen_wait(jit_holder& jh, unsigned type) { jh.cc.comment("//gen_wait"); }
inline void gen_leave(jit_holder& jh, unsigned lvl) { jh.cc.comment("//gen_leave"); }

enum operation { add, sub, band, bor, bxor, shl, sar, shr };

template <typename T, typename = std::enable_if_t<std::is_integral<T>::value || std::is_same<T, x86::Gp>::value>>
x86::Gp gen_operation(jit_holder& jh, operation op, x86::Gp a, T b) {
x86::Compiler& cc = jh.cc;
switch(op) {
case add: {
cc.add(a, b);
break;
}
case sub: {
cc.sub(a, b);
break;
}
case band: {
cc.and_(a, b);
break;
}
case bor: {
cc.or_(a, b);
break;
}
case bxor: {
cc.xor_(a, b);
break;
}
case shl: {
cc.shl(a, b);
break;
}
case sar: {
cc.sar(a, b);
break;
}
case shr: {
cc.shr(a, b);
break;
}
default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (operation)", op));
}
return a;
}
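gen_operation applies the requested x86 instruction in place on the virtual register a and returns it. A self-contained sketch of the same asmjit::x86::Compiler idiom outside the jit_holder plumbing, assuming a recent asmjit (JitRuntime, CodeHolder and the Compiler calls shown here):

#include <asmjit/x86.h>
using namespace asmjit;

int main() {
    JitRuntime rt;
    CodeHolder code;
    code.init(rt.environment());

    x86::Compiler cc(&code);
    FuncNode* func = cc.addFunc(FuncSignatureT<int, int, int>());
    x86::Gp a = cc.newInt32("a");
    x86::Gp b = cc.newInt32("b");
    func->setArg(0, a);
    func->setArg(1, b);
    cc.add(a, b); // same in-place pattern the 'add' case above emits
    cc.ret(a);
    cc.endFunc();
    cc.finalize();

    int (*fn)(int, int);
    if (rt.add(&fn, &code) != kErrorOk)
        return 1;
    int result = fn(2, 3); // result == 5
    rt.release(fn);
    return result == 5 ? 0 : 1;
}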

enum three_operand_operation { imul, mul, idiv, div, srem, urem };

x86::Gp gen_operation(jit_holder& jh, three_operand_operation op, x86::Gp a, x86::Gp b) {
x86::Compiler& cc = jh.cc;
switch(op) {
case imul: {
x86::Gp dummy = cc.newInt64();
cc.imul(dummy, a.r64(), b.r64());
return a;
}
case mul: {
x86::Gp dummy = cc.newInt64();
cc.mul(dummy, a.r64(), b.r64());
return a;
}
case idiv: {
x86::Gp dummy = cc.newInt64();
cc.mov(dummy, 0);
cc.idiv(dummy, a.r64(), b.r64());
return a;
}
case div: {
x86::Gp dummy = cc.newInt64();
cc.mov(dummy, 0);
cc.div(dummy, a.r64(), b.r64());
return a;
}
case srem: {
x86::Gp rem = cc.newInt32();
cc.mov(rem, 0);
auto a_reg = cc.newInt32();
cc.mov(a_reg, a.r32());
cc.idiv(rem, a_reg, b.r32());
return rem;
}
case urem: {
x86::Gp rem = cc.newInt32();
cc.mov(rem, 0);
auto a_reg = cc.newInt32();
cc.mov(a_reg, a.r32());
cc.div(rem, a_reg, b.r32());
return rem;
}

default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (three_operand)", op));
}
return a;
}
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
x86::Gp gen_operation(jit_holder& jh, three_operand_operation op, x86::Gp a, T b) {
x86::Gp b_reg = jh.cc.newInt32();
/* switch(a.size()){
case 1: b_reg = jh.cc.newInt8(); break;
case 2: b_reg = jh.cc.newInt16(); break;
case 4: b_reg = jh.cc.newInt32(); break;
case 8: b_reg = jh.cc.newInt64(); break;
default: throw std::runtime_error(fmt::format("Invalid size ({}) in gen operation", a.size()));
} */
jh.cc.mov(b_reg, b);
return gen_operation(jh, op, a, b_reg);
}
enum comparison_operation { land, lor, eq, ne, lt, ltu, gt, gtu, lte, lteu, gte, gteu };

template <typename T, typename = std::enable_if_t<std::is_integral<T>::value || std::is_same<T, x86::Gp>::value>>
x86::Gp gen_operation(jit_holder& jh, comparison_operation op, x86::Gp a, T b) {
x86::Compiler& cc = jh.cc;
x86::Gp tmp = cc.newInt8();
cc.mov(tmp, 1);
Label label_then = cc.newLabel();
cc.cmp(a, b);
switch(op) {
case eq:
cc.je(label_then);
break;
case ne:
cc.jne(label_then);
break;
case lt:
cc.jl(label_then);
break;
case ltu:
cc.jb(label_then);
break;
case gt:
cc.jg(label_then);
break;
case gtu:
cc.ja(label_then);
break;
case lte:
cc.jle(label_then);
break;
case lteu:
cc.jbe(label_then);
break;
case gte:
cc.jge(label_then);
break;
case gteu:
cc.jae(label_then);
break;
case land: {
Label label_false = cc.newLabel();
cc.cmp(a, 0);
cc.je(label_false);
auto b_reg = cc.newInt8();
cc.mov(b_reg, b);
cc.cmp(b_reg, 0);
cc.je(label_false);
cc.jmp(label_then);
cc.bind(label_false);
break;
}
case lor: {
cc.cmp(a, 0);
cc.jne(label_then);
auto b_reg = cc.newInt8();
cc.mov(b_reg, b);
cc.cmp(b_reg, 0);
cc.jne(label_then);
break;
}
default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (comparison)", op));
}
cc.mov(tmp, 0);
cc.bind(label_then);
return tmp;
}
enum binary_operation { lnot, inc, dec, bnot, neg };

x86::Gp gen_operation(jit_holder& jh, binary_operation op, x86::Gp a) {
x86::Compiler& cc = jh.cc;
switch(op) {
case lnot:
throw std::runtime_error("Current operation not supported in gen_operation(lnot)");
case inc: {
cc.inc(a);
break;
}
case dec: {
cc.dec(a);
break;
}
case bnot: {
cc.not_(a);
break;
}
case neg: {
cc.neg(a);
break;
}
default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (unary)", op));
}
return a;
}

template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
inline x86::Gp gen_ext(jit_holder& jh, T val, unsigned size, bool is_signed) {
auto val_reg = get_reg_for(jh, sizeof(val) * 8, is_signed);
jh.cc.mov(val_reg, val);
return gen_ext(jh, val_reg, size, is_signed);
}
inline x86::Gp gen_ext(jit_holder& jh, x86::Gp val, unsigned size, bool is_signed) {
auto& cc = jh.cc;
if(is_signed) {
switch(val.size()) {
case 1:
cc.cbw(val);
break;
case 2:
cc.cwde(val);
break;
case 4:
cc.cdqe(val);
break;
case 8:
break;
default:
throw std::runtime_error("Invalid register size in gen_ext");
}
}
switch(size) {
case 8:
cc.and_(val, std::numeric_limits<uint8_t>::max());
return val.r8();
case 16:
cc.and_(val, std::numeric_limits<uint16_t>::max());
return val.r16();
case 32:
cc.and_(val, std::numeric_limits<uint32_t>::max());
return val.r32();
case 64:
cc.and_(val, std::numeric_limits<uint64_t>::max());
return val.r64();
case 128:
return val.r64();
default:
throw std::runtime_error("Invalid size in gen_ext");
}
}
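gen_ext first widens the value to 64 bit with cbw/cwde/cdqe when is_signed is set and then masks the result down to size bits before returning a register view of that width. A plain-C++ reference for the intended extension semantics; ext_ref is an illustrative name and assumes the input already fits in from_bits:

#include <cassert>
#include <cstdint>

// Optionally sign-extend a from_bits-wide value to 64 bit, then keep the low to_bits bits.
uint64_t ext_ref(uint64_t val, unsigned from_bits, unsigned to_bits, bool is_signed) {
    if (is_signed && from_bits < 64) {
        uint64_t sign = 1ULL << (from_bits - 1);
        val = (val ^ sign) - sign; // two's-complement sign extension
    }
    if (to_bits < 64)
        val &= (1ULL << to_bits) - 1;
    return val;
}

int main() {
    assert(ext_ref(0xFFu, 8, 32, true) == 0xFFFFFFFFull); // signed: -1 byte widened to 32 bit
    assert(ext_ref(0xFFu, 8, 32, false) == 0xFFull);      // unsigned: zero extension
    return 0;
}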
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint32_t length) {
x86::Compiler& cc = jh.cc;
auto ret_reg = cc.newInt32();

auto mem_type_reg = cc.newInt32();
cc.mov(mem_type_reg, type);

auto space_reg = cc.newInt32();
cc.mov(space_reg, static_cast<uint16_t>(iss::address_type::VIRTUAL));

auto val_ptr = cc.newUIntPtr();
cc.mov(val_ptr, read_mem_buf);

InvokeNode* invokeNode;
uint64_t mask = 0;
x86::Gp val_reg = cc.newInt64();

switch(length) {
case 1: {
cc.invoke(&invokeNode, &read_mem1, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint8_t>::max();
break;
}
case 2: {
cc.invoke(&invokeNode, &read_mem2, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint16_t>::max();
break;
}
case 4: {
cc.invoke(&invokeNode, &read_mem4, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint32_t>::max();
break;
}
case 8: {
cc.invoke(&invokeNode, &read_mem8, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint64_t>::max();
break;
}
default:
throw std::runtime_error(fmt::format("Invalid length ({}) in gen_read_mem", length));
}

invokeNode->setRet(0, ret_reg);
invokeNode->setArg(0, jh.arch_if_ptr);
invokeNode->setArg(1, space_reg);
invokeNode->setArg(2, mem_type_reg);
invokeNode->setArg(3, addr);
invokeNode->setArg(4, val_ptr);
cc.cmp(ret_reg, 0);
cc.jne(jh.trap_entry);

cc.mov(val_reg, x86::ptr_64(val_ptr));
cc.and_(val_reg, mask);
return val_reg;
}
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp length) {
throw std::runtime_error("Invalid gen_read_mem");
}
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp length) {
throw std::runtime_error("Invalid gen_read_mem");
}
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, uint32_t length) {
auto addr_reg = jh.cc.newInt64();
jh.cc.mov(addr_reg, addr);

return gen_read_mem(jh, type, addr_reg, length);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, int64_t val, uint32_t length) {
auto val_reg = get_reg_for(jh, length * 8, true);
jh.cc.mov(val_reg, val);
gen_write_mem(jh, type, addr, val_reg, length);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp val, uint32_t length) {
x86::Compiler& cc = jh.cc;
assert(val.size() == length);
auto mem_type_reg = cc.newInt32();
jh.cc.mov(mem_type_reg, type);
auto space_reg = cc.newInt32();
jh.cc.mov(space_reg, static_cast<uint16_t>(iss::address_type::VIRTUAL));
auto ret_reg = cc.newInt32();
InvokeNode* invokeNode;
switch(length) {
case 1:
cc.invoke(&invokeNode, &write_mem1, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint8_t>());

break;
case 2:
cc.invoke(&invokeNode, &write_mem2, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint16_t>());
break;
case 4:
cc.invoke(&invokeNode, &write_mem4, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint32_t>());
break;
case 8:
cc.invoke(&invokeNode, &write_mem8, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint64_t>());

break;
default:
throw std::runtime_error("Invalid register size in gen_ext");
}
invokeNode->setRet(0, ret_reg);
invokeNode->setArg(0, jh.arch_if_ptr);
invokeNode->setArg(1, space_reg);
invokeNode->setArg(2, mem_type_reg);
invokeNode->setArg(3, addr);
invokeNode->setArg(4, val);

cc.cmp(ret_reg, 0);
cc.jne(jh.trap_entry);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp val, uint32_t length) {
auto addr_reg = jh.cc.newUInt64();
jh.cc.mov(addr_reg, addr);
gen_write_mem(jh, type, addr_reg, val, length);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, int64_t val, uint32_t length) {
auto val_reg = get_reg_for(jh, length * 8, true);
jh.cc.mov(val_reg, val);

auto addr_reg = jh.cc.newUInt64();
jh.cc.mov(addr_reg, addr);
gen_write_mem(jh, type, addr_reg, val_reg, length);
}