adds functionality, adds working asmjit
parent e48597b2b7
commit 0eb1db0e7e
@@ -320,6 +320,8 @@ protected:
 
+    unsigned get_reg_num() override { return traits<BASE>::NUM_REGS; }
+    unsigned get_reg_size(unsigned num) override { return traits<BASE>::reg_bit_widths[num]; }
 
     riscv_hart_m_p<BASE, FEAT>& arch;
 };
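The same get_reg_num()/get_reg_size() pair appears in all three hart variants touched in this commit; it lets a generic consumer such as the new asmjit backend query the register file without knowing the concrete core type. A minimal sketch of such a consumer (the dump_reg_layout helper and its output are illustrative, not part of the commit):

#include <cstdio>

// Hypothetical consumer of the new accessors: walk the register file and
// print the bit width of every architectural register.
template <typename ARCH_IF>
void dump_reg_layout(ARCH_IF& arch) {
    for(unsigned i = 0; i < arch.get_reg_num(); ++i)
        std::printf("reg %u: %u bits\n", i, arch.get_reg_size(i));
}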
@@ -822,7 +824,8 @@ iss::status riscv_hart_m_p<BASE, FEAT>::write(const address_type type, const acc
                 x |= 0x80; // set pll lock upon writing
                 return iss::Ok;
             } break;
-            default: {}
+            default: {
+            }
             }
         } break;
         case traits<BASE>::CSR: {
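The x |= 0x80 line keeps the existing behavioral shortcut: a write to the PLL configuration word immediately reads back with the lock bit set, so guest firmware that polls for PLL lock terminates under simulation. A stand-alone sketch of that trick, assuming a single 32-bit register and taking the 0x80 mask verbatim from the diff (which byte lane of the real register it targets is not visible here):

#include <cstdint>

// Minimal behavioral model: any write reports the PLL as locked right away,
// so a guest busy-wait loop on the lock bit finishes immediately.
struct pll_cfg_model {
    uint32_t value = 0;
    void write(uint32_t v) { value = v | 0x80; } // set pll lock upon writing
    uint32_t read() const { return value; }
};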
@@ -371,6 +371,8 @@ protected:
 
+    unsigned get_reg_num() override { return traits<BASE>::NUM_REGS; }
+    unsigned get_reg_size(unsigned num) override { return traits<BASE>::reg_bit_widths[num]; }
 
     riscv_hart_msu_vp<BASE>& arch;
 };
@@ -802,7 +804,8 @@ iss::status riscv_hart_msu_vp<BASE>::write(const address_type type, const access
                 x |= 0x80; // set pll lock upon writing
                 return iss::Ok;
             } break;
-            default: {}
+            default: {
+            }
             }
         } break;
         case traits<BASE>::CSR: {
@@ -1225,9 +1228,9 @@ template <typename BASE> typename riscv_hart_msu_vp<BASE>::phys_addr_t riscv_har
                 break;
             } else if(!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
                 break;
-            } else if(type == iss::access_type::FETCH ? !(pte & PTE_X)
-                      : type == iss::access_type::READ ? !(pte & PTE_R) && !(mxr && (pte & PTE_X))
-                                                       : !((pte & PTE_R) && (pte & PTE_W))) {
+            } else if(type == iss::access_type::FETCH ? !(pte & PTE_X)
+                      : type == iss::access_type::READ ? !(pte & PTE_R) && !(mxr && (pte & PTE_X))
+                                                       : !((pte & PTE_R) && (pte & PTE_W))) {
                 break;
             } else if((ppn & ((reg_t(1) << ptshift) - 1)) != 0) {
                 break;
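The re-wrapped condition (apparently a formatting-only change, the extracted old and new lines differ only in whitespace) is the standard RISC-V PTE permission test: instruction fetches need X, loads need R (or X when mstatus.MXR is set), and stores need both R and W. The same predicate written as a stand-alone function; the PTE_R/PTE_W/PTE_X bit positions follow the privileged spec, and the WRITE enumerator is an assumed stand-in for anything that is neither FETCH nor READ:

#include <cstdint>

enum : uint64_t { PTE_R = 1u << 1, PTE_W = 1u << 2, PTE_X = 1u << 3 }; // bits 1..3 per the RISC-V privileged spec
enum class access_type { FETCH, READ, WRITE }; // WRITE stands in for the store/AMO case

// Returns true when the access must fault, mirroring the ternary chain above.
bool pte_denies(uint64_t pte, access_type type, bool mxr) {
    if(type == access_type::FETCH)
        return !(pte & PTE_X);
    if(type == access_type::READ)
        return !(pte & PTE_R) && !(mxr && (pte & PTE_X));
    return !((pte & PTE_R) && (pte & PTE_W)); // store/AMO path
}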
@@ -347,6 +347,8 @@ protected:
 
+    unsigned get_reg_num() override { return traits<BASE>::NUM_REGS; }
+    unsigned get_reg_size(unsigned num) override { return traits<BASE>::reg_bit_widths[num]; }
 
     riscv_hart_mu_p<BASE, FEAT>& arch;
 };
@@ -1005,7 +1007,8 @@ iss::status riscv_hart_mu_p<BASE, FEAT>::write(const address_type type, const ac
                 x |= 0x80; // set pll lock upon writing
                 return iss::Ok;
             } break;
-            default: {}
+            default: {
+            }
             }
         } break;
         case traits<BASE>::CSR: {
@@ -19,7 +19,7 @@ x86::Mem get_reg_ptr(jit_holder& jh, unsigned idx) {
     }
 }
 x86::Gp get_reg_for(jit_holder& jh, unsigned idx) {
-    // can check for regs in jh and return them instead of creating new ones
+    // TODO can check for regs in jh and return them instead of creating new ones
     switch(traits::reg_bit_widths[idx]) {
     case 8:
         return jh.cc.newInt8();
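get_reg_for() maps an architectural register's bit width onto a freshly created asmjit virtual register; the TODO notes that registers already tracked in jit_holder could be reused instead. The width-to-register mapping in isolation (the function name and the bare width parameter are simplifications of the code above):

#include <asmjit/x86.h>
#include <stdexcept>
using namespace asmjit;

// Create a virtual general-purpose register sized to the given bit width.
x86::Gp new_vreg_for_width(x86::Compiler& cc, unsigned bits) {
    switch(bits) {
    case 8:  return cc.newInt8();
    case 16: return cc.newInt16();
    case 32: return cc.newInt32();
    case 64: return cc.newInt64();
    default: throw std::runtime_error("unsupported register width");
    }
}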
@@ -82,7 +82,9 @@ void gen_instr_prologue(jit_holder& jh, addr_t pc) {
     cc.mov(get_reg_ptr(jh, traits::PC), jh.next_pc);
 
     cc.comment("\n//*trap_state=*pending_trap;");
-    cc.mov(get_reg_ptr(jh, traits::PENDING_TRAP), jh.trap_state);
+    x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
+    cc.mov(current_trap_state, get_reg_ptr(jh, traits::TRAP_STATE));
+    cc.mov(get_reg_ptr(jh, traits::PENDING_TRAP), current_trap_state);
 
     cc.comment("\n//increment *next_pc");
     cc.mov(jh.next_pc, pc);
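The replaced line stored the cached jh.trap_state register into PENDING_TRAP; the new sequence reloads TRAP_STATE from its memory slot through a scratch register first, both because x86 has no memory-to-memory mov and because the cached copy can be stale after a helper call. The emitted pattern in isolation (operand names are placeholders; the real code sizes the scratch register via get_reg_for):

#include <asmjit/x86.h>
using namespace asmjit;

// Copy one in-memory core register slot into another by staging the value
// in a scratch virtual register (x86 cannot mov memory to memory).
void copy_mem_slot(x86::Compiler& cc, x86::Mem dst, x86::Mem src) {
    x86::Gp tmp = cc.newInt32("tmp"); // 32-bit width is an assumption here
    cc.mov(tmp, src);
    cc.mov(dst, tmp);
}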
@@ -91,20 +93,20 @@ void gen_instr_epilogue(jit_holder& jh) {
     auto& cc = jh.cc;
 
     cc.comment("\n//if(*trap_state!=0) goto trap_entry;");
-    cc.test(jh.trap_state, jh.trap_state);
-    cc.jnz(jh.trap_entry);
+    x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
+    cc.mov(current_trap_state, get_reg_ptr(jh, traits::TRAP_STATE));
+    cc.cmp(current_trap_state, 0);
+    cc.jne(jh.trap_entry);
 
-    // Does this need to be done after every single instruction?
+    // TODO: Does not need to be done for every instruction, only when needed
     cc.comment("\n//write back regs to mem");
     write_reg_to_mem(jh, jh.pc, traits::PC);
     write_reg_to_mem(jh, jh.next_pc, traits::NEXT_PC);
     write_reg_to_mem(jh, jh.trap_state, traits::TRAP_STATE);
 }
 void gen_block_prologue(jit_holder& jh) override {
 
     jh.pc = load_reg_from_mem(jh, traits::PC);
     jh.next_pc = load_reg_from_mem(jh, traits::NEXT_PC);
     jh.trap_state = load_reg_from_mem(jh, traits::TRAP_STATE);
 }
 void gen_block_epilogue(jit_holder& jh) override {
     x86::Compiler& cc = jh.cc;
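The epilogue no longer tests the cached jh.trap_state register; it reloads TRAP_STATE from memory and compares that against zero, so a trap raised inside the instruction through a helper call (which updates the memory slot, not the cached register) still reaches trap_entry. The check in isolation (parameter names are placeholders):

#include <asmjit/x86.h>
using namespace asmjit;

// Branch to the trap handler when the in-memory trap state is non-zero.
void emit_trap_check(x86::Compiler& cc, x86::Mem trap_state_slot, Label trap_entry) {
    x86::Gp current = cc.newInt32("current_trap_state");
    cc.mov(current, trap_state_slot); // always reload; the cached copy may be stale
    cc.cmp(current, 0);
    cc.jne(trap_entry);
}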
@@ -112,7 +114,12 @@ void gen_block_epilogue(jit_holder& jh) override {
     cc.ret(jh.next_pc);
 
     cc.bind(jh.trap_entry);
-    cc.comment("\n//enter_trap(core_ptr, *trap_state, *pc, 0);");
+    cc.comment("\n//Prepare for enter_trap;");
+    // Make sure cached values are written back
+    cc.comment("\n//write back regs to mem");
+    write_reg_to_mem(jh, jh.pc, traits::PC);
+    write_reg_to_mem(jh, jh.next_pc, traits::NEXT_PC);
+    this->gen_sync(jh, POST_SYNC, -1);
 
     x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
     cc.mov(current_trap_state, get_reg_ptr(jh, traits::TRAP_STATE));
@@ -121,23 +128,43 @@ void gen_block_epilogue(jit_holder& jh) override {
     cc.mov(current_pc, get_reg_ptr(jh, traits::PC));
 
     x86::Gp instr = cc.newInt32("instr");
-    cc.mov(instr, 0);
+    cc.mov(instr, 0); // this is not correct
     cc.comment("\n//enter trap call;");
     InvokeNode* call_enter_trap;
     cc.invoke(&call_enter_trap, &enter_trap, FuncSignatureT<uint64_t, void*, uint64_t, uint64_t, uint64_t>());
     call_enter_trap->setArg(0, jh.arch_if_ptr);
     call_enter_trap->setArg(1, current_trap_state);
     call_enter_trap->setArg(2, current_pc);
     call_enter_trap->setArg(3, instr);
 
     x86::Gp current_next_pc = get_reg_for(jh, traits::NEXT_PC);
     cc.mov(current_next_pc, get_reg_ptr(jh, traits::NEXT_PC));
     cc.mov(jh.next_pc, current_next_pc);
 
     cc.comment("\n//*last_branch = std::numeric_limits<uint32_t>::max();");
     cc.mov(get_reg_ptr(jh, traits::LAST_BRANCH), std::numeric_limits<uint32_t>::max());
     cc.comment("\n//return *next_pc;");
     cc.ret(jh.next_pc);
 }
-// TODO implement
-
-void gen_raise(jit_holder& jh, uint16_t trap_id, uint16_t cause) { jh.cc.comment("//gen_raise"); }
-void gen_wait(jit_holder& jh, unsigned type) { jh.cc.comment("//gen_wait"); }
-void gen_leave(jit_holder& jh, unsigned lvl) { jh.cc.comment("//gen_leave"); }
+/*
+inline void raise(uint16_t trap_id, uint16_t cause){
+    auto trap_val = 0x80ULL << 24 | (cause << 16) | trap_id;
+    this->core.reg.trap_state = trap_val;
+    this->template get_reg<uint32_t>(traits::NEXT_PC) = std::numeric_limits<uint32_t>::max();
+}
+*/
+inline void gen_raise(jit_holder& jh, uint16_t trap_id, uint16_t cause) {
+    auto& cc = jh.cc;
+    cc.comment("//gen_raise");
+    auto tmp1 = get_reg_for(jh, traits::TRAP_STATE);
+    cc.mov(tmp1, 0x80ULL << 24 | (cause << 16) | trap_id);
+    cc.mov(get_reg_ptr(jh, traits::TRAP_STATE), tmp1);
+    auto tmp2 = get_reg_for(jh, traits::NEXT_PC);
+    cc.mov(tmp2, std::numeric_limits<uint32_t>::max());
+    cc.mov(get_reg_ptr(jh, traits::NEXT_PC), tmp2);
+}
+inline void gen_wait(jit_holder& jh, unsigned type) { jh.cc.comment("//gen_wait"); }
+inline void gen_leave(jit_holder& jh, unsigned lvl) { jh.cc.comment("//gen_leave"); }
 
 enum operation { add, sub, band, bor, bxor, shl, sar, shr };
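The new inline gen_raise() materializes the same trap_state encoding that the commented-out interpreter raise() uses, and forces NEXT_PC to the all-ones sentinel so the current block is left. The encoding as a plain helper; the constants are taken from the diff, while the field names in the comments are a descriptive reading of the expression, not names from the code:

#include <cstdint>
#include <limits>

// Mirrors 0x80ULL << 24 | (cause << 16) | trap_id from raise()/gen_raise().
inline uint64_t encode_trap_state(uint16_t trap_id, uint16_t cause) {
    return (0x80ULL << 24)          // marker bit 31: a trap is pending
         | (uint64_t(cause) << 16)  // cause starting at bit 16
         | trap_id;                 // trap id in the low bits
}

// NEXT_PC sentinel used to abort the current translation block.
constexpr uint32_t trap_next_pc_sentinel = std::numeric_limits<uint32_t>::max();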
@@ -342,24 +369,12 @@ x86::Gp gen_operation(jit_holder& jh, binary_operation op, x86::Gp a) {
     return a;
 }
 
-/* template <typename T>
-inline typename std::enable_if_t<std::is_unsigned<T>::value, x86::Gp> gen_ext(jit_holder& jh, T val, unsigned size, bool
-is_signed) const { auto val_reg = get_reg_for(jh, sizeof(val)*8); auto tmp = get_reg_for(jh, size); jh.cc.mov(val_reg,
-val); if(is_signed) jh.cc.movsx(tmp, val_reg); else jh.cc.movzx(tmp,val_reg); return tmp;
-}
-
-template <typename T>
-inline typename std::enable_if_t<std::is_signed<T>::value, x86::Gp> gen_ext(jit_holder& jh, T val, unsigned size, bool
-is_signed) const { auto val_reg = get_reg_for(jh, sizeof(val)*8); auto tmp = get_reg_for(jh, size); jh.cc.mov(val_reg,
-val); if(is_signed) jh.cc.movsx(tmp, val_reg); else jh.cc.movzx(tmp,val_reg); return tmp;
-} */
 template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
 inline x86::Gp gen_ext(jit_holder& jh, T val, unsigned size, bool is_signed) {
-    auto val_reg = get_reg_for(jh, sizeof(val) * 8);
+    auto val_reg = get_reg_for(jh, sizeof(val) * 8, is_signed);
     jh.cc.mov(val_reg, val);
     return gen_ext(jh, val_reg, size, is_signed);
 }
 // explicit Gp size cast
 inline x86::Gp gen_ext(jit_holder& jh, x86::Gp val, unsigned size, bool is_signed) {
     auto& cc = jh.cc;
     if(is_signed) {
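The deleted commented-out templates and the surviving gen_ext() overloads share one core idea: choose movsx for signed and movzx for unsigned extension between differently sized general-purpose registers. A minimal compilable sketch of that core for the 8-to-32-bit case only (the real overload set above covers all widths and throws on invalid sizes):

#include <asmjit/x86.h>
using namespace asmjit;

// Extend an 8-bit virtual register (e.g. from cc.newInt8()) into a 32-bit one.
x86::Gp extend_8_to_32(x86::Compiler& cc, x86::Gp val8, bool is_signed) {
    x86::Gp out = cc.newInt32("ext");
    if(is_signed)
        cc.movsx(out, val8); // sign-extend
    else
        cc.movzx(out, val8); // zero-extend
    return out;
}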
@@ -398,7 +413,6 @@ inline x86::Gp gen_ext(jit_holder& jh, x86::Gp val, unsigned size, bool is_signe
         throw std::runtime_error("Invalid size in gen_ext");
     }
 }
-
 inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint32_t length) {
     x86::Compiler& cc = jh.cc;
     auto ret_reg = cc.newInt32();
@@ -447,31 +461,18 @@ inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint3
     invokeNode->setArg(2, mem_type_reg);
     invokeNode->setArg(3, addr);
     invokeNode->setArg(4, val_ptr);
+    cc.cmp(ret_reg, 0);
+    cc.jne(jh.trap_entry);
 
     cc.mov(val_reg, x86::ptr_64(val_ptr));
     cc.and_(val_reg, mask);
-    cc.cmp(ret_reg, 0);
-    cc.jne(jh.trap_entry);
     return val_reg;
 }
 inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp length) {
-    uint32_t length_val = 0;
-    auto length_ptr = jh.cc.newIntPtr();
-    jh.cc.mov(length_ptr, &length_val);
-    jh.cc.mov(x86::ptr_32(length_ptr), length);
-
-    return gen_read_mem(jh, type, addr, length);
+    throw std::runtime_error("Invalid gen_read_mem");
 }
 inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp length) {
-    auto addr_reg = jh.cc.newInt64();
-    jh.cc.mov(addr_reg, addr);
-
-    uint32_t length_val = 0;
-    auto length_ptr = jh.cc.newIntPtr();
-    jh.cc.mov(length_ptr, &length_val);
-    jh.cc.mov(x86::ptr_32(length_ptr), length);
-
-    return gen_read_mem(jh, type, addr_reg, length_val);
+    throw std::runtime_error("Invalid gen_read_mem");
 }
 inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, uint32_t length) {
     auto addr_reg = jh.cc.newInt64();
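gen_read_mem() now checks the host callback's status code right after the invoke and jumps to trap_entry before the loaded value is consumed, and the two overloads that previously spilled a runtime length to host memory are reduced to a throw. The call-then-check pattern in isolation; the helper pointer, its one-argument signature and the function name below are placeholders, only the asmjit calls mirror the code above:

#include <asmjit/x86.h>
using namespace asmjit;

// Invoke a host helper that returns a status code and bail out to the trap
// label on failure, before any result of the call is used.
void emit_checked_call(x86::Compiler& cc, void* helper, x86::Gp arg0, Label trap_entry) {
    x86::Gp ret = cc.newInt32("ret");
    InvokeNode* node;
    cc.invoke(&node, imm(helper), FuncSignatureT<uint32_t, uint64_t>());
    node->setRet(0, ret);
    node->setArg(0, arg0);
    cc.cmp(ret, 0);
    cc.jne(trap_entry); // non-zero status means the access trapped
}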
@@ -479,32 +480,38 @@ inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, uint
 
     return gen_read_mem(jh, type, addr_reg, length);
 }
-inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, int64_t val) {
-    auto val_reg = jh.cc.newInt64();
+inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, int64_t val, uint32_t length) {
+    auto val_reg = get_reg_for(jh, length * 8, true);
     jh.cc.mov(val_reg, val);
-    gen_write_mem(jh, type, addr, val_reg);
+    gen_write_mem(jh, type, addr, val_reg, length);
 }
-inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp val) {
+inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp val, uint32_t length) {
     x86::Compiler& cc = jh.cc;
 
+    assert(val.size() == length);
     auto mem_type_reg = cc.newInt32();
     jh.cc.mov(mem_type_reg, type);
     auto space_reg = cc.newInt32();
     jh.cc.mov(space_reg, static_cast<uint16_t>(iss::address_type::VIRTUAL));
     auto ret_reg = cc.newInt32();
     InvokeNode* invokeNode;
 
-    if(val.isGpb()) {
+    switch(length) {
+    case 1:
         cc.invoke(&invokeNode, &write_mem1, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint8_t>());
-    } else if(val.isGpw()) {
-        cc.invoke(&invokeNode, &write_mem2, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint16_t>());
-    } else if(val.isGpd()) {
-        cc.invoke(&invokeNode, &write_mem4, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint32_t>());
-    } else if(val.isGpq()) {
-        cc.invoke(&invokeNode, &write_mem8, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint64_t>());
-    } else
-        throw std::runtime_error("Invalid register size in gen_write_mem");
+        break;
+    case 2:
+        cc.invoke(&invokeNode, &write_mem2, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint16_t>());
+        break;
+    case 4:
+        cc.invoke(&invokeNode, &write_mem4, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint32_t>());
+        break;
+    case 8:
+        cc.invoke(&invokeNode, &write_mem8, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint64_t>());
+        break;
+    default:
+        throw std::runtime_error("Invalid register size in gen_ext");
+    }
     invokeNode->setRet(0, ret_reg);
     invokeNode->setArg(0, jh.arch_if_ptr);
     invokeNode->setArg(1, space_reg);
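Dispatching on the explicit length rather than probing val.isGpb()/isGpw()/isGpd()/isGpq() decouples the access size from the nominal size of the value register and picks the host callback whose value parameter matches the width. A reduced sketch of that selection; the write_mem1/2/4/8 functions below are stub stand-ins with the parameter layout visible in the diff (interface pointer, address space, memory type, address, value):

#include <asmjit/x86.h>
#include <cstdint>
#include <stdexcept>
using namespace asmjit;

// Stand-in host callbacks; bodies are stubs for illustration only.
static uint32_t write_mem1(uint64_t, uint32_t, uint32_t, uint64_t, uint8_t) { return 0; }
static uint32_t write_mem2(uint64_t, uint32_t, uint32_t, uint64_t, uint16_t) { return 0; }
static uint32_t write_mem4(uint64_t, uint32_t, uint32_t, uint64_t, uint32_t) { return 0; }
static uint32_t write_mem8(uint64_t, uint32_t, uint32_t, uint64_t, uint64_t) { return 0; }

// Select the callback and the matching FuncSignatureT from the access length.
InvokeNode* emit_write_call(x86::Compiler& cc, uint32_t length) {
    InvokeNode* node = nullptr;
    switch(length) {
    case 1:
        cc.invoke(&node, imm((void*)&write_mem1), FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint8_t>());
        break;
    case 2:
        cc.invoke(&node, imm((void*)&write_mem2), FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint16_t>());
        break;
    case 4:
        cc.invoke(&node, imm((void*)&write_mem4), FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint32_t>());
        break;
    case 8:
        cc.invoke(&node, imm((void*)&write_mem8), FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint64_t>());
        break;
    default:
        throw std::runtime_error("invalid access length");
    }
    return node; // caller still sets the five arguments and the return register
}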
@@ -515,16 +522,16 @@ inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp
     cc.cmp(ret_reg, 0);
     cc.jne(jh.trap_entry);
 }
-inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp val) {
-    auto addr_reg = jh.cc.newInt64();
+inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp val, uint32_t length) {
+    auto addr_reg = jh.cc.newUInt64();
     jh.cc.mov(addr_reg, addr);
-    gen_write_mem(jh, type, addr_reg, val);
+    gen_write_mem(jh, type, addr_reg, val, length);
 }
-inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, int64_t val) {
-    auto val_reg = jh.cc.newInt64();
+inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, int64_t val, uint32_t length) {
+    auto val_reg = get_reg_for(jh, length * 8, true);
     jh.cc.mov(val_reg, val);
 
-    auto addr_reg = jh.cc.newInt64();
+    auto addr_reg = jh.cc.newUInt64();
     jh.cc.mov(addr_reg, addr);
-    gen_write_mem(jh, type, addr_reg, val_reg);
+    gen_write_mem(jh, type, addr_reg, val_reg, length);
 }
File diff suppressed because it is too large