applies clang-format changes

2023-10-29 17:06:56 +01:00
parent 2115e9ceae
commit 759061b569
51 changed files with 11493 additions and 12673 deletions


@@ -1,26 +1,41 @@
x86::Mem get_reg_ptr(jit_holder& jh, unsigned idx){
x86::Gp tmp_ptr = jh.cc.newUIntPtr("tmp_ptr");
jh.cc.mov(tmp_ptr, jh.regs_base_ptr);
jh.cc.add(tmp_ptr, traits::reg_byte_offsets[idx]);
switch(traits::reg_bit_widths[idx]){
case 8:
return x86::ptr_8(tmp_ptr);
case 16:
return x86::ptr_16(tmp_ptr);
case 32:
return x86::ptr_32(tmp_ptr);
case 64:
return x86::ptr_64(tmp_ptr);
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
x86::Mem get_reg_ptr(jit_holder& jh, unsigned idx) {
x86::Gp tmp_ptr = jh.cc.newUIntPtr("tmp_ptr");
jh.cc.mov(tmp_ptr, jh.regs_base_ptr);
jh.cc.add(tmp_ptr, traits::reg_byte_offsets[idx]);
switch(traits::reg_bit_widths[idx]) {
case 8:
return x86::ptr_8(tmp_ptr);
case 16:
return x86::ptr_16(tmp_ptr);
case 32:
return x86::ptr_32(tmp_ptr);
case 64:
return x86::ptr_64(tmp_ptr);
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
}
x86::Gp get_reg_for(jit_holder& jh, unsigned idx){
//can check for regs in jh and return them instead of creating new ones
switch(traits::reg_bit_widths[idx]){
x86::Gp get_reg_for(jit_holder& jh, unsigned idx) {
// can check for regs in jh and return them instead of creating new ones
switch(traits::reg_bit_widths[idx]) {
case 8:
return jh.cc.newInt8();
case 16:
return jh.cc.newInt16();
case 32:
return jh.cc.newInt32();
case 64:
return jh.cc.newInt64();
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
}
x86::Gp get_reg_for(jit_holder& jh, unsigned size, bool is_signed) {
if(is_signed)
switch(size) {
case 8:
return jh.cc.newInt8();
case 16:
@@ -32,23 +47,8 @@ x86::Gp get_reg_for(jit_holder& jh, unsigned idx){
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
}
x86::Gp get_reg_for(jit_holder& jh, unsigned size, bool is_signed){
if(is_signed)
switch(size){
case 8:
return jh.cc.newInt8();
case 16:
return jh.cc.newInt16();
case 32:
return jh.cc.newInt32();
case 64:
return jh.cc.newInt64();
default:
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
else
switch(size){
switch(size) {
case 8:
return jh.cc.newUInt8();
case 16:
@@ -61,18 +61,18 @@ x86::Gp get_reg_for(jit_holder& jh, unsigned size, bool is_signed){
throw std::runtime_error("Invalid reg size in get_reg_ptr");
}
}
inline x86::Gp load_reg_from_mem(jit_holder& jh, unsigned idx){
inline x86::Gp load_reg_from_mem(jit_holder& jh, unsigned idx) {
auto ptr = get_reg_ptr(jh, idx);
auto reg = get_reg_for(jh, idx);
jh.cc.mov(reg, ptr);
return reg;
}
inline void write_reg_to_mem(jit_holder& jh, x86::Gp reg, unsigned idx){
inline void write_reg_to_mem(jit_holder& jh, x86::Gp reg, unsigned idx) {
auto ptr = get_reg_ptr(jh, idx);
jh.cc.mov(ptr, reg);
}
void gen_instr_prologue(jit_holder& jh, addr_t pc){
void gen_instr_prologue(jit_holder& jh, addr_t pc) {
auto& cc = jh.cc;
cc.comment("\n//(*icount)++;");
@@ -83,33 +83,30 @@ void gen_instr_prologue(jit_holder& jh, addr_t pc){
cc.comment("\n//*trap_state=*pending_trap;");
cc.mov(get_reg_ptr(jh, traits::PENDING_TRAP), jh.trap_state);
cc.comment("\n//increment *next_pc");
cc.mov(jh.next_pc, pc);
}
void gen_instr_epilogue(jit_holder& jh){
void gen_instr_epilogue(jit_holder& jh) {
auto& cc = jh.cc;
cc.comment("\n//if(*trap_state!=0) goto trap_entry;");
cc.test(jh.trap_state, jh.trap_state);
cc.jnz(jh.trap_entry);
//Does this need to be done after every single instruction?
// Does this need to be done after every single instruction?
cc.comment("\n//write back regs to mem");
write_reg_to_mem(jh, jh.pc, traits::PC);
write_reg_to_mem(jh, jh.next_pc, traits::NEXT_PC);
write_reg_to_mem(jh, jh.trap_state, traits::TRAP_STATE);
}
void gen_block_prologue(jit_holder& jh) override{
void gen_block_prologue(jit_holder& jh) override {
jh.pc = load_reg_from_mem(jh, traits::PC);
jh.next_pc = load_reg_from_mem(jh, traits::NEXT_PC);
jh.trap_state = load_reg_from_mem(jh, traits::TRAP_STATE);
}
void gen_block_epilogue(jit_holder& jh) override{
void gen_block_epilogue(jit_holder& jh) override {
x86::Compiler& cc = jh.cc;
cc.comment("\n//return *next_pc;");
cc.ret(jh.next_pc);
@@ -117,11 +114,11 @@ void gen_block_epilogue(jit_holder& jh) override{
cc.bind(jh.trap_entry);
cc.comment("\n//enter_trap(core_ptr, *trap_state, *pc, 0);");
x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
x86::Gp current_trap_state = get_reg_for(jh, traits::TRAP_STATE);
cc.mov(current_trap_state, get_reg_ptr(jh, traits::TRAP_STATE));
x86::Gp current_pc = get_reg_for(jh, traits::PC);
cc.mov(current_pc, get_reg_ptr(jh, traits::PC));
cc.mov(current_pc, get_reg_ptr(jh, traits::PC));
x86::Gp instr = cc.newInt32("instr");
cc.mov(instr, 0);
@@ -132,123 +129,162 @@ void gen_block_epilogue(jit_holder& jh) override{
call_enter_trap->setArg(2, current_pc);
call_enter_trap->setArg(3, instr);
cc.comment("\n//*last_branch = std::numeric_limits<uint32_t>::max();");
cc.mov(get_reg_ptr(jh,traits::LAST_BRANCH), std::numeric_limits<uint32_t>::max());
cc.mov(get_reg_ptr(jh, traits::LAST_BRANCH), std::numeric_limits<uint32_t>::max());
cc.comment("\n//return *next_pc;");
cc.ret(jh.next_pc);
}
// TODO implement
}
//TODO implement
void gen_raise(jit_holder& jh, uint16_t trap_id, uint16_t cause) { jh.cc.comment("//gen_raise"); }
void gen_wait(jit_holder& jh, unsigned type) { jh.cc.comment("//gen_wait"); }
void gen_leave(jit_holder& jh, unsigned lvl) { jh.cc.comment("//gen_leave"); }
void gen_raise(jit_holder& jh, uint16_t trap_id, uint16_t cause) {
jh.cc.comment("//gen_raise");
}
void gen_wait(jit_holder& jh, unsigned type) {
jh.cc.comment("//gen_wait");
}
void gen_leave(jit_holder& jh, unsigned lvl){
jh.cc.comment("//gen_leave");
}
enum operation {add, sub, band, bor, bxor, shl, sar , shr};
enum operation { add, sub, band, bor, bxor, shl, sar, shr };
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value || std::is_same<T, x86::Gp>::value>>
x86::Gp gen_operation(jit_holder& jh, operation op, x86::Gp a, T b){
x86::Gp gen_operation(jit_holder& jh, operation op, x86::Gp a, T b) {
x86::Compiler& cc = jh.cc;
switch (op) {
case add: { cc.add(a, b); break; }
case sub: { cc.sub(a, b); break; }
case band: { cc.and_(a, b); break; }
case bor: { cc.or_(a, b); break; }
case bxor: { cc.xor_(a, b); break; }
case shl: { cc.shl(a, b); break; }
case sar: { cc.sar(a, b); break; }
case shr: { cc.shr(a, b); break; }
default: throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (operation)", op));
switch(op) {
case add: {
cc.add(a, b);
break;
}
case sub: {
cc.sub(a, b);
break;
}
case band: {
cc.and_(a, b);
break;
}
case bor: {
cc.or_(a, b);
break;
}
case bxor: {
cc.xor_(a, b);
break;
}
case shl: {
cc.shl(a, b);
break;
}
case sar: {
cc.sar(a, b);
break;
}
case shr: {
cc.shr(a, b);
break;
}
default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (operation)", op));
}
return a;
}
enum three_operand_operation{imul, mul, idiv, div, srem, urem};
enum three_operand_operation { imul, mul, idiv, div, srem, urem };
x86::Gp gen_operation(jit_holder& jh, three_operand_operation op, x86::Gp a, x86::Gp b){
x86::Gp gen_operation(jit_holder& jh, three_operand_operation op, x86::Gp a, x86::Gp b) {
x86::Compiler& cc = jh.cc;
switch (op) {
case imul: {
x86::Gp dummy = cc.newInt64();
cc.imul(dummy, a.r64(), b.r64());
return a;
}
case mul: {
x86::Gp dummy = cc.newInt64();
cc.mul(dummy, a.r64(), b.r64());
return a;
}
case idiv: {
x86::Gp dummy = cc.newInt64();
cc.mov(dummy, 0);
cc.idiv(dummy, a.r64(), b.r64());
return a;
}
case div: {
x86::Gp dummy = cc.newInt64();
cc.mov(dummy, 0);
cc.div(dummy, a.r64(), b.r64());
return a;
}
case srem:{
x86::Gp rem = cc.newInt32();
cc.mov(rem, 0);
auto a_reg = cc.newInt32();
cc.mov(a_reg, a.r32());
cc.idiv(rem, a_reg, b.r32());
return rem;
}
case urem:{
x86::Gp rem = cc.newInt32();
cc.mov(rem, 0);
auto a_reg = cc.newInt32();
cc.mov(a_reg, a.r32());
cc.div(rem, a_reg, b.r32());
return rem;
}
switch(op) {
case imul: {
x86::Gp dummy = cc.newInt64();
cc.imul(dummy, a.r64(), b.r64());
return a;
}
case mul: {
x86::Gp dummy = cc.newInt64();
cc.mul(dummy, a.r64(), b.r64());
return a;
}
case idiv: {
x86::Gp dummy = cc.newInt64();
cc.mov(dummy, 0);
cc.idiv(dummy, a.r64(), b.r64());
return a;
}
case div: {
x86::Gp dummy = cc.newInt64();
cc.mov(dummy, 0);
cc.div(dummy, a.r64(), b.r64());
return a;
}
case srem: {
x86::Gp rem = cc.newInt32();
cc.mov(rem, 0);
auto a_reg = cc.newInt32();
cc.mov(a_reg, a.r32());
cc.idiv(rem, a_reg, b.r32());
return rem;
}
case urem: {
x86::Gp rem = cc.newInt32();
cc.mov(rem, 0);
auto a_reg = cc.newInt32();
cc.mov(a_reg, a.r32());
cc.div(rem, a_reg, b.r32());
return rem;
}
default: throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (three_operand)", op));
default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (three_operand)", op));
}
return a;
}
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
x86::Gp gen_operation(jit_holder& jh, three_operand_operation op, x86::Gp a, T b){
x86::Gp gen_operation(jit_holder& jh, three_operand_operation op, x86::Gp a, T b) {
x86::Gp b_reg = jh.cc.newInt32();
/* switch(a.size()){
case 1: b_reg = jh.cc.newInt8(); break;
case 2: b_reg = jh.cc.newInt16(); break;
case 4: b_reg = jh.cc.newInt32(); break;
case 8: b_reg = jh.cc.newInt64(); break;
default: throw std::runtime_error(fmt::format("Invalid size ({}) in gen operation", a.size()));
} */
/* switch(a.size()){
case 1: b_reg = jh.cc.newInt8(); break;
case 2: b_reg = jh.cc.newInt16(); break;
case 4: b_reg = jh.cc.newInt32(); break;
case 8: b_reg = jh.cc.newInt64(); break;
default: throw std::runtime_error(fmt::format("Invalid size ({}) in gen operation", a.size()));
} */
jh.cc.mov(b_reg, b);
return gen_operation(jh, op, a, b_reg);
}
enum comparison_operation{land, lor, eq, ne, lt, ltu, gt, gtu, lte, lteu, gte, gteu};
enum comparison_operation { land, lor, eq, ne, lt, ltu, gt, gtu, lte, lteu, gte, gteu };
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value || std::is_same<T, x86::Gp>::value>>
x86::Gp gen_operation(jit_holder& jh, comparison_operation op, x86::Gp a, T b){
x86::Gp gen_operation(jit_holder& jh, comparison_operation op, x86::Gp a, T b) {
x86::Compiler& cc = jh.cc;
x86::Gp tmp = cc.newInt8();
cc.mov(tmp,1);
cc.mov(tmp, 1);
Label label_then = cc.newLabel();
cc.cmp(a,b);
switch (op) {
case eq: cc.je(label_then); break;
case ne: cc.jne(label_then); break;
case lt: cc.jl(label_then); break;
case ltu: cc.jb(label_then); break;
case gt: cc.jg(label_then); break;
case gtu: cc.ja(label_then); break;
case lte: cc.jle(label_then); break;
case lteu: cc.jbe(label_then); break;
case gte: cc.jge(label_then); break;
case gteu: cc.jae(label_then); break;
cc.cmp(a, b);
switch(op) {
case eq:
cc.je(label_then);
break;
case ne:
cc.jne(label_then);
break;
case lt:
cc.jl(label_then);
break;
case ltu:
cc.jb(label_then);
break;
case gt:
cc.jg(label_then);
break;
case gtu:
cc.ja(label_then);
break;
case lte:
cc.jle(label_then);
break;
case lteu:
cc.jbe(label_then);
break;
case gte:
cc.jge(label_then);
break;
case gteu:
cc.jae(label_then);
break;
case land: {
Label label_false = cc.newLabel();
cc.cmp(a, 0);
@@ -267,78 +303,103 @@ x86::Gp gen_operation(jit_holder& jh, comparison_operation op, x86::Gp a, T b){
auto b_reg = cc.newInt8();
cc.mov(b_reg, b);
cc.cmp(b_reg, 0);
cc.jne(label_then);
cc.jne(label_then);
break;
}
default: throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (comparison)", op));
default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (comparison)", op));
}
cc.mov(tmp,0);
cc.mov(tmp, 0);
cc.bind(label_then);
return tmp;
}
enum binary_operation{lnot, inc, dec, bnot, neg};
enum binary_operation { lnot, inc, dec, bnot, neg };
x86::Gp gen_operation(jit_holder& jh, binary_operation op, x86::Gp a){
x86::Gp gen_operation(jit_holder& jh, binary_operation op, x86::Gp a) {
x86::Compiler& cc = jh.cc;
switch (op) {
case lnot: throw std::runtime_error("Current operation not supported in gen_operation(lnot)");
case inc: { cc.inc(a); break; }
case dec: { cc.dec(a); break; }
case bnot: { cc.not_(a); break; }
case neg: { cc.neg(a); break; }
default: throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (unary)", op));
switch(op) {
case lnot:
throw std::runtime_error("Current operation not supported in gen_operation(lnot)");
case inc: {
cc.inc(a);
break;
}
case dec: {
cc.dec(a);
break;
}
case bnot: {
cc.not_(a);
break;
}
case neg: {
cc.neg(a);
break;
}
default:
throw std::runtime_error(fmt::format("Current operation {} not supported in gen_operation (unary)", op));
}
return a;
}
/* template <typename T>
inline typename std::enable_if_t<std::is_unsigned<T>::value, x86::Gp> gen_ext(jit_holder& jh, T val, unsigned size, bool is_signed) const {
auto val_reg = get_reg_for(jh, sizeof(val)*8);
auto tmp = get_reg_for(jh, size);
jh.cc.mov(val_reg, val);
if(is_signed) jh.cc.movsx(tmp, val_reg);
else jh.cc.movzx(tmp,val_reg);
return tmp;
inline typename std::enable_if_t<std::is_unsigned<T>::value, x86::Gp> gen_ext(jit_holder& jh, T val, unsigned size, bool
is_signed) const { auto val_reg = get_reg_for(jh, sizeof(val)*8); auto tmp = get_reg_for(jh, size); jh.cc.mov(val_reg,
val); if(is_signed) jh.cc.movsx(tmp, val_reg); else jh.cc.movzx(tmp,val_reg); return tmp;
}
template <typename T>
inline typename std::enable_if_t<std::is_signed<T>::value, x86::Gp> gen_ext(jit_holder& jh, T val, unsigned size, bool is_signed) const {
auto val_reg = get_reg_for(jh, sizeof(val)*8);
auto tmp = get_reg_for(jh, size);
jh.cc.mov(val_reg, val);
if(is_signed) jh.cc.movsx(tmp, val_reg);
else jh.cc.movzx(tmp,val_reg);
return tmp;
inline typename std::enable_if_t<std::is_signed<T>::value, x86::Gp> gen_ext(jit_holder& jh, T val, unsigned size, bool
is_signed) const { auto val_reg = get_reg_for(jh, sizeof(val)*8); auto tmp = get_reg_for(jh, size); jh.cc.mov(val_reg,
val); if(is_signed) jh.cc.movsx(tmp, val_reg); else jh.cc.movzx(tmp,val_reg); return tmp;
} */
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
inline x86::Gp gen_ext(jit_holder& jh, T val, unsigned size, bool is_signed) {
auto val_reg = get_reg_for(jh, sizeof(val)*8);
auto val_reg = get_reg_for(jh, sizeof(val) * 8);
jh.cc.mov(val_reg, val);
return gen_ext(jh, val_reg, size, is_signed);
}
//explicit Gp size cast
// explicit Gp size cast
inline x86::Gp gen_ext(jit_holder& jh, x86::Gp val, unsigned size, bool is_signed) {
auto& cc = jh.cc;
if(is_signed){
switch(val.size()){
case 1: cc.cbw(val); break;
case 2: cc.cwde(val); break;
case 4: cc.cdqe(val); break;
case 8: break;
default: throw std::runtime_error("Invalid register size in gen_ext");
if(is_signed) {
switch(val.size()) {
case 1:
cc.cbw(val);
break;
case 2:
cc.cwde(val);
break;
case 4:
cc.cdqe(val);
break;
case 8:
break;
default:
throw std::runtime_error("Invalid register size in gen_ext");
}
}
switch(size){
case 8: cc.and_(val,std::numeric_limits<uint8_t>::max()); return val.r8();
case 16: cc.and_(val,std::numeric_limits<uint16_t>::max()); return val.r16();
case 32: cc.and_(val,std::numeric_limits<uint32_t>::max()); return val.r32();
case 64: cc.and_(val,std::numeric_limits<uint64_t>::max()); return val.r64();
case 128: return val.r64();
default: throw std::runtime_error("Invalid size in gen_ext");
switch(size) {
case 8:
cc.and_(val, std::numeric_limits<uint8_t>::max());
return val.r8();
case 16:
cc.and_(val, std::numeric_limits<uint16_t>::max());
return val.r16();
case 32:
cc.and_(val, std::numeric_limits<uint32_t>::max());
return val.r32();
case 64:
cc.and_(val, std::numeric_limits<uint64_t>::max());
return val.r64();
case 128:
return val.r64();
default:
throw std::runtime_error("Invalid size in gen_ext");
}
}
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint32_t length){
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint32_t length) {
x86::Compiler& cc = jh.cc;
auto ret_reg = cc.newInt32();
@@ -347,7 +408,7 @@ inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint3
auto space_reg = cc.newInt32();
cc.mov(space_reg, static_cast<uint16_t>(iss::address_type::VIRTUAL));
auto val_ptr = cc.newUIntPtr();
cc.mov(val_ptr, read_mem_buf);
@@ -355,28 +416,29 @@ inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint3
uint64_t mask = 0;
x86::Gp val_reg = cc.newInt64();
switch(length){
case 1:{
switch(length) {
case 1: {
cc.invoke(&invokeNode, &read_mem1, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint8_t>::max();
break;
}
case 2:{
case 2: {
cc.invoke(&invokeNode, &read_mem2, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint16_t>::max();
break;
}
case 4:{
case 4: {
cc.invoke(&invokeNode, &read_mem4, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint32_t>::max();
break;
}
case 8:{
case 8: {
cc.invoke(&invokeNode, &read_mem8, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uintptr_t>());
mask = std::numeric_limits<uint64_t>::max();
break;
}
default: throw std::runtime_error(fmt::format("Invalid length ({}) in gen_read_mem",length));
default:
throw std::runtime_error(fmt::format("Invalid length ({}) in gen_read_mem", length));
}
invokeNode->setRet(0, ret_reg);
@@ -388,42 +450,41 @@ inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, uint3
cc.mov(val_reg, x86::ptr_64(val_ptr));
cc.and_(val_reg, mask);
cc.cmp(ret_reg,0);
cc.cmp(ret_reg, 0);
cc.jne(jh.trap_entry);
return val_reg;
}
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp length){
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp length) {
uint32_t length_val = 0;
auto length_ptr = jh.cc.newIntPtr();
jh.cc.mov(length_ptr, &length_val);
jh.cc.mov(x86::ptr_32(length_ptr),length);
jh.cc.mov(x86::ptr_32(length_ptr), length);
return gen_read_mem(jh, type, addr, length);
}
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp length){
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp length) {
auto addr_reg = jh.cc.newInt64();
jh.cc.mov(addr_reg, addr);
jh.cc.mov(addr_reg, addr);
uint32_t length_val = 0;
auto length_ptr = jh.cc.newIntPtr();
jh.cc.mov(length_ptr, &length_val);
jh.cc.mov(x86::ptr_32(length_ptr),length);
jh.cc.mov(x86::ptr_32(length_ptr), length);
return gen_read_mem(jh, type, addr_reg, length_val);
}
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, uint32_t length){
inline x86::Gp gen_read_mem(jit_holder& jh, mem_type_e type, uint64_t addr, uint32_t length) {
auto addr_reg = jh.cc.newInt64();
jh.cc.mov(addr_reg, addr);
return gen_read_mem(jh, type, addr_reg, length);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, int64_t val){
inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, int64_t val) {
auto val_reg = jh.cc.newInt64();
jh.cc.mov(val_reg, val);
gen_write_mem(jh, type, addr, val_reg);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp val){
inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp val) {
x86::Compiler& cc = jh.cc;
auto mem_type_reg = cc.newInt32();
@@ -433,42 +494,37 @@ inline void gen_write_mem(jit_holder& jh, mem_type_e type, x86::Gp addr, x86::Gp
auto ret_reg = cc.newInt32();
InvokeNode* invokeNode;
if(val.isGpb()){
if(val.isGpb()) {
cc.invoke(&invokeNode, &write_mem1, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint8_t>());
}
else if(val.isGpw()){
} else if(val.isGpw()) {
cc.invoke(&invokeNode, &write_mem2, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint16_t>());
}
else if(val.isGpd()){
} else if(val.isGpd()) {
cc.invoke(&invokeNode, &write_mem4, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint32_t>());
}
else if(val.isGpq()){
} else if(val.isGpq()) {
cc.invoke(&invokeNode, &write_mem8, FuncSignatureT<uint32_t, uint64_t, uint32_t, uint32_t, uint64_t, uint64_t>());
}
else throw std::runtime_error("Invalid register size in gen_write_mem");
} else
throw std::runtime_error("Invalid register size in gen_write_mem");
invokeNode->setRet(0,ret_reg);
invokeNode->setRet(0, ret_reg);
invokeNode->setArg(0, jh.arch_if_ptr);
invokeNode->setArg(1, space_reg);
invokeNode->setArg(2, mem_type_reg);
invokeNode->setArg(3, addr);
invokeNode->setArg(4, val);
cc.cmp(ret_reg,0);
cc.cmp(ret_reg, 0);
cc.jne(jh.trap_entry);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp val){
inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, x86::Gp val) {
auto addr_reg = jh.cc.newInt64();
jh.cc.mov(addr_reg, addr);
gen_write_mem(jh, type, addr_reg, val);
}
inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, int64_t val){
inline void gen_write_mem(jit_holder& jh, mem_type_e type, uint64_t addr, int64_t val) {
auto val_reg = jh.cc.newInt64();
jh.cc.mov(val_reg, val);
auto addr_reg = jh.cc.newInt64();
jh.cc.mov(addr_reg, addr);
gen_write_mem(jh, type, addr_reg, val_reg);
}
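As an illustration of how the reformatted helpers in this file compose, here is a hypothetical sketch (not part of this commit) of translating a simple "rd = rs1 + imm" instruction. The function name gen_addi_sketch, its register-index and immediate parameters, and the x0-is-zero check are invented for this example; the sketch assumes the enclosing translator class with the jit_holder, traits and gen_* definitions shown above.

// Hypothetical sketch, not part of the commit. Relies on the surrounding
// jit_holder, traits and gen_* helpers defined in this file.
void gen_addi_sketch(jit_holder& jh, addr_t pc, unsigned rd, unsigned rs1, int32_t imm) {
    gen_instr_prologue(jh, pc);                         // icount / pending-trap bookkeeping
    x86::Gp rs1_val = load_reg_from_mem(jh, rs1);       // fetch rs1 from the register file
    x86::Gp sum = gen_operation(jh, add, rs1_val, imm); // rs1 + imm via the immediate overload
    if(rd != 0)                                         // register 0 assumed hard-wired to zero
        write_reg_to_mem(jh, gen_ext(jh, sum, 32, true), rd);
    gen_instr_epilogue(jh);                             // trap check and register write-back
}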
