def unlift_noncached(bs, func, desc):
    """Lift a runtime value (on top of the machine stack) into compile-time
    specialization without caching per-value thingies.

    Installs a C-callable trampoline: at runtime the value is popped into
    rdi, the trampoline translates a specialized continuation for it, and
    execution jumps to the returned code address.
    """
    #print desc
    @memoize
    def make_post(flow):
        # Continuation after the specialized section; memoized per flow.
        return compiler.translate("unlift_post " + desc, flow, stack=list(bs.call_stack))
    def make_thingy(flow, data):
        # Specialized code for one concrete runtime value `data`.
        return compiler.translate("unlift_thingy " + desc, flow, this=[
            func(data),
            lambda bs: add_redirection(bs.code, lambda rdi, flow=bs.flow.clone(): get_jmp(make_post(flow))),
            compiler.end,
        ])
    @called_from_asm
    def glue(rdi, flow=bs.flow.clone()):
        # Called from generated code with the runtime value in rdi;
        # returns the address of the freshly translated specialization.
        return make_thingy(flow, rdi)
    code = ctypes.CFUNCTYPE(ctypes.c_int64, ctypes.c_int64)(glue)
    bs.code += isa.pop(registers.rdi)
    bs.code += isa.mov(registers.rax, ctypes.cast(code, ctypes.c_void_p).value)
    # Align the stack to 16 bytes around the C call (SysV ABI), saving rsp in r12.
    bs.code += isa.mov(registers.r12, registers.rsp)
    bs.code += isa.and_(registers.rsp, -16)
    bs.code += isa.call(registers.rax)
    bs.code += isa.mov(registers.rsp, registers.r12)
    # glue() returned the address of the specialized code; jump into it.
    bs.code += isa.jmp(registers.rax)
    bs.this.append(compiler.end)
    # Keep the ctypes callback alive as long as the program exists.
    bs.program.references.append(code)
def _(bs):
    """handler(rsp): call the Python-level handler with the current stack
    pointer, drop the consumed arguments, and push the handler's result."""
    # Pass rsp so the handler can read the stacked arguments directly.
    bs.code += isa.mov(registers.rdi, registers.rsp)
    # Save rsp in r12 and 16-byte-align it for the C calling convention.
    bs.code += isa.mov(registers.r12, registers.rsp)
    bs.code += isa.and_(registers.rsp, -16)
    bs.code += isa.mov(registers.rax, ctypes.cast(handler_cfunc, ctypes.c_void_p).value)
    bs.code += isa.call(registers.rax)
    bs.code += isa.mov(registers.rsp, registers.r12)
    # Pop every argument off both the virtual type stack and the machine stack.
    for arg in arg_types[::-1]:
        assert bs.flow.stack.pop() is arg
        for i in xrange(arg.size):
            bs.code += isa.pop(registers.rbx)
    assert bs.flow.stack.pop() is self
    # The handler's return value (a type id) goes on the machine stack.
    bs.code += isa.push(registers.rax)
    def _(value):
        def _(bs):
            # Push the result stored in self.return_area, typed per `value`.
            type = type_impl.id_to_type[value]
            for i in xrange(type.size):
                bs.code += isa.push(MemRef(ctypes.cast(self.return_area, ctypes.c_void_p).value + i))
            bs.flow.stack.append(type)
            # NOTE(review): asserting size == 0 right after pushing
            # type.size words implies only zero-sized types are expected
            # here -- confirm against callers.
            assert bs.flow.stack[-1].size == 0
        return _
    util.unlift(bs, _, "PythonFunction.call")
def cleanup(self):
    """Do end-of-loop iterator code.

    Advances the loop counter.  The DEC and INC paths in the original
    were duplicated verbatim except for the unit-step instruction
    (``dec`` vs ``inc``); they are folded together here.  CTR mode needs
    no explicit update (the ``loop`` instruction maintains rcx).
    """
    # Update the current count
    if self.mode == DEC:
        unit_step = x86_64.dec
    elif self.mode == INC:
        unit_step = x86_64.inc
    else:
        # CTR (or unrecognized) mode: nothing to emit here.
        return

    if self.step_size() == 1:
        self.code.add(unit_step(self.r_count))
    elif self.r_step is not None:
        if isinstance(self.r_step, memory.MemRef):
            # x86 `add` cannot take two memory operands; stage the step
            # value through the clobber register first.
            self.code.add(x86_64.mov(self.r_clobber, self.r_step))
            self.code.add(x86_64.add(self.r_count, self.r_clobber))
        else:
            self.code.add(x86_64.add(self.r_count, self.r_step))
    else:
        # Immediate step (may be negative in DEC mode).
        self.code.add(x86_64.add(self.r_count, self.step_size()))
    return
def _(bs):
    """handler(rsp): call the Python-level handler with the current stack
    pointer, drop the consumed arguments, and push the handler's result."""
    # Hand the handler the stack pointer so it can inspect the arguments.
    bs.code += isa.mov(registers.rdi, registers.rsp)
    # Save rsp in r12 and align to 16 bytes for the C call (SysV ABI).
    bs.code += isa.mov(registers.r12, registers.rsp)
    bs.code += isa.and_(registers.rsp, -16)
    bs.code += isa.mov(registers.rax, ctypes.cast(handler_cfunc, ctypes.c_void_p).value)
    bs.code += isa.call(registers.rax)
    bs.code += isa.mov(registers.rsp, registers.r12)
    # Drop every argument from both the virtual and the machine stacks.
    for arg in arg_types[::-1]:
        assert bs.flow.stack.pop() is arg
        for i in xrange(arg.size):
            bs.code += isa.pop(registers.rbx)
    assert bs.flow.stack.pop() is self
    bs.code += isa.push(registers.rax)  # handler result (a type id)
    def _(value):
        def _(bs):
            # Push the result words stored in self.return_area, typed per `value`.
            type = type_impl.id_to_type[value]
            for i in xrange(type.size):
                bs.code += isa.push(MemRef(ctypes.cast(self.return_area, ctypes.c_void_p).value + i))
            bs.flow.stack.append(type)
            # NOTE(review): size == 0 assertion after pushing type.size
            # words -- presumably only zero-sized results occur; confirm.
            assert bs.flow.stack[-1].size == 0
        return _
    util.unlift(bs, _, "PythonFunction.call")
def TestCTRImm():
    """Copy 1000 longs through a CTR-mode iterator with an immediate count."""
    src = extarray.extarray('l', 1000)
    dst = extarray.extarray('l', 1000)
    for idx in xrange(1000):
        src[idx] = idx

    prgm = env.Program()
    code = prgm.get_stream()
    a = registers.r8
    b = registers.r9
    # Array base addresses arrive in the first two parameter registers.
    code.add(x86_64.mov(a, registers.rdi))
    code.add(x86_64.mov(b, registers.rsi))

    i_iter = syn_iter(code, 1000, mode=CTR)
    for i_ in i_iter:
        code.add(x86_64.mov(registers.r11, memory.MemRef(a, index=i_, scale=8)))
        code.add(x86_64.mov(memory.MemRef(b, index=i_, scale=8), registers.r11))
    prgm.add(code)

    params = env.ExecParams()
    # CTR counts down from n, so bias the base addresses by one word.
    params.p1 = src.buffer_info()[0] - _ws
    params.p2 = dst.buffer_info()[0] - _ws

    proc = env.Processor()
    proc.execute(prgm, mode='int', params=params)

    for idx in range(len(dst)):
        assert dst[idx] == idx
def TestCTRReg():
    """Copy 1000 longs through a CTR-mode iterator whose count comes from a register."""
    src = extarray.extarray('l', 1000)
    dst = extarray.extarray('l', 1000)
    for idx in xrange(1000):
        src[idx] = idx

    prgm = env.Program()
    code = prgm.get_stream()
    a = registers.r8
    b = registers.r9
    # Array base addresses arrive in the first two parameter registers.
    code.add(x86_64.mov(a, registers.rdi))
    code.add(x86_64.mov(b, registers.rsi))

    # rsi has been copied into b above, so it is free to hold the count.
    n = registers.rsi
    code.add(x86_64.mov(n, 1000))

    i_iter = syn_iter(code, n, mode=CTR)
    for i_ in i_iter:
        code.add(x86_64.mov(registers.r11, memory.MemRef(a, index=i_, scale=8)))
        code.add(x86_64.mov(memory.MemRef(b, index=i_, scale=8), registers.r11))
    prgm.add(code)

    params = env.ExecParams()
    # CTR counts down from n, so bias the base addresses by one word.
    params.p1 = src.buffer_info()[0] - _ws
    params.p2 = dst.buffer_info()[0] - _ws

    proc = env.Processor()
    proc.execute(prgm, mode='int', params=params)

    for idx in range(len(dst)):
        assert dst[idx] == idx
def TestINCRegImm():
    """INC-mode iterator test: explicit count register, immediate stop.

    Copies 1000 longs from A to B and verifies the copy.
    """
    A = extarray.extarray('l', 1000)
    B = extarray.extarray('l', 1000)
    for i in xrange(1000):
        A[i] = i

    # BUG FIX: the original created a bare `env.InstructionStream()` but
    # then executed the undefined name `prgm` (NameError).  Build a
    # Program and add the stream to it, matching the other iterator tests.
    prgm = env.Program()
    code = prgm.get_stream()
    a = registers.r8
    b = registers.r9
    # Array base addresses arrive in the first two parameter registers.
    code.add(x86_64.mov(a, registers.rdi))
    code.add(x86_64.mov(b, registers.rsi))

    i_iter = syn_iter(code, 1000, mode=INC, count_reg=registers.r10)
    for i_ in i_iter:
        code.add(x86_64.mov(registers.r11, memory.MemRef(a, index=i_, scale=8)))
        code.add(x86_64.mov(memory.MemRef(b, index=i_, scale=8), registers.r11))
    prgm.add(code)

    params = env.ExecParams()
    # INC counts up from 0, so the unbiased base addresses are used.
    params.p1 = A.buffer_info()[0]
    params.p2 = B.buffer_info()[0]

    proc = env.Processor()
    proc.execute(prgm, mode='int', params=params)

    for i in range(len(B)):
        assert B[i] == i
def _(bs):
    """Allocate a raw buffer: pop an Int size n, malloc(n + 8), store the
    requested size in the first 8 bytes, and push the buffer as Raw."""
    assert bs.flow.stack.pop() is type_impl.Int
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.r12)              # requested size (bytes)
    bs.code += isa.mov(registers.rdi, registers.r12)
    bs.code += isa.add(registers.rdi, 8)           # room for the size header
    bs.code += isa.mov(registers.rax, util.malloc_addr)
    # NOTE(review): no 16-byte stack alignment around this C call, unlike
    # other call sites in this file -- confirm this is safe here.
    bs.code += isa.call(registers.rax)
    bs.code += isa.push(registers.rax)             # buffer address -> stack
    bs.code += isa.mov(MemRef(registers.rax), registers.r12)  # size header
    bs.flow.stack.append(Raw)
def _(bs):
    """Print the Str on top of the stack plus a newline, then emit the
    function epilogue (restore frame, ret)."""
    assert bs.flow.stack[-1] is type_impl.Str, list(bs.flow.stack)
    bs.flow.stack.pop()
    bs.code += isa.mov(registers.rax, util.print_string_addr)
    bs.code += isa.pop(registers.rdi)              # the string argument
    bs.code += isa.call(registers.rax)
    bs.code += isa.mov(registers.rax, util.print_nl_addr)
    bs.code += isa.call(registers.rax)
    # Epilogue: unwind the frame and return to the caller.
    bs.code += isa.mov(registers.rsp, registers.rbp)
    bs.code += isa.pop(registers.rbp)
    bs.code += isa.ret()
def _(bs):
    """Specialization guard: if the value on top of the machine stack
    equals the constant `data` this code was specialized for, fall
    through to func(data); otherwise generate a new specialization for
    the actual value via add_redirection."""
    good = bs.program.get_unique_label()
    bs.code += isa.mov(registers.rax, data)
    # check if could be combined into cmp
    bs.code += isa.cmp(MemRef(registers.rsp), registers.rax)
    bs.code += isa.je(good)
    # Mismatch: hand the actual value (in rdi) to a fresh thingy.
    bs.code += isa.mov(registers.rdi, MemRef(registers.rsp))
    add_redirection(bs.code, lambda rdi, flow=bs.flow.clone(): get_jmp(make_thingy(flow, rdi)))
    bs.code += good
    bs.code += isa.pop(registers.rax)  # discard the checked value
    bs.this.append(func(data))
def start(self, align=True):
    """Emit loop-initialization code for the current iterator mode and
    place the loop-start label.

    CTR mode loads rcx (used by the `loop` instruction); DEC/INC load
    the count register from the external start value or the defaults.
    `align` is accepted for API compatibility but unused here.

    FIX: comparisons against None now use identity (`is None`) instead
    of equality -- MemRef operands may define `__eq__`, making `== None`
    unreliable.
    """
    if self.mode == CTR:
        if self.external_start:
            self.code.add(x86_64.mov(registers.rcx, self.r_start))
        else:
            self.code.add(x86_64.mov(registers.rcx, self.n))
    elif self.mode == DEC:
        if self.external_start:
            if self.r_start is None:
                raise Exception(
                    'No external start register was specified.')
            if isinstance(self.r_count, memory.MemRef) and isinstance(
                    self.r_start, memory.MemRef):
                # x86 mov cannot take two memory operands; stage the
                # value through the clobber register.
                if self.r_clobber is None:
                    raise Exception(
                        'Must specify clobber_reg if count_reg and start values are both stored in memory.'
                    )
                self.code.add(x86_64.mov(self.r_clobber, self.r_start))
                self.code.add(x86_64.mov(self.r_count, self.r_clobber))
            else:
                self.code.add(x86_64.mov(self.r_count, self.r_start))
        else:
            self.code.add(x86_64.mov(self.r_count, self.n))
    elif self.mode == INC:
        if self.external_stop:
            if self.r_stop is None:
                raise Exception('No external stop register was specified.')
        if self.external_start:
            if isinstance(self.r_count, memory.MemRef) and isinstance(
                    self.r_start, memory.MemRef):
                # NOTE(review): unlike the DEC path, r_clobber is not
                # checked for None here -- confirm callers always pass it.
                self.code.add(x86_64.mov(self.r_clobber, self.r_start))
                self.code.add(x86_64.mov(self.r_count, self.r_clobber))
            else:
                self.code.add(x86_64.mov(self.r_count, self.r_start))
        else:
            self.code.add(x86_64.mov(self.r_count, self.get_start()))
    # /end mode if

    # Label marking the top of the loop body.
    self.start_label = self.code.prgm.get_unique_label("SYN_ITER_START")
    self.code.add(self.start_label)

    # Create continue/branch labels so they can be referenced; they will be
    # added to the code in their appropriate locations.
    self.continue_label = self.code.prgm.get_unique_label(
        "SYN_ITER_CONTINUE")
    return
def _(bs):
    """Call self.func with the stacked arguments per the System V AMD64
    calling convention and push the integer return value.

    Integer-like arguments (Int, Bool, Str, Raw, NoneType, function
    pointers) are marshalled into `int_regs`; Floats go into
    `float_regs` (xmm).  For variadic callees, %al must hold the number
    of vector registers used.
    """
    # Count how many arguments go in integer vs vector registers.
    ints = len([x for x in arg_types if x is type_impl.Int or x is type_impl.Str or x is Raw or x is type_impl.NoneType or isinstance(x, _FuncPtr) or x is type_impl.Bool])
    floats = len([x for x in arg_types if x is type_impl.Float])
    floats_orig = floats
    pos = 0
    for arg_type in reversed(arg_types):
        type = bs.flow.stack.pop()
        assert arg_type is type, (type, arg_type)
        if type is type_impl.Int or type is type_impl.Bool:
            ints -= 1
            bs.code += isa.mov(int_regs[ints], MemRef(registers.rsp, pos))
            pos += 8
        elif isinstance(type, _FuncPtr):
            ints -= 1
            bs.code += isa.mov(int_regs[ints], MemRef(registers.rsp, pos))
            pos += 8
        elif type is type_impl.NoneType:
            # Zero-sized on the machine stack; pass a literal 0.
            ints -= 1
            bs.code += isa.mov(int_regs[ints], 0)
        elif type is Raw:
            ints -= 1
            bs.code += isa.mov(int_regs[ints], MemRef(registers.rsp, pos))
            pos += 8
            bs.code += isa.add(int_regs[ints], 8)  # skip the size header
        elif type is type_impl.Float:
            floats -= 1
            bs.code += isa.movsd(float_regs[floats], MemRef(registers.rsp, pos))
            pos += 8
        elif type is type_impl.Str:
            ints -= 1
            bs.code += isa.mov(int_regs[ints], MemRef(registers.rsp, pos))
            # Bit 0 tags short (inline) vs long (heap) strings.
            bs.code += isa.test(int_regs[ints], 1)
            short = bs.program.get_unique_label()
            end = bs.program.get_unique_label()
            bs.code += isa.jnz(short)
            # long: skip the length header
            bs.code += isa.add(int_regs[ints], 8)
            bs.code += isa.jmp(end)
            bs.code += short
            # short: shift out the tag byte and pass a pointer to the
            # in-place bytes on the stack
            bs.code += isa.shr(MemRef(registers.rsp, pos), 8)
            bs.code += isa.lea(int_regs[ints], MemRef(registers.rsp, pos, data_size=None))
            bs.code += end
            pos += 8
        else:
            assert False, type
    assert bs.flow.stack.pop() is self
    bs.code += isa.mov(registers.rbx, ctypes.cast(self.func, ctypes.c_void_p).value)
    # Save rsp and 16-byte-align it for the C call.
    bs.code += isa.mov(registers.r12, registers.rsp)
    bs.code += isa.and_(registers.rsp, -16)
    # BUG FIX: %al must carry the number of vector registers used by a
    # variadic call.  The marshalling loop decremented `floats` to 0, so
    # the original `mov(rax, floats)` always loaded 0; use the saved
    # original count (`floats_orig`, previously computed but unused).
    bs.code += isa.mov(registers.rax, floats_orig)
    bs.code += isa.call(registers.rbx)
    bs.code += isa.mov(registers.rsp, registers.r12)
    bs.code += isa.add(registers.rsp, pos)  # drop the consumed arguments
    bs.code += isa.push(registers.rax)
    bs.flow.stack.append(type_impl.Int)
def getattr_raw(self, bs):
    """Copy the string on top of the stack into a fresh malloc'd buffer
    (8-byte length header + payload) and push the copy's address as Str."""
    bs.code += isa.pop(registers.r12)                         # source string
    bs.code += isa.mov(registers.rdi, MemRef(registers.r12))  # payload length
    bs.code += isa.add(registers.rdi, 8)                      # + header size
    bs.code += isa.mov(registers.rax, util.malloc_addr)
    # NOTE(review): no 16-byte stack alignment around this C call, unlike
    # other call sites in this file -- confirm this is safe here.
    bs.code += isa.call(registers.rax)
    bs.code += isa.push(registers.rax)                        # copy's address
    # rep movsb expects: rdi = destination, rsi = source, rcx = byte count.
    bs.code += isa.mov(registers.rdi, registers.rax)
    bs.code += isa.mov(registers.rsi, registers.r12)
    bs.code += isa.mov(registers.rcx, MemRef(registers.r12))
    bs.code += isa.add(registers.rcx, 8)
    bs.code += isa.rep()
    bs.code += isa.movsb()
    bs.flow.stack.append(type_impl.Str)
def _(bs):
    """Index one byte out of this buffer and box it as a short (inline)
    tagged string.

    Pops an Int index and self (the buffer address), loads the byte at
    base + index, and builds the tagged short-string form in rax:
    byte << 8 | tag, where the low byte (2 * 1 + 1) marks a length-1
    short string.  Pushes the result and records Str on the flow stack.
    """
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.rcx)        # index
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.rbx)        # base address
    bs.code += isa.add(registers.rbx, registers.rcx)
    # BUG FIX: was `regsisters.rax` -- a NameError at code-generation time.
    bs.code += isa.mov(registers.rax, 0)
    bs.code += isa.mov(registers.ax, MemRef(registers.rbx, data_size=8))
    bs.code += isa.shl(registers.rax, 8)
    bs.code += isa.mov(registers.ax, 2 * 1 + 1)  # short-string tag, length 1
    bs.flow.stack.append(type_impl.Str)
def _(bs):
    """Specialization guard: if the value on top of the machine stack
    matches the constant `data`, fall through to func(data); otherwise
    translate a new specialization for the actual value."""
    good = bs.program.get_unique_label()
    bs.code += isa.mov(registers.rax, data)
    # check if could be combined into cmp
    bs.code += isa.cmp(MemRef(registers.rsp), registers.rax)
    bs.code += isa.je(good)
    # Mismatch: pass the actual value (in rdi) to a fresh thingy.
    bs.code += isa.mov(registers.rdi, MemRef(registers.rsp))
    add_redirection(bs.code, lambda rdi, flow=bs.flow.clone(): get_jmp(
        make_thingy(flow, rdi)))
    bs.code += good
    bs.code += isa.pop(registers.rax)  # discard the checked value
    bs.this.append(func(data))
def get_asm_glue_old(dest_addr):
    """Build a glue stub that calls dest_addr on a 16-byte-aligned stack,
    then jumps back to the start of the (patched) call site.

    Returns the stub's rendered machine code.
    """
    program = BareProgram()
    code = program.get_stream()
    code += isa.mov(registers.rax, fake_int(dest_addr))
    # Preserve r12, then use it to remember the unaligned rsp.
    code += isa.push(registers.r12)
    code += isa.mov(registers.r12, registers.rsp)
    code += isa.and_(registers.rsp, -16)
    code += isa.call(registers.rax)
    code += isa.mov(registers.rsp, registers.r12)
    code += isa.pop(registers.r12)
    # Pop the return address and back it up over the patched call
    # sequence so execution re-enters the rewritten code.
    code += isa.pop(registers.rax)
    code += isa.sub(registers.rax, patch_len)
    code += isa.jmp(registers.rax)
    program.add(code)
    program.cache_code()
    return program.render_code
def add_redirection(caller_code, callback):
    """Emit a self-patching call site into caller_code.

    The emitted call invokes a glue stub; the first time it runs,
    `callback(rdi)` generates replacement code, which is patched over
    the call site in the rendered program so subsequent executions run
    the generated code directly.
    """
    global redirections
    redirections += 1
    @called_from_asm
    def glue(rdi):
        global triggered_redirections
        triggered_redirections += 1
        # Overwrite the original call site bytes with the generated code.
        caller_program.render_code[caller_start.position:caller_end.
                                   position] = callback(rdi)
        # The stub has served its purpose; stop keeping it alive.
        caller_program.references.remove(code)
    code = get_asm_glue(callback_type(glue))
    caller_program = caller_code.prgm
    # Labels bracketing the patchable region.
    caller_start = caller_program.get_unique_label()
    caller_end = caller_program.get_unique_label()
    caller_code += caller_start
    caller_code += isa.mov(registers.rax, fake_int(code.buffer_info()[0]))
    caller_code += isa.call(registers.rax)
    caller_code += caller_end
    # Keep the glue stub alive until it is triggered.
    caller_program.references.append(code)
def _(bs):
    """memmove(self, raw, 8 * count): copy `count` 8-byte words from a
    Raw buffer into this object's buffer, then load None as the result.

    Pops (top-down): Int word count, Raw source address, self
    (destination address)."""
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.r14)        # word count
    assert bs.flow.stack.pop() is Raw
    bs.code += isa.pop(registers.r13)        # source address
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.r12)        # destination address
    bs.code += isa.shl(registers.r14, 3)     # words -> bytes
    bs.code += isa.mov(registers.rdi, registers.r12)
    bs.code += isa.mov(registers.rsi, registers.r13)
    bs.code += isa.mov(registers.rdx, registers.r14)
    bs.code += isa.mov(registers.rax, ctypes.cast(ctypes.memmove, ctypes.c_void_p).value)
    # NOTE(review): no 16-byte stack alignment around this C call, unlike
    # other call sites in this file -- confirm this is safe here.
    bs.code += isa.call(registers.rax)
    bs.this.append(type_impl.NoneType.load())
def _(bs):
    """Load an attribute's current value: push type.size words from the
    attribute's storage container and record its type on the flow stack.
    `content` and `type` are captured from the enclosing scope."""
    #print "loading", attr, self.desc, type, repr(content.container.raw)
    addr = ctypes.cast(content.container, ctypes.c_void_p).value
    if type.size:
        bs.code += isa.mov(registers.rax, addr)
        for i in xrange(type.size):
            bs.code += isa.push(MemRef(registers.rax, 8 * i))
    bs.flow.stack.append(type)
def return_var(var):
    """Move *var* into the general-purpose return register of its code
    stream.

    Raises Exception for register kinds without return support (only GP
    registers are handled; the FP path remains unimplemented).
    """
    if not isinstance(var.reg, type(var.code.gp_return)):
        raise Exception('Return not supported for %s registers' % (str(type(var.reg))))
    var.code.add(x86.mov(var.code.gp_return, var))
    return
def read_top(bs, regs):
    """Read the top entry of the virtual stack into registers.

    Pops one register from `regs` per word of the top entry's type and
    emits a mov from the corresponding stack slot.  Returns the entry's
    type and the list of registers now holding its words.
    """
    top_type = bs.flow.stack[-1]
    loaded = []
    for word in xrange(top_type.size):
        dst = regs.pop()
        bs.code += isa.mov(dst, MemRef(registers.rsp, 8 * word))
        loaded.append(dst)
    return top_type, loaded
def read_top(bs, regs):
    """Load each word of the topmost virtual-stack entry into a register
    taken from `regs`; return (entry type, registers used)."""
    entry = bs.flow.stack[-1]
    used = []
    offset = 0
    for _ in xrange(entry.size):
        target = regs.pop()
        bs.code += isa.mov(target, MemRef(registers.rsp, offset))
        used.append(target)
        offset += 8
    return entry, used
def TestDECMemMem_MemStep():
    """DEC-mode iterator with bound, counter, and step all held in memory
    (stepping down by 4), copying every 4th element."""
    src = extarray.extarray('l', 1000)
    dst = extarray.extarray('l', 1000)
    for idx in xrange(1000):
        src[idx] = idx

    prgm = env.Program()
    code = prgm.get_stream()
    a = registers.r8
    b = registers.r9
    # Array base addresses arrive in the first two parameter registers.
    code.add(x86_64.mov(a, registers.rdi))
    code.add(x86_64.mov(b, registers.rsi))

    # Loop bound and (negative) step live on the stack.
    n = memory.MemRef(registers.rsp, 0)
    code.add(x86_64.mov(n, 1000))
    s = memory.MemRef(registers.rsp, -2 * _ws)
    code.add(x86_64.mov(s, -4))

    i_iter = syn_iter(code, n, mode=DEC, step=s,
                      count_reg=memory.MemRef(registers.rsp, -1 * _ws),
                      clobber_reg=registers.rax)
    j = registers.rsi
    for i_ in i_iter:
        # The count itself is in memory; copy it into a register to index with.
        code.add(x86_64.mov(j, i_))
        code.add(x86_64.mov(registers.r11, memory.MemRef(a, index=j, scale=8)))
        code.add(x86_64.mov(memory.MemRef(b, index=j, scale=8), registers.r11))
    prgm.add(code)

    params = env.ExecParams()
    # DEC counts down from n, so bias the base addresses by one word.
    params.p1 = src.buffer_info()[0] - _ws
    params.p2 = dst.buffer_info()[0] - _ws

    proc = env.Processor()
    proc.execute(prgm, mode='int', params=params)

    for idx in range(len(dst) - 1, 0, -4):
        assert dst[idx] == idx
def start(self, align = True):
    """Emit loop-initialization code for the current iterator mode and
    place the loop-start label.

    CTR mode loads rcx (used by the `loop` instruction); DEC/INC load
    the count register from the external start value or the defaults.
    `align` is accepted for API compatibility but unused here.

    FIX: comparisons against None now use identity (`is None`) instead
    of equality -- MemRef operands may define `__eq__`, making `== None`
    unreliable.
    """
    if self.mode == CTR:
        if self.external_start:
            self.code.add(x86_64.mov(registers.rcx, self.r_start))
        else:
            self.code.add(x86_64.mov(registers.rcx, self.n))
    elif self.mode == DEC:
        if self.external_start:
            if self.r_start is None:
                raise Exception('No external start register was specified.')
            if isinstance(self.r_count, memory.MemRef) and isinstance(self.r_start, memory.MemRef):
                # x86 mov cannot take two memory operands; stage the
                # value through the clobber register.
                if self.r_clobber is None:
                    raise Exception('Must specify clobber_reg if count_reg and start values are both stored in memory.')
                self.code.add(x86_64.mov(self.r_clobber, self.r_start))
                self.code.add(x86_64.mov(self.r_count, self.r_clobber))
            else:
                self.code.add(x86_64.mov(self.r_count, self.r_start))
        else:
            self.code.add(x86_64.mov(self.r_count, self.n))
    elif self.mode == INC:
        if self.external_stop:
            if self.r_stop is None:
                raise Exception('No external stop register was specified.')
        if self.external_start:
            if isinstance(self.r_count, memory.MemRef) and isinstance(self.r_start, memory.MemRef):
                # NOTE(review): unlike the DEC path, r_clobber is not
                # checked for None here -- confirm callers always pass it.
                self.code.add(x86_64.mov(self.r_clobber, self.r_start))
                self.code.add(x86_64.mov(self.r_count, self.r_clobber))
            else:
                self.code.add(x86_64.mov(self.r_count, self.r_start))
        else:
            self.code.add(x86_64.mov(self.r_count, self.get_start()))
    # /end mode if

    # Label marking the top of the loop body.
    self.start_label = self.code.prgm.get_unique_label("SYN_ITER_START")
    self.code.add(self.start_label)

    # Create continue/branch labels so they can be referenced; they will be
    # added to the code in their appropriate locations.
    self.continue_label = self.code.prgm.get_unique_label("SYN_ITER_CONTINUE")
    return
def CpuidAsmInit():
    """Build (once, into module globals) the CorePy machinery that runs
    CPUID: p1 = leaf (eax), p2 = subleaf (ecx), p3 = pointer to a 16-byte
    buffer receiving eax/ebx/ecx/edx; the input leaf is returned."""
    import corepy.arch.x86_64.isa as x86
    import corepy.arch.x86_64.types.registers as reg
    import corepy.arch.x86_64.platform as env
    import corepy.arch.x86_64.lib.memory as mem
    global CpuidCode
    global CpuidProc
    global CpuidParams
    CpuidCode = env.InstructionStream()
    CpuidProc = env.Processor()
    CpuidParams = env.ExecParams()
    CpuidCode.add(x86.mov(reg.rax, mem.MemRef(reg.rbp, 16)))  # parameter 1
    CpuidCode.add(x86.mov(reg.rcx, mem.MemRef(reg.rbp, 24)))  # parameter 2
    CpuidCode.add(x86.mov(reg.rdi, mem.MemRef(reg.rbp, 32)))  # parameter 3
    CpuidCode.add(x86.push(reg.rax))  # save input parameter
    CpuidCode.add(x86.cpuid())
    # Store the four CPUID result registers into the output buffer.
    CpuidCode.add(x86.mov(mem.MemRef(reg.edi, 0, data_size=32), reg.eax))
    CpuidCode.add(x86.mov(mem.MemRef(reg.edi, 4, data_size=32), reg.ebx))
    CpuidCode.add(x86.mov(mem.MemRef(reg.edi, 8, data_size=32), reg.ecx))
    CpuidCode.add(x86.mov(mem.MemRef(reg.edi, 12, data_size=32), reg.edx))
    CpuidCode.add(x86.pop(reg.rax))  # restore input parameter as return value
def replace(self, data):
    """Repoint the tracked `mov rax, imm` instruction at `data`.

    Rewrites the live instruction object in place; if the owning program
    has already been rendered, also patches the rendered machine code
    (after asserting the old bytes still match the recorded value)."""
    # Swap the instruction's internals for a fresh mov of the new value.
    self.inst.__dict__ = isa.mov(registers.rax, fake_int(data)).__dict__
    if self.caller_program.render_code is None:
        # Not rendered yet; the new value will be picked up at render time.
        self.value = data
        return
    # Sanity-check that the rendered bytes are the mov we last wrote.
    assert list(self.caller_program.render_code[self.caller_start.position:self.caller_end.position]) == \
        list(get_mov_rax(self.value))
    self.value = data
    self.caller_program.render_code[self.caller_start.position:self.caller_end.position] = get_mov_rax(self.value)
def __init__(self):
    """Build an instruction stream that reads a PCI configuration dword:
    write the address (parameter 1) to port 0xCF8, then read the data
    from port 0xCFC into eax."""
    import corepy.lib.printer as printer
    import corepy.arch.x86_64.isa as x86
    import corepy.arch.x86_64.types.registers as reg
    import corepy.arch.x86_64.platform as env
    import corepy.arch.x86_64.lib.memory as mem
    self.code = env.InstructionStream()
    self.proc = env.Processor()
    self.params = env.ExecParams()
    # Parameter 1: the PCI config-space address.
    self.code.add(x86.mov(reg.rax, mem.MemRef(reg.rbp, 16)))
    # CONFIG_ADDRESS port.
    self.code.add(x86.mov(reg.dx, 0x0cf8))
    self.code.add(x86.out(reg.dx, reg.eax))
    # CONFIG_DATA port.
    self.code.add(x86.mov(reg.dx, 0x0cfc))
    self.code.add(x86.in_(reg.eax, reg.dx))
def _(bs):
    """Store a typed value into this object's buffer at a word index.

    Pops (top-down): the value (type.size words, at most 4), an Int
    index, and self (the base address).  Writes the type id at
    base + 8*index followed by the value words, and pushes the number
    of words written (1 + type.size) as an Int."""
    type = bs.flow.stack.pop()
    # Pop the value's words into scratch registers (max 4 words supported).
    for i, reg in zip(xrange(type.size), [registers.r11, registers.r12, registers.r13, registers.r14]):
        bs.code += isa.pop(reg)
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.rbx)        # word index
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.rax)        # base address
    bs.code += isa.shl(registers.rbx, 3)     # index -> byte offset
    bs.code += isa.add(registers.rax, registers.rbx)
    bs.code += isa.mov(MemRef(registers.rax), type.id)   # tag slot with type id
    for i, reg in zip(xrange(type.size), [registers.r11, registers.r12, registers.r13, registers.r14]):
        bs.code += isa.mov(MemRef(registers.rax, i * 8 + 8), reg)
    bs.code += isa.push(1 + type.size)       # words consumed
    bs.flow.stack.append(type_impl.Int)
def _(bs):
    """memmove(self, raw, 8 * count): copy `count` 8-byte words from a
    Raw buffer into this object's buffer, then load None as the result.

    Pops (top-down): Int word count, Raw source address, self
    (destination address)."""
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.r14)        # word count
    assert bs.flow.stack.pop() is Raw
    bs.code += isa.pop(registers.r13)        # source address
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.r12)        # destination address
    bs.code += isa.shl(registers.r14, 3)     # words -> bytes
    bs.code += isa.mov(registers.rdi, registers.r12)
    bs.code += isa.mov(registers.rsi, registers.r13)
    bs.code += isa.mov(registers.rdx, registers.r14)
    bs.code += isa.mov(
        registers.rax, ctypes.cast(ctypes.memmove, ctypes.c_void_p).value)
    # NOTE(review): no 16-byte stack alignment around this C call, unlike
    # other call sites in this file -- confirm this is safe here.
    bs.code += isa.call(registers.rax)
    bs.this.append(type_impl.NoneType.load())
def __init__(self, caller_code, initial):
    """Emit a patchable `mov rax, initial` into caller_code and remember
    the labels bracketing it so replace() can rewrite the rendered bytes
    later."""
    self.value = initial
    self.caller_program = caller_code.prgm
    # Labels bracketing the patchable instruction in the rendered code.
    self.caller_start = caller_code.prgm.get_unique_label()
    self.caller_end = caller_code.prgm.get_unique_label()
    caller_code += self.caller_start
    # Keep a handle on the instruction so replace() can mutate it in place.
    self.inst = isa.mov(registers.rax, fake_int(self.value))
    caller_code += self.inst
    caller_code += self.caller_end
def caller():
    """Build, cache, and return a tiny program that calls into the root
    code object produced by make_root()."""
    prog = util.Program()
    stream = prog.get_stream()
    stream += isa.mov(registers.rax, make_root())
    stream += isa.call(registers.rax)
    prog.add(stream)
    prog.cache_code()
    util.debug(prog, "caller")
    return prog
def make_root():
    """Translate the program's root: set up a stack frame, install the
    uncaught-exception handler, run the module body, and return."""
    return compiler.translate("make_root", compiler.Flow(), this=[
        # Prologue: establish the frame.
        lambda bs: bs.code.add(isa.push(registers.rbp)),
        lambda bs: bs.code.add(isa.mov(registers.rbp, registers.rsp)),
        lambda bs: bs.flow.try_stack.append(uncaught_exception),
        #main_module.load(),
        #ast.Call(
        #    func=lambda bs: None,
        #    args=[],
        #    keywords=[],
        #    starargs=None,
        #    kwargs=None,
        #    ),
        tree.body,
        # Epilogue: unwind the frame and return.
        lambda bs: bs.code.add(isa.mov(registers.rsp, registers.rbp)),
        lambda bs: bs.code.add(isa.pop(registers.rbp)),
        lambda bs: bs.code.add(isa.ret()),
        compiler.end,
    ])
def replace(self, data):
    """Repoint the tracked `mov rax, imm` instruction at `data`.

    Rewrites the live instruction object in place; if the owning program
    has already been rendered, also patches the rendered machine code
    (after asserting the old bytes still match the recorded value)."""
    # Swap the instruction's internals for a fresh mov of the new value.
    self.inst.__dict__ = isa.mov(registers.rax, fake_int(data)).__dict__
    if self.caller_program.render_code is None:
        # Not rendered yet; the new value will be picked up at render time.
        self.value = data
        return
    # Sanity-check that the rendered bytes are the mov we last wrote.
    assert list(self.caller_program.render_code[self.caller_start.position:self.caller_end.position]) == \
        list(get_mov_rax(self.value))
    self.value = data
    self.caller_program.render_code[self.caller_start.position:self.
                                    caller_end.position] = get_mov_rax(
                                        self.value)
def dup_lower(bs, level):
    """Duplicate the virtual-stack entry `level` positions below the top
    onto the top of the stack (level 0 duplicates the top itself)."""
    entry_type = bs.flow.stack[-1 - level]
    # Total machine-stack words from the duplicated entry up to the top.
    words_above = sum(item.size for item in bs.flow.stack[-1 - level:])
    if entry_type.size:
        # rax holds the byte offset of the entry's last word; pushing
        # grows the stack, so the same rax-relative slot tracks each word.
        bs.code += isa.mov(registers.rax, 8 * words_above - 8)
        for _ in xrange(entry_type.size):
            bs.code += isa.push(MemRef(registers.rsp, 0, registers.rax))
    bs.flow.stack.append(entry_type)
def dup_lower(bs, level):
    """Push a copy of the stack entry `level` slots below the top
    (0 = duplicate the top entry)."""
    tail = bs.flow.stack[-1 - level:]
    dup_type = tail[0]
    word_count = 0
    for item in tail:
        word_count += item.size
    if dup_type.size:
        # Offset (in bytes) of the entry's last word relative to rsp;
        # each push keeps the rax-indexed slot pointing at the next word.
        bs.code += isa.mov(registers.rax, 8 * word_count - 8)
        for _ in xrange(dup_type.size):
            bs.code += isa.push(MemRef(registers.rsp, 0, registers.rax))
    bs.flow.stack.append(dup_type)
def _(bs):
    """Store the value on top of the stack into attribute `attr`:
    update the attribute's recorded type id (via the runtime type_setter
    when it changed) and copy the value words into its container."""
    content = self.contents[attr]
    type, a = bs.flow.stack.pop2()
    #type = bs.flow.stack.pop()
    # we could use a watcher to modify the generated code to not require a memory access.
    skip = bs.program.get_unique_label()
    # Compare the stored type id against the new value's type id.
    bs.code += isa.mov(registers.rax, ctypes.cast(content.id_container, ctypes.c_void_p).value)
    bs.code += isa.cmp(MemRef(registers.rax), type.id)
    bs.code += isa.je(skip)
    # Type changed: call the runtime type_setter(type.id) on an aligned stack.
    bs.code += isa.mov(registers.rax, ctypes.cast(content.type_setter, ctypes.c_void_p).value)
    bs.code += isa.mov(registers.rdi, type.id)
    bs.code += isa.mov(registers.r12, registers.rsp)
    bs.code += isa.and_(registers.rsp, -16)
    bs.code += isa.call(registers.rax)
    bs.code += isa.mov(registers.rsp, registers.r12)
    bs.code += skip
    #if type is not content.value.value:
    content.value.set(type)
    # i'm pretty sure we can do this - this code will immediately be executed after and this fits in with code generation
    # BUT things that use it before it have to be altered properly
    # right now they will just raise an error - render_code hasn't been defined
    # it should ignore this and wait for the asm code above to modify it.
    #print "storing", attr, self.desc, type, a, repr(content.container.raw)
    if type.size:
        # Pop the value's words into the attribute's storage container.
        bs.code += isa.mov(registers.rax, ctypes.cast(content.container, ctypes.c_void_p).value)
        for i in reversed(xrange(type.size)):
            bs.code += isa.pop(MemRef(registers.rax, 8 * i))
def make_root():
    """Translate the program's root: set up a stack frame, install the
    uncaught-exception handler, run the module body, and return."""
    return compiler.translate(
        "make_root", compiler.Flow(), this=[
            # Prologue: establish the frame.
            lambda bs: bs.code.add(isa.push(registers.rbp)),
            lambda bs: bs.code.add(isa.mov(registers.rbp, registers.rsp)),
            lambda bs: bs.flow.try_stack.append(uncaught_exception),
            #main_module.load(),
            #ast.Call(
            #    func=lambda bs: None,
            #    args=[],
            #    keywords=[],
            #    starargs=None,
            #    kwargs=None,
            #    ),
            tree.body,
            # Epilogue: unwind the frame and return.
            lambda bs: bs.code.add(isa.mov(registers.rsp, registers.rbp)),
            lambda bs: bs.code.add(isa.pop(registers.rbp)),
            lambda bs: bs.code.add(isa.ret()),
            compiler.end,
        ])
def _(bs):
    """Load an Int from this buffer: pop an Int byte offset and self
    (the base address), read the 8-byte value stored at
    base + offset + 8, and push it as an Int."""
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.rcx)        # byte offset
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.rbx)        # base address
    bs.code += isa.add(registers.rbx, registers.rcx)
    bs.code += isa.add(registers.rbx, 8)     # skip the 8-byte header slot
    bs.code += isa.mov(registers.rax, MemRef(registers.rbx))
    bs.code += isa.push(registers.rax)
    bs.flow.stack.append(type_impl.Int)
def _(bs):
    """Store a typed value into this object's buffer at a word index.

    Pops (top-down): the value (type.size words, at most 4), an Int
    index, and self (the base address).  Writes the type id at
    base + 8*index followed by the value words, and pushes the number
    of words written (1 + type.size) as an Int."""
    type = bs.flow.stack.pop()
    # Pop the value's words into scratch registers (max 4 words supported).
    for i, reg in zip(
            xrange(type.size),
            [registers.r11, registers.r12, registers.r13, registers.r14]):
        bs.code += isa.pop(reg)
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.rbx)        # word index
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.rax)        # base address
    bs.code += isa.shl(registers.rbx, 3)     # index -> byte offset
    bs.code += isa.add(registers.rax, registers.rbx)
    bs.code += isa.mov(MemRef(registers.rax), type.id)   # tag slot with type id
    for i, reg in zip(
            xrange(type.size),
            [registers.r11, registers.r12, registers.r13, registers.r14]):
        bs.code += isa.mov(MemRef(registers.rax, i * 8 + 8), reg)
    bs.code += isa.push(1 + type.size)       # words consumed
    bs.flow.stack.append(type_impl.Int)
def unlift(bs, func, desc):
    """Lift the runtime value on top of the machine stack into
    compile-time specialization.

    A redirection is installed at the call site: the first execution
    translates a "thingy" specialized for the actual value; each thingy
    guards on its value and chains to further specializations on
    mismatch, then continues into the memoized post-continuation.
    """
    #print desc
    #print func
    #flows = []
    @memoize
    def make_post(flow):
        # Continuation after the specialized section; memoized per flow.
        #print flows
        #if flows:
        #    print flow, flows[-1]
        #    print flow == flows[-1]
        #    print flow.__dict__ == flows[-1].__dict__
        #flows.append(flow)
        return compiler.translate("unlift_post " + desc, flow, stack=list(bs.call_stack))
    def make_thingy(flow, data):
        # Translate code specialized for one concrete runtime value `data`.
        #print "thingy", id(flows), desc, data
        def _(bs):
            good = bs.program.get_unique_label()
            bs.code += isa.mov(registers.rax, data)
            # check if could be combined into cmp
            bs.code += isa.cmp(MemRef(registers.rsp), registers.rax)
            bs.code += isa.je(good)
            # Mismatch: chain to a specialization for the actual value.
            bs.code += isa.mov(registers.rdi, MemRef(registers.rsp))
            add_redirection(bs.code, lambda rdi, flow=bs.flow.clone(): get_jmp(
                make_thingy(flow, rdi)))
            bs.code += good
            bs.code += isa.pop(registers.rax)  # discard the checked value
            bs.this.append(func(data))
        return compiler.translate(
            "unlift_thingy " + desc, flow, this=[
                _,
                lambda bs: add_redirection(bs.code, lambda rdi, flow=bs.flow.clone(): get_jmp(make_post(flow))),
                compiler.end,
            ])
    # At the call site: pass the runtime value (in rdi) to the first thingy.
    bs.code += isa.mov(registers.rdi, MemRef(registers.rsp))
    add_redirection(
        bs.code,
        lambda rdi, flow=bs.flow.clone(): get_jmp(make_thingy(flow, rdi)))
    bs.this.append(compiler.end)
def _(bs):
    """Store an Int into this buffer at a word index, then load None.

    Pops (top-down): the Int value, an Int index, and self (the base
    address); writes the value at base + 8*index."""
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.rcx)        # value
    assert bs.flow.stack.pop() is type_impl.Int
    bs.code += isa.pop(registers.rbx)        # word index
    assert bs.flow.stack.pop() is self
    bs.code += isa.pop(registers.rax)        # base address
    bs.code += isa.shl(registers.rbx, 3)     # index -> byte offset
    bs.code += isa.add(registers.rax, registers.rbx)
    bs.code += isa.mov(MemRef(registers.rax), registers.rcx)
    type_impl.NoneType.load()(bs)
def TestInt(): code = InstructionStream() proc = Processor() params = x86_64_exec.ExecParams() params.p1 = 32 code.add(x86.mov(eax, spe.MemRef(ebp, 8))) #code.add(x86.xor(code.eax, code.eax)) code.add(x86.add(eax, 1200)) code.print_code(pro = False, epi = False, binary = True) r = proc.execute(code, debug = True, params = params) print 'int result:', r assert(r == 1232) return
def unlift_noncached(bs, func, desc):
    """Lift a runtime value (on top of the machine stack) into compile-time
    specialization without caching per-value thingies.

    Installs a C-callable trampoline: at runtime the value is popped into
    rdi, the trampoline translates a specialized continuation for it, and
    execution jumps to the returned code address.
    """
    #print desc
    @memoize
    def make_post(flow):
        # Continuation after the specialized section; memoized per flow.
        return compiler.translate("unlift_post " + desc, flow, stack=list(bs.call_stack))
    def make_thingy(flow, data):
        # Specialized code for one concrete runtime value `data`.
        return compiler.translate(
            "unlift_thingy " + desc, flow, this=[
                func(data),
                lambda bs: add_redirection(bs.code, lambda rdi, flow=bs.flow.clone(): get_jmp(make_post(flow))),
                compiler.end,
            ])
    @called_from_asm
    def glue(rdi, flow=bs.flow.clone()):
        # Called from generated code with the runtime value in rdi;
        # returns the address of the freshly translated specialization.
        return make_thingy(flow, rdi)
    code = ctypes.CFUNCTYPE(ctypes.c_int64, ctypes.c_int64)(glue)
    bs.code += isa.pop(registers.rdi)
    bs.code += isa.mov(registers.rax, ctypes.cast(code, ctypes.c_void_p).value)
    # Align the stack to 16 bytes around the C call (SysV ABI), saving rsp in r12.
    bs.code += isa.mov(registers.r12, registers.rsp)
    bs.code += isa.and_(registers.rsp, -16)
    bs.code += isa.call(registers.rax)
    bs.code += isa.mov(registers.rsp, registers.r12)
    # glue() returned the address of the specialized code; jump into it.
    bs.code += isa.jmp(registers.rax)
    bs.this.append(compiler.end)
    # Keep the ctypes callback alive as long as the program exists.
    bs.program.references.append(code)
def _synthesize_prologue(self):
    """
    Create the prologue.

    Handles the ABI's register-preservation requirements: snapshot rsp
    into rax, establish the frame, and push the callee-save registers.
    The stack is expected to remain 16-byte aligned throughout.
    """
    self._prologue = [
        self.lbl_prologue,
        x86.mov(rax, rsp, ignore_active=True),
        x86.push(rbp, ignore_active=True),
        x86.mov(rbp, rsp, ignore_active=True),
        x86.push(r15, ignore_active=True),
        x86.push(r14, ignore_active=True),
        x86.push(r13, ignore_active=True),
        x86.push(r12, ignore_active=True),
        x86.push(rbx, ignore_active=True),
    ]
    return
def _synthesize_prologue(self):
    """
    Create the prologue.

    Handles the ABI's register-preservation requirements: establish the
    frame, then push each callee-save register.  The stack is expected
    to remain 16-byte aligned throughout.
    """
    self._prologue = [self.lbl_prologue]
    self._prologue.append(x86.push(rbp, ignore_active=True))
    self._prologue.append(x86.mov(rbp, rsp, ignore_active=True))
    # Callee-save registers per the ABI.
    for callee_save in (r15, r14, r13, r12, rbx):
        self._prologue.append(x86.push(callee_save, ignore_active=True))
    return
def end(self):
    """Do post-loop iterator code: emit the backward branch for the
    current mode (CTR uses `loop`; DEC branches while the count is
    positive; INC compares the count against the stop value)."""
    if self.mode == CTR:
        self.code.add(x86_64.loop(self.start_label))
    elif self.mode == DEC:
        # branch if r_count is not zero (CR)
        # Note that this relies on someone (e.g. cleanup()) setting the
        # condition register properly.
        if self.step_size() == 1:
            self.code.add(x86_64.jnz(self.start_label))
        else:
            self.code.add(x86_64.cmp(self.r_count, 0))
            self.code.add(x86_64.jg(self.start_label))
    elif self.mode == INC:
        if self.external_stop:
            if isinstance(self.r_count, memory.MemRef) and isinstance(self.r_stop, memory.MemRef):
                # x86 cmp cannot take two memory operands; a clobber
                # register is required to stage one of them.
                if self.r_clobber == None:
                    raise Exception('Must specify clobber_reg if count and stop values are both stored in memory.')
                #self.code.add(x86_64.push(registers.rax))
                #if self.r_count.base != registers.rsp:
                #    self.code.add(x86_64.mov(registers.rax, self.r_count))
                #else:
                #    oldm = self.r_count
                #    m = memory.MemRef(oldm.base, oldm.disp+8, oldm.index, oldm.scale, oldm.data_size)
                #    self.code.add(x86_64.mov(registers.rax, m))
                #if self.r_stop.base != registers.rsp:
                #    self.code.add(x86_64.cmp(registers.rax, self.r_stop))
                #else:
                #    oldm = self.r_stop
                #    m = memory.MemRef(oldm.base, oldm.disp+8, oldm.index, oldm.scale, oldm.data_size)
                #    self.code.add(x86_64.cmp(registers.rax, m))
                #self.code.add(x86_64.pop(registers.rax))
                else:
                    self.code.add(x86_64.mov(self.r_clobber, self.r_count))
                    self.code.add(x86_64.cmp(self.r_clobber, self.r_stop))
            else:
                self.code.add(x86_64.cmp(self.r_count, self.r_stop))
        else:
            self.code.add(x86_64.cmp(self.r_count, self.n))
        # Loop while count < stop.
        self.code.add(x86_64.jnge(self.start_label))
    return