def emit_operation(self, op):
    """Emit *op*, postponing it when a following guard may want to merge.

    Comparisons, overflow-checking ops and call-may-force ops are held
    back in ``self.postponed_op`` (after flushing any previously held
    op) so the optimizer can pair them with the guard that follows;
    everything else is forwarded to the base class immediately.
    """
    self.emitting_operation(op)
    self.emit_postponed_op()
    opnum = op.opnum
    should_postpone = (rop.is_comparison(opnum)
                       or rop.is_call_may_force(opnum)
                       or rop.is_ovf(opnum))
    if should_postpone:
        self.postponed_op = op
    else:
        Optimization.emit_operation(self, op)
def emit(self, op):
    """Emit *op*, postponing it when a following guard may want to merge.

    Same policy as ``emit_operation``: comparisons, overflow-checking
    ops and call-may-force ops are stashed in ``self.postponed_op``
    (after flushing any previously stashed op); other operations are
    delegated to the base class, whose result is returned.
    """
    self.emitting_operation(op)
    self.emit_postponed_op()
    opnum = op.opnum
    should_postpone = (rop.is_comparison(opnum)
                       or rop.is_call_may_force(opnum)
                       or rop.is_ovf(opnum))
    if not should_postpone:
        return Optimization.emit(self, op)
    self.postponed_op = op
def fake_allocate(self, loop):
    """Run a simplified register-allocation pass over *loop*.

    For every operation, pick fake locations for its arguments/result
    and record an ``(opname, ...)`` tuple on ``self.assembler.emitted``,
    which is returned at the end.  Each operation class (comparison,
    int_*, guard, call, label, jump, other) has its own allocation rule.
    """
    from rpython.jit.backend.x86.jump import remap_frame_layout

    def emit(*args):
        # Record the "assembled" operation instead of generating code.
        self.assembler.emitted.append(args)

    for i, op in enumerate(loop.operations):
        # Keep the register manager in sync with the current position.
        self.rm.position = i
        opnum = op.getopnum()
        opname = op.getopname()
        if rop.is_comparison(opnum):
            # Comparisons produce their result in the condition codes.
            locs = [self.loc(x) for x in op.getarglist()]
            loc = self.force_allocate_reg_or_cc(op)
            emit(opname, loc, locs)
        elif opname.startswith("int_"):
            # Two-address style: result must reuse the first argument's
            # register, so only the remaining arg locations are emitted.
            locs = [self.loc(x) for x in op.getarglist()]
            loc = self.rm.force_result_in_reg(
                op, op.getarg(0), op.getarglist())
            emit(opname, loc, locs[1:])
        elif op.is_guard():
            fail_locs = [self.loc(x) for x in op.getfailargs()]
            emit(opname, self.loc(op.getarg(0)), fail_locs)
        elif rop.is_call(opnum):
            # calling convention!
            # Skip arg 0 (the call target); spill caller-saved regs,
            # then shuffle arguments into the fixed argument registers.
            src_locs = [self.loc(x) for x in op.getarglist()[1:]]
            self.rm.before_call()
            loc = self.rm.after_call(op)
            dst_locs = [r1, r2, r3][:len(src_locs)]
            remap_frame_layout(self.assembler, src_locs, dst_locs, r8)
            emit(opname, loc, dst_locs)
        elif opname == "label":
            # Remember the label's arg locations so the closing jump
            # can target them, and hint the allocator accordingly.
            descr = op.getdescr()
            locs = [self.loc(x) for x in op.getarglist()]
            emit(opname, locs)
            descr._fake_arglocs = locs
            lastop = loop.operations[-1]
            if lastop.getopname() == "jump" and lastop.getdescr() is descr:
                # now we know the places, add hints
                for i, r in enumerate(locs):
                    if isinstance(r, FakeReg):
                        self.longevity.fixed_register(
                            len(loop.operations) - 1, r, lastop.getarg(i))
        elif opname == "jump":
            # Move live values into the locations the target label expects.
            src_locs = [self.loc(x) for x in op.getarglist()]
            dst_locs = op.getdescr()._fake_arglocs
            remap_frame_layout(self.assembler, src_locs, dst_locs, r8)
            emit("jump", dst_locs)
        else:
            locs = [self.loc(x) for x in op.getarglist()]
            if op.type != "v":
                # Non-void result: it needs a register of its own.
                loc = self.rm.force_allocate_reg(op)
                emit(opname, loc, locs)
            else:
                emit(opname, locs)
        # Free variables whose last use is this operation.
        # NOTE(review): indentation reconstructed — this is assumed to
        # run once per operation, inside the loop; confirm vs. upstream.
        self.possibly_free_vars_for_op(op)
    return self.assembler.emitted
def could_merge_with_next_guard(self, op, i, operations):
    # return True in cases where the operation and the following guard
    # should likely remain together.  Simplified version of
    # can_merge_with_next_guard() in llsupport/regalloc.py.
    if not rop.is_comparison(op.opnum):
        # int_xxx_ovf() / guard_no_overflow()
        return rop.is_ovf(op.opnum)
    next_index = i + 1
    if next_index >= len(operations):
        return False
    next_op = operations[next_index]
    guard_opnum = next_op.getopnum()
    mergeable = guard_opnum in (rop.GUARD_TRUE,
                                rop.GUARD_FALSE,
                                rop.COND_CALL)
    if not mergeable or next_op.getarg(0) is not op:
        return False
    self.remove_tested_failarg(next_op)
    return True
def is_comparison_or_ovf_op(opnum):
    """Return whether *opnum* is a comparison or an overflow-checking op."""
    if rop.is_comparison(opnum):
        return True
    return rop.is_ovf(opnum)