def produce_potential_short_preamble_ops(self, sb):
    """Feed operations that may be hoisted into the short preamble to `sb`.

    Collects (a) always-pure ops, (b) overflow-checking ops that are
    immediately followed by GUARD_NO_OVERFLOW (the pair acts like a pure
    op), and (c) recorded call_pure positions that cannot raise.
    """
    emitted = self.optimizer._newoperations
    for pos, candidate in enumerate(emitted):
        opnum = candidate.opnum
        if rop.is_always_pure(opnum):
            sb.add_pure_op(candidate)
        if rop.is_ovf(opnum):
            # the op is effectively pure only when its overflow guard
            # directly follows it
            if emitted[pos + 1].getopnum() == rop.GUARD_NO_OVERFLOW:
                sb.add_pure_op(candidate)
    for pos in self.call_pure_positions:
        call_op = emitted[pos]
        # don't move call_pure_with_exception in the short preamble...
        # issue #2015
        # Also, don't move cond_call_value in the short preamble.
        # The issue there is that it's usually pointless to try to
        # because the 'value' argument is typically not a loop
        # invariant, and would really need to be in order to end up
        # in the short preamble.  Maybe the code works anyway in the
        # other rare case, but better safe than sorry and don't try.
        info = call_op.getdescr().get_extra_info()
        if not info.check_can_raise(ignore_memoryerror=True):
            assert rop.is_call(call_op.opnum)
            if not OpHelpers.is_cond_call_value(call_op.opnum):
                sb.add_pure_op(call_op)
def clear_caches(self, opnum, descr, argboxes):
    """Invalidate the heap caches as required after operation `opnum`.

    Precisely-tracked single-location writes, side-effect-free ops,
    overflow-checked ops and guards need no global invalidation.  Calls
    are dispatched on their effectinfo: elidable/loop-invariant calls
    clear nothing, ll_arraycopy/ll_arraymove get dedicated handlers, and
    other analyzable calls only invalidate unescaped entries.  Anything
    else falls through to a full reset.

    Fix: the original condition listed `rop.SETFIELD_RAW` and
    `rop.SETARRAYITEM_RAW` twice; the duplicates are removed (no
    behavior change).
    """
    # Writes to a single known location are handled precisely by the
    # dedicated cache-update code, so nothing to clear here.
    if (opnum == rop.SETFIELD_GC or
            opnum == rop.SETARRAYITEM_GC or
            opnum == rop.SETFIELD_RAW or
            opnum == rop.SETARRAYITEM_RAW or
            opnum == rop.SETINTERIORFIELD_GC or
            opnum == rop.COPYSTRCONTENT or
            opnum == rop.COPYUNICODECONTENT or
            opnum == rop.STRSETITEM or
            opnum == rop.UNICODESETITEM or
            opnum == rop.SETINTERIORFIELD_RAW or
            opnum == rop.RECORD_EXACT_CLASS or
            opnum == rop.RAW_STORE or
            opnum == rop.ASSERT_NOT_NONE):
        return
    # Side-effect-free ops, overflow-checking ops and guards cannot
    # invalidate the caches either.
    if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or
            rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or
            rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST):
        return
    self.need_guard_not_invalidated = True  # can do better, but good start
    if (OpHelpers.is_plain_call(opnum) or
            OpHelpers.is_call_loopinvariant(opnum) or
            OpHelpers.is_cond_call_value(opnum) or
            opnum == rop.COND_CALL):
        effectinfo = descr.get_extra_info()
        ef = effectinfo.extraeffect
        if (ef == effectinfo.EF_LOOPINVARIANT or
                ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or
                ef == effectinfo.EF_ELIDABLE_OR_MEMORYERROR or
                ef == effectinfo.EF_ELIDABLE_CAN_RAISE):
            return
        # A special case for ll_arraycopy, because it is so common, and its
        # effects are so well defined.
        elif effectinfo.oopspecindex == effectinfo.OS_ARRAYCOPY:
            self._clear_caches_arraycopy(opnum, descr, argboxes, effectinfo)
            return
        elif effectinfo.oopspecindex == effectinfo.OS_ARRAYMOVE:
            self._clear_caches_arraymove(opnum, descr, argboxes, effectinfo)
            return
        else:
            # Only invalidate things that are escaped
            # XXX can do better, only do it for the descrs in the effectinfo
            # (loop variables renamed so they no longer shadow the `descr`
            # parameter)
            for cache_descr, cache in self.heap_cache.iteritems():
                cache.invalidate_unescaped()
            for cache_descr, indices in self.heap_array_cache.iteritems():
                for cache in indices.itervalues():
                    cache.invalidate_unescaped()
            return
    # XXX not completely sure, but I *think* it is needed to reset() the
    # state at least in the 'CALL_*' operations that release the GIL.  We
    # tried to do only the kind of resetting done by the two loops just
    # above, but hit an assertion in "pypy test_multiprocessing.py".
    self.reset_keep_likely_virtuals()
def clear_caches(self, opnum, descr, argboxes):
    """Invalidate the heap caches as required after operation `opnum`.

    Precisely-tracked single-location writes, side-effect-free ops,
    overflow-checked ops and guards need no global invalidation.  Calls
    are dispatched on their effectinfo: elidable/loop-invariant calls
    clear nothing, ll_arraycopy gets a dedicated handler, and other
    analyzable calls only invalidate unescaped entries.  Anything else
    falls through to a full reset.

    Fix: the original condition listed `rop.SETFIELD_RAW` and
    `rop.SETARRAYITEM_RAW` twice; the duplicates are removed (no
    behavior change).
    """
    # Writes to a single known location are handled precisely by the
    # dedicated cache-update code, so nothing to clear here.
    if (opnum == rop.SETFIELD_GC or
            opnum == rop.SETARRAYITEM_GC or
            opnum == rop.SETFIELD_RAW or
            opnum == rop.SETARRAYITEM_RAW or
            opnum == rop.SETINTERIORFIELD_GC or
            opnum == rop.COPYSTRCONTENT or
            opnum == rop.COPYUNICODECONTENT or
            opnum == rop.STRSETITEM or
            opnum == rop.UNICODESETITEM or
            opnum == rop.SETINTERIORFIELD_RAW or
            opnum == rop.RAW_STORE):
        return
    # Side-effect-free ops, overflow-checking ops and guards cannot
    # invalidate the caches either.
    if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or
            rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or
            rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST):
        return
    if (OpHelpers.is_plain_call(opnum) or
            OpHelpers.is_call_loopinvariant(opnum) or
            OpHelpers.is_cond_call_value(opnum) or
            opnum == rop.COND_CALL):
        effectinfo = descr.get_extra_info()
        ef = effectinfo.extraeffect
        if (ef == effectinfo.EF_LOOPINVARIANT or
                ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or
                ef == effectinfo.EF_ELIDABLE_OR_MEMORYERROR or
                ef == effectinfo.EF_ELIDABLE_CAN_RAISE):
            return
        # A special case for ll_arraycopy, because it is so common, and its
        # effects are so well defined.
        elif effectinfo.oopspecindex == effectinfo.OS_ARRAYCOPY:
            self._clear_caches_arraycopy(opnum, descr, argboxes, effectinfo)
            return
        else:
            # Only invalidate things that are escaped
            # XXX can do better, only do it for the descrs in the effectinfo
            # (loop variables renamed so they no longer shadow the `descr`
            # parameter)
            for cache_descr, cache in self.heap_cache.iteritems():
                cache.invalidate_unescaped()
            for cache_descr, indices in self.heap_array_cache.iteritems():
                for cache in indices.itervalues():
                    cache.invalidate_unescaped()
            return
    # XXX not completely sure, but I *think* it is needed to reset() the
    # state at least in the 'CALL_*' operations that release the GIL.  We
    # tried to do only the kind of resetting done by the two loops just
    # above, but hit an assertion in "pypy test_multiprocessing.py".
    self.reset_keep_likely_virtuals()
def optimize_call_pure_old(self, op, old_op, start_index):
    """Try to replace `op` with a previously-seen identical pure call.

    Returns True (and marks the op REMOVED) when `old_op` has the same
    descr and the same arguments; False otherwise.
    """
    if op.getdescr() is not old_op.getdescr():
        return False
    # this will match a call_pure and a cond_call_value with
    # the same function and arguments; cond_call_value carries an extra
    # leading 'value' argument, skipped via the boolean-as-int offset
    skip = OpHelpers.is_cond_call_value(old_op.opnum)
    if not self._same_args(old_op, op, skip, start_index):
        return False
    # all identical
    # this removes a CALL_PURE that has the same (non-constant)
    # arguments as a previous CALL_PURE.
    previous = old_op
    if isinstance(previous, PreambleOp):
        # xxx obscure, it's dealt with in the caller
        previous = previous.op
    self.make_equal_to(op, previous)
    self.last_emitted_operation = REMOVED
    return True
def produce_potential_short_preamble_ops(self, sb):
    """Hand `sb` every emitted operation eligible for the short preamble."""
    newops = self.optimizer._newoperations
    idx = 0
    for curop in newops:
        num = curop.opnum
        if rop.is_always_pure(num):
            sb.add_pure_op(curop)
        # an overflow-checking op immediately guarded by
        # GUARD_NO_OVERFLOW behaves like a pure op
        if rop.is_ovf(num) and \
                newops[idx + 1].getopnum() == rop.GUARD_NO_OVERFLOW:
            sb.add_pure_op(curop)
        idx += 1
    for position in self.call_pure_positions:
        curop = newops[position]
        # don't move call_pure_with_exception in the short preamble...
        # issue #2015
        # Also, don't move cond_call_value in the short preamble.
        # The issue there is that it's usually pointless to try to
        # because the 'value' argument is typically not a loop
        # invariant, and would really need to be in order to end up
        # in the short preamble.  Maybe the code works anyway in the
        # other rare case, but better safe than sorry and don't try.
        extra = curop.getdescr().get_extra_info()
        if not extra.check_can_raise(ignore_memoryerror=True):
            assert rop.is_call(curop.opnum)
            if not OpHelpers.is_cond_call_value(curop.opnum):
                sb.add_pure_op(curop)
def optimize_call_pure_old(self, op, old_op, start_index):
    """Try to replace `op` with a previously-seen identical pure call.

    Compares arguments pairwise starting at `start_index` in `op` and —
    to also match a cond_call_value against a call_pure — at an offset
    in `old_op` that skips cond_call_value's extra leading argument.
    Returns True (and marks the op REMOVED) on a full match.
    """
    if op.getdescr() is not old_op.getdescr():
        return False
    # this will match a call_pure and a cond_call_value with
    # the same function and arguments
    old_start_index = OpHelpers.is_cond_call_value(old_op.opnum)
    all_same = True
    new_index = start_index
    for old_index in range(old_start_index, old_op.numargs()):
        known_box = old_op.getarg(old_index)
        new_box = self.get_box_replacement(op.getarg(new_index))
        if not new_box.same_box(known_box):
            all_same = False
            break
        new_index += 1
    if all_same:
        # all identical
        # this removes a CALL_PURE that has the same (non-constant)
        # arguments as a previous CALL_PURE.
        if isinstance(old_op, PreambleOp):
            # xxx obscure, it's dealt with in the caller
            old_op = old_op.op
        self.make_equal_to(op, old_op)
        self.last_emitted_operation = REMOVED
        return True
    return False
def optimize_call_pure_old(self, op, old_op, start_index):
    """Try to replace `op` with a previously-seen identical pure call.

    Compares arguments pairwise starting at `start_index` in `op` and —
    to also match a cond_call_value against a call_pure — at an offset
    in `old_op` that skips cond_call_value's extra leading argument.
    Returns True (and marks the op REMOVED) on a full match.
    """
    if op.getdescr() is not old_op.getdescr():
        return False
    # this will match a call_pure and a cond_call_value with
    # the same function and arguments
    old_start_index = OpHelpers.is_cond_call_value(old_op.opnum)
    all_same = True
    new_index = start_index
    for old_index in range(old_start_index, old_op.numargs()):
        known_box = old_op.getarg(old_index)
        # NOTE(review): `get_box_replacement` is called as a free
        # function here (vs. a method elsewhere) — presumably imported
        # at module level; preserved as-is.
        if not get_box_replacement(op.getarg(new_index)).same_box(known_box):
            all_same = False
            break
        new_index += 1
    if all_same:
        # all identical
        # this removes a CALL_PURE that has the same (non-constant)
        # arguments as a previous CALL_PURE.
        if isinstance(old_op, PreambleOp):
            # xxx obscure, it's dealt with in the caller
            old_op = old_op.op
        self.make_equal_to(op, old_op)
        self.last_emitted_operation = REMOVED
        return True
    return False