# NOTE(review): whitespace-collapsed fragment of a JIT-optimizer class body
# (RPython/PyPy-style names).  Formatting reconstructed; the enclosing class
# header and the method owning the first statement are outside this view.
self.emit_operation(op)

def optimize_STRGETITEM(self, op):
    # Look at the bound info of the index argument.  A constant index could
    # let us record a string-length lower bound, but that specialization is
    # unimplemented (see the commented-out code); fall through either way.
    indexb = self.getintbound(op.getarg(1))
    if indexb.is_constant():
        pass
        #raise Exception("implement me")
        #arrayvalue = self.getvalue(op.getarg(0))
        #arrayvalue.make_len_gt(MODE_STR, op.getdescr(), indexvalue.box.getint())
    self.optimize_default(op)

def optimize_UNICODEGETITEM(self, op):
    # Same shape as optimize_STRGETITEM: constant-index specialization is
    # stubbed out, so the operation always takes the default path.
    indexb = self.getintbound(op.getarg(1))
    if indexb.is_constant():
        #arrayvalue = self.getvalue(op.getarg(0))
        #arrayvalue.make_len_gt(MODE_UNICODE, op.getdescr(), indexvalue.box.getint())
        pass
    self.optimize_default(op)

# These are typically removed already by OptRewrite, but it can be
# disabled, and unrolling emits some SAME_AS ops to set up the
# optimizer state.  These need to always be optimized out.
def optimize_SAME_AS_I(self, op):
    # SAME_AS is a pure copy: alias the result to its single argument.
    self.make_equal_to(op, op.getarg(0))
optimize_SAME_AS_R = optimize_SAME_AS_I
optimize_SAME_AS_F = optimize_SAME_AS_I

# Module-level dispatcher: routes each op to optimize_<OPNAME>, falling back
# to optimize_default for ops without a specialized handler.
dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_',
                                      default=Optimizer.optimize_default)
# NOTE(review): collapsed fragment of OptVirtualize (interior-field handling).
# The first statements are the tail of a GETINTERIORFIELD_GC_I handler whose
# start is not visible; the nesting levels below are reconstructed and should
# be checked against the original file.
if fld is None:
    raise Exception("I think this is illegal")
    # Unreachable after the raise above: 'xxx' is a crash marker and the
    # assignment is dead code left in place.
    xxx
    fieldvalue = self.new_const(descr)
self.make_equal_to(op, fld)
return
# (fragment: the two statements below presumably sit at an outer nesting
# level — the non-virtual fallback path — TODO confirm)
self.make_nonnull(op.getarg(0))
return self.emit(op)

optimize_GETINTERIORFIELD_GC_R = optimize_GETINTERIORFIELD_GC_I
optimize_GETINTERIORFIELD_GC_F = optimize_GETINTERIORFIELD_GC_I

def optimize_SETINTERIORFIELD_GC(self, op):
    # If the target is a virtual with a constant index, record the store on
    # the virtual info and drop the operation; otherwise force non-null-ness
    # and emit the store unchanged.
    opinfo = self.getptrinfo(op.getarg(0))
    if opinfo and opinfo.is_virtual():
        indexbox = self.get_constant_box(op.getarg(1))
        if indexbox is not None:
            opinfo.setinteriorfield_virtual(indexbox.getint(), op.getdescr(),
                self.get_box_replacement(op.getarg(2)))
            return
    self.make_nonnull(op.getarg(0))
    return self.emit(op)

# Module-level dispatcher wiring for OptVirtualize.
dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_',
                                      default=OptVirtualize.emit)
OptVirtualize.propagate_forward = dispatch_opt
dispatch_postprocess = make_dispatcher_method(OptVirtualize, 'postprocess_')
OptVirtualize.propagate_postprocess = dispatch_postprocess
# NOTE(review): collapsed fragment of OptIntBounds (backward bound
# propagation).  Formatting reconstructed; the first statement is the tail
# of a handler whose start is outside this view.
self.propagate_bounds_backward(op.getarg(1))

def propagate_bounds_INT_MUL(self, op):
    # Given r = b1 * b2, narrow each factor with the Python-division bound
    # of the result by the other factor; re-propagate on any narrowing.
    b1 = self.getintbound(op.getarg(0))
    b2 = self.getintbound(op.getarg(1))
    r = self.getintbound(op)
    b = r.py_div_bound(b2)
    if b1.intersect(b):
        self.propagate_bounds_backward(op.getarg(0))
    b = r.py_div_bound(b1)
    if b2.intersect(b):
        self.propagate_bounds_backward(op.getarg(1))

def propagate_bounds_INT_LSHIFT(self, op):
    # Given r = b1 << b2, narrow b1 via the right-shift bound of the result.
    b1 = self.getintbound(op.getarg(0))
    b2 = self.getintbound(op.getarg(1))
    r = self.getintbound(op)
    b = r.rshift_bound(b2)
    if b1.intersect(b):
        self.propagate_bounds_backward(op.getarg(0))

# Overflow-checked variants share the plain handlers.
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL

# Module-level dispatcher wiring for OptIntBounds.
dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_',
                                      default=OptIntBounds.emit)
dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_')
dispatch_postprocess = make_dispatcher_method(OptIntBounds, 'postprocess_')
# NOTE(review): collapsed fragment of OptRewrite.  The first statement is the
# tail of a method whose start is outside this view.
return self.emit(op)

def optimize_CAST_INT_TO_PTR(self, op):
    # Record the reverse pure operation (ptr->int of the result gives back
    # the argument) so later casts can be folded, then emit unchanged.
    self.optimizer.pure_from_args(rop.CAST_PTR_TO_INT, [op], op.getarg(0))
    return self.emit(op)

def optimize_SAME_AS_I(self, op):
    # SAME_AS is a pure copy: alias the result to its single argument.
    self.make_equal_to(op, op.getarg(0))
optimize_SAME_AS_R = optimize_SAME_AS_I
optimize_SAME_AS_F = optimize_SAME_AS_I

def serialize_optrewrite(self, available_boxes):
    # Export the (key, box) pairs of loop-invariant results whose box is
    # still live in available_boxes.  iteritems() implies Python2/RPython.
    res = []
    for i, box in self.loop_invariant_results.iteritems():
        box = get_box_replacement(box)
        if box in available_boxes:
            res.append((i, box))
    return res

def deserialize_optrewrite(self, tups):
    # Inverse of serialize_optrewrite: reinstall the exported pairs.
    for i, box in tups:
        self.loop_invariant_results[i] = box

# Module-level dispatcher wiring for OptRewrite.
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
                                      default=OptRewrite.emit)
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
dispatch_postprocess = make_dispatcher_method(OptRewrite, 'postprocess_')
# NOTE(review): collapsed fragment of an older, value-based OptVirtualize.
# The first statements are the tail of a GETINTERIORFIELD handler whose start
# is outside this view; nesting reconstructed — confirm against the original.
if value.is_virtual():
    indexbox = self.get_constant_box(op.getarg(1))
    if indexbox is not None:
        descr = op.getdescr()
        # None default: absent field falls back to the type's new_const.
        fieldvalue = value.getinteriorfield(indexbox.getint(), descr, None)
        if fieldvalue is None:
            fieldvalue = self.new_const(descr)
        self.make_equal_to(op.result, fieldvalue)
        return
value.ensure_nonnull()
self.emit_operation(op)

def optimize_SETINTERIORFIELD_GC(self, op):
    # Virtual target with constant index: record the store on the virtual
    # and drop the op; otherwise mark non-null and emit unchanged.
    value = self.getvalue(op.getarg(0))
    if value.is_virtual():
        indexbox = self.get_constant_box(op.getarg(1))
        if indexbox is not None:
            value.setinteriorfield(indexbox.getint(), op.getdescr(),
                                   self.getvalue(op.getarg(2)))
            return
    value.ensure_nonnull()
    self.emit_operation(op)

# Module-level dispatcher wiring for OptVirtualize.
dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_',
                                      default=OptVirtualize.emit_operation)
OptVirtualize.propagate_forward = dispatch_opt
# NOTE(review): collapsed fragment of OptIntBounds.  The first statements are
# the tail of an INT_MUL-style backward-propagation handler whose start is
# outside this view.
b2 = self.getintbound(op.getarg(1))
r = self.getintbound(op)
# Narrow each operand with the Python-division bound of the result by the
# other operand; re-propagate backward on any narrowing.
b = r.py_div_bound(b2)
if b1.intersect(b):
    self.propagate_bounds_backward(op.getarg(0))
b = r.py_div_bound(b1)
if b2.intersect(b):
    self.propagate_bounds_backward(op.getarg(1))

def propagate_bounds_INT_LSHIFT(self, op):
    # Given r = b1 << b2, narrow b1 via the right-shift bound of the result.
    b1 = self.getintbound(op.getarg(0))
    b2 = self.getintbound(op.getarg(1))
    r = self.getintbound(op)
    b = r.rshift_bound(b2)
    if b1.intersect(b):
        self.propagate_bounds_backward(op.getarg(0))

# Overflow-checked variants share the plain handlers.
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL

# Module-level dispatcher wiring, including the postprocess pass.
dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_',
                                      default=OptIntBounds.emit)
dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_')
OptIntBounds.propagate_postprocess = make_dispatcher_method(
    OptIntBounds, 'postprocess_')
OptIntBounds.have_postprocess_op = have_dispatcher_method(
    OptIntBounds, 'postprocess_')
# NOTE(review): collapsed fragment of OptIntBounds (variant using div_bound
# and an opt_default dispatcher fallback).  First statement is the tail of a
# handler whose start is outside this view.
self.propagate_bounds_backward(op.getarg(1))

def propagate_bounds_INT_MUL(self, op):
    # Given r = b1 * b2, narrow each factor with the division bound of the
    # result by the other factor; re-propagate on any narrowing.
    b1 = self.getintbound(op.getarg(0))
    b2 = self.getintbound(op.getarg(1))
    r = self.getintbound(op)
    b = r.div_bound(b2)
    if b1.intersect(b):
        self.propagate_bounds_backward(op.getarg(0))
    b = r.div_bound(b1)
    if b2.intersect(b):
        self.propagate_bounds_backward(op.getarg(1))

def propagate_bounds_INT_LSHIFT(self, op):
    # Given r = b1 << b2, narrow b1 via the right-shift bound of the result.
    b1 = self.getintbound(op.getarg(0))
    b2 = self.getintbound(op.getarg(1))
    r = self.getintbound(op)
    b = r.rshift_bound(b2)
    if b1.intersect(b):
        self.propagate_bounds_backward(op.getarg(0))

# Overflow-checked variants share the plain handlers.
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL

# Module-level dispatcher wiring for OptIntBounds.
dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_',
                                      default=OptIntBounds.opt_default)
dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_')
# NOTE(review): collapsed fragment of OptHeap, tail of the
# QUASIIMMUT_FIELD handler (its start is outside this view).
self._remove_guard_not_invalidated = True
return    # not a constant at all; ignore QUASIIMMUT_FIELD
#
from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr
qmutdescr = op.getdescr()
assert isinstance(qmutdescr, QuasiImmutDescr)
# check that the value is still correct; it could have changed
# already between the tracing and now.  In this case, we mark the loop
# as invalid
if not qmutdescr.is_still_valid_for(
        self.get_box_replacement(op.getarg(0))):
    raise InvalidLoop('quasi immutable field changed during tracing')
# record as an out-of-line guard
if self.optimizer.quasi_immutable_deps is None:
    self.optimizer.quasi_immutable_deps = {}
self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None
self._remove_guard_not_invalidated = False

def optimize_GUARD_NOT_INVALIDATED(self, op):
    # Emit at most one GUARD_NOT_INVALIDATED per trace, and none at all when
    # the QUASIIMMUT handler above decided the guard can be dropped.
    if self._remove_guard_not_invalidated:
        return
    if self._seen_guard_not_invalidated:
        return
    self._seen_guard_not_invalidated = True
    self.emit_operation(op)

# Module-level dispatcher wiring for OptHeap.
dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_',
                                      default=OptHeap.emit_operation)
OptHeap.propagate_forward = dispatch_opt
# NOTE(review): collapsed fragment of a test module.  The first statement is
# the tail of a test method whose start is outside this view.
self.optimize_loop(ops, ops)

class OptRenameStrlen(Optimization):
    # Test-only optimization: re-emits every STRLEN under a fresh result box
    # so box-renaming behaviour of the optimizer chain can be exercised.
    def propagate_forward(self, op):
        dispatch_opt(self, op)

    def optimize_STRLEN(self, op):
        newop = op.clone()
        newop.result = op.result.clonebox()
        self.emit_operation(newop)
        # Alias the original result to the freshly emitted one.
        self.make_equal_to(op.result, self.getvalue(newop.result))

dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_',
                                      default=OptRenameStrlen.emit_operation)

class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel):
    def _do_optimize_loop(self, loop, call_pure_results, state,
                          export_state=False):
        # Run the unrolling optimizer with the custom pass inserted.
        from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll
        from rpython.jit.metainterp.optimizeopt.util import args_dict
        from rpython.jit.metainterp.optimizeopt.pure import OptPure
        self.loop = loop
        # (fragment: method continues past the end of this view)
        loop.call_pure_results = args_dict()
# from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr qmutdescr = op.getdescr() assert isinstance(qmutdescr, QuasiImmutDescr) # check that the value is still correct; it could have changed # already between the tracing and now. In this case, we mark the loop # as invalid if not qmutdescr.is_still_valid_for( self.get_box_replacement(op.getarg(0))): raise InvalidLoop('quasi immutable field changed during tracing') # record as an out-of-line guard if self.optimizer.quasi_immutable_deps is None: self.optimizer.quasi_immutable_deps = {} self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None self._remove_guard_not_invalidated = False def optimize_GUARD_NOT_INVALIDATED(self, op): if self._remove_guard_not_invalidated: return if self._seen_guard_not_invalidated: return self._seen_guard_not_invalidated = True return self.emit(op) dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_', default=OptHeap.emit) OptHeap.propagate_forward = dispatch_opt dispatch_postprocess = make_dispatcher_method(OptHeap, 'postprocess_') OptHeap.propagate_postprocess = dispatch_postprocess
# NOTE(review): collapsed fragment of OptRewrite.  The 'else' below pairs
# with an 'if' above this view (tail of a modulo-by-constant rewrite);
# nesting reconstructed — confirm against the original file.
return True
else:
    # Expand x % const into a sequence of cheaper operations; the last
    # emitted op becomes the replacement result.
    from rpython.jit.metainterp.optimizeopt import intdiv
    known_nonneg = b1.known_ge(IntBound(0, 0))
    operations = intdiv.modulo_operations(arg1, val, known_nonneg)
    newop = None
    for newop in operations:
        self.optimizer.send_extra_operation(newop)
    self.make_equal_to(op, newop)
    return True

def optimize_CAST_PTR_TO_INT(self, op):
    # Record the reverse pure operation so a later int->ptr cast of the
    # result folds back to the argument, then emit unchanged.
    self.optimizer.pure_from_args(rop.CAST_INT_TO_PTR, [op], op.getarg(0))
    return self.emit(op)

def optimize_CAST_INT_TO_PTR(self, op):
    # Mirror image of optimize_CAST_PTR_TO_INT.
    self.optimizer.pure_from_args(rop.CAST_PTR_TO_INT, [op], op.getarg(0))
    return self.emit(op)

def optimize_SAME_AS_I(self, op):
    # SAME_AS is a pure copy: alias the result to its single argument.
    self.make_equal_to(op, op.getarg(0))
optimize_SAME_AS_R = optimize_SAME_AS_I
optimize_SAME_AS_F = optimize_SAME_AS_I

# Module-level dispatcher wiring for OptRewrite.
dispatch_opt = make_dispatcher_method(OptRewrite, "optimize_",
                                      default=OptRewrite.emit)
optimize_guards = _findall(OptRewrite, "optimize_", "GUARD")
dispatch_postprocess = make_dispatcher_method(OptRewrite, "postprocess_")
# NOTE(review): collapsed fragment of OptPure, tail of the method that
# collects candidate operations for the short preamble (its start is
# outside this view).
for i, op in enumerate(ops):
    if rop.is_always_pure(op.opnum):
        sb.add_pure_op(op)
    # Overflow ops are only movable together with their following
    # GUARD_NO_OVERFLOW.
    if rop.is_ovf(op.opnum) and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW:
        sb.add_pure_op(op)
for i in self.call_pure_positions:
    op = ops[i]
    # don't move call_pure_with_exception in the short preamble...
    # issue #2015
    # Also, don't move cond_call_value in the short preamble.
    # The issue there is that it's usually pointless to try to
    # because the 'value' argument is typically not a loop
    # invariant, and would really need to be in order to end up
    # in the short preamble.  Maybe the code works anyway in the
    # other rare case, but better safe than sorry and don't try.
    effectinfo = op.getdescr().get_extra_info()
    if not effectinfo.check_can_raise(ignore_memoryerror=True):
        assert rop.is_call(op.opnum)
        if not OpHelpers.is_cond_call_value(op.opnum):
            sb.add_pure_op(op)

# Module-level dispatcher wiring for OptPure, including postprocess.
dispatch_opt = make_dispatcher_method(OptPure, 'optimize_',
                                      default=OptPure.optimize_default)
OptPure.propagate_postprocess = make_dispatcher_method(OptPure, 'postprocess_')
OptPure.have_postprocess_op = have_dispatcher_method(OptPure, 'postprocess_')
# NOTE(review): collapsed fragment of OptString.  The first two statements
# are the tail of a method whose start is outside this view.
    return True
return False

def generate_modified_call(self, oopspecindex, args, result, mode):
    # Replace a string/unicode oopspec call with the mode-adjusted variant:
    # shift the oopspec index by the mode's offset, look up the target
    # function and descr, and rewrite the op into a plain CALL_I.
    oopspecindex += mode.OS_offset
    cic = self.optimizer.metainterp_sd.callinfocollection
    calldescr, func = cic.callinfo_for_oopspec(oopspecindex)
    op = self.optimizer.replace_op_with(result, rop.CALL_I,
                                        [ConstInt(func)] + args,
                                        descr=calldescr)
    return self.emit(op)

# Module-level dispatcher wiring for OptString, including postprocess.
dispatch_opt = make_dispatcher_method(OptString, 'optimize_',
                                      default=OptString.emit)
OptString.propagate_postprocess = make_dispatcher_method(
    OptString, 'postprocess_')
OptString.have_postprocess_op = have_dispatcher_method(OptString, 'postprocess_')

def _findall_call_oopspec():
    # Collect (EffectInfo.OS_* value, handler) pairs for every
    # opt_call_stroruni_* method defined on OptString.
    prefix = 'opt_call_stroruni_'
    result = []
    for name in dir(OptString):
        if name.startswith(prefix):
            value = getattr(EffectInfo, 'OS_' + name[len(prefix):])
            assert is_valid_int(value) and value != 0
            # (fragment: function continues past the end of this view)
            result.append((value, getattr(OptString, name)))
# NOTE(review): collapsed fragment of OptHeap, tail of the
# QUASIIMMUT_FIELD handler (its start, which binds 'structvalue', is
# outside this view).
self._remove_guard_not_invalidated = True
return    # not a constant at all; ignore QUASIIMMUT_FIELD
#
from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr
qmutdescr = op.getdescr()
assert isinstance(qmutdescr, QuasiImmutDescr)
# check that the value is still correct; it could have changed
# already between the tracing and now.  In this case, we mark the loop
# as invalid
if not qmutdescr.is_still_valid_for(structvalue.get_key_box()):
    raise InvalidLoop('quasi immutable field changed during tracing')
# record as an out-of-line guard
if self.optimizer.quasi_immutable_deps is None:
    self.optimizer.quasi_immutable_deps = {}
self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None
self._remove_guard_not_invalidated = False

def optimize_GUARD_NOT_INVALIDATED(self, op):
    # Emit at most one GUARD_NOT_INVALIDATED per trace, and none at all when
    # the QUASIIMMUT handler above decided the guard can be dropped.
    if self._remove_guard_not_invalidated:
        return
    if self._seen_guard_not_invalidated:
        return
    self._seen_guard_not_invalidated = True
    self.emit_operation(op)

# Module-level dispatcher wiring for OptHeap.
dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_',
                                      default=OptHeap.emit_operation)
OptHeap.propagate_forward = dispatch_opt
# NOTE(review): collapsed fragment of an older, value-based OptRewrite.
# The first statements (and the dangling 'elif') are the tail of an integer
# strength-reduction handler whose start is outside this view; nesting
# reconstructed — confirm against the original file.
    self.make_equal_to(op.result, v1)
    return
elif v1.is_constant() and v1.box.getint() == 0:
    self.make_constant_int(op.result, 0)
    return
# Non-negative dividend divided by a constant power of two can be turned
# into a right shift.
if v1.getintbound().known_ge(IntBound(0, 0)) and v2.is_constant():
    val = v2.box.getint()
    if val & (val - 1) == 0 and val > 0: # val == 2**shift
        op = op.copy_and_change(rop.INT_RSHIFT,
                args=[op.getarg(0), ConstInt(highest_bit(val))])
self.emit_operation(op)

def optimize_CAST_PTR_TO_INT(self, op):
    # Register the reverse cast as pure so round-trips fold away.
    self.optimizer.pure_reverse(op)
    self.emit_operation(op)

def optimize_CAST_INT_TO_PTR(self, op):
    # Mirror image of optimize_CAST_PTR_TO_INT.
    self.optimizer.pure_reverse(op)
    self.emit_operation(op)

def optimize_SAME_AS(self, op):
    # SAME_AS is a pure copy: alias the result to its single argument.
    self.make_equal_to(op.result, self.getvalue(op.getarg(0)))

# Module-level dispatcher wiring for OptRewrite.
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
                                      default=OptRewrite.emit_operation)
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
# NOTE(review): collapsed fragment of a dependency-graph module.  The exec()
# calls stamp out one operation_* handler per array-access resop from the
# 'array_access_source' template defined above this view.
exec(py.code.Source(array_access_source
    .format(name='RAW_STORE',raw_access=True)).compile())
exec(py.code.Source(array_access_source
    .format(name='GETARRAYITEM_RAW_I',raw_access=False)).compile())
exec(py.code.Source(array_access_source
    .format(name='GETARRAYITEM_RAW_F',raw_access=False)).compile())
exec(py.code.Source(array_access_source
    .format(name='SETARRAYITEM_RAW',raw_access=False)).compile())
exec(py.code.Source(array_access_source
    .format(name='GETARRAYITEM_GC_I',raw_access=False)).compile())
exec(py.code.Source(array_access_source
    .format(name='GETARRAYITEM_GC_F',raw_access=False)).compile())
exec(py.code.Source(array_access_source
    .format(name='SETARRAYITEM_GC',raw_access=False)).compile())
del array_access_source

# Build the operation_* dispatcher and drop the temporary name afterwards.
integral_dispatch_opt = make_dispatcher_method(IntegralForwardModification,
                                               'operation_')
IntegralForwardModification.inspect_operation = integral_dispatch_opt
del integral_dispatch_opt

class IndexVar(AbstractValue):
    """ IndexVar is an AbstractValue only to ensure that a box can be
        assigned to the same variable as an index var.
    """
    def __init__(self, var, coeff_mul=1, coeff_div=1, constant=0):
        self.var = var
        self.coefficient_mul = coeff_mul
        self.coefficient_div = coeff_div
        self.constant = constant
        # saves the next modification that uses a variable
        self.next_nonconst = None
        # (fragment: the class continues past the end of this view)
        self.current_end = None
# NOTE(review): collapsed fragment of OptVirtualize (variant using the free
# functions getptrinfo/get_box_replacement).  The first statements are the
# tail of a GETINTERIORFIELD_GC_I handler; 'xxx' is a crash marker and the
# assignment after it appears to be dead code.  Nesting reconstructed —
# confirm against the original file.
xxx
fieldvalue = self.optimizer.new_const(descr)
self.make_equal_to(op, fld)
return
self.make_nonnull(op.getarg(0))
return self.emit(op)

optimize_GETINTERIORFIELD_GC_R = optimize_GETINTERIORFIELD_GC_I
optimize_GETINTERIORFIELD_GC_F = optimize_GETINTERIORFIELD_GC_I

def optimize_SETINTERIORFIELD_GC(self, op):
    # Virtual target with constant index: record the store on the virtual
    # info and drop the op; otherwise mark non-null and emit unchanged.
    opinfo = getptrinfo(op.getarg(0))
    if opinfo and opinfo.is_virtual():
        indexbox = self.get_constant_box(op.getarg(1))
        if indexbox is not None:
            opinfo.setinteriorfield_virtual(
                indexbox.getint(), op.getdescr(),
                get_box_replacement(op.getarg(2)))
            return
    self.make_nonnull(op.getarg(0))
    return self.emit(op)

# Module-level dispatcher wiring for OptVirtualize.
dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_',
                                      default=OptVirtualize.emit)
OptVirtualize.propagate_forward = dispatch_opt
dispatch_postprocess = make_dispatcher_method(OptVirtualize, 'postprocess_')
OptVirtualize.propagate_postprocess = dispatch_postprocess
# NOTE(review): collapsed fragment of a test module.  The first statement is
# the tail of a test method whose start is outside this view.
self.optimize_loop(ops, ops)

class OptRenameStrlen(Optimization):
    # Test-only optimization: re-emits every STRLEN under a fresh result box
    # so box-renaming behaviour of the optimizer chain can be exercised.
    def propagate_forward(self, op):
        dispatch_opt(self, op)

    def optimize_STRLEN(self, op):
        newop = op.clone()
        newop.result = op.result.clonebox()
        self.emit_operation(newop)
        # Alias the original result to the freshly emitted one.
        self.make_equal_to(op.result, self.getvalue(newop.result))

dispatch_opt = make_dispatcher_method(OptRenameStrlen, 'optimize_',
                                      default=OptRenameStrlen.emit_operation)

class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel):
    def _do_optimize_loop(self, loop, call_pure_results, state,
                          export_state=False):
        # Run the unrolling optimizer with the custom renaming pass inserted
        # between OptRewrite and OptHeap.
        from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll
        from rpython.jit.metainterp.optimizeopt.util import args_dict
        from rpython.jit.metainterp.optimizeopt.pure import OptPure
        self.loop = loop
        loop.call_pure_results = args_dict()
        metainterp_sd = FakeMetaInterpStaticData(self.cpu)
        return optimize_unroll(metainterp_sd, loop,
                               [OptRewrite(), OptRenameStrlen(), OptHeap(),
                                OptPure()], True, state, export_state)

    # (fragment: test body continues past the end of this view)
    def test_optimizer_renaming_boxes1(self):
# NOTE(review): collapsed fragment of an older, value-based OptIntBounds.
# The first statements are the tail of a handler whose start is outside
# this view.
if v2.intbound.intersect(b):
    self.propagate_bounds_backward(op.getarg(1))

def propagate_bounds_INT_MUL(self, op):
    # Given r = v1 * v2, narrow each factor with the division bound of the
    # result by the other factor; re-propagate on any narrowing.
    v1 = self.getvalue(op.getarg(0))
    v2 = self.getvalue(op.getarg(1))
    r = self.getvalue(op.result)
    b = r.intbound.div_bound(v2.intbound)
    if v1.intbound.intersect(b):
        self.propagate_bounds_backward(op.getarg(0))
    b = r.intbound.div_bound(v1.intbound)
    if v2.intbound.intersect(b):
        self.propagate_bounds_backward(op.getarg(1))

def propagate_bounds_INT_LSHIFT(self, op):
    # Given r = v1 << v2, narrow v1 via the right-shift bound of the result.
    v1 = self.getvalue(op.getarg(0))
    v2 = self.getvalue(op.getarg(1))
    r = self.getvalue(op.result)
    b = r.intbound.rshift_bound(v2.intbound)
    if v1.intbound.intersect(b):
        self.propagate_bounds_backward(op.getarg(0))

# Overflow-checked variants share the plain handlers.
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL

# Module-level dispatcher wiring for OptIntBounds.
dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_',
                                      default=OptIntBounds.opt_default)
dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_')
# NOTE(review): collapsed fragment of OptString.  The first two statements
# are the tail of a method whose start is outside this view.
    return True
return False

def generate_modified_call(self, oopspecindex, args, result, mode):
    # Replace a string/unicode oopspec call with the mode-adjusted variant:
    # shift the oopspec index by the mode's offset, look up the target
    # function and descr, and rewrite the op into a plain CALL_I.
    oopspecindex += mode.OS_offset
    cic = self.optimizer.metainterp_sd.callinfocollection
    calldescr, func = cic.callinfo_for_oopspec(oopspecindex)
    op = self.optimizer.replace_op_with(result, rop.CALL_I,
                                        [ConstInt(func)] + args,
                                        descr=calldescr)
    self.emit_operation(op)

def propagate_forward(self, op):
    dispatch_opt(self, op)

# Module-level dispatcher wiring for OptString.
dispatch_opt = make_dispatcher_method(OptString, 'optimize_',
                                      default=OptString.emit_operation)

def _findall_call_oopspec():
    # Collect (EffectInfo.OS_* value, handler) pairs for every
    # opt_call_stroruni_* method defined on OptString, as an
    # unrolling_iterable for RPython.
    prefix = 'opt_call_stroruni_'
    result = []
    for name in dir(OptString):
        if name.startswith(prefix):
            value = getattr(EffectInfo, 'OS_' + name[len(prefix):])
            assert is_valid_int(value) and value != 0
            result.append((value, getattr(OptString, name)))
    return unrolling_iterable(result)
opt_call_oopspec_ops = _findall_call_oopspec()
# descr = op.getdescr() # if isinstance(descr, JitCellToken): # return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) # self.last_label_descr = op.getdescr() # self.emit_operation(op) # def optimize_JUMP(self, op): # if not self.unroll: # op = op.copy_and_change(op.getopnum()) # descr = op.getdescr() # assert isinstance(descr, JitCellToken) # if not descr.target_tokens: # assert self.last_label_descr is not None # target_token = self.last_label_descr # assert isinstance(target_token, TargetToken) # assert target_token.targeting_jitcell_token is descr # op.setdescr(self.last_label_descr) # else: # assert len(descr.target_tokens) == 1 # op.setdescr(descr.target_tokens[0]) # self.emit_operation(op) def optimize_GUARD_FUTURE_CONDITION(self, op): self.optimizer.notice_guard_future_condition(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit_operation) OptSimplify.propagate_forward = dispatch_opt
# NOTE(review): collapsed fragment of the main Optimizer class body
# (enclosing class header outside this view; these defs are methods).
def _check_subclass(vtable1, vtable2):
    # checks that vtable1 is a subclass of vtable2, by comparing the
    # subclassrange markers stored on the class structures.
    known_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable1),
        rclass.CLASSTYPE)
    expected_class = llmemory.cast_adr_to_ptr(
        llmemory.cast_int_to_adr(vtable2),
        rclass.CLASSTYPE)
    # note: the test is for a range including 'max', but 'max'
    # should never be used for actual classes.  Including it makes
    # it easier to pass artificial tests.
    return (expected_class.subclassrange_min
            <= known_class.subclassrange_min
            <= expected_class.subclassrange_max)

def is_virtual(self, op):
    # An op is virtual when it carries pointer info flagged as virtual.
    opinfo = getptrinfo(op)
    return opinfo is not None and opinfo.is_virtual()

# These are typically removed already by OptRewrite, but it can be
# disabled, and unrolling emits some SAME_AS ops to set up the
# optimizer state.  These need to always be optimized out.
def optimize_SAME_AS_I(self, op):
    # SAME_AS is a pure copy: alias the result to its single argument.
    self.make_equal_to(op, op.getarg(0))
optimize_SAME_AS_R = optimize_SAME_AS_I
optimize_SAME_AS_F = optimize_SAME_AS_I

# Module-level dispatcher for the main Optimizer.
dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_',
                                      default=Optimizer.optimize_default)
# NOTE(review): collapsed fragment of an older, value-based OptVirtualize.
# The first statements are the interior of a GETINTERIORFIELD handler whose
# start is outside this view; nesting reconstructed — confirm against the
# original file.
indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
    descr = op.getdescr()
    # None default: absent field falls back to the type's new_const.
    fieldvalue = value.getinteriorfield(
        indexbox.getint(), descr, None
    )
    if fieldvalue is None:
        fieldvalue = self.new_const(descr)
    self.make_equal_to(op.result, fieldvalue)
    return
value.ensure_nonnull()
self.emit_operation(op)

def optimize_SETINTERIORFIELD_GC(self, op):
    # Virtual target with constant index: record the store on the virtual
    # and drop the op; otherwise mark non-null and emit unchanged.
    value = self.getvalue(op.getarg(0))
    if value.is_virtual():
        indexbox = self.get_constant_box(op.getarg(1))
        if indexbox is not None:
            value.setinteriorfield(
                indexbox.getint(), op.getdescr(), self.getvalue(op.getarg(2))
            )
            return
    value.ensure_nonnull()
    self.emit_operation(op)

# Module-level dispatcher wiring for OptVirtualize.
dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_',
                                      default=OptVirtualize.emit_operation)
OptVirtualize.propagate_forward = dispatch_opt
# def optimize_LABEL(self, op): # if not self.unroll: # descr = op.getdescr() # if isinstance(descr, JitCellToken): # return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) # self.last_label_descr = op.getdescr() # return self.emit(op) # def optimize_JUMP(self, op): # if not self.unroll: # op = op.copy_and_change(op.getopnum()) # descr = op.getdescr() # assert isinstance(descr, JitCellToken) # if not descr.target_tokens: # assert self.last_label_descr is not None # target_token = self.last_label_descr # assert isinstance(target_token, TargetToken) # assert target_token.targeting_jitcell_token is descr # op.setdescr(self.last_label_descr) # else: # assert len(descr.target_tokens) == 1 # op.setdescr(descr.target_tokens[0]) # return self.emit(op) def optimize_GUARD_FUTURE_CONDITION(self, op): self.optimizer.notice_guard_future_condition(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', default=OptSimplify.emit) OptSimplify.propagate_forward = dispatch_opt
# NOTE(review): collapsed fragment of a dependency-graph module (Python 2
# 'exec' statement form).  Each exec stamps out one operation_* handler from
# the 'array_access_source' template defined above this view.
exec py.code.Source(array_access_source
    .format(name='RAW_STORE',raw_access=True)).compile()
exec py.code.Source(array_access_source
    .format(name='GETARRAYITEM_RAW_I',raw_access=False)).compile()
exec py.code.Source(array_access_source
    .format(name='GETARRAYITEM_RAW_F',raw_access=False)).compile()
exec py.code.Source(array_access_source
    .format(name='SETARRAYITEM_RAW',raw_access=False)).compile()
exec py.code.Source(array_access_source
    .format(name='GETARRAYITEM_GC_I',raw_access=False)).compile()
exec py.code.Source(array_access_source
    .format(name='GETARRAYITEM_GC_F',raw_access=False)).compile()
exec py.code.Source(array_access_source
    .format(name='SETARRAYITEM_GC',raw_access=False)).compile()
del array_access_source

# Build the operation_* dispatcher and drop the temporary name afterwards.
integral_dispatch_opt = make_dispatcher_method(IntegralForwardModification,
                                               'operation_')
IntegralForwardModification.inspect_operation = integral_dispatch_opt
del integral_dispatch_opt

class IndexVar(AbstractValue):
    """ IndexVar is an AbstractValue only to ensure that a box can be
        assigned to the same variable as an index var.
    """
    def __init__(self, var, coeff_mul=1, coeff_div=1, constant=0):
        self.var = var
        self.coefficient_mul = coeff_mul
        self.coefficient_div = coeff_div
        self.constant = constant
        # saves the next modification that uses a variable
        self.next_nonconst = None
        # (fragment: the class continues past the end of this view)
        self.current_end = None
# NOTE(review): collapsed fragment of OptHeap, tail of the method that
# re-installs serialized heap caches (its start, which binds box1/box2/
# descr/parent_descr and triples_array, is outside this view).  Nesting
# reconstructed — confirm against the original file.
if box1.is_constant():
    structinfo = info.ConstPtrInfo(box1)
else:
    structinfo = box1.get_forwarded()
    # A box without proper ptr info gets a fresh InstancePtrInfo forwarded
    # onto it before the cached field is re-applied.
    if not isinstance(structinfo, info.AbstractVirtualPtrInfo):
        structinfo = info.InstancePtrInfo(parent_descr)
        structinfo.init_fields(parent_descr, descr.get_index())
        box1.set_forwarded(structinfo)
cf = self.field_cache(descr)
structinfo.setfield(descr, box1, box2, optheap=self, cf=cf)

for box1, index, descr, box2 in triples_array:
    # Same dance for array items: ensure ptr info, then refill the
    # per-(descr, index) cache.
    if box1.is_constant():
        arrayinfo = info.ConstPtrInfo(box1)
    else:
        arrayinfo = box1.get_forwarded()
        if not isinstance(arrayinfo, info.AbstractVirtualPtrInfo):
            arrayinfo = info.ArrayPtrInfo(descr)
            box1.set_forwarded(arrayinfo)
    cf = self.arrayitem_cache(descr, index)
    arrayinfo.setitem(descr, index, box1, box2, optheap=self, cf=cf)

# Module-level dispatcher wiring for OptHeap, including postprocess.
dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_',
                                      default=OptHeap.emit)
OptHeap.propagate_forward = dispatch_opt
dispatch_postprocess = make_dispatcher_method(OptHeap, 'postprocess_')
OptHeap.propagate_postprocess = dispatch_postprocess
OptHeap.have_postprocess_op = have_dispatcher_method(OptHeap, 'postprocess_')
# NOTE(review): collapsed fragment of OptRewrite.  The first statements
# (with a dangling 'elif') are the tail of an integer strength-reduction
# handler whose start is outside this view; nesting reconstructed — confirm
# against the original file.
if b2.is_constant() and b2.getint() == 1:
    self.make_equal_to(op, arg0)
    return
elif b1.is_constant() and b1.getint() == 0:
    self.make_constant_int(op, 0)
    return
# Non-negative left operand combined with a constant power of two can be
# rewritten into a right shift.
if b1.known_ge(IntBound(0, 0)) and b2.is_constant():
    val = b2.getint()
    if val & (val - 1) == 0 and val > 0: # val == 2**shift
        op = self.replace_op_with(op, rop.INT_RSHIFT,
                args = [op.getarg(0), ConstInt(highest_bit(val))])
self.emit_operation(op)

def optimize_CAST_PTR_TO_INT(self, op):
    # Register the reverse cast as pure so round-trips fold away.
    self.optimizer.pure_reverse(op)
    self.emit_operation(op)

def optimize_CAST_INT_TO_PTR(self, op):
    # Mirror image of optimize_CAST_PTR_TO_INT.
    self.optimizer.pure_reverse(op)
    self.emit_operation(op)

def optimize_SAME_AS_I(self, op):
    # SAME_AS is a pure copy: alias the result to its single argument.
    self.make_equal_to(op, op.getarg(0))
optimize_SAME_AS_R = optimize_SAME_AS_I
optimize_SAME_AS_F = optimize_SAME_AS_I

# Module-level dispatcher wiring for OptRewrite.
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
                                      default=OptRewrite.emit_operation)
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
# NOTE(review): collapsed fragment of an older OptString (ResOperation-era
# API); the enclosing class header is outside this view.
def generate_modified_call(self, oopspecindex, args, result, mode):
    # Replace a string/unicode oopspec call with the mode-adjusted variant:
    # shift the oopspec index by the mode's offset, look up the target
    # function and descr, and emit a plain CALL.
    oopspecindex += mode.OS_offset
    cic = self.optimizer.metainterp_sd.callinfocollection
    calldescr, func = cic.callinfo_for_oopspec(oopspecindex)
    op = ResOperation(rop.CALL, [ConstInt(func)] + args, result,
                      descr=calldescr)
    self.emit_operation(op)

def propagate_forward(self, op):
    dispatch_opt(self, op)

# Module-level dispatcher wiring for OptString.
dispatch_opt = make_dispatcher_method(OptString, 'optimize_',
                                      default=OptString.emit_operation)

def _findall_call_oopspec():
    # Collect (EffectInfo.OS_* value, handler) pairs for every
    # opt_call_stroruni_* method defined on OptString, as an
    # unrolling_iterable for RPython.
    prefix = 'opt_call_stroruni_'
    result = []
    for name in dir(OptString):
        if name.startswith(prefix):
            value = getattr(EffectInfo, 'OS_' + name[len(prefix):])
            assert is_valid_int(value) and value != 0
            result.append((value, getattr(OptString, name)))
    return unrolling_iterable(result)
opt_call_oopspec_ops = _findall_call_oopspec()
# NOTE(review): collapsed fragment of OptPure.  The first two statements are
# the tail of a method (a recent-ops lookup) whose start is outside this
# view.
recentops = self.getrecentops(op.getopnum())
return recentops.lookup(self.optimizer, op)

def produce_potential_short_preamble_ops(self, sb):
    # Collect operations eligible for the short preamble: always-pure ops,
    # overflow ops followed by their GUARD_NO_OVERFLOW, and safe pure calls.
    ops = self.optimizer._newoperations
    for i, op in enumerate(ops):
        if rop.is_always_pure(op.opnum):
            sb.add_pure_op(op)
        if rop.is_ovf(op.opnum) and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW:
            sb.add_pure_op(op)
    for i in self.call_pure_positions:
        op = ops[i]
        # don't move call_pure_with_exception in the short preamble...
        # issue #2015
        # Also, don't move cond_call_value in the short preamble.
        # The issue there is that it's usually pointless to try to
        # because the 'value' argument is typically not a loop
        # invariant, and would really need to be in order to end up
        # in the short preamble.  Maybe the code works anyway in the
        # other rare case, but better safe than sorry and don't try.
        effectinfo = op.getdescr().get_extra_info()
        if not effectinfo.check_can_raise(ignore_memoryerror=True):
            assert rop.is_call(op.opnum)
            if not OpHelpers.is_cond_call_value(op.opnum):
                sb.add_pure_op(op)

# Module-level dispatcher wiring for OptPure.
dispatch_opt = make_dispatcher_method(OptPure, 'optimize_',
                                      default=OptPure.optimize_default)
dispatch_postprocess = make_dispatcher_method(OptPure, 'postprocess_')