        # (continuation of an integer-division optimization whose "def" line is
        # outside this view; v1 presumably holds getvalue(op.getarg(0)) — TODO
        # confirm against the missing method header)
        v2 = self.getvalue(op.getarg(1))

        if v2.is_constant() and v2.box.getint() == 1:
            # x // 1 == x: just forward the first argument.
            self.make_equal_to(op.result, v1)
            return
        elif v1.is_constant() and v1.box.getint() == 0:
            # 0 // y == 0.
            self.make_constant_int(op.result, 0)
            return
        if v1.intbound.known_ge(IntBound(0, 0)) and v2.is_constant():
            # Non-negative numerator divided by a constant power of two:
            # strength-reduce the division into a right shift.
            val = v2.box.getint()
            if val & (val - 1) == 0 and val > 0: # val == 2**shift
                op = op.copy_and_change(rop.INT_RSHIFT,
                    args = [op.getarg(0), ConstInt(highest_bit(val))])
        self.emit_operation(op)

    def optimize_MARK_OPAQUE_PTR(self, op):
        # Record the value as an opaque pointer; the marker operation itself
        # is dropped (not re-emitted).
        value = self.getvalue(op.getarg(0))
        self.optimizer.opaque_pointers[value] = True

    def optimize_CAST_PTR_TO_INT(self, op):
        # Register the inverse cast as pure so a later CAST_INT_TO_PTR of
        # op.result folds back to the original argument.
        self.pure(rop.CAST_INT_TO_PTR, [op.result], op.getarg(0))
        self.emit_operation(op)

    def optimize_CAST_INT_TO_PTR(self, op):
        # Symmetric to optimize_CAST_PTR_TO_INT.
        self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0))
        self.emit_operation(op)

# Build the opnum -> optimize_* dispatcher; operations without a handler are
# emitted unchanged.
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
        default=OptRewrite.emit_operation)
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
        # (tail of a method whose "def" line is outside this view: replace the
        # recognized libffi call sequence with one CALL_RELEASE_GIL)
        newop = ResOperation(rop.CALL_RELEASE_GIL, arglist, op.result,
                             descr=funcinfo.descr)
        self.commit_optimization()
        ops = []
        # First flush the operations that were delayed while the ffi-call
        # pattern was being matched, then append the combined call itself.
        for delayed_op in funcinfo.delayed_ops:
            ops.append(delayed_op)
        ops.append(newop)
        return ops

    def propagate_forward(self, op):
        # Dispatch op to its optimize_* handler, or emit it unchanged.
        # optimize_ops is a loop-unrolled iterable (RPython requirement),
        # hence the explicit linear scan with for/break/else.
        if self.logops is not None:
            debug_print(self.logops.repr_of_resop(op))
        opnum = op.getopnum()
        for value, func in optimize_ops:
            if opnum == value:
                func(self, op)
                break
        else:
            self.emit_operation(op)

    def _get_oopspec(self, op):
        # Return the oopspec index attached to the call descr, or OS_NONE
        # when the descr has no extra effect info.
        effectinfo = op.getdescr().get_extra_info()
        if effectinfo is not None:
            return effectinfo.oopspecindex
        return EffectInfo.OS_NONE

    def _get_funcval(self, op):
        # The function object is argument 1 of these oopspec calls.
        return self.getvalue(op.getarg(1))

optimize_ops = _findall(OptFfiCall, 'optimize_')
        # (tail of a propagate_bounds_INT_SUB-style method whose "def" line is
        # outside this view; r is the result value, v1/v2 the argument values)
        self.propagate_bounds_backward(op.getarg(0))
        # From r = v1 - v2: v2 is bounded by -(r - v1).
        b = r.intbound.sub_bound(v1.intbound).mul(-1)
        if v2.intbound.intersect(b):
            self.propagate_bounds_backward(op.getarg(1))

    def propagate_bounds_INT_MUL(self, op):
        # From r = v1 * v2: narrow v1 with r / v2 and v2 with r / v1,
        # propagating backwards whenever an intersection tightened a bound.
        v1 = self.getvalue(op.getarg(0))
        v2 = self.getvalue(op.getarg(1))
        r = self.getvalue(op.result)
        b = r.intbound.div_bound(v2.intbound)
        if v1.intbound.intersect(b):
            self.propagate_bounds_backward(op.getarg(0))
        b = r.intbound.div_bound(v1.intbound)
        if v2.intbound.intersect(b):
            self.propagate_bounds_backward(op.getarg(1))

    def propagate_bounds_INT_LSHIFT(self, op):
        # From r = v1 << v2: narrow v1 with r >> v2.
        v1 = self.getvalue(op.getarg(0))
        v2 = self.getvalue(op.getarg(1))
        r = self.getvalue(op.result)
        b = r.intbound.rshift_bound(v2.intbound)
        if v1.intbound.intersect(b):
            self.propagate_bounds_backward(op.getarg(0))

    # The overflow-checked variants propagate bounds exactly like the plain
    # operations.
    propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
    propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
    propagate_bounds_INT_MUL_OVF = propagate_bounds_INT_MUL

optimize_ops = _findall(OptIntBounds, 'optimize_')
propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_')
            # (tail of optimize_GETARRAYITEM_GC whose "def" line is outside
            # this view: the virtual case returned above; here the array is
            # real, so force non-nullness and emit the read)
            return
        value.ensure_nonnull()
        ###self.heap_op_optimizer.optimize_GETARRAYITEM_GC(op, value)
        self.emit_operation(op)

    # note: the following line does not mean that the two operations are
    # completely equivalent, because GETARRAYITEM_GC_PURE is_always_pure().
    optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC

    def optimize_SETARRAYITEM_GC(self, op):
        # For a virtual array with a constant index, record the store on the
        # virtual and drop the operation; otherwise emit the real store.
        value = self.getvalue(op.getarg(0))
        if value.is_virtual():
            indexbox = self.get_constant_box(op.getarg(1))
            if indexbox is not None:
                value.setitem(indexbox.getint(), self.getvalue(op.getarg(2)))
                return
        value.ensure_nonnull()
        ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
        self.emit_operation(op)

    def propagate_forward(self, op):
        # Dispatch to the matching optimize_* handler; optimize_ops is a
        # loop-unrolled iterable, hence the explicit for/break/else scan.
        opnum = op.getopnum()
        for value, func in optimize_ops:
            if opnum == value:
                func(self, op)
                break
        else:
            self.emit_operation(op)

optimize_ops = _findall(OptVirtualize, 'optimize_')
        # (tail of a method whose "def" line is outside this view: residualize
        # the operation as a plain CALL with the same args/result/descr)
        args = op.getarglist()
        self.emit_operation(ResOperation(rop.CALL, args, op.result,
                                         op.getdescr()))

    def optimize_CALL_LOOPINVARIANT(self, op):
        # Downgrade to an ordinary CALL; this pass does no loop-invariant
        # motion.
        op = op.copy_and_change(rop.CALL)
        self.emit_operation(op)

    def optimize_VIRTUAL_REF_FINISH(self, op):
        # Nothing to do here; the operation is dropped.
        pass

    def optimize_VIRTUAL_REF(self, op):
        # Replace the virtual ref with its underlying object.
        op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result)
        self.emit_operation(op)

    def optimize_QUASIIMMUT_FIELD(self, op):
        # xxx ideally we could also kill the following GUARD_NOT_INVALIDATED
        # but it's a bit hard to implement robustly if heap.py is also run
        pass

    def propagate_forward(self, op):
        # Dispatch to the matching optimize_* handler, falling back to plain
        # emission; for/break/else over a loop-unrolled iterable.
        opnum = op.getopnum()
        for value, func in optimize_ops:
            if opnum == value:
                func(self, op)
                break
        else:
            self.emit_operation(op)

optimize_ops = _findall(OptSimplify, "optimize_")
        # (tail of a call-emitting helper whose "def" line is outside this
        # view: build and emit a residual CALL to the function address `func`)
        op = ResOperation(rop.CALL, [ConstInt(func)] + args, result,
                          descr=calldescr)
        self.optimizer.emit_operation(op)

    def propagate_forward(self, op):
        # When string optimization is disabled, pass every operation through
        # unchanged; otherwise dispatch to the optimize_* handlers via the
        # loop-unrolled for/break/else scan.
        if not self.enabled:
            self.emit_operation(op)
            return

        opnum = op.getopnum()
        for value, func in optimize_ops:
            if opnum == value:
                func(self, op)
                break
        else:
            self.emit_operation(op)

optimize_ops = _findall(OptString, 'optimize_')

def _findall_call_oopspec():
    # Collect (EffectInfo.OS_xxx, method) pairs for every opt_call_stroruni_*
    # method on OptString, returned as an unrolling_iterable so the dispatch
    # loop can be specialized during RPython translation.
    prefix = 'opt_call_stroruni_'
    result = []
    for name in dir(OptString):
        if name.startswith(prefix):
            value = getattr(EffectInfo, 'OS_' + name[len(prefix):])
            # Every handler must map to a nonzero OS_ constant.
            assert isinstance(value, int) and value != 0
            result.append((value, getattr(OptString, name)))
    return unrolling_iterable(result)

opt_call_oopspec_ops = _findall_call_oopspec()
                # (tail of a pure-operation memoization method whose "def"
                # line is outside this view: a previous identical pure op was
                # found, so reuse its result instead of re-emitting)
                                   True)
                return
            else:
                # First time we see this pure operation: remember it so a
                # later duplicate can be folded to the same result.
                self.pure_operations[args] = op

        # otherwise, the operation remains
        self.emit_operation(op)
        if nextop:
            self.emit_operation(nextop)

    def constant_fold(self, op):
        # Execute the operation on its (all-constant) argument boxes and
        # return the result as a constant box.
        argboxes = [self.get_constant_box(op.getarg(i))
                    for i in range(op.numargs())]
        resbox = execute_nonspec(self.cpu, None,
                                 op.getopnum(), argboxes, op.getdescr())
        return resbox.constbox()

    #def optimize_GUARD_NO_OVERFLOW(self, op):
    #    # otherwise the default optimizer will clear fields, which is unwanted
    #    # in this case
    #    self.emit_operation(op)
    # FIXME: Is this still needed?

    def optimize_DEBUG_MERGE_POINT(self, op):
        self.emit_operation(op)

optimize_ops = _findall(Optimizer, 'optimize_')
        # (continuation of an integer-division optimization whose "def" line
        # is outside this view; v1/v2 presumably hold the values of the two
        # arguments — TODO confirm against the missing method header)
        if v2.is_constant() and v2.box.getint() == 1:
            # x // 1 == x: forward the first argument.
            self.make_equal_to(op.result, v1)
            return
        elif v1.is_constant() and v1.box.getint() == 0:
            # 0 // y == 0.
            self.make_constant_int(op.result, 0)
            return
        if v1.intbound.known_ge(IntBound(0, 0)) and v2.is_constant():
            # Non-negative numerator divided by a constant power of two:
            # strength-reduce the division into a right shift.
            val = v2.box.getint()
            if val & (val - 1) == 0 and val > 0: # val == 2**shift
                op = op.copy_and_change(
                    rop.INT_RSHIFT,
                    args=[op.getarg(0), ConstInt(highest_bit(val))])
        self.emit_operation(op)

    def optimize_CAST_PTR_TO_INT(self, op):
        # Register the inverse cast as pure so a later CAST_INT_TO_PTR of
        # op.result folds back to the original argument.
        self.pure(rop.CAST_INT_TO_PTR, [op.result], op.getarg(0))
        self.emit_operation(op)

    def optimize_CAST_INT_TO_PTR(self, op):
        # Symmetric to optimize_CAST_PTR_TO_INT.
        self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0))
        self.emit_operation(op)

    def optimize_SAME_AS(self, op):
        # The result is just an alias of the argument; drop the operation.
        self.make_equal_to(op.result, self.getvalue(op.getarg(0)))

# Build the opnum -> optimize_* dispatcher; operations without a handler are
# emitted unchanged.
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
        default=OptRewrite.emit_operation)
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
            # (tail of optimize_QUASIIMMUT_FIELD whose "def" line is outside
            # this view: the invalid case returned above)
            return
        # record as an out-of-line guard
        if self.optimizer.quasi_immutable_deps is None:
            self.optimizer.quasi_immutable_deps = {}
        self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None
        # perform the replacement in the list of operations
        fieldvalue = self.getvalue(qmutdescr.constantfieldbox)
        cf = self.field_cache(qmutdescr.fielddescr)
        cf.force_lazy_setfield(self)
        cf.remember_field_value(structvalue, fieldvalue)
        # The field is valid, so the upcoming GUARD_NOT_INVALIDATED must be
        # kept (see optimize_GUARD_NOT_INVALIDATED below).
        self._remove_guard_not_invalidated = False

    def optimize_GUARD_NOT_INVALIDATED(self, op):
        # Drop the guard when the quasi-immutable field turned out invalid,
        # and emit at most one such guard per trace.
        if self._remove_guard_not_invalidated:
            return
        if self._seen_guard_not_invalidated:
            return
        self._seen_guard_not_invalidated = True
        self.emit_operation(op)

    def propagate_forward(self, op):
        # Dispatch to the matching optimize_* handler; optimize_ops is a
        # loop-unrolled iterable, hence the explicit for/break/else scan.
        opnum = op.getopnum()
        for value, func in optimize_ops:
            if opnum == value:
                func(self, op)
                break
        else:
            self.emit_operation(op)

optimize_ops = _findall(OptHeap, 'optimize_')