def execute_frame(self):
    """Execute this frame.  Main entry point to the interpreter.

    Wraps the bytecode dispatch loop with executioncontext enter/leave
    bookkeeping and (when not jitted) the call/return trace hooks.
    Returns the frame's exit value (w_exitvalue).
    """
    from pypy.rlib import rstack
    # the following 'assert' is an annotation hint: it hides from
    # the annotator all methods that are defined in PyFrame but
    # overridden in the FrameClass subclass of PyFrame.
    assert isinstance(self, self.space.FrameClass)
    executioncontext = self.space.getexecutioncontext()
    executioncontext.enter(self)
    try:
        if not we_are_jitted():
            # call_trace/return_trace are only meaningful in the
            # interpreter; the JIT skips the tracing machinery entirely.
            executioncontext.call_trace(self)
        # Execution starts just after the last_instr.  Initially,
        # last_instr is -1.  After a generator suspends it points to
        # the YIELD_VALUE instruction.
        next_instr = self.last_instr + 1
        try:
            w_exitvalue = self.dispatch(self.pycode, next_instr,
                                        executioncontext)
            # stackless support: mark a point where execution can resume
            rstack.resume_point("execute_frame", self, executioncontext,
                                returns=w_exitvalue)
        except Exception:
            if not we_are_jitted():
                # report an exceptional exit to the tracer (w_None as value)
                executioncontext.return_trace(self, self.space.w_None)
            raise
        if not we_are_jitted():
            executioncontext.return_trace(self, w_exitvalue)
        # on exit, we try to release self.last_exception -- breaks an
        # obvious reference cycle, so it helps refcounting implementations
        self.last_exception = None
    finally:
        executioncontext.leave(self)
    return w_exitvalue
def handle_operation_error(self, ec, operr, attach_tb=True):
    """Record *operr* on this frame.

    If attach_tb is true, attach an application-level traceback entry for
    the current instruction and (outside the JIT) notify the execution
    context's exception trace hook.
    """
    if attach_tb:
        if not we_are_jitted():
            # xxx this is a hack.  It allows bytecode_trace() to
            # call a signal handler which raises, and catch the
            # raised exception immediately.  See test_alarm_raise in
            # pypy/module/signal/test/test_signal.py.  Without the
            # next four lines, if an external call (like
            # socket.accept()) is interrupted by a signal, it raises
            # an exception carrying EINTR which arrives here,
            # entering the next "except" block -- but the signal
            # handler is then called on the next call to
            # dispatch_bytecode(), causing the real exception to be
            # raised after the exception handler block was popped.
            try:
                # temporarily disable the trace function while running
                # the trace hook itself, then restore it
                trace = self.w_f_trace
                self.w_f_trace = None
                try:
                    ec.bytecode_trace(self)
                finally:
                    self.w_f_trace = trace
            except OperationError, e:
                # the signal handler raised: that exception replaces
                # the original one
                operr = e
        pytraceback.record_application_traceback(
            self.space, operr, self, self.last_instr)
        if not we_are_jitted():
            ec.exception_trace(self, operr)
def LOOKUP_METHOD(f, nameindex, *ignored):
    """Opcode helper: look up a method on the object at the top of the stack.

    Pushes either (w_function, w_object) in the fast-method case, or
    (w_boundmethod_or_whatever, None) in the fallback case.
    """
    #   stack before                 after
    #  --------------    --fast-method----fallback-case------------
    #
    #                      w_object       None
    #    w_object    =>    w_function     w_boundmethod_or_whatever
    #   (more stuff)      (more stuff)   (more stuff)
    #
    space = f.space
    w_obj = f.popvalue()

    if space.config.objspace.std.withmapdict and not jit.we_are_jitted():
        # mapdict has an extra-fast version of this function
        from pypy.objspace.std.mapdict import LOOKUP_METHOD_mapdict
        if LOOKUP_METHOD_mapdict(f, nameindex, w_obj):
            return

    w_name = f.getname_w(nameindex)
    w_value = None

    w_type = space.type(w_obj)
    if w_type.has_object_getattribute():
        # only safe to bypass getattr() if the type uses the default
        # object.__getattribute__
        name = space.str_w(w_name)
        w_descr = w_type.lookup(name)
        if w_descr is None:
            # this handles directly the common case
            #   module.function(args..)
            w_value = w_obj.getdictvalue(space, name)
            # xxx we could also use the mapdict cache in that case, probably
        else:
            typ = type(w_descr)
            if typ is function.Function or typ is function.FunctionWithFixedCode:
                # the class attribute is a plain function: check whether the
                # instance dict shadows it
                w_value = w_obj.getdictvalue(space, name)
                if w_value is None:
                    # fast method path: a function object in the class,
                    # nothing in the instance
                    f.pushvalue(w_descr)
                    f.pushvalue(w_obj)
                    if (space.config.objspace.std.withmapdict and
                            not jit.we_are_jitted()):
                        # let mapdict cache stuff
                        LOOKUP_METHOD_mapdict_fill_cache_method(
                            space, f.getcode(), name, nameindex, w_obj, w_type)
                    return
    if w_value is None:
        # slow path: full attribute lookup
        w_value = space.getattr(w_obj, w_name)
    f.pushvalue(w_value)
    f.pushvalue(None)
def call_valuestack(self, w_func, nargs, frame):
    """Call *w_func* with *nargs* arguments taken from *frame*'s value stack.

    Contains speed hacks: profiled builtin calls go through the C-profile
    path, and bound Methods are unwrapped in place on the stack.
    """
    from pypy.interpreter.function import Function, Method, is_builtin_code
    if (not we_are_jitted() and frame.is_being_profiled and
            is_builtin_code(w_func)):
        # XXX: this code is copied&pasted :-( from the slow path below
        # call_valuestack().
        args = frame.make_arguments(nargs)
        return self.call_args_and_c_profile(frame, w_func, args)

    if not self.config.objspace.disable_call_speedhacks:
        # XXX start of hack for performance
        if isinstance(w_func, Method):
            w_inst = w_func.w_instance
            if w_inst is not None:
                # bound method: push the instance as the extra first argument
                w_func = w_func.w_function
                # reuse callable stack place for w_inst
                frame.settopvalue(w_inst, nargs)
                nargs += 1
            elif nargs > 0 and (
                self.abstract_isinstance_w(frame.peekvalue(nargs-1),   # :-(
                                           w_func.w_class)):
                # unbound method called with a valid first argument:
                # call the underlying function directly
                w_func = w_func.w_function

        if isinstance(w_func, Function):
            return w_func.funccall_valuestack(nargs, frame)
        # XXX end of hack for performance

    # slow path: pack the arguments and do a generic call
    args = frame.make_arguments(nargs)
    return self.call_args(w_func, args)
def f(x):
    """Test helper: return x when jitted, x + 1 otherwise; 5 on error."""
    try:
        if not we_are_jitted():
            return x + 1
        return x
    except Exception:
        return 5
def call_valuestack(self, w_func, nargs, frame):
    """Call *w_func* with *nargs* arguments taken from *frame*'s value stack.

    Contains speed hacks: profiled builtin calls go through the C-profile
    path, and bound Methods are unwrapped in place on the stack.
    """
    from pypy.interpreter.function import Function, Method, is_builtin_code
    if (not we_are_jitted() and frame.is_being_profiled and
            is_builtin_code(w_func)):
        # XXX: this code is copied&pasted :-( from the slow path below
        # call_valuestack().
        args = frame.make_arguments(nargs)
        return self.call_args_and_c_profile(frame, w_func, args)

    if not self.config.objspace.disable_call_speedhacks:
        # XXX start of hack for performance
        if isinstance(w_func, Method):
            w_inst = w_func.w_instance
            if w_inst is not None:
                # bound method: push the instance as the extra first argument
                w_func = w_func.w_function
                # reuse callable stack place for w_inst
                frame.settopvalue(w_inst, nargs)
                nargs += 1
            elif nargs > 0 and (self.abstract_isinstance_w(
                    frame.peekvalue(nargs - 1),   # :-(
                    w_func.w_class)):
                # unbound method called with a valid first argument:
                # call the underlying function directly
                w_func = w_func.w_function

        if isinstance(w_func, Function):
            return w_func.funccall_valuestack(nargs, frame)
        # XXX end of hack for performance

    # slow path: pack the arguments and do a generic call
    args = frame.make_arguments(nargs)
    return self.call_args(w_func, args)
def ll_math_isfinite(y):
    """Return whether the float *y* is finite (neither inf nor NaN)."""
    # Use a custom hack that is reasonably well-suited to the JIT.
    # Floats are awesome (bis).
    if use_library_isinf_isnan and not jit.we_are_jitted():
        # interpreter fast path: defer to the C library's finite()
        return bool(_lib_finite(y))
    # 0.0 * inf and 0.0 * NaN are both NaN, and NaN != NaN,
    # so the product equals itself exactly when y is finite.
    prod = y * 0.0
    return prod == prod
def ll_math_isinf(y):
    """Return whether the float *y* is positive or negative infinity."""
    if jit.we_are_jitted():
        # JIT-friendly trick: only an infinity is unchanged by adding
        # a very large finite float
        return (y + VERY_LARGE_FLOAT) == y
    if use_library_isinf_isnan:
        # not finite and not NaN <=> infinite
        return not _lib_finite(y) and not _lib_isnan(y)
    return y == INFINITY or y == -INFINITY
def getcell(self, key, makenew):
    """Return the cell stored for *key*.

    If *makenew* is false and the key is absent, return None instead of
    creating a cell.
    """
    if makenew or jit.we_are_jitted():
        # when we are jitting, we always go through the pure function
        # below, to ensure that we have no residual dict lookup
        self = jit.hint(self, promote=True)
        return self._getcell_makenew(key)
    # interpreter-only fast path: plain dict lookup, no cell creation
    return self.content.get(key, None)
def index(self, selector):
    """Return the index for *selector*, using a JIT-pure helper when jitted."""
    if not jit.we_are_jitted():
        return self._index_indirection(selector)
    # hack for the jit:
    # the _index method is pure too, but its argument is never
    # constant, because it is always a new tuple -- so unpack it
    # and call the pure two-argument variant instead
    return self._index_jit_pure(selector[0], selector[1])
def JUMP_ABSOLUTE(f, jumpto, _, ec=None):
    """Unconditional-jump opcode; also the point where the JIT may be entered.

    Returns the next instruction offset.
    """
    if we_are_jitted():
        # run the bytecode trace hook at the jump target; it may change
        # f.last_instr, so re-read the target afterwards
        f.last_instr = intmask(jumpto)
        ec.bytecode_trace(f)
        jumpto = r_uint(f.last_instr)
    # backward jumps are the canonical place to enter the JIT
    pypyjitdriver.can_enter_jit(frame=f, ec=ec, next_instr=jumpto,
                                pycode=f.getcode())
    return jumpto
def get_next_structure(self, key):
    """Return the successor structure for *key* (shared/cached lookup)."""
    # jit helper
    self = hint(self, promote=True)
    key = hint(key, promote=True)
    newstruct = _get_next_structure_shared(self, key)
    if not we_are_jitted():
        # keep the running size estimate up to date; this mutation must
        # stay invisible to the JIT, hence the guard
        self._size_estimate -= self.size_estimate()
        self._size_estimate += newstruct.size_estimate()
    return newstruct
def issubtype(w_self, w_type):
    """Return whether w_self is a subtype of w_type.

    When jitted and type versions are enabled, the answer is computed by a
    pure function on the promoted version tags so it constant-folds.
    """
    promote(w_self)
    promote(w_type)
    if w_self.space.config.objspace.std.withtypeversion and we_are_jitted():
        version_tag1 = w_self.version_tag()
        version_tag2 = w_type.version_tag()
        if version_tag1 is not None and version_tag2 is not None:
            # both types are versioned: use the foldable pure variant
            res = _pure_issubtype(w_self, w_type, version_tag1, version_tag2)
            return res
    return _issubtype(w_self, w_type)
def f(y):
    """Test helper: count y down, stepping by 1 when jitted and 10 when
    interpreted, so the final value reveals which mode ran."""
    while y >= 0:
        myjitdriver.can_enter_jit(y=y)
        myjitdriver.jit_merge_point(y=y)
        if we_are_jitted():
            x = 1
        else:
            x = 10
        y -= x
    return y
def ll_stringslice_startstop(s1, start, stop):
    """Low-level helper: return the slice s1[start:stop]."""
    if jit.we_are_jitted():
        # jitted path: just clamp stop; no identity-return special case,
        # to keep the operation simple for the JIT
        if stop > len(s1.chars):
            stop = len(s1.chars)
    else:
        if stop >= len(s1.chars):
            if start == 0:
                # full copy requested: strings are immutable, so the
                # original object can be returned as-is
                return s1
            stop = len(s1.chars)
    return LLHelpers._ll_stringslice(s1, start, stop)
def call__Type(space, w_type, __args__): w_type = hint(w_type, promote=True) # special case for type(x) if space.is_w(w_type, space.w_type): try: w_obj, = __args__.fixedunpack(1) except ValueError: pass else: return space.type(w_obj) # invoke the __new__ of the type if not we_are_jitted(): # note that the annotator will figure out that w_type.w_bltin_new can # only be None if the newshortcut config option is not set w_bltin_new = w_type.w_bltin_new else: # for the JIT it is better to take the slow path because normal lookup # is nicely optimized, but the w_type.w_bltin_new attribute is not # known to the JIT w_bltin_new = None call_init = True if w_bltin_new is not None: w_newobject = space.call_obj_args(w_bltin_new, w_type, __args__) else: w_newtype, w_newdescr = w_type.lookup_where('__new__') w_newfunc = space.get(w_newdescr, w_type) if (space.config.objspace.std.newshortcut and not we_are_jitted() and isinstance(w_newtype, W_TypeObject) and not w_newtype.is_heaptype() and not space.is_w(w_newtype, space.w_type)): w_type.w_bltin_new = w_newfunc w_newobject = space.call_obj_args(w_newfunc, w_type, __args__) call_init = space.is_true(space.isinstance(w_newobject, w_type)) # maybe invoke the __init__ of the type if call_init: w_descr = space.lookup(w_newobject, '__init__') w_result = space.get_and_call_args(w_descr, w_newobject, __args__) if not space.is_w(w_result, space.w_None): raise OperationError(space.w_TypeError, space.wrap("__init__() should return None")) return w_newobject
def f(n):
    """Test helper: build a one-char string per iteration; outside the JIT
    it is doubled (forcing the string annotation) and escaped when n > 100."""
    while n > 0:
        jitdriver.can_enter_jit(n=n)
        jitdriver.jit_merge_point(n=n)
        s = _chr(n)
        if not we_are_jitted():
            s += s    # forces to be a string
        if n > 100:
            escape(s)
        n -= 1
    return 42
def f(n):
    """Test helper: like the variant above, but accumulate escape(s) into
    *total* so the result depends on the strings produced."""
    total = 0
    while n > 0:
        jitdriver.can_enter_jit(n=n, total=total)
        jitdriver.jit_merge_point(n=n, total=total)
        s = _chr(n)
        if not we_are_jitted():
            s += s    # forces to be a string
        total += escape(s)
        n -= 1
    return total
def wrapchar(space, c):
    """Wrap the single character *c* as an application-level string object.

    Uses the prebuilt one-character table when enabled (and not jitted),
    otherwise builds a fresh rope or string object.
    """
    from pypy.objspace.std.stringobject import W_StringObject
    from pypy.objspace.std.ropeobject import rope, W_RopeObject
    std_config = space.config.objspace.std
    if std_config.withprebuiltchar and not we_are_jitted():
        # reuse the shared prebuilt single-character objects
        if std_config.withrope:
            return W_RopeObject.PREBUILT[ord(c)]
        return W_StringObject.PREBUILT[ord(c)]
    # no prebuilt table (or jitted): allocate a fresh object
    if std_config.withrope:
        return W_RopeObject(rope.LiteralStringNode(c))
    return W_StringObject(c)
def issubtype__Type_Type(space, w_type1, w_type2):
    """Multimethod implementation of issubtype for two type objects.

    When jitted with type versions enabled, folds the answer via a pure
    function on the promoted version tags.
    """
    w_type1 = hint(w_type1, promote=True)
    w_type2 = hint(w_type2, promote=True)
    if space.config.objspace.std.withtypeversion and we_are_jitted():
        version_tag1 = w_type1.version_tag()
        version_tag2 = w_type2.version_tag()
        if version_tag1 is not None and version_tag2 is not None:
            # both types are versioned: use the foldable pure variant
            res = _pure_issubtype(w_type1, w_type2,
                                  version_tag1, version_tag2)
            return space.newbool(res)
    res = _issubtype(w_type1, w_type2)
    return space.newbool(res)
def f(self, code):
    """Test helper: interpret the tiny *code* string one opcode at a time.

    Opcodes: '-' decrement; 'c'/'C' run a nested Frame and check (only when
    jitted) whether its hook was called; 'i' conditional return; 'l'/'L'
    conditional backward jumps that may enter the JIT.
    """
    pc = 0
    while pc < len(code):
        myjitdriver.jit_merge_point(self=self, code=code, pc=pc)
        op = code[pc]
        if op == "-":
            self.n -= 1
        elif op == "c":
            frame = Frame(self.n)
            self.n = frame.f("---i---")
            if we_are_jitted():
                # the nested call must NOT have triggered the hook
                if frame.hookcalled:
                    raise UnexpectedHook
        elif op == "C":
            frame = Frame(self.n)
            self.n = frame.f("cL")
            if we_are_jitted():
                # the nested call MUST have triggered the hook
                if not frame.hookcalled:
                    raise ExpectedHook
        elif op == "i":
            if self.n % 5 == 1:
                return self.n
        elif op == "l":
            if self.n > 0:
                # backward jump: candidate point to enter the JIT
                myjitdriver.can_enter_jit(self=self, code=code, pc=0)
                pc = 0
                continue
        elif op == "L":
            if self.n > 50:
                myjitdriver.can_enter_jit(self=self, code=code, pc=0)
                pc = 0
                continue
        else:
            assert 0
        pc += 1
    return self.n
def _match_signature(self, w_firstarg, scope_w, signature, defaults_w=[],
                     blindargs=0):
    """Parse args and kwargs according to the signature of a code object,
    or raise an ArgErr in case of failure.
    Return the number of arguments filled in.
    """
    # NOTE(review): defaults_w=[] is a mutable default argument; nothing
    # visible here mutates it, and a sibling variant of this method uses
    # defaults_w=None instead -- confirm against _really_match_signature
    # before changing it.
    if jit.we_are_jitted() and self._dont_jit:
        # opaque variant keeps the matching logic out of the traced code
        return self._match_signature_jit_opaque(w_firstarg, scope_w,
                                                signature, defaults_w,
                                                blindargs)
    return self._really_match_signature(w_firstarg, scope_w, signature,
                                        defaults_w, blindargs)
def try_rule(self, rule, query, continuation=DONOTHING, choice_point=True,
             inline=False):
    """Try applying *rule* to *query*, choosing between the portal,
    inline, and opaque entry points depending on jit state."""
    if not choice_point:
        # no backtracking point needed: just hand back a continuation tuple
        return (TRY_RULE, query, continuation, rule)
    if not we_are_jitted():
        # interpreter: go through the jit portal
        return self.portal_try_rule(rule, query, continuation, choice_point)
    if inline:
        return self.main_loop(TRY_RULE, query, continuation, rule)
    #if _is_early_constant(rule):
    #    rule = hint(rule, promote=True)
    #    return self.portal_try_rule(rule, query, continuation, choice_point)
    # jitted, non-inline: call the opaque variant so the rule application
    # is not traced
    return self._opaque_try_rule(rule, query, continuation, choice_point)
def _match_signature(self, w_firstarg, scope_w, signature, defaults_w=None,
                     blindargs=0):
    """Parse args and kwargs according to the signature of a code object,
    or raise an ArgErr in case of failure.
    Return the number of arguments filled in.
    """
    if jit.we_are_jitted() and self._dont_jit:
        # opaque variant keeps the matching logic out of the traced code
        return self._match_signature_jit_opaque(w_firstarg, scope_w,
                                                signature, defaults_w,
                                                blindargs)
    return self._really_match_signature(w_firstarg, scope_w, signature,
                                        defaults_w, blindargs)
def call__Type(space, w_type, __args__): promote(w_type) # invoke the __new__ of the type if not we_are_jitted(): # note that the annotator will figure out that w_type.w_bltin_new can # only be None if the newshortcut config option is not set w_bltin_new = w_type.w_bltin_new else: # for the JIT it is better to take the slow path because normal lookup # is nicely optimized, but the w_type.w_bltin_new attribute is not # known to the JIT w_bltin_new = None call_init = True if w_bltin_new is not None: w_newobject = space.call_obj_args(w_bltin_new, w_type, __args__) else: w_newtype, w_newdescr = w_type.lookup_where("__new__") w_newfunc = space.get(w_newdescr, w_type) if ( space.config.objspace.std.newshortcut and not we_are_jitted() and isinstance(w_newtype, W_TypeObject) and not w_newtype.is_heaptype() and not space.is_w(w_newtype, space.w_type) ): w_type.w_bltin_new = w_newfunc w_newobject = space.call_obj_args(w_newfunc, w_type, __args__) call_init = space.isinstance_w(w_newobject, w_type) # maybe invoke the __init__ of the type if call_init and not ( space.is_w(w_type, space.w_type) and not __args__.keywords and len(__args__.arguments_w) == 1 ): w_descr = space.lookup(w_newobject, "__init__") w_result = space.get_and_call_args(w_descr, w_newobject, __args__) if not space.is_w(w_result, space.w_None): raise OperationError(space.w_TypeError, space.wrap("__init__() should return None")) return w_newobject
def eval_arithmetic(self, engine):
    """Evaluate this term as a Prolog arithmetic expression.

    When jitted, the function is found by scanning a flat list against the
    promoted signature (unrollable by the JIT); otherwise a dict lookup
    is used.  Raises a type error if the signature is not evaluable.
    """
    from pypy.lang.prolog.interpreter.arithmetic import arithmetic_functions
    from pypy.lang.prolog.interpreter.arithmetic import arithmetic_functions_list
    if we_are_jitted():
        # the promoted signature makes the linear scan constant-foldable
        signature = hint(self.signature, promote=True)
        func = None
        for sig, func in arithmetic_functions_list:
            if sig == signature:
                break
    else:
        func = arithmetic_functions.get(self.signature, None)
    if func is None:
        error.throw_type_error("evaluable", self.get_prolog_signature())
    return func(engine, self)
def funccall_valuestack(self, nargs, frame):
    """Call this function with *nargs* arguments taken from *frame*'s
    value stack (speed hack: dispatch on the code's natural arity)."""
    from pypy.interpreter import gateway
    from pypy.interpreter.pycode import PyCode
    code = self.getcode()  # hook for the jit
    #
    if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info
            and nargs == 0):
        # sys.exc_info() gets a direct implementation under the JIT
        from pypy.module.sys.vm import exc_info_direct
        return exc_info_direct(self.space, frame)
    #
    fast_natural_arity = code.fast_natural_arity
    if nargs == fast_natural_arity:
        # exact-arity builtins: call the specialized fastcall_N entry point
        if nargs == 0:
            assert isinstance(code, gateway.BuiltinCode0)
            return code.fastcall_0(self.space, self)
        elif nargs == 1:
            assert isinstance(code, gateway.BuiltinCode1)
            return code.fastcall_1(self.space, self, frame.peekvalue(0))
        elif nargs == 2:
            assert isinstance(code, gateway.BuiltinCode2)
            return code.fastcall_2(self.space, self, frame.peekvalue(1),
                                   frame.peekvalue(0))
        elif nargs == 3:
            assert isinstance(code, gateway.BuiltinCode3)
            return code.fastcall_3(self.space, self, frame.peekvalue(2),
                                   frame.peekvalue(1), frame.peekvalue(0))
        elif nargs == 4:
            assert isinstance(code, gateway.BuiltinCode4)
            return code.fastcall_4(self.space, self, frame.peekvalue(3),
                                   frame.peekvalue(2), frame.peekvalue(1),
                                   frame.peekvalue(0))
    elif (nargs | Code.FLATPYCALL) == fast_natural_arity:
        # Python code with exactly matching argument count
        assert isinstance(code, PyCode)
        return self._flat_pycall(code, nargs, frame)
    elif fast_natural_arity & Code.FLATPYCALL:
        # Python code called with fewer arguments: fill in defaults
        natural_arity = fast_natural_arity & 0xff
        if natural_arity > nargs >= natural_arity - len(self.defs_w):
            assert isinstance(code, PyCode)
            return self._flat_pycall_defaults(code, nargs, frame,
                                              natural_arity - nargs)
    elif fast_natural_arity == Code.PASSTHROUGHARGS1 and nargs >= 1:
        # builtin taking one explicit object plus packed remaining args
        assert isinstance(code, gateway.BuiltinCodePassThroughArguments1)
        w_obj = frame.peekvalue(nargs - 1)
        args = frame.make_arguments(nargs - 1)
        return code.funcrun_obj(self, w_obj, args)

    # slow generic path
    args = frame.make_arguments(nargs)
    return self.call_args(args)
def funccall_valuestack(self, nargs, frame):
    """Call this function with *nargs* arguments taken from *frame*'s
    value stack (speed hack: dispatch on the code's natural arity)."""
    from pypy.interpreter import gateway
    from pypy.interpreter.pycode import PyCode
    code = self.getcode()  # hook for the jit
    #
    if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info
            and nargs == 0):
        # sys.exc_info() gets a direct implementation under the JIT
        from pypy.module.sys.vm import exc_info_direct
        return exc_info_direct(self.space, frame)
    #
    fast_natural_arity = code.fast_natural_arity
    if nargs == fast_natural_arity:
        # exact-arity builtins: call the specialized fastcall_N entry point
        if nargs == 0:
            assert isinstance(code, gateway.BuiltinCode0)
            return code.fastcall_0(self.space, self)
        elif nargs == 1:
            assert isinstance(code, gateway.BuiltinCode1)
            return code.fastcall_1(self.space, self, frame.peekvalue(0))
        elif nargs == 2:
            assert isinstance(code, gateway.BuiltinCode2)
            return code.fastcall_2(self.space, self, frame.peekvalue(1),
                                   frame.peekvalue(0))
        elif nargs == 3:
            assert isinstance(code, gateway.BuiltinCode3)
            return code.fastcall_3(self.space, self, frame.peekvalue(2),
                                   frame.peekvalue(1), frame.peekvalue(0))
        elif nargs == 4:
            assert isinstance(code, gateway.BuiltinCode4)
            return code.fastcall_4(self.space, self, frame.peekvalue(3),
                                   frame.peekvalue(2), frame.peekvalue(1),
                                   frame.peekvalue(0))
    elif (nargs | Code.FLATPYCALL) == fast_natural_arity:
        # Python code with exactly matching argument count
        assert isinstance(code, PyCode)
        return self._flat_pycall(code, nargs, frame)
    elif fast_natural_arity & Code.FLATPYCALL:
        # Python code called with fewer arguments: fill in defaults
        natural_arity = fast_natural_arity & 0xff
        if natural_arity > nargs >= natural_arity - len(self.defs_w):
            assert isinstance(code, PyCode)
            return self._flat_pycall_defaults(code, nargs, frame,
                                              natural_arity - nargs)
    elif fast_natural_arity == Code.PASSTHROUGHARGS1 and nargs >= 1:
        # builtin taking one explicit object plus packed remaining args
        assert isinstance(code, gateway.BuiltinCodePassThroughArguments1)
        w_obj = frame.peekvalue(nargs-1)
        args = frame.make_arguments(nargs-1)
        return code.funcrun_obj(self, w_obj, args)

    # slow generic path
    args = frame.make_arguments(nargs)
    return self.call_args(args)
def call__Type(space, w_type, __args__): promote(w_type) # invoke the __new__ of the type if not we_are_jitted(): # note that the annotator will figure out that w_type.w_bltin_new can # only be None if the newshortcut config option is not set w_bltin_new = w_type.w_bltin_new else: # for the JIT it is better to take the slow path because normal lookup # is nicely optimized, but the w_type.w_bltin_new attribute is not # known to the JIT w_bltin_new = None call_init = True if w_bltin_new is not None: w_newobject = space.call_obj_args(w_bltin_new, w_type, __args__) else: w_newtype, w_newdescr = w_type.lookup_where('__new__') w_newfunc = space.get(w_newdescr, w_type) if (space.config.objspace.std.newshortcut and not we_are_jitted() and isinstance(w_newtype, W_TypeObject) and not w_newtype.is_heaptype() and not space.is_w(w_newtype, space.w_type)): w_type.w_bltin_new = w_newfunc w_newobject = space.call_obj_args(w_newfunc, w_type, __args__) call_init = space.isinstance_w(w_newobject, w_type) # maybe invoke the __init__ of the type if (call_init and not (space.is_w(w_type, space.w_type) and not __args__.keywords and len(__args__.arguments_w) == 1)): w_descr = space.lookup(w_newobject, '__init__') w_result = space.get_and_call_args(w_descr, w_newobject, __args__) if not space.is_w(w_result, space.w_None): raise OperationError(space.w_TypeError, space.wrap("__init__() should return None")) return w_newobject
def lookup_where_with_method_cache(w_self, name):
    """Look up *name* along the MRO, returning (defining_class, value),
    going through the method cache / JIT-pure variants when possible."""
    space = w_self.space
    w_self = hint(w_self, promote=True)
    assert space.config.objspace.std.withmethodcache
    if (space.config.objspace.std.immutable_builtintypes and
            we_are_jitted() and not w_self.is_heaptype()):
        # builtin (non-heap) types are immutable: the lookup can be a
        # pure function of (type, name) under the JIT
        w_self = hint(w_self, promote=True)
        name = hint(name, promote=True)
        return w_self._pure_lookup_where_builtin_type(name)
    version_tag = w_self.version_tag
    version_tag = hint(version_tag, promote=True)
    if version_tag is None:
        # unversioned type: fall back to the plain MRO walk
        tup = w_self._lookup_where(name)
        return tup
    name = hint(name, promote=True)
    return w_self._pure_lookup_where_with_method_cache(name, version_tag)
def jump_absolute(self, jumpto, _, ec=None):
    """Unconditional-jump opcode; also the point where the JIT may be
    entered.  Returns the next instruction offset."""
    if we_are_jitted():
        #
        # assume that only threads are using the bytecode counter
        decr_by = 0
        if self.space.actionflag.has_bytecode_counter:   # constant-folded
            if self.space.threadlocals.gil_ready:   # quasi-immutable field
                decr_by = _get_adapted_tick_counter()
        #
        # bytecode_trace() may alter last_instr, so re-read the target
        self.last_instr = intmask(jumpto)
        ec.bytecode_trace(self, decr_by)
        jumpto = r_uint(self.last_instr)
    #
    # backward jumps are the canonical place to enter the JIT
    pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto,
                                pycode=self.getcode(),
                                is_being_profiled=self.is_being_profiled)
    return jumpto
def issubtype__Type_Type(space, w_type1, w_type2):
    """Multimethod implementation of issubtype for two type objects.

    When jitted with type versions enabled, the answer is folded either via
    the immutable-builtin-types pure variant or via the promoted version
    tags.
    """
    w_type1 = hint(w_type1, promote=True)
    w_type2 = hint(w_type2, promote=True)
    if space.config.objspace.std.withtypeversion and we_are_jitted():
        if (space.config.objspace.std.immutable_builtintypes and
                not w_type1.is_heaptype() and
                not w_type2.is_heaptype()):
            # both are immutable builtin types: fully foldable answer
            res = _pure_issubtype_builtin(w_type1, w_type2)
            return space.newbool(res)
        else:
            version_tag1 = w_type1.version_tag
            version_tag2 = w_type2.version_tag
            version_tag1 = hint(version_tag1, promote=True)
            version_tag2 = hint(version_tag2, promote=True)
            if version_tag1 is not None and version_tag2 is not None:
                # both types are versioned: use the foldable pure variant
                res = _pure_issubtype(w_type1, w_type2,
                                      version_tag1, version_tag2)
                return space.newbool(res)
    res = _issubtype(w_type1, w_type2)
    return space.newbool(res)
def add_attr(self, obj, selector, w_value):
    """Add the attribute described by *selector* to *obj*'s mapdict map
    and store *w_value*, growing the storage if needed."""
    # grumble, jit needs this
    attr = self._get_new_attr(selector[0], selector[1])
    oldattr = obj._get_mapdict_map()
    if not jit.we_are_jitted():
        # keep the running size estimate up to date (interpreter only;
        # the JIT must not see this mutation)
        size_est = (oldattr._size_estimate + attr.size_estimate()
                    - oldattr.size_estimate())
        assert size_est >= (oldattr.length() * NUM_DIGITS_POW2)
        oldattr._size_estimate = size_est
    if attr.length() > obj._mapdict_storage_length():
        # note that attr.size_estimate() is always at least attr.length()
        new_storage = [None] * attr.size_estimate()
        for i in range(obj._mapdict_storage_length()):
            new_storage[i] = obj._mapdict_read_storage(i)
        obj._set_mapdict_storage_and_map(new_storage, attr)

    # the order is important here: first change the map, then the storage,
    # for the benefit of the special subclasses
    obj._set_mapdict_map(attr)
    obj._mapdict_write_storage(attr.position, w_value)
def handle_operation_error(self, ec, operr, attach_tb=True):
    """Record *operr* on this frame and unwind to the nearest exception
    handler block.

    Returns the next instruction offset if a handler was found; otherwise
    re-raises the OperationError (preserving the CPython-level traceback
    when running untranslated).
    """
    self.last_exception = operr
    if attach_tb:
        pytraceback.record_application_traceback(
            self.space, operr, self, self.last_instr)
    if not we_are_jitted():
        ec.exception_trace(self, operr)

    block = self.unrollstack(SApplicationException.kind)
    if block is None:
        # no handler found for the OperationError
        if we_are_translated():
            raise operr
        else:
            # try to preserve the CPython-level traceback
            import sys
            tb = sys.exc_info()[2]
            raise OperationError, operr, tb
    else:
        unroller = SApplicationException(operr)
        next_instr = block.handle(self, unroller)
        return next_instr
def handle_operation_error(self, ec, operr, attach_tb=True):
    """Record *operr* on this frame and unwind to the nearest exception
    handler block.

    Returns the next instruction offset if a handler was found; otherwise
    re-raises the OperationError (preserving the CPython-level traceback
    when running untranslated).
    """
    self.last_exception = operr
    if attach_tb:
        pytraceback.record_application_traceback(self.space, operr, self,
                                                 self.last_instr)
    if not we_are_jitted():
        ec.exception_trace(self, operr)

    block = self.unrollstack(SApplicationException.kind)
    if block is None:
        # no handler found for the OperationError
        if we_are_translated():
            raise operr
        else:
            # try to preserve the CPython-level traceback
            import sys
            tb = sys.exc_info()[2]
            raise OperationError, operr, tb
    else:
        unroller = SApplicationException(operr)
        next_instr = block.handle(self, unroller)
        return next_instr
def getattribute_if_not_from_object(w_self):
    """ this method returns the applevel __getattribute__ if that is not
    the one from object, in which case it returns None """
    from pypy.objspace.descroperation import object_getattribute
    if not we_are_jitted():
        shortcut = w_self.space.config.objspace.std.getattributeshortcut
        if not shortcut or not w_self.uses_object_getattribute:
            # slow path: look for a custom __getattribute__ on the class
            w_descr = w_self.lookup('__getattribute__')
            # if it was not actually overriden in the class, we remember this
            # fact for the next time.
            if w_descr is object_getattribute(w_self.space):
                if shortcut:
                    # cache the "not overridden" answer on the type
                    w_self.uses_object_getattribute = True
            else:
                return w_descr
        return None
    # in the JIT case, just use a lookup, because it is folded away
    # correctly using the version_tag
    w_descr = w_self.lookup('__getattribute__')
    if w_descr is not object_getattribute(w_self.space):
        return w_descr
def dispatch_bytecode(self, co_code, next_instr, ec):
    """Main bytecode dispatch loop.

    Decodes one opcode (plus argument and EXTENDED_ARG prefixes) per
    iteration and dispatches it; RETURN_VALUE / YIELD_VALUE / END_FINALLY
    get special handling before the generic opcode dispatch.  Returns the
    next instruction offset when control must leave this loop; otherwise
    raises Return or Yield.
    """
    space = self.space
    while True:
        self.last_instr = intmask(next_instr)
        if not we_are_jitted():
            # the trace hook may change last_instr (e.g. a jump set by
            # the debugger), so re-read it afterwards
            ec.bytecode_trace(self)
            next_instr = r_uint(self.last_instr)
        opcode = ord(co_code[next_instr])
        next_instr += 1
        if space.config.objspace.logbytecodes:
            # optional opcode-frequency statistics
            space.bytecodecounts[opcode] = space.bytecodecounts.get(
                opcode, 0) + 1

        if opcode >= HAVE_ARGUMENT:
            # decode the 16-bit little-endian argument
            lo = ord(co_code[next_instr])
            hi = ord(co_code[next_instr + 1])
            next_instr += 2
            oparg = (hi << 8) | lo
        else:
            oparg = 0
        hint(opcode, concrete=True)
        hint(oparg, concrete=True)

        while opcode == opcodedesc.EXTENDED_ARG.index:
            # EXTENDED_ARG supplies the high 16 bits of the next opcode's
            # argument; the following opcode must itself take an argument
            opcode = ord(co_code[next_instr])
            if opcode < HAVE_ARGUMENT:
                raise BytecodeCorruption
            lo = ord(co_code[next_instr + 1])
            hi = ord(co_code[next_instr + 2])
            next_instr += 3
            oparg = (oparg << 16) | (hi << 8) | lo
            hint(opcode, concrete=True)
            hint(oparg, concrete=True)

        if opcode == opcodedesc.RETURN_VALUE.index:
            w_returnvalue = self.popvalue()
            block = self.unrollstack(SReturnValue.kind)
            if block is None:
                self.pushvalue(w_returnvalue)   # XXX ping pong
                raise Return
            else:
                unroller = SReturnValue(w_returnvalue)
                next_instr = block.handle(self, unroller)
                return next_instr    # now inside a 'finally' block

        if opcode == opcodedesc.YIELD_VALUE.index:
            #self.last_instr = intmask(next_instr - 1) XXX clean up!
            raise Yield

        if opcode == opcodedesc.END_FINALLY.index:
            unroller = self.end_finally()
            if isinstance(unroller, SuspendedUnroller):
                # go on unrolling the stack
                block = self.unrollstack(unroller.kind)
                if block is None:
                    w_result = unroller.nomoreblocks()
                    self.pushvalue(w_result)
                    raise Return
                else:
                    next_instr = block.handle(self, unroller)
                    return next_instr

        if we_are_translated():
            for opdesc in unrolling_opcode_descs:
                # static checks to skip this whole case if necessary
                if not opdesc.is_enabled(space):
                    continue
                if not hasattr(pyframe.PyFrame, opdesc.methodname):
                    continue   # e.g. for JUMP_FORWARD, implemented above

                if opcode == opdesc.index:
                    # dispatch to the opcode method
                    meth = getattr(self, opdesc.methodname)
                    res = meth(oparg, next_instr)
                    if opdesc.index == opcodedesc.CALL_FUNCTION.index:
                        # stackless support: mark a resume point after calls
                        rstack.resume_point("dispatch_call", self, co_code,
                                            next_instr, ec)
                    # !! warning, for the annotator the next line is not
                    # comparing an int and None - you can't do that.
                    # Instead, it's constant-folded to either True or False
                    if res is not None:
                        next_instr = res
                    break
            else:
                self.MISSING_OPCODE(oparg, next_instr)
        else:
            # when we are not translated, a list lookup is much faster
            methodname = opcode_method_names[opcode]
            res = getattr(self, methodname)(oparg, next_instr)
            if res is not None:
                next_instr = res

        if we_are_jitted():
            # one opcode per trip through the loop under the JIT: return
            # to the dispatcher so it can place merge points
            return next_instr