def loop_bytecodes(self, s_context, may_context_switch=True):
    """Run the bytecode interpretation loop for the context `s_context`.

    A backward jump (pc < old_pc) marks the end of a user-level loop
    iteration: that is where the interrupt check happens and where the
    JIT may enter compiled code via can_enter_jit.
    """
    old_pc = 0
    if not jit.we_are_jitted() and may_context_switch:
        self.quick_check_for_interrupt(s_context)
    method = s_context.w_method()
    while True:
        pc = s_context.pc()
        if pc < old_pc:
            if jit.we_are_jitted():
                # Do the interrupt-check at the end of a loop, don't interrupt loops midway.
                self.jitted_check_for_interrupt(s_context)
            self.jit_driver.can_enter_jit(
                pc=pc, self=self, method=method,
                w_class=self.getreceiverclass(s_context),
                blockmethod=self.getblockmethod(s_context),
                s_context=s_context)
        old_pc = pc
        # Greens/reds passed here must match the JitDriver declaration.
        self.jit_driver.jit_merge_point(
            pc=pc, self=self, method=method,
            w_class=self.getreceiverclass(s_context),
            blockmethod=self.getblockmethod(s_context),
            s_context=s_context)
        try:
            self.step(s_context)
        except FreshReturn, ret:
            # Unwrap and re-raise the underlying return exception.
            raise ret.exception
        except LocalReturn, ret:
            # A return targeting this very context: push the value and go on.
            s_context.push(ret.value(self.space))
def descr_call(self, space, __args__): promote(self) # invoke the __new__ of the type if not we_are_jitted(): # note that the annotator will figure out that self.w_new_function # can only be None if the newshortcut config option is not set w_newfunc = self.w_new_function else: # for the JIT it is better to take the slow path because normal lookup # is nicely optimized, but the self.w_new_function attribute is not # known to the JIT w_newfunc = None if w_newfunc is None: w_newtype, w_newdescr = self.lookup_where('__new__') w_newfunc = space.get(w_newdescr, self) if (space.config.objspace.std.newshortcut and not we_are_jitted() and isinstance(w_newtype, W_TypeObject)): self.w_new_function = w_newfunc w_newobject = space.call_obj_args(w_newfunc, self, __args__) call_init = space.isinstance_w(w_newobject, self) # maybe invoke the __init__ of the type if (call_init and not (space.is_w(self, space.w_type) and not __args__.keywords and len(__args__.arguments_w) == 1)): w_descr = space.lookup(w_newobject, '__init__') w_result = space.get_and_call_args(w_descr, w_newobject, __args__) if not space.is_w(w_result, space.w_None): raise oefmt(space.w_TypeError, "__init__() should return None") return w_newobject
def c_loop(self, s_context, may_context_switch=True):
    """Bytecode loop for `s_context` with non-local-return (Return) handling.

    Backward jumps trigger the interrupt check and can_enter_jit; a Return
    aimed at an outer context unwinds this one (running an unwind handler
    for primitive-198 methods) and re-raises.
    """
    old_pc = 0
    if not jit.we_are_jitted() and may_context_switch:
        self.quick_check_for_interrupt(s_context)
    method = s_context.s_method()
    while True:
        pc = s_context.pc()
        if pc < old_pc:
            if jit.we_are_jitted():
                # Interrupt check with a tick budget scaled to the trace.
                self.quick_check_for_interrupt(
                    s_context, dec=self._get_adapted_tick_counter())
            self.jit_driver.can_enter_jit(pc=pc, self=self, method=method,
                                          s_context=s_context)
        old_pc = pc
        self.jit_driver.jit_merge_point(pc=pc, self=self, method=method,
                                        s_context=s_context)
        try:
            self.step(s_context)
        except Return, nlr:
            if nlr.s_target_context is not s_context:
                # Passing through: run the unwind action for ensure-style
                # methods (primitive 198), mark this frame dead, keep raising.
                if not s_context.is_closure_context() and s_context.s_method().primitive() == 198:
                    s_context.activate_unwind_context(self)
                s_context.mark_returned()
                raise nlr
            else:
                # The return targets this context: push its value and resume.
                s_context.push(nlr.value)
def loop_bytecodes(self, s_context, may_context_switch):
    """Run the bytecode loop for `s_context`.

    Identical shape to the defaulted-argument variant: interrupt checks and
    can_enter_jit happen only on backward jumps (pc < old_pc).
    Note: `may_context_switch` has no default here — callers must pass it.
    """
    old_pc = 0
    if not jit.we_are_jitted() and may_context_switch:
        self.quick_check_for_interrupt(s_context)
    method = s_context.w_method()
    while True:
        pc = s_context.pc()
        if pc < old_pc:
            if jit.we_are_jitted():
                # Do the interrupt-check at the end of a loop, don't interrupt loops midway.
                self.jitted_check_for_interrupt(s_context)
            self.jit_driver.can_enter_jit(
                pc=pc, self=self, method=method,
                w_class=self.getreceiverclass(s_context),
                blockmethod=self.getblockmethod(s_context),
                s_context=s_context)
        old_pc = pc
        self.jit_driver.jit_merge_point(
            pc=pc, self=self, method=method,
            w_class=self.getreceiverclass(s_context),
            blockmethod=self.getblockmethod(s_context),
            s_context=s_context)
        try:
            self.step(s_context)
        except FreshReturn, ret:
            # Unwrap and re-raise the underlying return exception.
            raise ret.exception
        except LocalReturn, ret:
            # Return targeting this context: push its value and continue.
            s_context.push(ret.value(self.space))
def loop_bytecodes(self, s_context, may_context_switch=True):
    """Run the bytecode loop for `s_context` (Return-exception variant).

    A Return that arrived at its target context pushes its value here;
    otherwise it propagates to an outer frame.
    """
    old_pc = 0
    if not jit.we_are_jitted() and may_context_switch:
        self.quick_check_for_interrupt(s_context)
    method = s_context.w_method()
    while True:
        pc = s_context.pc()
        if pc < old_pc:
            if jit.we_are_jitted():
                # Do the interrupt-check at the end of a loop, don't interrupt loops midway.
                self.jitted_check_for_interrupt(s_context)
            self.jit_driver.can_enter_jit(
                pc=pc, self=self, method=method, s_context=s_context)
        old_pc = pc
        self.jit_driver.jit_merge_point(
            pc=pc, self=self, method=method, s_context=s_context)
        try:
            self.step(s_context)
        except Return, ret:
            if ret.arrived_at_target:
                s_context.push(ret.value)
            else:
                # Not for this frame: keep unwinding.
                raise ret
def loop_bytecodes(self, s_context, may_context_switch=True):
    """Run the bytecode loop for `s_context` (Return-exception variant).

    Same contract as the wrapped-call variant: backward jumps drive the
    interrupt check and JIT entry; a Return that reached its target is
    consumed here, any other Return propagates.
    """
    old_pc = 0
    if not jit.we_are_jitted() and may_context_switch:
        self.quick_check_for_interrupt(s_context)
    method = s_context.w_method()
    while True:
        pc = s_context.pc()
        if pc < old_pc:
            if jit.we_are_jitted():
                # Do the interrupt-check at the end of a loop, don't interrupt loops midway.
                self.jitted_check_for_interrupt(s_context)
            self.jit_driver.can_enter_jit(pc=pc, self=self, method=method,
                                          s_context=s_context)
        old_pc = pc
        self.jit_driver.jit_merge_point(pc=pc, self=self, method=method,
                                        s_context=s_context)
        try:
            self.step(s_context)
        except Return, ret:
            if ret.arrived_at_target:
                s_context.push(ret.value)
            else:
                raise ret
def c_loop(self, s_context, may_context_switch=True):
    """Bytecode loop for `s_context` with non-local-return handling.

    Mirrors the other c_loop variant: a Return aimed at an outer context
    unwinds this frame (running the unwind action for primitive-198
    methods) before re-raising; a Return for this context pushes its value.
    """
    old_pc = 0
    if not jit.we_are_jitted() and may_context_switch:
        self.quick_check_for_interrupt(s_context)
    method = s_context.s_method()
    while True:
        pc = s_context.pc()
        if pc < old_pc:
            if jit.we_are_jitted():
                # Interrupt check with a tick budget scaled to the trace.
                self.quick_check_for_interrupt(s_context,
                                               dec=self._get_adapted_tick_counter())
            self.jit_driver.can_enter_jit(
                pc=pc, self=self, method=method, s_context=s_context)
        old_pc = pc
        self.jit_driver.jit_merge_point(
            pc=pc, self=self, method=method, s_context=s_context)
        try:
            self.step(s_context)
        except Return, nlr:
            if nlr.s_target_context is not s_context:
                if not s_context.is_closure_context() and s_context.s_method().primitive() == 198:
                    s_context.activate_unwind_context(self)
                s_context.mark_returned()
                raise nlr
            else:
                s_context.push(nlr.value)
def LOOKUP_METHOD(f, nameindex, *ignored): from pypy.objspace.std.typeobject import MutableCell # stack before after # -------------- --fast-method----fallback-case------------ # # w_object None # w_object => w_function w_boundmethod_or_whatever # (more stuff) (more stuff) (more stuff) # space = f.space w_obj = f.popvalue() if not jit.we_are_jitted(): # mapdict has an extra-fast version of this function if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return w_name = f.getname_w(nameindex) w_value = None w_type = space.type(w_obj) if w_type.has_object_getattribute(): name = space.text_w(w_name) # bit of a mess to use these internal functions, but it allows the # mapdict caching below to work without an additional lookup version_tag = w_type.version_tag() if version_tag is None: _, w_descr = w_type._lookup_where(name) w_descr_cell = None else: _, w_descr_cell = w_type._pure_lookup_where_with_method_cache( name, version_tag) w_descr = w_descr_cell if isinstance(w_descr, MutableCell): w_descr = w_descr.unwrap_cell(space) if w_descr is None: # this handles directly the common case # module.function(args..) w_value = w_obj.getdictvalue(space, name) # xxx we could also use the mapdict cache in that case, probably else: typ = type(w_descr) if typ is function.Function or typ is function.FunctionWithFixedCode: w_value = w_obj.getdictvalue(space, name) if w_value is None: # fast method path: a function object in the class, # nothing in the instance f.pushvalue(w_descr) f.pushvalue(w_obj) if not jit.we_are_jitted(): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( space, f.getcode(), name, nameindex, w_obj, w_type, w_descr_cell) return if w_value is None: w_value = space.getattr(w_obj, w_name) f.pushvalue(w_value) f.pushvalue_none()
def LOOKUP_METHOD(f, nameindex, *ignored): from pypy.objspace.std.typeobject import MutableCell # stack before after # -------------- --fast-method----fallback-case------------ # # w_object None # w_object => w_function w_boundmethod_or_whatever # (more stuff) (more stuff) (more stuff) # space = f.space w_obj = f.popvalue() if not jit.we_are_jitted(): # mapdict has an extra-fast version of this function if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return w_name = f.getname_w(nameindex) w_value = None w_type = space.type(w_obj) if w_type.has_object_getattribute(): name = space.str_w(w_name) # bit of a mess to use these internal functions, but it allows the # mapdict caching below to work without an additional lookup version_tag = w_type.version_tag() if version_tag is None: _, w_descr = w_type._lookup_where(name) w_descr_cell = None else: _, w_descr_cell = w_type._pure_lookup_where_with_method_cache( name, version_tag) w_descr = w_descr_cell if isinstance(w_descr, MutableCell): w_descr = w_descr.unwrap_cell(space) if w_descr is None: # this handles directly the common case # module.function(args..) w_value = w_obj.getdictvalue(space, name) # xxx we could also use the mapdict cache in that case, probably else: typ = type(w_descr) if typ is function.Function or typ is function.FunctionWithFixedCode: w_value = w_obj.getdictvalue(space, name) if w_value is None: # fast method path: a function object in the class, # nothing in the instance f.pushvalue(w_descr) f.pushvalue(w_obj) if not jit.we_are_jitted(): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( space, f.getcode(), name, nameindex, w_obj, w_type, w_descr_cell) return if w_value is None: w_value = space.getattr(w_obj, w_name) f.pushvalue(w_value) f.pushvalue(None)
def LOOKUP_METHOD(f, nameindex, *ignored):
    """Look up a method on the stack top, preferring the unbound fast path.

    Oldest variant: mapdict usage is gated on the `withmapdict` config
    option, and the descriptor comes from a plain w_type.lookup().
    """
    #   stack before                 after
    #  --------------    --fast-method----fallback-case------------
    #
    #                      w_object       None
    #    w_object    =>    w_function     w_boundmethod_or_whatever
    #   (more stuff)      (more stuff)   (more stuff)
    #
    space = f.space
    w_obj = f.popvalue()
    if space.config.objspace.std.withmapdict and not jit.we_are_jitted():
        # mapdict has an extra-fast version of this function
        from pypy.objspace.std.mapdict import LOOKUP_METHOD_mapdict
        if LOOKUP_METHOD_mapdict(f, nameindex, w_obj):
            return
    w_name = f.getname_w(nameindex)
    w_value = None
    w_type = space.type(w_obj)
    if w_type.has_object_getattribute():
        name = space.str_w(w_name)
        w_descr = w_type.lookup(name)
        if w_descr is None:
            # this handles directly the common case
            #   module.function(args..)
            w_value = w_obj.getdictvalue(space, name)
            # xxx we could also use the mapdict cache in that case, probably
        else:
            typ = type(w_descr)
            if typ is function.Function or typ is function.FunctionWithFixedCode:
                w_value = w_obj.getdictvalue(space, name)
                if w_value is None:
                    # fast method path: a function object in the class,
                    # nothing in the instance
                    f.pushvalue(w_descr)
                    f.pushvalue(w_obj)
                    if (space.config.objspace.std.withmapdict and
                            not jit.we_are_jitted()):
                        # let mapdict cache stuff
                        LOOKUP_METHOD_mapdict_fill_cache_method(
                            space, f.getcode(), name, nameindex, w_obj, w_type)
                    return
    if w_value is None:
        # Generic fallback: full getattr, pushed with a None marker.
        w_value = space.getattr(w_obj, w_name)
    f.pushvalue(w_value)
    f.pushvalue(None)
def f(x):
    """Return x while jitted, x + 1 otherwise; any failure yields 5."""
    try:
        result = x if we_are_jitted() else x + 1
    except Exception:
        result = 5
    return result
def register_call(self, lam, calling_app, cont, env):
    """Record a call edge calling_lam -> lam in the call graph.

    On first sight of the edge, a detected recursion enables jitting of the
    calling lambda; when the continuation returns into the same lambda's
    body, its AST is additionally marked as a trace entry point.
    """
    if jit.we_are_jitted():
        # No bookkeeping inside compiled traces.
        return
    if not calling_app:
        return
    calling_lam = calling_app.surrounding_lambda
    if not calling_lam:
        return
    subdct = self.calls.get(calling_lam, None)
    if subdct is None:
        self.calls[calling_lam] = subdct = {}
        lam_in_subdct = False
    else:
        lam_in_subdct = lam in subdct
    cont_ast = cont.get_next_executed_ast()
    config = env.pycketconfig()
    is_recursive = False
    if not lam_in_subdct:
        # New edge: register it and test for recursion once.
        subdct[lam] = None
        if self.is_recursive(calling_lam, lam):
            is_recursive = True
            if config.log_callgraph:
                print "enabling jitting", calling_lam.tostring()
            calling_lam.enable_jitting()
    # It is possible to have multiple consuming continuations for a given
    # function body. This will attempt to mark them all.
    same_lambda = cont_ast and cont_ast.surrounding_lambda is calling_lam
    if same_lambda:
        if lam_in_subdct:
            # did not call is_recursive yet
            is_recursive = self.is_recursive(calling_lam, lam)
        if is_recursive:
            if cont_ast.set_should_enter() and config.log_callgraph:
                print "jitting downrecursion", cont_ast.tostring()
def execute(self, frame):
    """Evaluate the receiver expression and return the addressed field."""
    rcvr = self._self_exp.execute(frame)
    assert isinstance(rcvr, ObjectWithLayout)
    # Outside the JIT, go through the specializing read node; inside it,
    # read the field slot directly.
    if not we_are_jitted():
        return self._read.read(rcvr)
    return rcvr.get_field(self._field_idx)
def ll_math_isfinite(y):
    """Return True iff y is neither an infinity nor a NaN."""
    # Outside the JIT, prefer the C library's finite() when it is available.
    if use_library_isinf_isnan and not jit.we_are_jitted():
        return bool(_lib_finite(y))
    # 0.0 * inf and 0.0 * nan are both NaN, and a NaN never equals itself.
    product = 0.0 * y
    return product == product
def execute(self, frame):
    """Evaluate the receiver expression and return the addressed field."""
    rcvr = self._self_exp.execute(frame)
    assert isinstance(rcvr, Object)
    # Outside the JIT, go through the specializing read node (which also
    # receives the frame); inside it, read the field slot directly.
    if not we_are_jitted():
        return self._read.read(frame, rcvr)
    return rcvr.get_field(self._field_idx)
def ll_math_isinf(y):
    """Return True iff y is positive or negative infinity."""
    if jit.we_are_jitted():
        # JIT-friendly check: only an infinity survives adding a huge
        # float unchanged (NaN fails the == comparison).
        return (y + VERY_LARGE_FLOAT) == y
    if use_library_isinf_isnan:
        return not _lib_finite(y) and not _lib_isnan(y)
    return y == INFINITY or y == -INFINITY
def new_jump(self, space, bytecode, frame, cur_pc, target_pc):
    """Delegate to old_jump; on jitted backward jumps, periodically
    hand control to Smalltalk via the interrupt counter."""
    is_backward_jump = target_pc < cur_pc
    if is_backward_jump and jit.we_are_jitted():
        trace_length = jit.current_trace_length()
        # Scale the counter decrement with the current trace length
        # (always at least 1).
        decrement = int(trace_length >> 12 | 1)
        if interrupt_counter.triggers(decr_by=decrement):
            switch_to_smalltalk(space.current_ruby_process.get())
    return old_jump(self, space, bytecode, frame, cur_pc, target_pc)
def reenter(self):
    """Move the cache state from its non-virtual to its virtual slot
    and return the detached state.

    NOTE(review): the collapsed source makes the nesting ambiguous; read
    here as: only the assert is guarded by the jit check, the state
    transfer always runs — confirm against the original file.
    """
    if not jit.we_are_jitted():
        # Outside the JIT the virtual slot must be empty before reentry.
        assert self._virt_cache_state is None
    cache_state = self._nonvirt_cache_state
    self._nonvirt_cache_state = None
    self._virt_cache_state = Virt(cache_state)
    return cache_state
def _set_field_after_layout_change(self, field_idx, value):
    """Write `value` through the field's storage location.

    Interpreter-only path (asserted not to run under the JIT).
    """
    assert not we_are_jitted()
    # we aren't handling potential exceptions here, because,
    # they should not happen by construction
    self.get_location(field_idx).write_location(self, value)
def _unset_or_generalize(self, obj, value):
    """Mark the slot unset when value is nil; otherwise request that the
    storage location be generalized."""
    if value is not nilObject:
        # Generalization must never be reached from compiled code.
        if we_are_jitted():
            assert False
        raise GeneralizeStorageLocationException()
    self._mark_as_unset(obj)
def send_ex(self, w_arg, operr=None):
    """Resume the generator; insert a merge point first when the code
    object should not be inlined into the caller's trace."""
    code = self.pycode
    if (code is not None and jit.we_are_jitted()
            and should_not_inline(code)):
        generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg,
                                              operr=operr, pycode=code)
    return self._send_ex(w_arg, operr)
def register_call(self, lam, calling_app, cont, env):
    """Record a call edge calling_lam -> lam in the call graph.

    A LOOP_HEADER classification enables jitting of the calling lambda;
    any non-NOT_LOOP status on a same-lambda continuation marks the
    continuation's AST as a trace entry point.
    """
    if jit.we_are_jitted() or calling_app is None:
        return
    calling_lam = calling_app.surrounding_lambda
    if calling_lam is None:
        return
    subdct = self.calls.get(calling_lam, None)
    if subdct is None:
        self.calls[calling_lam] = subdct = {}
        lam_in_subdct = False
    else:
        lam_in_subdct = lam in subdct
    cont_ast = cont.get_next_executed_ast()
    # NOTE(review): `config` is unused below in this variant; kept as-is in
    # case pycketconfig() matters for its side effects — verify upstream.
    config = env.pycketconfig()
    status = NOT_LOOP
    if not lam_in_subdct:
        # New edge: register it and classify it once.
        subdct[lam] = None
        status = self.is_recursive(calling_lam, lam)
        if status == LOOP_HEADER:
            calling_lam.enable_jitting()
    # It is possible to have multiple consuming continuations for a given
    # function body. This will attempt to mark them all.
    same_lambda = cont_ast and cont_ast.surrounding_lambda is calling_lam
    if same_lambda:
        # did not call is_recursive yet
        if lam_in_subdct:
            status = self.is_recursive(calling_lam, lam)
        if status != NOT_LOOP:
            cont_ast.set_should_enter()
def STORE_GLOBAL_cached(self, nameindex, next_instr):
    """Store a global, using and refreshing the per-code global-cell cache.

    Jitted or debugged frames take the plain setitem path; otherwise the
    cached cell is tried first, and on a miss the cache entry is refilled
    from the module dict.
    """
    w_newvalue = self.popvalue()
    if jit.we_are_jitted() or self.getdebug() is not None:
        # Plain path: no cache interaction under the JIT or a debugger.
        varname = self.getname_u(nameindex)
        self.space.setitem_str(self.get_w_globals(), varname, w_newvalue)
        return
    pycode = self.pycode
    cache_wref = pycode._globals_caches[nameindex]
    if cache_wref is not None:
        cache = cache_wref()
        if cache and cache.valid:
            # Fast path: write straight into the cached cell.
            w_value = write_cell(self.space, cache.cell, w_newvalue)
            if w_value is None:
                return
    # Slow path: ordinary dict store, then refresh the cache entry.
    varname = self.getname_u(nameindex)
    w_globals = self.pycode.w_globals
    self.space.setitem_str(w_globals, varname, w_newvalue)
    if isinstance(w_globals, W_ModuleDictObject):
        # the following can never be true, because W_ModuleDictObject can't be
        # user-subclassed, but let's be safe
        assert not w_globals.user_overridden_class
        cache = w_globals.get_global_cache(varname)
        if cache is not None:
            assert cache.valid and cache.ref is not None
            pycode._globals_caches[nameindex] = cache.ref
def LOOKUP_METHOD(f, nameindex, *ignored):
    """Look up a method on the stack top, preferring the unbound fast path.

    withmapdict-gated variant with module-level mapdict helpers in scope.
    """
    #   stack before                 after
    #  --------------    --fast-method----fallback-case------------
    #
    #                      w_object       None
    #    w_object    =>    w_function     w_boundmethod_or_whatever
    #   (more stuff)      (more stuff)   (more stuff)
    #
    space = f.space
    w_obj = f.popvalue()
    if space.config.objspace.std.withmapdict and not jit.we_are_jitted():
        # mapdict has an extra-fast version of this function
        if LOOKUP_METHOD_mapdict(f, nameindex, w_obj):
            return
    w_name = f.getname_w(nameindex)
    w_value = None
    w_type = space.type(w_obj)
    if w_type.has_object_getattribute():
        name = space.str_w(w_name)
        w_descr = w_type.lookup(name)
        if w_descr is None:
            # this handles directly the common case
            #   module.function(args..)
            w_value = w_obj.getdictvalue(space, name)
            # xxx we could also use the mapdict cache in that case, probably
        else:
            typ = type(w_descr)
            if typ is function.Function or typ is function.FunctionWithFixedCode:
                w_value = w_obj.getdictvalue(space, name)
                if w_value is None:
                    # fast method path: a function object in the class,
                    # nothing in the instance
                    f.pushvalue(w_descr)
                    f.pushvalue(w_obj)
                    if (space.config.objspace.std.withmapdict and
                            not jit.we_are_jitted()):
                        # let mapdict cache stuff
                        LOOKUP_METHOD_mapdict_fill_cache_method(
                            space, f.getcode(), name, nameindex, w_obj, w_type)
                    return
    if w_value is None:
        # Generic fallback: full getattr, pushed with a None marker.
        w_value = space.getattr(w_obj, w_name)
    f.pushvalue(w_value)
    f.pushvalue(None)
def _get_all_fields(self):
    """Snapshot all field values of this object.

    Returns a list of length get_number_of_fields(); entries for fields
    that are not set stay None. Interpreter-only path (asserted).
    """
    assert not we_are_jitted()
    num_fields = self._object_layout.get_number_of_fields()
    field_values = [None] * num_fields
    # Idiom fix: range(n) instead of range(0, n).
    for i in range(num_fields):
        if self._is_field_set(i):
            field_values[i] = self.get_field(i)
    return field_values
def above_threshold(self, field, *args):
    """Walk up to `n` levels of .inner wrappers, checking each is a
    W_InterposeStructBase; jitted code short-circuits to True.

    NOTE(review): `n` is not defined anywhere in this block — presumably a
    constant closed over by an enclosing make_*-style helper that
    generates this function; verify against the surrounding code.
    """
    if jit.we_are_jitted():
        return True
    for _ in range(n):
        if not isinstance(self, W_InterposeStructBase):
            return False
        self = self.inner
    return True
def find_map_attr(self, selector):
    """Find the map attribute for `selector`, choosing the variant that
    suits the current execution mode."""
    if not jit.we_are_jitted():
        return self._find_map_attr_indirection(selector)
    # hack for the jit:
    # the _find_map_attr method is pure too, but its argument is never
    # constant, because it is always a new tuple — so unpack it and call
    # the pure variant with the two components instead.
    sel0, sel1 = selector[0], selector[1]
    return self._find_map_attr_jit_pure(sel0, sel1)
def _update_layout_with_initialized_field(self, idx, field_type):
    """Ask the class for a layout updated for field `idx` of
    `field_type`, then switch this object over to it.

    Interpreter-only path (asserted not to run under the JIT).
    """
    assert not we_are_jitted()
    new_layout = self._class.update_instance_layout_with_initialized_field(
        idx, field_type)
    # The class must actually have produced a different layout.
    assert new_layout is not self._object_layout
    self._set_layout_and_transfer_fields(new_layout)
def add_attr(self, obj, name, index, w_value):
    """Add an attribute to obj; outside the JIT, also fold the new map's
    size estimate into this map's running estimate."""
    self._reorder_and_add(obj, name, index, w_value)
    if jit.we_are_jitted():
        return
    new_map = obj._get_mapdict_map()
    updated_estimate = (self._size_estimate + new_map.size_estimate()
                        - self.size_estimate())
    assert updated_estimate >= (self.length() * NUM_DIGITS_POW2)
    self._size_estimate = updated_estimate
def read(self, obj):
    """Specialize this uninitialized read node and perform the read.

    Chains grow until _max_chain_length, then fall back to a generic node.
    """
    if we_are_jitted():
        assert False  # specialization must not happen in compiled code
    next_depth = self._depth + 1
    if self._depth < _max_chain_length:
        successor = _UninitializedReadFieldNode(self._field_idx, next_depth)
    else:
        successor = _GenericReadFieldNode(self._field_idx, next_depth)
    return self._specialize_and_read(obj, "uninitialized node", successor)
def execute_evaluated(self, frame, rcvr, args):
    """Dispatch a send with already-evaluated receiver and arguments."""
    assert frame is not None
    assert rcvr is not None
    assert args is not None
    make_sure_not_resized(args)
    # Compiled code bypasses the dispatch chain and calls directly.
    if we_are_jitted():
        return self._direct_dispatch(rcvr, args)
    return self._dispatch.execute_dispatch(rcvr, args)
def add_attr(self, obj, name, index, w_value):
    """Add an attribute to obj and, when interpreting, keep this map's
    running size estimate in sync with the new map."""
    self._reorder_and_add(obj, name, index, w_value)
    if jit.we_are_jitted():
        return
    resulting_map = obj._get_mapdict_map()
    new_estimate = (self._size_estimate
                    + resulting_map.size_estimate()
                    - self.size_estimate())
    assert new_estimate >= (self.length() * NUM_DIGITS_POW2)
    self._size_estimate = new_estimate
def ll_stringslice_startstop(s1, start, stop):
    """Slice s1.chars[start:stop] with the stop bound clamped; the whole
    string may be returned unsliced (identity) outside the JIT."""
    length = len(s1.chars)
    if jit.we_are_jitted():
        # While tracing, skip the identity shortcut and just clamp.
        stop = min(stop, length)
    elif stop >= length:
        if start == 0:
            # Whole-string slice: return the original object.
            return s1
        stop = length
    return LLHelpers._ll_stringslice(s1, start, stop)
def update_layout_to_match_class(self):
    """Adopt the class's current instance layout.

    Returns True when a layout switch actually happened, False when the
    object was already up to date. Interpreter-only path (asserted).
    """
    assert not we_are_jitted()
    target_layout = self._class.get_layout_for_instances()
    # Field counts must agree before transferring fields.
    assert self._object_layout.get_number_of_fields() == target_layout.get_number_of_fields()
    if self._object_layout is target_layout:
        return False
    self._set_layout_and_transfer_fields(target_layout)
    return True
def _set_all_fields(self, field_values):
    """Reset all storage slots, then restore the given field values.

    None entries in field_values are restored as nilObject. The five
    object slots are cleared to nilObject and the five primitive slots to
    the 1234567890 sentinel before restoring. Interpreter-only (asserted).
    """
    assert not we_are_jitted()
    self._field1 = self._field2 = self._field3 = self._field4 = self._field5 = nilObject
    self._primField1 = self._primField2 = self._primField3 = self._primField4 = self._primField5 = 1234567890
    # Idiom fix: range(n) instead of range(0, n).
    for i in range(self._object_layout.get_number_of_fields()):
        if field_values[i] is None:
            self.set_field(i, nilObject)
        else:
            self.set_field(i, field_values[i])
def issubtype(w_self, w_type):
    """issubtype check with a version-tag fast path while jitted."""
    promote(w_self)
    promote(w_type)
    if w_self.space.config.objspace.std.withtypeversion and we_are_jitted():
        tag_self = w_self.version_tag()
        tag_other = w_type.version_tag()
        # Both types versioned: the pure variant's result can be cached
        # on the (type, type, tag, tag) key.
        if tag_self is not None and tag_other is not None:
            return _pure_issubtype(w_self, w_type, tag_self, tag_other)
    return _issubtype(w_self, w_type)
def execute(self, frame):
    """Evaluate receiver and value expressions, store the field, and
    return the stored value."""
    rcvr = self._self_exp.execute(frame)
    new_value = self._value_exp.execute(frame)
    assert isinstance(rcvr, ObjectWithLayout)
    assert isinstance(new_value, AbstractObject)
    # Outside the JIT, go through the specializing write node; inside it,
    # write the field slot directly.
    if not we_are_jitted():
        self._write.write(rcvr, new_value)
    else:
        rcvr.set_field(self._field_idx, new_value)
    return new_value
def funccall_valuestack(self, nargs, frame, methodcall=False):  # speed hack
    """Call this function with `nargs` arguments taken from frame's
    value stack, dispatching on the code object's fast_natural_arity.

    Falls through to the generic call_args path when no fast shape fits.
    """
    # methodcall is only for better error messages
    from pypy.interpreter import gateway
    from pypy.interpreter.pycode import PyCode
    code = self.getcode()  # hook for the jit
    #
    # Jitted zero-arg sys.exc_info() gets a direct implementation.
    if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info and
            nargs == 0):
        from pypy.module.sys.vm import exc_info_direct
        return exc_info_direct(self.space, frame)
    #
    fast_natural_arity = code.fast_natural_arity
    if nargs == fast_natural_arity:
        # Exact-arity builtins: call the specialized fastcall_N entry.
        if nargs == 0:
            assert isinstance(code, gateway.BuiltinCode0)
            return code.fastcall_0(self.space, self)
        elif nargs == 1:
            assert isinstance(code, gateway.BuiltinCode1)
            return code.fastcall_1(self.space, self, frame.peekvalue(0))
        elif nargs == 2:
            assert isinstance(code, gateway.BuiltinCode2)
            return code.fastcall_2(self.space, self, frame.peekvalue(1),
                                   frame.peekvalue(0))
        elif nargs == 3:
            assert isinstance(code, gateway.BuiltinCode3)
            return code.fastcall_3(self.space, self, frame.peekvalue(2),
                                   frame.peekvalue(1), frame.peekvalue(0))
        elif nargs == 4:
            assert isinstance(code, gateway.BuiltinCode4)
            return code.fastcall_4(self.space, self, frame.peekvalue(3),
                                   frame.peekvalue(2), frame.peekvalue(1),
                                   frame.peekvalue(0))
    elif (nargs | Code.FLATPYCALL) == fast_natural_arity:
        # Flat Python call with the exact declared arity.
        assert isinstance(code, PyCode)
        return self._flat_pycall(code, nargs, frame)
    elif fast_natural_arity & Code.FLATPYCALL:
        # Flat Python call filling in trailing default arguments.
        natural_arity = fast_natural_arity & 0xff
        if natural_arity > nargs >= natural_arity - len(self.defs_w):
            assert isinstance(code, PyCode)
            return self._flat_pycall_defaults(code, nargs, frame,
                                              natural_arity - nargs)
    elif fast_natural_arity == Code.PASSTHROUGHARGS1 and nargs >= 1:
        # First stack value is the bound object; rest become Arguments.
        assert isinstance(code, gateway.BuiltinCodePassThroughArguments1)
        w_obj = frame.peekvalue(nargs - 1)
        args = frame.make_arguments(nargs - 1, w_function=self)
        return code.funcrun_obj(self, w_obj, args)
    # Generic fallback: build an Arguments object and do a full call.
    args = frame.make_arguments(nargs, methodcall=methodcall,
                                w_function=self)
    return self.call_args(args)
def f(n):
    """JIT test loop: count n down to 0, summing escape(_chr(n)).

    The n/total keyword names must match the jitdriver's greens/reds.
    """
    total = 0
    while n > 0:
        jitdriver.can_enter_jit(n=n, total=total)
        jitdriver.jit_merge_point(n=n, total=total)
        s = _chr(n)
        if not we_are_jitted():
            s += s  # forces to be a string
        total += escape(s)
        n -= 1
    return total
def f(n):
    """JIT test loop: count n down to 0, escaping only when n > 100.

    The n keyword name must match the jitdriver's greens/reds; always
    returns 42.
    """
    while n > 0:
        jitdriver.can_enter_jit(n=n)
        jitdriver.jit_merge_point(n=n)
        s = _chr(n)
        if not we_are_jitted():
            s += s  # forces to be a string
        if n > 100:
            escape(s)
        n -= 1
    return 42