def set_compile_hook(space, w_hook):
    """ set_compile_hook(hook)

    Set a compiling hook that will be called each time a loop is compiled.
    The hook will be called with the following signature:
    hook(jitdriver_name, loop_type, greenkey or guard_number, operations,
         assembler_addr, assembler_length)

    jitdriver_name is the name of this particular jitdriver, 'pypyjit' is
    the main interpreter loop

    loop_type can be either `loop` `entry_bridge` or `bridge`
    in case loop is not `bridge`, greenkey will be a tuple of constants
    or a string describing it.

    for the interpreter loop` it'll be a tuple
    (code, offset, is_being_profiled)

    assembler_addr is an integer describing where assembler starts,
    can be accessed via ctypes, assembler_length is the length of compiled
    asm

    Note that jit hook is not reentrant. It means that if the code
    inside the jit hook is itself jitted, it will get compiled, but the
    jit hook won't be called for that.
    """
    cache = space.fromcache(Cache)
    cache.w_compile_hook = w_hook
    # NonConstant keeps the annotator from constant-folding the flag;
    # at runtime this simply resets the reentrancy guard to False.
    cache.in_recursion = NonConstant(False)
def set_optimize_hook(space, w_hook):
    """ set_optimize_hook(hook)

    Set a compiling hook that will be called each time a loop is optimized,
    but before assembler compilation. This allows to add additional
    optimizations on Python level.

    The hook will be called with the following signature:
    hook(jitdriver_name, loop_type, greenkey or guard_number, operations)

    jitdriver_name is the name of this particular jitdriver, 'pypyjit' is
    the main interpreter loop

    loop_type can be either `loop` `entry_bridge` or `bridge`
    in case loop is not `bridge`, greenkey will be a tuple of constants
    or a string describing it.

    for the interpreter loop` it'll be a tuple
    (code, offset, is_being_profiled)

    Note that jit hook is not reentrant. It means that if the code
    inside the jit hook is itself jitted, it will get compiled, but the
    jit hook won't be called for that.

    Result value will be the resulting list of operations, or None
    """
    cache = space.fromcache(Cache)
    cache.w_optimize_hook = w_hook
    # Reset the reentrancy guard; NonConstant prevents the annotator
    # from treating the flag as a compile-time constant.
    cache.in_recursion = NonConstant(False)
def isvirtual(value):
    """ Returns if this value is virtual, while tracing, it's relatively
    conservative and will miss some cases.

    This is for advanced usage only.
    """
    # Untranslated / non-tracing answer: conservatively "not virtual".
    answer = NonConstant(False)
    return answer
def isconstant(value):
    """ While tracing, returns whether or not the value is currently known
    to be constant. This is not perfect, values can become constant later.
    Mostly for use with @look_inside_iff.

    This is for advanced usage only.
    """
    # Untranslated / non-tracing answer: conservatively "not constant".
    answer = NonConstant(False)
    return answer
def jit_walk_stack_root(callback, addr, end):
    """Walk GC root addresses in the stack range [addr, end), invoking
    callback(gc, addr) for each root found.

    NOTE(review): this is a closure -- ``root_iterator``, ``self`` and
    ``sizeofaddr`` come from the enclosing scope, which is not visible here.
    """
    # NonConstant stops the annotator from assuming the context is
    # always NULL.
    root_iterator.context = NonConstant(llmemory.NULL)
    gc = self.gc
    while True:
        # next() returns the next root address, or NULL when exhausted
        addr = root_iterator.next(gc, addr, end)
        if addr == llmemory.NULL:
            return
        callback(gc, addr)
        addr += sizeofaddr
def non_constant(bytecode, pool):
    """Return (bytecode, pool) unchanged at runtime, while hiding their
    constness from the RPython annotator.

    The NonConstant(False) branch is never taken when executing; it only
    forces the annotator to see a non-constant string / populated pool.
    """
    from pypy.rlib.nonconst import NonConstant
    if not NonConstant(False):
        return bytecode, pool
    # Dead at runtime -- exists purely so the annotator sees this shape.
    dummy = ConstantPool()
    dummy.add_string("foo")
    dummy.add_string("bazz")
    dummy.add_classdescr(["a", "bc"], [("foo", 3), ("y", 5)])
    return "123", dummy
def f(n):
    """Build an n-element int32 ndarray filled with 7, compute
    sum(ar + ar) via the space operations, and return the plain int result.
    """
    # The false branch is never executed; it widens the annotated type of
    # `dtype` so the code below is checked against both dtypes.
    if NonConstant(False):
        dtype = float64_dtype
    else:
        dtype = int32_dtype
    ar = W_NDimArray(n, [n], dtype=dtype)
    for idx in range(n):
        ar.get_concrete().setitem(idx, int32_dtype.box(7))
    v = ar.descr_add(space, ar).descr_sum(space)
    assert isinstance(v, IntObject)
    return v.intval
def set_abort_hook(space, w_hook):
    """ set_abort_hook(hook)

    Set a hook (callable) that will be called each time there is tracing
    aborted due to some reason.

    The hook will be called as in: hook(jitdriver_name, greenkey, reason)

    Where reason is the reason for abort, see documentation for
    set_compile_hook for descriptions of other arguments.
    """
    cache = space.fromcache(Cache)
    cache.w_abort_hook = w_hook
    # Reset the reentrancy guard; NonConstant keeps the annotator from
    # constant-folding the flag.
    cache.in_recursion = NonConstant(False)
def set_param_enable_opts(self, value):
    """Parse a colon-separated list of optimization names and store the
    enabled set as a dict on ``self.enable_opts``.

    ``None`` or ``'all'`` enables every optimization.  Raises ValueError
    for a name not present in ALL_OPTS_DICT.
    """
    from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES
    enabled = {}
    # Keep the annotator from seeing a constant '' here.
    if NonConstant(False):
        value = 'blah'
    if value is None or value == 'all':
        value = ALL_OPTS_NAMES
    for name in value.split(":"):
        if not name:
            continue
        if name not in ALL_OPTS_DICT:
            raise ValueError('Unknown optimization ' + name)
        enabled[name] = None
    self.enable_opts = enabled
def maybe_compile_and_run(*args):
    """Entry point to the JIT.  Called at the point with the
    can_enter_jit() hint.

    NOTE(review): this is a closure -- ``metainterp_sd``, ``self``,
    ``vinfo``, ``num_green_args``, ``get_jitcell`` and
    ``set_future_values`` come from the enclosing scope.
    """
    globaldata = metainterp_sd.globaldata
    if NonConstant(False):
        # make sure we always see the saner optimizer from an
        # annotation point of view, otherwise we get lots of
        # blocked ops
        self.set_param_optimizer(OPTIMIZER_FULL)
    if vinfo is not None:
        virtualizable = args[vinfo.index_of_virtualizable]
        virtualizable = vinfo.cast_to_vtype(virtualizable)
        assert virtualizable != globaldata.blackhole_virtualizable, (
            "reentering same frame via blackhole")
    # look for the cell corresponding to the current greenargs
    greenargs = args[:num_green_args]
    cell = get_jitcell(*greenargs)
    if cell.counter >= 0:
        # update the profiling counter
        n = cell.counter + self.increment_threshold
        if n <= self.THRESHOLD_LIMIT:
            # bound not reached
            cell.counter = n
            return
        # bound reached; start tracing
        from pypy.jit.metainterp.pyjitpl import MetaInterp
        metainterp = MetaInterp(metainterp_sd)
        try:
            loop_token = metainterp.compile_and_run_once(*args)
        except ContinueRunningNormally:
            # the trace got too long, reset the counter
            cell.counter = 0
            raise
    else:
        # machine code was already compiled for these greenargs
        # get the assembler and fill in the boxes
        set_future_values(*args[num_green_args:])
        loop_token = cell.entry_loop_token
    # ---------- execute assembler ----------
    while True:
        # until interrupted by an exception
        metainterp_sd.profiler.start_running()
        fail_index = metainterp_sd.cpu.execute_token(loop_token)
        metainterp_sd.profiler.end_running()
        fail_descr = globaldata.get_fail_descr_from_number(fail_index)
        # handle_fail either raises (leaving the loop) or returns the
        # next loop_token to execute
        loop_token = fail_descr.handle_fail(metainterp_sd)
def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Low-level copy of ``length`` items from source[source_start:] to
    dest[dest_start:].  Supports non-overlapping copies only.

    Uses raw_memcopy for speed; for GC-pointer arrays a write barrier is
    performed first, falling back to an item-by-item copy when the barrier
    is unsupported.
    """
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here
    # XXX: Hack to ensure that we get a proper effectinfo.write_descrs_arrays
    if NonConstant(False):
        dest[dest_start] = source[source_start]
    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)
    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest,
                                                source_start, dest_start,
                                                length):
            # if the write barrier is not supported, copy by hand
            i = 0
            while i < length:
                dest[i + dest_start] = source[i + source_start]
                i += 1
            return
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)
    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    # keep both arrays alive until after the raw copy, so the GC cannot
    # move/free them while we hold raw addresses
    keepalive_until_here(source)
    keepalive_until_here(dest)
def unicode_w(self, space):
    """Stub unwrap-to-unicode: always yields a non-constant u"foobar"."""
    result = NonConstant(u"foobar")
    return result
def int_w(self, space):
    """Stub unwrap-to-int: always yields a non-constant -42."""
    result = NonConstant(-42)
    return result
def deldictvalue(self, space, attr): attr + "xx" # check that it's a string return NonConstant(True)
def str_w(self, space):
    """Stub unwrap-to-str: always yields a non-constant "foobar"."""
    result = NonConstant("foobar")
    return result
def decode_index4(self, w_index_or_slice, seqlength):
    """Stub index/slice decoding: checks the argument is a wrapped root
    and returns a 4-tuple of non-constant ints (start, stop, step, length).
    """
    is_root(w_index_or_slice)
    # four separate NonConstant calls so each tuple slot is annotated
    # independently as a non-constant int
    return (NonConstant(42), NonConstant(42),
            NonConstant(42), NonConstant(42))
def setdictvalue(self, space, attr, w_value): attr + "xx" # check that it's a string is_root(w_value) return NonConstant(True)
def nonconst_l():
    """Return the first element of a non-constant list (annotation
    test helper)."""
    items = NonConstant([1, 2, 3])
    return items[0]
def check():
    """Drive activation._run with a non-constant-sized scope and verify
    the result is a wrapped root.

    NOTE(review): closure -- ``activation``, ``self``, ``w_some_obj`` and
    ``is_root`` come from the enclosing scope.
    """
    # NonConstant(42) keeps the scope length from being constant-folded
    scope_w = [w_some_obj()] * NonConstant(42)
    w_result = activation._run(self, scope_w)
    is_root(w_result)
def uint_w(self, space):
    """Stub unwrap-to-unsigned: always yields a non-constant r_uint(42)."""
    value = NonConstant(42)
    return r_uint(value)
def nonconst_i():
    """Return `a` (from the enclosing scope) wrapped in NonConstant so the
    annotator does not treat it as a compile-time constant."""
    return NonConstant(a)
def w_some_obj():
    """Return a wrapped object.

    At runtime this always builds a W_MyObject; the dead W_Root branch
    only widens the annotated return type to the common base class.
    """
    if not NonConstant(False):
        return W_MyObject()
    return W_Root()
def mmap(fileno, length, flags=MAP_SHARED, prot=PROT_WRITE | PROT_READ, access=_ACCESS_DEFAULT): fd = fileno # check size boundaries _check_map_size(length) map_size = length # check access is not there when flags and prot are there if access != _ACCESS_DEFAULT and ((flags != MAP_SHARED) or\ (prot != (PROT_WRITE | PROT_READ))): raise RValueError( "mmap can't specify both access and flags, prot.") if access == ACCESS_READ: flags = MAP_SHARED prot = PROT_READ elif access == ACCESS_WRITE: flags = MAP_SHARED prot = PROT_READ | PROT_WRITE elif access == ACCESS_COPY: flags = MAP_PRIVATE prot = PROT_READ | PROT_WRITE elif access == _ACCESS_DEFAULT: pass else: raise RValueError("mmap invalid access parameter.") # check file size try: st = os.fstat(fd) except OSError: pass # ignore errors and trust map_size else: mode = st[stat.ST_MODE] size = st[stat.ST_SIZE] if size > sys.maxint: size = sys.maxint else: size = int(size) if stat.S_ISREG(mode): if map_size == 0: map_size = size elif map_size > size: raise RValueError("mmap length is greater than file size") m = MMap(access) if fd == -1: # Assume the caller wants to map anonymous memory. # This is the same behaviour as Windows. mmap.mmap(-1, size) # on both Windows and Unix map anonymous memory. m.fd = -1 flags |= MAP_ANONYMOUS else: m.fd = os.dup(fd) # XXX if we use hintp below in alloc, the NonConstant # is necessary since we want a general version of c_mmap # to be annotated with a non-constant pointer. res = c_mmap(NonConstant(NULL), map_size, prot, flags, fd, 0) if res == rffi.cast(PTR, -1): errno = _get_error_no() raise OSError(errno, os.strerror(errno)) m.setdata(res, map_size) return m
def fn():
    """Return a non-constant boolean (always False at runtime)."""
    flag = NonConstant(False)
    return bool(flag)
def bigint_w(self, space):
    """Stub unwrap-to-bigint: always yields rbigint(42) with a
    non-constant payload."""
    from pypy.rlib.rbigint import rbigint
    value = NonConstant(42)
    return rbigint.fromint(value)
def is_true(self, w_obj):
    """Stub truth test: validates w_obj is a wrapped root and answers a
    non-constant False."""
    is_root(w_obj)
    answer = NonConstant(False)
    return answer
def w_obj_or_none():
    """Return some wrapped object; the dead None branch widens the
    annotated return type to "object or None"."""
    if not NonConstant(False):
        return w_some_obj()
    return None
def tocli(self):
    """Stub CLI conversion: always yields a non-constant None."""
    result = NonConstant(None)
    return result
def fn(*args):
    """Return `a` or `b` from the enclosing scope.

    At runtime NonConstant(True) is truthy, so `a` is always returned;
    the `b` branch exists only so the annotator sees both as possible
    results.
    """
    if NonConstant(True):
        return a
    else:
        return b
def float_w(self, w_obj):
    """Stub unwrap-to-float: validates w_obj is a wrapped root and yields
    a non-constant 42.5."""
    is_root(w_obj)
    result = NonConstant(42.5)
    return result