def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type):
    """Compile 'loop' to machine code via the CPU backend, then log it and
    register it with the JIT statistics and (when present) the memory manager.

    NOTE(review): reconstructed from a whitespace-mangled source; the
    statement grouping below follows the visible token order.
    """
    # notify the jitdriver hook before compilation starts
    jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token,
                            loop.operations, type, greenkey)
    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    globaldata = metainterp_sd.globaldata
    loop_token = loop.token
    # assign a unique increasing number to the loop (used by the logger)
    loop_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_loop(metainterp_sd, loop)
        loop.check_consistency()
    # freeze the operation list before handing it to the backend
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations,
                                                    loop.token, name=loopname)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        if type != "entry bridge":
            metainterp_sd.stats.compiled()
        else:
            # entry bridges are not counted as "compiled loops" in tests
            loop._ignore_during_counting = True
    metainterp_sd.log("compiled new " + type)
    #
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset)
    short = loop.token.short_preamble
    if short:
        # log only the most recently attached short preamble
        metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs,
                                                    short[-1].operations)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token)
def _really_force(self, optforce):
    """Force this virtual object: emit the allocation operation and the
    SETFIELD_GC operations that materialize its fields.
    """
    op = self.source_op
    assert op is not None
    # ^^^ This case should not occur any more (see test_bug_3).
    #
    if not we_are_translated():
        op.name = 'FORCE ' + self.source_op.name
    if self._is_immutable_and_filled_with_constants():
        # the whole structure is a compile-time constant: fold the
        # allocation and perform the field stores eagerly
        box = optforce.optimizer.constant_fold(op)
        self.make_constant(box)
        for ofs, value in self._fields.iteritems():
            subbox = value.force_box(optforce)
            assert isinstance(subbox, Const)
            execute(optforce.optimizer.cpu, None, rop.SETFIELD_GC,
                    ofs, box, subbox)
        # keep self._fields, because it's all immutable anyway
    else:
        optforce.emit_operation(op)
        self.box = box = op.result
        #
        iteritems = self._fields.iteritems()
        if not we_are_translated():  # random order is fine, except for tests
            iteritems = list(iteritems)
            iteritems.sort(key=lambda (x, y): x.sort_key())
        for ofs, value in iteritems:
            if value.is_null():
                # null fields need no explicit store
                continue
            subbox = value.force_box(optforce)
            op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
                              descr=ofs)
            optforce.emit_operation(op)
def f():
    """Test driver: start a second thread, run 'runme' in both, and check
    that each thread appended exactly N strictly-increasing entries to
    state.data.  Returns the total number of entries.
    """
    state.data = []
    state.threadlocals = gil.GILThreadLocals()
    state.threadlocals.setup_threads(space)
    thread.gc_thread_prepare()
    subident = thread.start_new_thread(bootstrap, ())
    mainident = thread.get_ident()
    runme()
    # poll until both threads produced N entries each, or time out
    still_waiting = 3000
    while len(state.data) < 2*N:
        if not still_waiting:
            raise ValueError("time out")
        still_waiting -= 1
        # untranslated, the GIL must be released/reacquired by hand
        # around the blocking sleep
        if not we_are_translated():
            gil.before_external_call()
        time.sleep(0.01)
        if not we_are_translated():
            gil.after_external_call()
    # each thread's entries must appear in order 0..N-1
    i1 = i2 = 0
    for tid, i in state.data:
        if tid == mainident:
            assert i == i1; i1 += 1
        elif tid == subident:
            assert i == i2; i2 += 1
        else:
            assert 0
    assert i1 == N
    assert i2 == N
    return len(state.data)
def f():
    """Test driver: set up a hand-made GIL, launch workers via g(), and
    poll until all expected answers arrived.  Returns how many answers
    were collected.
    """
    state.gil = allocate_ll_lock()
    acquire_NOAUTO(state.gil, True)
    state.bootstrapping = allocate_lock()
    state.answers = []
    state.finished = 0
    # the next line installs before_extcall() and after_extcall()
    # to be called automatically around external function calls.
    # When not translated it does not work around time.sleep(),
    # so we have to call them manually for this test.
    invoke_around_extcall(before_extcall, after_extcall)
    g(10, 1)
    done = False
    willing_to_wait_more = 2000
    while not done:
        if not willing_to_wait_more:
            break
        willing_to_wait_more -= 1
        done = len(state.answers) == expected
        if not we_are_translated():
            before_extcall()
        time.sleep(0.01)
        if not we_are_translated():
            after_extcall()
    # grace period so straggler threads can finish cleanly
    if not we_are_translated():
        before_extcall()
    time.sleep(0.1)
    if not we_are_translated():
        after_extcall()
    return len(state.answers)
def _really_force(self):
    """Force this virtual object: append the allocation operation and the
    SETFIELD_GC stores to the optimizer's newoperations list.
    """
    op = self.source_op
    assert op is not None
    # ^^^ This case should not occur any more (see test_bug_3).
    #
    if not we_are_translated():
        op.name = 'FORCE ' + self.source_op.name
    if self._is_immutable_and_filled_with_constants():
        # fully-constant immutable structure: fold the allocation and
        # perform the field stores eagerly at compile time
        box = self.optimizer.constant_fold(op)
        self.make_constant(box)
        for ofs, value in self._fields.iteritems():
            subbox = value.force_box()
            assert isinstance(subbox, Const)
            execute(self.optimizer.cpu, None, rop.SETFIELD_GC,
                    ofs, box, subbox)
        # keep self._fields, because it's all immutable anyway
    else:
        newoperations = self.optimizer.newoperations
        newoperations.append(op)
        self.box = box = op.result
        #
        iteritems = self._fields.iteritems()
        if not we_are_translated():  # random order is fine, except for tests
            iteritems = list(iteritems)
            iteritems.sort(key=lambda (x, y): x.sort_key())
        for ofs, value in iteritems:
            if value.is_null():
                continue
            subbox = value.force_box()
            op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
                              descr=ofs)
            newoperations.append(op)
        # fields are materialized now; drop the virtual's field map
        self._fields = None
def generate_operations(self, mc):
    """Generate machine code for all recorded operations into 'mc',
    allocating registers/stack locations on the fly.

    NOTE(review): reconstructed from a whitespace-mangled source; the
    exact nesting of the clobber_cc/_check lines is an assumption —
    confirm against the original file.
    """
    if not we_are_translated():
        print
    # reserve locations for the inputvars
    for i in range(len(self.inputvars_gv)):
        v = self.inputvars_gv[i]
        if v in self.lifetime:   # else: input argument is not used
            loc = self.inputlocations[i]
            if v in self.var2loc:
                # duplicate inputvars_gv, which is ok
                assert self.var2loc[v] == loc
            else:
                self.var2loc[v] = loc
                self.vars_in_use[v] = self.lifetime[v]
                self.force_loc_used(v, loc)
                if not we_are_translated():
                    print 'in %20s: %s' % (loc, short(v))
    self._check()
    self.mc = mc
    # Generate all operations.
    # Actual registers or stack locations are allocated as we go.
    for i in range(len(self.operations)):
        self.registers_pinned = 0     # bitmask
        op = self.operations[i]
        if op.clobbers_cc:
            # free the condition codes before an op that overwrites them
            self.clobber_cc()
            self._check()
        op.generate(self)
        if not we_are_translated():
            self._showprogress()
        self.operationindex = i + 1
def send_loop_to_backend(metainterp_sd, loop, type):
    """Compile 'loop' to machine code via the CPU backend, logging and
    accounting it in the JIT statistics.
    """
    globaldata = metainterp_sd.globaldata
    loop_token = loop.token
    # assign a unique increasing number to the loop (used by the logger)
    loop_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type)
    if not we_are_translated():
        show_loop(metainterp_sd, loop)
        loop.check_consistency()
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        metainterp_sd.cpu.compile_loop(loop.inputargs, loop.operations,
                                       loop.token)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        if type != "entry bridge":
            metainterp_sd.stats.compiled()
        else:
            # entry bridges are not counted as "compiled loops" in tests
            loop._ignore_during_counting = True
    metainterp_sd.log("compiled new " + type)
def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs,
                           operations, original_loop_token):
    """Compile 'operations' as a bridge attached to the guard described by
    'faildescr', invoking the user compile hooks around the backend call.
    """
    n = metainterp_sd.cpu.get_fail_descr_number(faildescr)
    if not we_are_translated():
        show_procedures(metainterp_sd)
        seen = dict.fromkeys(inputargs)
        TreeLoop.check_consistency_of_branch(operations, seen)
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(
            jitdriver_sd, metainterp_sd.logger_ops, original_loop_token,
            operations, "bridge", fail_descr_no=n
        )
        hooks.before_compile_bridge(debug_info)
    else:
        hooks = None
        debug_info = None
    # freeze the operation list before handing it to the backend
    operations = get_deep_immutable_oplist(operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs,
                                    operations, original_loop_token)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile_bridge(debug_info)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new bridge")
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset)
def f():
    """Test driver: like the basic GIL test, but allows the two threads'
    entry counts to differ by 'skew' (N + skew vs. N - skew).  Returns the
    total number of entries collected in state.data.
    """
    state.data = []
    state.datalen1 = 0
    state.datalen2 = 0
    state.datalen3 = 0
    state.datalen4 = 0
    state.threadlocals = gil.GILThreadLocals()
    state.threadlocals.setup_threads(space)
    thread.gc_thread_prepare()
    subident = thread.start_new_thread(bootstrap, ())
    mainident = thread.get_ident()
    runme(True)
    # poll until both threads produced 2*N entries total, or time out
    still_waiting = 3000
    while len(state.data) < 2*N:
        debug_print(len(state.data))
        if not still_waiting:
            raise ValueError("time out")
        still_waiting -= 1
        # untranslated, release/reacquire the GIL by hand around the sleep
        if not we_are_translated():
            gil.before_external_call()
        time.sleep(0.01)
        if not we_are_translated():
            gil.after_external_call()
    debug_print("leaving!")
    # each thread's entries must appear in order 0..count-1
    i1 = i2 = 0
    for tid, i in state.data:
        if tid == mainident:
            assert i == i1; i1 += 1
        elif tid == subident:
            assert i == i2; i2 += 1
        else:
            assert 0
    assert i1 == N + skew
    assert i2 == N - skew
    return len(state.data)
def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type):
    """Compile 'loop' to machine code, invoking the user compile hooks
    around the backend call, then log it and keep the jitcell token alive
    in the memory manager.
    """
    vinfo = jitdriver_sd.virtualizable_info
    if vinfo is not None:
        # loops on a virtualizable jitdriver must first reload the
        # virtualizable fields
        patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd)
    original_jitcell_token = loop.original_jitcell_token
    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    globaldata = metainterp_sd.globaldata
    # assign a unique increasing number to the token (used by the logger)
    original_jitcell_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_procedures(metainterp_sd, loop)
        loop.check_consistency()
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                  original_jitcell_token, loop.operations,
                                  type, greenkey)
        hooks.before_compile(debug_info)
    else:
        debug_info = None
        hooks = None
    # freeze the operation list before handing it to the backend
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = do_compile_loop(metainterp_sd, loop.inputargs, operations,
                                  original_jitcell_token, name=loopname)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile(debug_info)
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new " + type)
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset, name=loopname)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(
            original_jitcell_token)
def get_const_ptr_for_string(s):
    """Return a ConstPtr wrapping the low-level string for 's'.

    Untranslated, the result is memoized in _const_ptr_for_string so that
    the same Python string always maps to the same ConstPtr object.
    """
    from pypy.rpython.annlowlevel import llstr
    translated = we_are_translated()
    if not translated and s in _const_ptr_for_string:
        return _const_ptr_for_string[s]
    gcref = lltype.cast_opaque_ptr(llmemory.GCREF, llstr(s))
    result = ConstPtr(gcref)
    if not translated:
        _const_ptr_for_string[s] = result
    return result
def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type):
    """Compile 'loop' to machine code via cpu.compile_loop, invoking the
    user compile hooks, then log it and keep the jitcell token alive.
    """
    vinfo = jitdriver_sd.virtualizable_info
    if vinfo is not None:
        # loops on a virtualizable jitdriver must first reload the
        # virtualizable fields
        patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd)
    original_jitcell_token = loop.original_jitcell_token
    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    globaldata = metainterp_sd.globaldata
    # assign a unique increasing number to the token (used by the logger)
    original_jitcell_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_procedures(metainterp_sd, loop)
        loop.check_consistency()
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                  original_jitcell_token, loop.operations,
                                  type, greenkey)
        hooks.before_compile(debug_info)
    else:
        debug_info = None
        hooks = None
    # freeze the operation list before handing it to the backend
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations,
                                                 original_jitcell_token,
                                                 name=loopname)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile(debug_info)
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new " + type)
    # loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    # ^^^ NOTE(review): commented-out recomputation; 'loopname' was already
    # computed above.
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset, name=loopname)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(
            original_jitcell_token)
def get_const_ptr_for_unicode(s):
    """Return a ConstPtr wrapping the low-level unicode string for 's'.

    Accepts either a str (coerced to unicode) or a unicode object.
    Untranslated, the result is memoized in _const_ptr_for_unicode.
    """
    from pypy.rpython.annlowlevel import llunicode
    translated = we_are_translated()
    if not translated and s in _const_ptr_for_unicode:
        return _const_ptr_for_unicode[s]
    if isinstance(s, str):
        s = unicode(s)
    gcref = lltype.cast_opaque_ptr(llmemory.GCREF, llunicode(s))
    result = ConstPtr(gcref)
    if not translated:
        _const_ptr_for_unicode[s] = result
    return result
def send_bridge_to_backend(metainterp_sd, faildescr, inputargs, operations):
    """Compile 'operations' as a bridge attached to the guard described by
    'faildescr', logging it and updating the JIT statistics.

    Fix: removed a stray dead 'pass' statement that followed the
    consistency check in the untranslated branch.
    """
    n = faildescr.get_index()
    metainterp_sd.logger_ops.log_bridge(inputargs, operations, n)
    metainterp_sd.profiler.start_backend()
    if not we_are_translated():
        show_loop(metainterp_sd)
        TreeLoop.check_consistency_of(inputargs, operations)
    metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations)
    metainterp_sd.profiler.end_backend()
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new bridge")
def record_loop_or_bridge(metainterp_sd, loop):
    """Do post-backend recordings and cleanups on 'loop'.
    """
    # get the original jitcell token corresponding to jitcell form which
    # this trace starts
    original_jitcell_token = loop.original_jitcell_token
    assert original_jitcell_token is not None
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        # has been registered with memmgr
        assert original_jitcell_token.generation > 0
    wref = weakref.ref(original_jitcell_token)
    for op in loop.operations:
        descr = op.getdescr()
        if isinstance(descr, ResumeDescr):
            descr.wref_original_loop_token = wref   # stick it there
            n = descr.index
            if n >= 0:       # we also record the resumedescr number
                original_jitcell_token.compiled_loop_token.record_faildescr_index(
                    n)
        elif isinstance(descr, JitCellToken):
            # for a CALL_ASSEMBLER: record it as a potential jump.
            if descr is not original_jitcell_token:
                original_jitcell_token.record_jump_to(descr)
            descr.exported_state = None
            op.cleardescr()    # clear reference, mostly for tests
        elif isinstance(descr, TargetToken):
            # for a JUMP: record it as a potential jump.
            # (the following test is not enough to prevent more complicated
            # cases of cycles, but at least it helps in simple tests of
            # test_memgr.py)
            if descr.original_jitcell_token is not original_jitcell_token:
                assert descr.original_jitcell_token is not None
                original_jitcell_token.record_jump_to(
                    descr.original_jitcell_token)
            # exported_state is cleared by optimizeopt when the short
            # preamble is constructed.  If that did not happen, the label
            # should not show up in a trace that will be used.
            assert descr.exported_state is None
            if not we_are_translated():
                op._descr_wref = weakref.ref(op._descr)
            op.cleardescr()    # clear reference to prevent the history.Stats
                               # from keeping the loop alive during tests
    # record this looptoken on the QuasiImmut used in the code
    if loop.quasi_immutable_deps is not None:
        for qmut in loop.quasi_immutable_deps:
            qmut.register_loop_token(wref)
        # XXX maybe we should clear the dictionary here
    # mostly for tests: make sure we don't keep a reference to the LoopToken
    loop.original_jitcell_token = None
    if not we_are_translated():
        loop._looptoken_number = original_jitcell_token.number
def record_loop_or_bridge(metainterp_sd, loop):
    """Do post-backend recordings and cleanups on 'loop'.
    """
    # get the original jitcell token corresponding to jitcell form which
    # this trace starts
    original_jitcell_token = loop.original_jitcell_token
    assert original_jitcell_token is not None
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        # has been registered with memmgr
        assert original_jitcell_token.generation > 0
    wref = weakref.ref(original_jitcell_token)
    for op in loop.operations:
        descr = op.getdescr()
        if isinstance(descr, ResumeDescr):
            descr.wref_original_loop_token = wref   # stick it there
            n = descr.index
            if n >= 0:       # we also record the resumedescr number
                original_jitcell_token.compiled_loop_token.record_faildescr_index(n)
        elif isinstance(descr, JitCellToken):
            # for a CALL_ASSEMBLER: record it as a potential jump.
            if descr is not original_jitcell_token:
                original_jitcell_token.record_jump_to(descr)
            descr.exported_state = None
            op.cleardescr()    # clear reference, mostly for tests
        elif isinstance(descr, TargetToken):
            # for a JUMP: record it as a potential jump.
            # (the following test is not enough to prevent more complicated
            # cases of cycles, but at least it helps in simple tests of
            # test_memgr.py)
            if descr.original_jitcell_token is not original_jitcell_token:
                assert descr.original_jitcell_token is not None
                original_jitcell_token.record_jump_to(descr.original_jitcell_token)
            # exported_state is cleared by optimizeopt when the short
            # preamble is constructed.  If that did not happen, the label
            # should not show up in a trace that will be used.
            assert descr.exported_state is None
            if not we_are_translated():
                op._descr_wref = weakref.ref(op._descr)
            op.cleardescr()    # clear reference to prevent the history.Stats
                               # from keeping the loop alive during tests
    # record this looptoken on the QuasiImmut used in the code
    if loop.quasi_immutable_deps is not None:
        for qmut in loop.quasi_immutable_deps:
            qmut.register_loop_token(wref)
        # XXX maybe we should clear the dictionary here
    # mostly for tests: make sure we don't keep a reference to the LoopToken
    loop.original_jitcell_token = None
    if not we_are_translated():
        loop._looptoken_number = original_jitcell_token.number
def getconst(self, const):
    """Return the tagged encoding for 'const', memoizing large ints and
    non-null refs so equal constants share one tagged slot.
    """
    if const.type == INT:
        val = const.getint()
        if not we_are_translated() and not isinstance(val, int):
            # unhappiness, probably a symbolic
            return self._newconst(const)
        try:
            # small ints fit directly in the tag
            return tag(val, TAGINT)
        except ValueError:
            pass
        tagged = self.large_ints.get(val, UNASSIGNED)
        if not tagged_eq(tagged, UNASSIGNED):
            return tagged
        tagged = self._newconst(const)
        self.large_ints[val] = tagged
        return tagged
    elif const.type == REF:
        val = const.getref_base()
        if not val:
            # all null refs share the single NULLREF encoding
            return NULLREF
        tagged = self.refs.get(val, UNASSIGNED)
        if not tagged_eq(tagged, UNASSIGNED):
            return tagged
        tagged = self._newconst(const)
        self.refs[val] = tagged
        return tagged
    # floats and anything else: always a fresh const slot
    return self._newconst(const)
def ll_arraycopy(source, dest, source_start, dest_start, length):
    """Copy 'length' items from 'source' to 'dest' at the given offsets.

    Supports non-overlapping copies only; same-array overlapping ranges
    are rejected by the untranslated assert below.
    """
    from pypy.rpython.lltypesystem.lloperation import llop
    from pypy.rlib.objectmodel import keepalive_until_here
    # supports non-overlapping copies only
    if not we_are_translated():
        if source == dest:
            assert (source_start + length <= dest_start or
                    dest_start + length <= source_start)
    TP = lltype.typeOf(source).TO
    assert TP == lltype.typeOf(dest).TO
    if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
        # perform a write barrier that copies necessary flags from
        # source to dest
        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
            # if the write barrier is not supported, copy by hand
            for i in range(length):
                dest[i + dest_start] = source[i + source_start]
            return
    source_addr = llmemory.cast_ptr_to_adr(source)
    dest_addr = llmemory.cast_ptr_to_adr(dest)
    cp_source_addr = (source_addr + llmemory.itemoffsetof(TP, 0) +
                      llmemory.sizeof(TP.OF) * source_start)
    cp_dest_addr = (dest_addr + llmemory.itemoffsetof(TP, 0) +
                    llmemory.sizeof(TP.OF) * dest_start)
    llmemory.raw_memcopy(cp_source_addr, cp_dest_addr,
                         llmemory.sizeof(TP.OF) * length)
    # keep the arrays alive until after the raw copy completes
    keepalive_until_here(source)
    keepalive_until_here(dest)
def _dispatch_loop(self):
    """Main bytecode dispatch loop: decode one opcode (plus its oparg, if
    any) per iteration and invoke the corresponding method.
    """
    code = self.code.co_code
    instr_index = 0
    while True:
        jitdriver.jit_merge_point(code=code, instr_index=instr_index,
                                  frame=self)
        self.stack_depth = hint(self.stack_depth, promote=True)
        op = ord(code[instr_index])
        instr_index += 1
        if op >= opcode.HAVE_ARGUMENT:
            # two-byte little-endian argument follows the opcode
            low = ord(code[instr_index])
            hi = ord(code[instr_index + 1])
            oparg = (hi << 8) | low
            instr_index += 2
        else:
            oparg = 0
        if we_are_translated():
            # unrolled loop becomes a switch after translation
            for opdesc in unrolling_opcode_descs:
                if op == opdesc.index:
                    meth = getattr(self, opdesc.methodname)
                    instr_index = meth(oparg, instr_index, code)
                    break
            else:
                raise MissingOpcode(op)
        else:
            # untranslated: plain table lookup is simpler and fine
            meth = getattr(self, opcode_method_names[op])
            instr_index = meth(oparg, instr_index, code)
def clear(self, space):
    """Reset this exception to the empty state (used by sys.exc_clear())."""
    w_none = space.w_None
    self.w_type = w_none
    self._w_value = w_none
    self._application_traceback = None
    if not we_are_translated():
        # also drop the interp-level debugging tracebacks
        del self.debug_excs[:]
def _spill(self, spillvar, oldloc): spillloc = self._use_another_stack_loc() if not we_are_translated(): print ' # %20s: SPILL %s' % (spillloc, oldloc) self.mc.MOV(spillloc, oldloc) self.var2loc[spillvar] = spillloc return spillloc
def gc_thread_die():
    """To call just before the final GIL release done by a dying thread.
    After a thread_die(), no more gc operation should occur in this thread.
    """
    if not we_are_translated():
        # nothing to do on top of CPython
        return
    llop.gc_thread_die(lltype.Void)
def __init__(self, w_type, w_value, tb=None):
    """Build an operation error from 'w_type'/'w_value' and an optional
    application-level traceback 'tb'.
    """
    if w_type is None and not we_are_translated():
        # a None type means the flow-object-space is reporting an error
        from pypy.tool.error import FlowingError
        raise FlowingError(w_value)
    self.setup(w_type)
    self._w_value = w_value
    self._application_traceback = tb
def step(self): next = self.w_active_context.getNextBytecode() # we_are_translated returns false on top of CPython and true when # translating the interpreter if not objectmodel.we_are_translated(): bytecodeimpl = BYTECODE_TABLE[next] if self._w_last_active_context != self.w_active_context: cnt = 0 p = self.w_active_context # AK make method while p is not None: cnt += 1 p = p.w_sender self._last_indent = " " * cnt self._w_last_active_context = self.w_active_context if self.should_trace(): print "%sStack=%s" % ( self._last_indent, repr(self.w_active_context.stack),) print "%sBytecode at %d (%d:%s):" % ( self._last_indent, self.w_active_context.pc, next, bytecodeimpl.__name__,) bytecodeimpl(self.w_active_context, self) else: # this is a performance optimization: when translating the # interpreter, the bytecode dispatching is not implemented as a # list lookup and an indirect call but as a switch. The for loop # below produces the switch (by being unrolled). for code, bytecodeimpl in unrolling_bytecode_table: if code == next: bytecodeimpl(self.w_active_context, self) break
def _read(self, storage, width, i, offset):
    """Read one raw item of type self.T from 'storage' and return it
    byteswapped (this dtype stores values in the opposite endianness).
    """
    if we_are_translated():
        ffitype = clibffi.cast_type_to_ffitype(self.T)
        raw = libffi.array_getitem(ffitype, width, storage, i, offset)
    else:
        # untranslated fallback taking the lltype directly
        raw = libffi.array_getitem_T(self.T, width, storage, i, offset)
    return byteswap(raw)
def _write(self, storage, width, i, offset, value):
    """Store one raw item of type self.T into 'storage'.

    XXX the value is currently *not* byteswapped before storing (a
    disabled 'value = byteswap(value)' used to do it) — asymmetric with
    _read, kept as-is.
    """
    if we_are_translated():
        ffitype = clibffi.cast_type_to_ffitype(self.T)
        libffi.array_setitem(ffitype, width, storage, i, offset, value)
    else:
        # untranslated fallback taking the lltype directly
        libffi.array_setitem_T(self.T, width, storage, i, offset, value)
def call(self):
    """Run the distributor thunk; on failure, bind the cspace's solution
    to a W_FailedValue describing the error.

    NOTE(review): reconstructed from a whitespace-mangled source; the
    nesting of the three try blocks is an assumption — confirm against
    the original file.
    """
    space = self.space
    coro = AppCoroutine.w_getcurrent(space)
    assert isinstance(coro, AppCoroutine)
    cspace = coro._cspace
    w("-- initial DISTRIBUTOR thunk CALL in", str(id(coro)))
    sched.uler.trace_vars(coro, logic_args(self.args.unpack()))
    try:
        try:
            try:
                _AppThunk.call(self)
            finally:
                # the current coroutine/cspace may have changed across
                # the call; refresh them before any error handling
                coro = AppCoroutine.w_getcurrent(space)
                assert isinstance(coro, AppCoroutine)
                cspace = coro._cspace
        except FailedSpace, exc:
            w("-- EXIT of DISTRIBUTOR %s, space is FAILED with %s"
              % (id(coro), str(exc)))
            failed_value = W_FailedValue(exc)
            interp_bind(cspace._solution, failed_value)
    except Exception, exc:
        # maybe app_level let something buble up ...
        w("-- exceptional EXIT of DISTRIBUTOR %s with %s"
          % (id(coro), str(exc)))
        if not we_are_translated():
            import traceback
            traceback.print_exc()
        failed_value = W_FailedValue(exc)
        sched.uler.dirty_traced_vars(coro, failed_value)
        interp_bind(cspace._solution, failed_value)
        cspace.fail()
def call(self):
    """Run the propagator thunk: repeatedly revise the constraint until it
    is entailed or the cspace finishes, blocking on domain changes in
    between.  Always deregisters this thread and reschedules on exit.

    NOTE(review): reconstructed from a whitespace-mangled source; the
    try/except/finally grouping is an assumption — confirm against the
    original file.
    """
    #coro = AppCoroutine.w_getcurrent(self.space)
    try:
        space = self.space
        cspace = self.coro._cspace
        const = self.const
        try:
            while 1:
                if not interp_free(cspace._finished):
                    # the computation space is already done; stop
                    break
                entailed = const.revise(cspace._domains)
                if entailed:
                    break
                # we will block on domains being pruned
                wait_list = []
                _doms = [cspace._domains[var]
                         for var in const._variables]
                for dom in _doms:
                    #assert isinstance(dom, W_AbstractDomain)
                    wait_list.append(dom.one_shot_watcher())
                #or the cspace being dead
                wait_list.append(cspace._finished)
                interp_wait_or(space, wait_list)
                cspace = get_current_cspace(space)  # might have been cloned
        except ConsistencyError:
            cspace.fail()
        except Exception:  # rpython doesn't like just except:\n ...
            if not we_are_translated():
                import traceback
                traceback.print_exc()
    finally:
        sched.uler.remove_thread(self.coro)
        sched.uler.schedule()
def Py_DecRef(space, obj):
    """Decrement the refcount of the PyObject 'obj'; deallocate it and
    unlink it from the interning dicts when the count reaches zero.
    """
    if not obj:
        # decref of a NULL pointer is a no-op, as in CPython
        return
    assert lltype.typeOf(obj) == PyObject
    obj.c_ob_refcnt -= 1
    if DEBUG_REFCOUNT:
        debug_refcount("DECREF", obj, obj.c_ob_refcnt, frame_stackdepth=3)
    if obj.c_ob_refcnt == 0:
        state = space.fromcache(RefcountState)
        ptr = rffi.cast(ADDR, obj)
        if ptr not in state.py_objects_r2w:
            # this is a half-allocated object, lets call the deallocator
            # without modifying the r2w/w2r dicts
            _Py_Dealloc(space, obj)
        else:
            w_obj = state.py_objects_r2w[ptr]
            del state.py_objects_r2w[ptr]
            w_type = space.type(w_obj)
            if not w_type.is_cpytype():
                _Py_Dealloc(space, obj)
            del state.py_objects_w2r[w_obj]
        # if the object was a container for borrowed references
        state.delete_borrower(w_obj)
    else:
        # catch over-decrefs early when running untranslated
        if not we_are_translated() and obj.c_ob_refcnt < 0:
            message = "Negative refcount for obj %s with type %s" % (
                obj, rffi.charp2str(obj.c_ob_type.c_tp_name))
            print >> sys.stderr, message
            assert False, message
def debug_print(self, indent, seen, bad):
    """Print a one-line description of this NotVirtualInfo, '*'-marked
    when it appears in the 'bad' set.
    """
    mark = ""
    if self in bad:
        mark = "*"
    if we_are_translated():
        # translated: only constant strings are allowed here
        l = {
            LEVEL_UNKNOWN: "Unknown",
            LEVEL_NONNULL: "NonNull",
            LEVEL_KNOWNCLASS: "KnownClass",
            LEVEL_CONSTANT: "Constant",
        }[self.level]
    else:
        # untranslated: include the actual class/constant for debugging
        l = {
            LEVEL_UNKNOWN: "Unknown",
            LEVEL_NONNULL: "NonNull",
            LEVEL_KNOWNCLASS: "KnownClass(%r)" % self.known_class,
            LEVEL_CONSTANT: "Constant(%r)" % self.constbox,
        }[self.level]
    lb = ""
    if self.lenbound:
        lb = ", " + self.lenbound.bound.__repr__()
    debug_print(
        indent + mark + "NotVirtualInfo(%d" % self.position + ", "
        + l + ", " + self.intbound.__repr__() + lb + ")"
    )
def reraise(lle):
    """Re-raise the exception represented by the low-level pointer 'lle'."""
    if not we_are_translated():
        # running on llinterp/tests: wrap it into an LLException
        # together with its low-level type
        raise LLException(rclass.ll_type(lle), lle)
    e = cast_base_ptr_to_instance(Exception, lle)
    raise e
def __init__(self, value=0):
    """Wrap an integer 'value'; untranslated, normalize bools to ints and
    accept Symbolic placeholders.
    """
    if not we_are_translated():
        if isinstance(value, int):
            value = int(value)   # bool -> int
        else:
            assert isinstance(value, Symbolic)
    self.value = value
def Py_DecRef(space, obj):
    """Decrement the refcount of the PyObject 'obj'; deallocate it and
    unlink it from the interning dicts when the count reaches zero.
    """
    if not obj:
        # decref of a NULL pointer is a no-op, as in CPython
        return
    assert lltype.typeOf(obj) == PyObject
    obj.c_ob_refcnt -= 1
    if DEBUG_REFCOUNT:
        debug_refcount("DECREF", obj, obj.c_ob_refcnt, frame_stackdepth=3)
    if obj.c_ob_refcnt == 0:
        state = space.fromcache(RefcountState)
        ptr = rffi.cast(ADDR, obj)
        if ptr not in state.py_objects_r2w:
            # this is a half-allocated object, lets call the deallocator
            # without modifying the r2w/w2r dicts
            _Py_Dealloc(space, obj)
        else:
            w_obj = state.py_objects_r2w[ptr]
            del state.py_objects_r2w[ptr]
            w_type = space.type(w_obj)
            if not w_type.is_cpytype():
                _Py_Dealloc(space, obj)
            del state.py_objects_w2r[w_obj]
        # if the object was a container for borrowed references
        state.delete_borrower(w_obj)
    else:
        # catch over-decrefs early when running untranslated
        if not we_are_translated() and obj.c_ob_refcnt < 0:
            message = "Negative refcount for obj %s with type %s" % (
                obj, rffi.charp2str(obj.c_ob_type.c_tp_name))
            print >>sys.stderr, message
            assert False, message
def fatalerror_notb(msg):
    """Abort with 'msg'.

    A variant of fatalerror() that doesn't print the RPython traceback.
    Untranslated it raises FatalError instead of aborting the process.
    """
    if not we_are_translated():
        raise FatalError(msg)
    # local imports keep the module importable without the lltypesystem
    from pypy.rpython.lltypesystem import lltype
    from pypy.rpython.lltypesystem.lloperation import llop
    llop.debug_fatalerror(lltype.Void, msg)
def bound_reached(cell, *args):
    """Handle a JIT counter reaching its threshold: either back off a
    little (if other code was compiled recently) or start tracing.
    """
    # bound reached, but we do a last check: if it is the first
    # time we reach the bound, or if another loop or bridge was
    # compiled since the last time we reached it, then decrease
    # the counter by a few percents instead. It should avoid
    # sudden bursts of JIT-compilation, and also corner cases
    # where we suddenly compile more than one loop because all
    # counters reach the bound at the same time, but where
    # compiling all but the first one is pointless.
    curgen = warmrunnerdesc.memory_manager.current_generation
    curgen = chr(intmask(curgen) & 0xFF)    # only use 8 bits
    if we_are_translated() and curgen != cell.extra_delay:
        cell.counter = int(self.THRESHOLD_LIMIT * 0.98)
        cell.extra_delay = curgen
        return
    #
    if not confirm_enter_jit(*args):
        cell.counter = 0
        return
    # start tracing
    from pypy.jit.metainterp.pyjitpl import MetaInterp
    metainterp = MetaInterp(metainterp_sd, jitdriver_sd)
    # set counter to -2, to mean "tracing in effect"
    cell.counter = -2
    try:
        metainterp.compile_and_run_once(jitdriver_sd, *args)
    finally:
        if cell.counter == -2:
            # tracing aborted without resetting the counter itself
            cell.counter = 0
def record_interpreter_traceback(self):
    """Records the current traceback inside the interpreter.
    This traceback is only useful to debug the interpreter, not the
    application.  No-op after translation.
    """
    if we_are_translated():
        return
    if RECORD_INTERPLEVEL_TRACEBACK:
        self.debug_excs.append(sys.exc_info())
def returns_bool_result(self):
    """Return whether this operation produces a boolean result."""
    num = self.getopnum()
    if not we_are_translated() and num < 0:
        # pseudo-opnums used only by tests
        return False
    assert num >= 0
    return opboolresult[num]
def cast_int_to_adr(x):
    """Cast the integer 'x' (zero or a value outside +/-2**20) to an
    Address."""
    assert x == 0 or x > (1 << 20) or x < (-1 << 20)
    if not we_are_translated():
        # indirect casting because the direct rffi cast doesn't work
        # with ll2ctypes
        return llmemory.cast_ptr_to_adr(rffi.cast(llmemory.GCREF, x))
    return rffi.cast(llmemory.Address, x)
def gc_thread_after_fork(result_of_fork, opaqueaddr):
    """To call just after fork()."""
    if not we_are_translated():
        # untranslated there is nothing to clean up; just sanity-check
        assert opaqueaddr == llmemory.NULL
        return
    llop.gc_thread_after_fork(lltype.Void, result_of_fork, opaqueaddr)
def start_of_page(addr, page_size):
    """Return the address of the start of the page that contains 'addr'."""
    if not we_are_translated():
        return _start_of_page_untranslated(addr, page_size)
    # round 'addr' down to a multiple of 'page_size'
    return addr - llmemory.cast_adr_to_int(addr) % page_size
def get_location_str(greenkey):
    """Return the source-location string for the given greenkey."""
    args = unwrap_greenkey(greenkey)
    location_fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr)
    res = location_fn(*args)
    if not we_are_translated() and isinstance(res, str):
        # tests may return a plain Python string directly
        return res
    return hlstr(res)
def generate_final_moves(self, final_vars_gv, locations):
    """Emit code moving each final variable to its target location:
    non-constants go through PUSH/POP pairs (so sources are all read
    before any destination is written), constants are MOVed directly.
    """
    # XXX naive algo for now
    pops = []
    for i in range(len(final_vars_gv)):
        v = final_vars_gv[i]
        if not v.is_const:
            srcloc = self.var2loc[v]
            dstloc = locations[i]
            if srcloc != dstloc:
                if not we_are_translated():
                    print ' > %20s--->->->---%s' % (srcloc, dstloc)
                if isinstance(srcloc, CCFLAG):
                    # condition-code source: materialize the flag on the
                    # stack via SETcc instead of pushing a register
                    self.mc.PUSH(imm8(0))
                    srcloc.SETCOND(self.mc, mem8(esp))
                else:
                    self.mc.PUSH(srcloc)
                pops.append(dstloc)
    # pop in reverse order to match the pushes above
    while pops:
        dstloc = pops.pop()
        self.mc.POP(dstloc)
    for i in range(len(final_vars_gv)):
        v = final_vars_gv[i]
        if v.is_const:
            dstloc = locations[i]
            self.mc.MOV(dstloc, imm(v.revealconst(lltype.Signed)))
def _really_force(self):
    """Force this virtual object: append the allocation operation and the
    SETFIELD_GC stores to the optimizer's newoperations list.  Raises
    InvalidLoop in the (not fully understood) source_op-is-None case.
    """
    if self.source_op is None:
        # this case should not occur; I only managed to get it once
        # in pypy-c-jit and couldn't reproduce it. The point is
        # that it relies on optimizefindnode.py computing exactly
        # the right level of specialization, and it seems that there
        # is still a corner case where it gets too specialized for
        # optimizeopt.py. Let's not crash in release-built
        # pypy-c-jit's. XXX find out when
        from pypy.rlib.debug import ll_assert
        ll_assert(False, "_really_force: source_op is None")
        raise InvalidLoop
    #
    newoperations = self.optimizer.newoperations
    newoperations.append(self.source_op)
    self.box = box = self.source_op.result
    #
    iteritems = self._fields.iteritems()
    if not we_are_translated():  # random order is fine, except for tests
        iteritems = list(iteritems)
        iteritems.sort(key=lambda (x, y): x.sort_key())
    for ofs, value in iteritems:
        if value.is_null():
            # null fields need no explicit store
            continue
        subbox = value.force_box()
        op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
                          descr=ofs)
        newoperations.append(op)
    # fields are materialized now; drop the virtual's field map
    self._fields = None
def __init__(self, translated=None):
    """Initialize the builder.  'translated=None' means "ask
    we_are_translated()"; tests may pass an explicit True/False.
    """
    if translated is None:
        translated = we_are_translated()
    if translated:
        # real mode: use the block-based machine-code builder
        self.init_block_builder()
    else:
        # test mode: fall back to a plain in-memory builder
        self._become_a_plain_block_builder()
def free_temp_buffers(self, space):
    """Release every raw buffer recorded in self.to_free and reset the
    list."""
    for raw_buf in self.to_free:
        if not we_are_translated():
            # invalidate the buffer, so that
            # test_keepalive_temp_buffer can fail
            raw_buf[0] = '\00'
        lltype.free(raw_buf, flavor='raw')
    self.to_free = []