def debug_collect_finish(self):
    """Print end-of-collection statistics and close the 'gc-collect'
    debug section.

    The body used to be wrapped in a dead ``if 1:`` guard (the remnant
    of a disabled ``start_time != -1`` timing condition) and interleaved
    with commented-out timing/counter code; both were dead and have been
    removed without changing behavior.
    """
    debug_print("| total space size ", self.space_size)
    debug_print("| number of objects alive ", self.num_alive_objs)
    debug_print("| used space size ", self.free - self.space)
    debug_print("| next collection after ", self.next_collect_after)
    debug_print("`----------------------------------------------")
    debug_stop("gc-collect")
def debug_collect_finish(self):
    """Log final statistics for a mark-compact collection and close the
    'gc-collect' debug section.

    Cleaned up: the always-true ``if 1:`` wrapper and the large block of
    commented-out timing statistics were dead code and are gone; the
    live debug output is unchanged.
    """
    debug_print("| total space size ", self.space_size)
    debug_print("| number of objects alive ", self.num_alive_objs)
    debug_print("| used space size ", self.free - self.space)
    debug_print("| next collection after ", self.next_collect_after)
    debug_print("`----------------------------------------------")
    debug_stop("gc-collect")
def get_total_memory_linux2(filename): debug_start("gc-hardware") result = -1.0 try: fd = os.open(filename, os.O_RDONLY, 0644) try: buf = os.read(fd, 4096) finally: os.close(fd) except OSError: pass else: if buf.startswith('MemTotal:'): start = _skipspace(buf, len('MemTotal:')) stop = start while stop < len(buf) and buf[stop].isdigit(): stop += 1 if start < stop: result = float(buf[start:stop]) * 1024.0 # assume kB if result < 0.0: debug_print("get_total_memory() failed") result = addressable_size else: debug_print("memtotal =", result) if result > addressable_size: result = addressable_size debug_stop("gc-hardware") return result
def f():
    """Entry point of a GIL stress test: spawn a second thread, run
    `runme` in both, and check that the interleaved per-thread counters
    came out consistent.

    Returns the total number of (thread-id, i) records collected.
    """
    # shared mutable state probed concurrently by both threads
    state.data = []
    state.datalen1 = 0
    state.datalen2 = 0
    state.datalen3 = 0
    state.datalen4 = 0
    state.threadlocals = gil.GILThreadLocals()
    state.threadlocals.setup_threads(space)
    thread.gc_thread_prepare()
    subident = thread.start_new_thread(bootstrap, ())
    mainident = thread.get_ident()
    runme(True)
    # busy-wait (with short sleeps) until both threads produced their
    # share of records; bail out after ~30s worth of iterations
    still_waiting = 3000
    while len(state.data) < 2*N:
        debug_print(len(state.data))
        if not still_waiting:
            raise ValueError("time out")
        still_waiting -= 1
        # when running on top of CPython we must release/reacquire the
        # GIL manually around the sleep
        if not we_are_translated(): gil.before_external_call()
        time.sleep(0.01)
        if not we_are_translated(): gil.after_external_call()
    debug_print("leaving!")
    # each thread must have appended its values of i in strict order
    i1 = i2 = 0
    for tid, i in state.data:
        if tid == mainident:
            assert i == i1; i1 += 1
        elif tid == subident:
            assert i == i2; i2 += 1
        else:
            assert 0
    assert i1 == N + skew
    assert i2 == N - skew
    return len(state.data)
def debug_print(self, indent, seen, bad):
    """Emit a one-line description of this NotVirtualInfo node, prefixed
    with '*' if the node is in `bad`.  Untranslated runs include the
    repr of the known class / constant box; translated runs print only
    the level name.
    """
    mark = "*" if self in bad else ""
    if we_are_translated():
        level_str = {
            LEVEL_UNKNOWN: "Unknown",
            LEVEL_NONNULL: "NonNull",
            LEVEL_KNOWNCLASS: "KnownClass",
            LEVEL_CONSTANT: "Constant",
        }[self.level]
    else:
        level_str = {
            LEVEL_UNKNOWN: "Unknown",
            LEVEL_NONNULL: "NonNull",
            LEVEL_KNOWNCLASS: "KnownClass(%r)" % self.known_class,
            LEVEL_CONSTANT: "Constant(%r)" % self.constbox,
        }[self.level]
    extra = ""
    if self.lenbound:
        extra = ", " + self.lenbound.bound.__repr__()
    debug_print(indent + mark + "NotVirtualInfo(%d" % self.position
                + ", " + level_str
                + ", " + self.intbound.__repr__()
                + extra + ")")
def disable_noninlinable_function(self, greenkey):
    """Permanently stop tracing into the function identified by
    `greenkey`, logging the decision under 'jit-disableinlining'."""
    cell = self.jit_cell_at_key(greenkey)
    cell.dont_trace_here = True
    debug_start("jit-disableinlining")
    debug_print("disabled inlining", self.get_location_str(greenkey))
    debug_stop("jit-disableinlining")
def f(x):
    """Smoke-test the debug_* API: open/print/close a category and poke
    the auxiliary entry points; returns have_debug_prints()."""
    debug_start("mycat")
    debug_print("foo", 2, "bar", x)
    debug_stop("mycat")
    debug_flush() # no-op; must not raise
    debug_offset() # should not explode at least
    return have_debug_prints()
def update(self, op):
    """Record the heap effects of `op` for cache invalidation.

    Side-effect-free, overflow and guard operations are ignored; field
    and array writes mark the touched descr (or the exact constant
    index) as unsafe; calls consult their EffectInfo.  Anything else
    dirties the whole heap.
    """
    if (op.has_no_side_effect() or op.is_ovf() or op.is_guard()):
        return
    opnum = op.getopnum()
    descr = op.getdescr()
    if (opnum == rop.DEBUG_MERGE_POINT):
        return
    if (opnum == rop.SETFIELD_GC or opnum == rop.SETFIELD_RAW):
        # a field write invalidates cached reads of that field descr
        self.unsafe_getitem[descr] = True
        return
    if (opnum == rop.SETARRAYITEM_GC or opnum == rop.SETARRAYITEM_RAW):
        index = op.getarg(1)
        if isinstance(index, Const):
            # constant index: only that slot of the array descr is dirty
            d = self.unsafe_getarrayitem_indexes.get(descr, None)
            if d is None:
                d = self.unsafe_getarrayitem_indexes[descr] = {}
            d[index.getint()] = True
        else:
            # unknown index: the whole array descr is dirty
            self.unsafe_getarrayitem[descr] = True
        return
    if opnum == rop.CALL:
        effectinfo = descr.get_extra_info()
        if effectinfo is not None:
            # the call declares exactly which fields/arrays it may write
            for fielddescr in effectinfo.write_descrs_fields:
                self.unsafe_getitem[fielddescr] = True
            for arraydescr in effectinfo.write_descrs_arrays:
                self.unsafe_getarrayitem[arraydescr] = True
            return
    # unknown side effects: invalidate everything
    debug_print("heap dirty due to op ", opnum)
    self.heap_dirty = True
def _print_stats(self):
    """Dump the accumulated JIT profiler counters and timings, one
    debug_print line per statistic."""
    cnt = self.counters
    tim = self.times
    calls = self.calls
    self._print_line_time("Tracing", cnt[TRACING], tim[TRACING])
    self._print_line_time("Backend", cnt[BACKEND], tim[BACKEND])
    line = "TOTAL: \t\t%f" % (self.tk - self.starttime, )
    debug_print(line)
    self._print_intline("ops", cnt[OPS])
    self._print_intline("recorded ops", cnt[RECORDED_OPS])
    self._print_intline(" calls", calls)
    self._print_intline("guards", cnt[GUARDS])
    self._print_intline("opt ops", cnt[OPT_OPS])
    self._print_intline("opt guards", cnt[OPT_GUARDS])
    self._print_intline("forcings", cnt[OPT_FORCINGS])
    self._print_intline("abort: trace too long", cnt[ABORT_TOO_LONG])
    self._print_intline("abort: compiling", cnt[ABORT_BRIDGE])
    self._print_intline("abort: vable escape", cnt[ABORT_ESCAPE])
    self._print_intline("abort: bad loop", cnt[ABORT_BAD_LOOP])
    self._print_intline("abort: force quasi-immut",
                        cnt[ABORT_FORCE_QUASIIMMUT])
    self._print_intline("nvirtuals", cnt[NVIRTUALS])
    self._print_intline("nvholes", cnt[NVHOLES])
    self._print_intline("nvreused", cnt[NVREUSED])
    cpu = self.cpu
    if cpu is not None:   # for some tests
        self._print_intline("Total # of loops", cpu.total_compiled_loops)
        self._print_intline("Total # of bridges", cpu.total_compiled_bridges)
        self._print_intline("Freed # of loops", cpu.total_freed_loops)
        self._print_intline("Freed # of bridges", cpu.total_freed_bridges)
def get_total_memory_linux(filename):
    """Return total physical memory in bytes, read from a Linux
    /proc/meminfo-style file; clamped to addressable_size, which is also
    the fallback on any read/parse failure."""
    debug_start("gc-hardware")
    result = -1.0   # sentinel: "not parsed yet"
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            buf = os.read(fd, 4096)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        if buf.startswith('MemTotal:'):
            start = _skipspace(buf, len('MemTotal:'))
            stop = start
            while stop < len(buf) and buf[stop].isdigit():
                stop += 1
            if start < stop:
                result = float(buf[start:stop]) * 1024.0 # assume kB
    if result < 0.0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal =", result)
        if result > addressable_size:
            result = addressable_size
    debug_stop("gc-hardware")
    return result
def get_L2cache_linux2(filename="/proc/cpuinfo"):
    """Parse /proc/cpuinfo and return the smallest per-CPU L2 cache size
    in bytes, or -1 if it cannot be determined."""
    debug_start("gc-hardware")
    L2cache = sys.maxint    # running minimum over all CPUs
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        # scan each "cache size : NNNN KB" line
        while True:
            start = _findend(data, '\ncache size', linepos)
            if start < 0:
                break    # done
            linepos = _findend(data, '\n', start)
            if linepos < 0:
                break    # no end-of-line??
            # *** data[start:linepos] == "   : 2048 KB\n"
            start = _skipspace(data, start)
            if data[start] != ':':
                continue
            # *** data[start:linepos] == ": 2048 KB\n"
            start = _skipspace(data, start + 1)
            # *** data[start:linepos] == "2048 KB\n"
            end = start
            while '0' <= data[end] <= '9':
                end += 1
            # *** data[start:end] == "2048"
            if start == end:
                continue
            number = int(data[start:end])
            # *** data[end:linepos] == " KB\n"
            end = _skipspace(data, end)
            if data[end] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = number * 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(
            lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
def make_guards(self, box):
    """Build and return the list of guard ResOperations that re-check
    everything this value-info knows about `box`: level (constant /
    known class / non-null), integer bounds, and length bounds."""
    guards = []
    if self.level == LEVEL_CONSTANT:
        # a GUARD_VALUE subsumes nonnull/class checks
        op = ResOperation(rop.GUARD_VALUE, [box, self.box], None)
        guards.append(op)
    elif self.level == LEVEL_KNOWNCLASS:
        # class check requires a preceding nonnull check
        op = ResOperation(rop.GUARD_NONNULL, [box], None)
        guards.append(op)
        op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None)
        guards.append(op)
    else:
        if self.level == LEVEL_NONNULL:
            op = ResOperation(rop.GUARD_NONNULL, [box], None)
            guards.append(op)
        self.intbound.make_guards(box, guards)
        if self.lenbound:
            # emit the right length-reading op for the container kind,
            # then guard the bound on the resulting length box
            lenbox = BoxInt()
            if self.lenbound.mode == MODE_ARRAY:
                op = ResOperation(rop.ARRAYLEN_GC, [box], lenbox,
                                  self.lenbound.descr)
            elif self.lenbound.mode == MODE_STR:
                op = ResOperation(rop.STRLEN, [box], lenbox,
                                  self.lenbound.descr)
            elif self.lenbound.mode == MODE_UNICODE:
                op = ResOperation(rop.UNICODELEN, [box], lenbox,
                                  self.lenbound.descr)
            else:
                debug_print("Unknown lenbound mode")
                assert False
            guards.append(op)
            self.lenbound.bound.make_guards(lenbox, guards)
    return guards
def debug_print(self, hdr='', bad=None):
    """Dump this VirtualState: a header line followed by one line per
    contained state item (items in `bad` get flagged)."""
    if bad is None:
        bad = {}
    debug_print(hdr + "VirtualState():")
    visited = {}
    for item in self.state:
        item.debug_print(" ", visited, bad)
def debug_print(self, indent, seen, bad):
    """Print one line describing this NotVirtualInfo; '*' marks nodes
    listed in `bad`.  The %r forms of known_class/constbox are only
    available before translation."""
    mark = ''
    if self in bad:
        mark = '*'
    if we_are_translated():
        l = {
            LEVEL_UNKNOWN: 'Unknown',
            LEVEL_NONNULL: 'NonNull',
            LEVEL_KNOWNCLASS: 'KnownClass',
            LEVEL_CONSTANT: 'Constant',
        }[self.level]
    else:
        l = {
            LEVEL_UNKNOWN: 'Unknown',
            LEVEL_NONNULL: 'NonNull',
            LEVEL_KNOWNCLASS: 'KnownClass(%r)' % self.known_class,
            LEVEL_CONSTANT: 'Constant(%r)' % self.constbox,
        }[self.level]
    lb = ''
    if self.lenbound:
        lb = ', ' + self.lenbound.bound.__repr__()
    debug_print(indent + mark + 'NotVirtualInfo(%d' % self.position +
                ', ' + l + ', ' + self.intbound.__repr__() + lb + ')')
def compiling_a_bridge(self):
    """Bump the global and per-loop bridge counters and log the
    allocation under 'jit-mem-looptoken-alloc'."""
    self.cpu.total_compiled_bridges += 1
    self.bridges_count += 1
    debug_start("jit-mem-looptoken-alloc")
    debug_print("allocating Bridge #", self.bridges_count,
                "of Loop #", self.number)
    debug_stop("jit-mem-looptoken-alloc")
def get_L2cache_linux2(filename="/proc/cpuinfo"):
    """Return the smallest L2 cache size (bytes) found in
    /proc/cpuinfo's 'cache size' lines, or -1 on failure."""
    debug_start("gc-hardware")
    L2cache = sys.maxint    # minimum over all CPUs seen so far
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        while True:
            start = _findend(data, '\ncache size', linepos)
            if start < 0:
                break    # done
            linepos = _findend(data, '\n', start)
            if linepos < 0:
                break    # no end-of-line??
            # *** data[start:linepos] == "   : 2048 KB\n"
            start = _skipspace(data, start)
            if data[start] != ':':
                continue
            # *** data[start:linepos] == ": 2048 KB\n"
            start = _skipspace(data, start + 1)
            # *** data[start:linepos] == "2048 KB\n"
            end = start
            while '0' <= data[end] <= '9':
                end += 1
            # *** data[start:end] == "2048"
            if start == end:
                continue
            number = int(data[start:end])
            # *** data[end:linepos] == " KB\n"
            end = _skipspace(data, end)
            if data[end] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = number * 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
def __del__(self):
    """On looptoken finalization, free the loop and its attached
    bridges on the CPU backend and update the freed counters."""
    debug_start("jit-mem-looptoken-free")
    debug_print("freeing Loop #", self.number, 'with',
                self.bridges_count, 'attached bridges')
    self.cpu.free_loop_and_bridges(self)
    self.cpu.total_freed_loops += 1
    self.cpu.total_freed_bridges += self.bridges_count
    debug_stop("jit-mem-looptoken-free")
def _emergency_initial_block(self, requested_size):
    """Raw-allocate memory before the GC is fully set up.

    This can be reached very early (e.g. while allocating a couple of
    strings in read_from_env()); the memory is deliberately leaked.
    """
    debug_start("gc-initial-block")
    debug_print("leaking", requested_size, "bytes")
    debug_stop("gc-initial-block")
    return llmemory.raw_malloc(requested_size)
def debug_print(self, logops):
    """Log every short-box mapping, one 'box: op' line per entry, under
    the 'jit-short-boxes' debug section."""
    debug_start('jit-short-boxes')
    for box, op in self.short_boxes.items():
        rhs = logops.repr_of_resop(op) if op else 'None'
        debug_print(logops.repr_of_arg(box) + ': ' + rhs)
    debug_stop('jit-short-boxes')
def debug_print(self, logops):
    """Log the short-boxes mapping ('box: op' or 'box: None' per entry)
    under the 'jit-short-boxes' debug section."""
    debug_start("jit-short-boxes")
    for box, op in self.short_boxes.items():
        if op:
            debug_print(logops.repr_of_arg(box) + ": " +
                        logops.repr_of_resop(op))
        else:
            debug_print(logops.repr_of_arg(box) + ": None")
    debug_stop("jit-short-boxes")
def _check_rawsize_alloced(self, size_estimate):
    """Account `size_estimate` bytes of raw allocation against the
    trigger; once the budget is exhausted, run a full semispace
    collection (logged under 'gc-rawsize-collect')."""
    self.large_objects_collect_trigger -= size_estimate
    if self.large_objects_collect_trigger >= 0:
        return
    debug_start("gc-rawsize-collect")
    debug_print("allocated",
                (self._initial_trigger - self.large_objects_collect_trigger),
                "bytes, triggering full collection")
    self.semispace_collect()
    debug_stop("gc-rawsize-collect")
def _check_rawsize_alloced(self, size_estimate, can_collect=True):
    """Subtract `size_estimate` from the raw-allocation budget; when it
    goes negative (and `can_collect` allows it), trigger a full
    semispace collection, logged under 'gc-rawsize-collect'."""
    self.large_objects_collect_trigger -= size_estimate
    if can_collect and self.large_objects_collect_trigger < 0:
        debug_start("gc-rawsize-collect")
        debug_print("allocated", (self._initial_trigger -
                                  self.large_objects_collect_trigger),
                    "bytes, triggering full collection")
        self.semispace_collect()
        debug_stop("gc-rawsize-collect")
def debug_print(self, logops):
    """Log all short-box entries ('box: op' / 'box: None') inside the
    'jit-short-boxes' debug section."""
    debug_start('jit-short-boxes')
    for box, op in self.short_boxes.items():
        if op:
            debug_print(
                logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op))
        else:
            debug_print(logops.repr_of_arg(box) + ': None')
    debug_stop('jit-short-boxes')
def disable_noninlinable_function(self, metainterp):
    """If the metainterp recorded a too-huge function, mark its green
    key as not-traceable-here and log it under 'jit-disableinlining'."""
    greenkey = metainterp.greenkey_of_huge_function
    if greenkey is None:
        return
    cell = self.jit_cell_at_key(greenkey)
    cell.dont_trace_here = True
    debug_start("jit-disableinlining")
    sd = self.warmrunnerdesc.metainterp_sd
    debug_print("disabled inlining", sd.state.get_location_str(greenkey))
    debug_stop("jit-disableinlining")
def crash_in_jit(e):
    """Report an unexpected exception `e` escaping the JIT.

    Untranslated: print details, optionally drop into pdb, and re-raise
    the original exception.  Translated: log via debug_print and raise
    CrashInJIT instead (Python 2 source).
    """
    if not we_are_translated():
        print "~~~ Crash in JIT!"
        print '~~~ %s: %s' % (e.__class__, e)
        # only start pdb when stdout has not been redirected
        if sys.stdout == sys.__stdout__:
            import pdb; pdb.post_mortem(sys.exc_info()[2])
        raise
    debug_print('~~~ Crash in JIT!')
    debug_print('~~~ %s' % (e,))
    raise history.CrashInJIT("crash in JIT")
def propagate_forward(self, op):
    """Dispatch `op` to the matching specialized optimization handler
    from `optimize_ops`, or fall back to emitting it unchanged."""
    if self.logops is not None:
        debug_print(self.logops.repr_of_resop(op))
    opnum = op.getopnum()
    handler = None
    for candidate_num, candidate_func in optimize_ops:
        if candidate_num == opnum:
            handler = candidate_func
            break
    if handler is not None:
        handler(self, op)
    else:
        self.emit_operation(op)
def _end(self, event): t0 = self.t1 self.t1 = self.timer() if not self.current: debug_print("BROKEN PROFILER DATA!") return ev1 = self.current.pop() if ev1 != event: debug_print("BROKEN PROFILER DATA!") return self.times[ev1] += self.t1 - t0
def get_total_memory_darwin(result):
    """Validate a Darwin total-memory reading: substitute
    addressable_size when the probe failed (result <= 0) and clamp the
    value to addressable_size otherwise."""
    debug_start("gc-hardware")
    if result <= 0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal = ", result)
        result = min(result, addressable_size)
    debug_stop("gc-hardware")
    return result
def debug_print(self, indent, seen, bad):
    """Recursively dump this node and its fieldstate children;
    already-seen nodes print an ellipsis to cut cycles, and nodes in
    `bad` are flagged with '*'."""
    mark = '*' if self in bad else ''
    self.debug_header(indent + mark)
    if self in seen:
        debug_print(indent + " ...")
        return
    seen[self] = True
    for child in self.fieldstate:
        child.debug_print(indent + " ", seen, bad)
def log_loop(self, inputargs, operations, number=0, type=None):
    """Log a loop's operations: unoptimized loops (type is None) go to
    'jit-log-noopt-loop'; optimized ones get a header line and go to
    'jit-log-opt-loop'."""
    if type is None:
        debug_start("jit-log-noopt-loop")
        self._log_operations(inputargs, operations)
        debug_stop("jit-log-noopt-loop")
        return
    debug_start("jit-log-opt-loop")
    debug_print("# Loop", number, ":", type,
                "with", len(operations), "ops")
    self._log_operations(inputargs, operations)
    debug_stop("jit-log-opt-loop")
def crash_in_jit(e):
    """Handle an unexpected exception `e` from the JIT: debug to the
    console (and pdb) untranslated, else log and raise CrashInJIT
    (Python 2 source)."""
    if not we_are_translated():
        print "~~~ Crash in JIT!"
        print '~~~ %s: %s' % (e.__class__, e)
        # avoid pdb when stdout is redirected (e.g. under a test runner)
        if sys.stdout == sys.__stdout__:
            import pdb
            pdb.post_mortem(sys.exc_info()[2])
        raise
    debug_print('~~~ Crash in JIT!')
    debug_print('~~~ %s' % (e, ))
    raise history.CrashInJIT("crash in JIT")
def log_bridge(self, inputargs, operations, number=-1):
    """Log a bridge's operations; number == -1 means unoptimized
    ('jit-log-noopt-bridge'), otherwise an optimized bridge out of
    guard `number` ('jit-log-opt-bridge')."""
    if number == -1:
        debug_start("jit-log-noopt-bridge")
        self._log_operations(inputargs, operations)
        debug_stop("jit-log-noopt-bridge")
        return
    debug_start("jit-log-opt-bridge")
    debug_print("# bridge out of Guard", number,
                "with", len(operations), "ops")
    self._log_operations(inputargs, operations)
    debug_stop("jit-log-opt-bridge")
def log_bridge(self, inputargs, operations, number=-1, ops_offset=None):
    """Log a bridge and return the logger's operation list; -1 selects
    the unoptimized channel, otherwise the optimized channel with a
    '# bridge out of Guard N' header."""
    if number == -1:
        debug_start("jit-log-noopt-bridge")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-noopt-bridge")
    else:
        debug_start("jit-log-opt-bridge")
        debug_print("# bridge out of Guard", number,
                    "with", len(operations), "ops")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-opt-bridge")
    return logops
def log_loop(self, inputargs, operations, number=0, type=None,
             ops_offset=None, name=''):
    """Log a loop and return the logger's operation list; type is None
    for unoptimized loops, otherwise a '# Loop N (name) : type' header
    precedes the optimized dump."""
    if type is None:
        debug_start("jit-log-noopt-loop")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-noopt-loop")
    else:
        debug_start("jit-log-opt-loop")
        debug_print("# Loop", number, '(%s)' % name, ":", type,
                    "with", len(operations), "ops")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-opt-loop")
    return logops
def collect_oldrefs_to_nursery(self):
    """Drain old_objects_pointing_to_young: for each old object, set
    GCFLAG_NO_YOUNG_PTRS and drag the young objects it references out
    of the nursery; logs how many objects were processed."""
    count = 0
    pending = self.old_objects_pointing_to_young
    while pending.non_empty():
        count += 1
        obj = pending.pop()
        hdr = self.header(obj)
        hdr.tid |= GCFLAG_NO_YOUNG_PTRS
        self.trace_and_drag_out_of_nursery(obj)
    debug_print("collect_oldrefs_to_nursery", count)
def compile_new_loop(metainterp, old_loop_tokens, greenkey, start,
                     start_resumedescr, full_preamble_needed=True):
    """Try to compile a new loop by closing the current history back
    to the first operation.

    Returns an existing loop token when the optimizer finds a reusable
    loop, None when the trace is invalid, or the freshly compiled
    loop's token otherwise.
    """
    from pypy.jit.metainterp.optimize import optimize_loop

    history = metainterp.history
    loop = create_empty_loop(metainterp)
    loop.inputargs = history.inputargs[:]
    for box in loop.inputargs:
        assert isinstance(box, Box)
    # make a copy, because optimize_loop can mutate the ops and descrs
    h_ops = history.operations
    loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))]
    metainterp_sd = metainterp.staticdata
    jitdriver_sd = metainterp.jitdriver_sd
    loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd)
    loop.token = loop_token
    loop.operations[-1].setdescr(loop_token)  # patch the target of the JUMP
    # set up the preamble loop sharing the same input arguments
    loop.preamble = create_empty_loop(metainterp, "Preamble ")
    loop.preamble.inputargs = loop.inputargs
    loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd)
    loop.preamble.start_resumedescr = start_resumedescr
    try:
        old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop,
                                       jitdriver_sd.warmstate.enable_opts)
    except InvalidLoop:
        debug_print("compile_new_loop: got an InvalidLoop")
        return None
    if old_loop_token is not None:
        # the optimizer found an existing compatible loop: reuse it
        metainterp.staticdata.log("reusing old loop")
        return old_loop_token
    if loop.preamble.operations is not None:
        # the optimizer produced a separate preamble: compile the peeled
        # loop, then either compile the preamble as the entry bridge or
        # just log its short preamble
        send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop,
                             "loop")
        record_loop_or_bridge(metainterp_sd, loop)
        token = loop.preamble.token
        if full_preamble_needed:
            send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd,
                                 loop.preamble, "entry bridge")
            insert_loop_token(old_loop_tokens, loop.preamble.token)
            jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp(
                greenkey, loop.preamble.token)
            record_loop_or_bridge(metainterp_sd, loop.preamble)
        elif token.short_preamble:
            short = token.short_preamble[-1]
            metainterp_sd.logger_ops.log_short_preamble(short.inputargs,
                                                        short.operations)
        return token
    else:
        # no preamble: compile the single loop directly
        send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop,
                             "loop")
        insert_loop_token(old_loop_tokens, loop_token)
        jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp(
            greenkey, loop.token)
        record_loop_or_bridge(metainterp_sd, loop)
        return loop_token
def _compile_and_run(self, t, entry_point, entry_point_graph, args): from pypy.translator.c.genc import CStandaloneBuilder as CBuilder # XXX patch exceptions cbuilder = CBuilder(t, entry_point, config=t.config) cbuilder.generate_source() self._check_cbuilder(cbuilder) exe_name = cbuilder.compile() debug_print('---------- Test starting ----------') stdout = cbuilder.cmdexec(" ".join([str(arg) for arg in args])) res = int(stdout) debug_print('---------- Test done (%d) ----------' % (res,)) return res
def _compile_and_run(self, t, entry_point, entry_point_graph, args): from pypy.translator.c.genc import CStandaloneBuilder as CBuilder # XXX patch exceptions cbuilder = CBuilder(t, entry_point, config=t.config) cbuilder.generate_source() self._check_cbuilder(cbuilder) exe_name = cbuilder.compile() debug_print('---------- Test starting ----------') stdout = cbuilder.cmdexec(" ".join([str(arg) for arg in args])) res = int(stdout) debug_print('---------- Test done (%d) ----------' % (res, )) return res
def __init__(self, cpu, number):
    """Create a loop token for loop `number` on `cpu`, bump the global
    compiled-loop counter, and log the allocation."""
    cpu.total_compiled_loops += 1
    self.cpu = cpu
    self.number = number
    self.bridges_count = 0   # bridges attached to this loop so far
    # This growing list gives the 'descr_number' of all fail descrs
    # that belong to this loop or to a bridge attached to it.
    # Filled by the frontend calling record_faildescr_index().
    self.faildescr_indices = []
    self.invalidate_positions = []
    debug_start("jit-mem-looptoken-alloc")
    debug_print("allocating Loop #", self.number)
    debug_stop("jit-mem-looptoken-alloc")
def _dump(self, addr, logname, backend=None):
    """Hex-dump the machine code assembled so far (starting at raw
    address `addr`) into the `logname` debug section, preceded by
    BACKEND and SYS_EXECUTABLE header lines."""
    debug_start(logname)
    if have_debug_prints():
        #
        if backend is not None:
            debug_print('BACKEND', backend)
        #
        from pypy.jit.backend.hlinfo import highleveljitinfo
        if highleveljitinfo.sys_executable:
            debug_print('SYS_EXECUTABLE', highleveljitinfo.sys_executable)
        else:
            debug_print('SYS_EXECUTABLE', '??')
        #
        # two hex digits per emitted byte, read through a raw char ptr
        HEX = '0123456789ABCDEF'
        dump = []
        src = rffi.cast(rffi.CCHARP, addr)
        for p in range(self.get_relative_pos()):
            o = ord(src[p])
            dump.append(HEX[o >> 4])
            dump.append(HEX[o & 15])
        debug_print(
            'CODE_DUMP',
            '@%x' % addr,
            '+0 ',   # backwards compatibility
            ''.join(dump))
        #
    debug_stop(logname)
def set_nursery_size(self, newsize):
    """Resize the nursery to `newsize` (clamped between
    min_nursery_size and half the space size), recompute the derived
    size thresholds, and force a full collection."""
    debug_start("gc-set-nursery-size")
    if newsize < self.min_nursery_size:
        newsize = self.min_nursery_size
    if newsize > self.space_size // 2:
        newsize = self.space_size // 2
    # Compute the new bounds for how large young objects can be
    # (larger objects are allocated directly old).   XXX adjust
    self.nursery_size = newsize
    self.largest_young_fixedsize = self.get_young_fixedsize(newsize)
    self.largest_young_var_basesize = self.get_young_var_basesize(newsize)
    # nursery_scale: largest shift such that
    # min_nursery_size << scale <= newsize
    scale = 0
    while (self.min_nursery_size << (scale + 1)) <= newsize:
        scale += 1
    self.nursery_scale = scale
    debug_print("nursery_size =", newsize)
    debug_print("largest_young_fixedsize =",
                self.largest_young_fixedsize)
    debug_print("largest_young_var_basesize =",
                self.largest_young_var_basesize)
    debug_print("nursery_scale =", scale)
    # we get the following invariant:
    assert self.nursery_size >= (self.min_nursery_size << scale)
    # Force a full collect to remove the current nursery whose size
    # no longer matches the bounds that we just computed.  This must
    # be done after changing the bounds, because it might re-create
    # a new nursery (e.g. if it invokes finalizers).
    self.semispace_collect()
    debug_stop("gc-set-nursery-size")
def rollback_maybe(self, msg, op):
    """Abandon the call-shortcut currently being built (if any): re-emit
    the prepare op, its argument ops, and every delayed op in their
    original order."""
    funcinfo = self.funcinfo
    if funcinfo is None:
        return   # nothing to rollback
    if self.logops is not None:
        debug_print('rollback: ' + msg + ': ',
                    self.logops.repr_of_resop(op))
    # clear funcinfo before re-emitting, otherwise emit_operation would
    # recurse back into us
    self.funcinfo = None
    self.emit_operation(funcinfo.prepare_op)
    for argop in funcinfo.opargs:
        self.emit_operation(argop)
    for delayed_op in funcinfo.delayed_ops:
        self.emit_operation(delayed_op)
def runme(main=False):
    """Worker body run by both threads of the GIL stress test.

    Appends (thread-id, i) records while bumping four shadow counters;
    the asserts fire if another thread sneaks in between the updates,
    i.e. if the GIL is broken.  The main thread iterates N+skew times,
    the other N-skew times (bool `main` indexes the two-element list).
    """
    j = 0
    for i in range(N + [-skew, skew][main]):
        state.datalen1 += 1   # try to crash if the GIL is not
        state.datalen2 += 1   # correctly acquired
        state.data.append((thread.get_ident(), i))
        state.datalen3 += 1
        state.datalen4 += 1
        assert state.datalen1 == len(state.data)
        assert state.datalen2 == len(state.data)
        assert state.datalen3 == len(state.data)
        assert state.datalen4 == len(state.data)
        debug_print(main, i, state.datalen4)
        gil.do_yield_thread()
        assert i == j
        j += 1