def get_total_memory_linux(filename):
    debug_start("gc-hardware")
    result = -1.0
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            buf = os.read(fd, 4096)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        if buf.startswith('MemTotal:'):
            start = _skipspace(buf, len('MemTotal:'))
            stop = start
            while stop < len(buf) and buf[stop].isdigit():
                stop += 1
            if start < stop:
                result = float(buf[start:stop]) * 1024.0   # assume kB
    if result < 0.0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal =", result)
        if result > addressable_size:
            result = addressable_size
    debug_stop("gc-hardware")
    return result
def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs,
                           operations, original_loop_token):
    n = metainterp_sd.cpu.get_fail_descr_number(faildescr)
    if not we_are_translated():
        show_procedures(metainterp_sd)
        seen = dict.fromkeys(inputargs)
        TreeLoop.check_consistency_of_branch(operations, seen)
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                  original_loop_token, operations, "bridge",
                                  fail_descr_no=n)
        hooks.before_compile_bridge(debug_info)
    else:
        hooks = None
        debug_info = None
    operations = get_deep_immutable_oplist(operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs,
                                    operations, original_loop_token)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile_bridge(debug_info)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new bridge")
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_bridge(inputargs, operations, n, ops_offset)
def get_total_memory_linux2(filename):
    debug_start("gc-hardware")
    result = -1.0
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            buf = os.read(fd, 4096)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        if buf.startswith('MemTotal:'):
            start = _skipspace(buf, len('MemTotal:'))
            stop = start
            while stop < len(buf) and buf[stop].isdigit():
                stop += 1
            if start < stop:
                result = float(buf[start:stop]) * 1024.0   # assume kB
    if result < 0.0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal =", result)
        if result > addressable_size:
            result = addressable_size
    debug_stop("gc-hardware")
    return result
def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type):
    jitdriver_sd.on_compile(metainterp_sd.logger_ops, loop.token,
                            loop.operations, type, greenkey)
    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    globaldata = metainterp_sd.globaldata
    loop_token = loop.token
    loop_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_loop(metainterp_sd, loop)
        loop.check_consistency()
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        ops_offset = metainterp_sd.cpu.compile_loop(loop.inputargs, operations,
                                                    loop.token, name=loopname)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        if type != "entry bridge":
            metainterp_sd.stats.compiled()
        else:
            loop._ignore_during_counting = True
    metainterp_sd.log("compiled new " + type)
    #
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset)
    short = loop.token.short_preamble
    if short:
        metainterp_sd.logger_ops.log_short_preamble(short[-1].inputargs,
                                                    short[-1].operations)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(loop.token)
def set_nursery_size(self, newsize):
    debug_start("gc-set-nursery-size")
    if newsize < self.min_nursery_size:
        newsize = self.min_nursery_size
    if newsize > self.space_size // 2:
        newsize = self.space_size // 2
    # Compute the new bounds for how large young objects can be
    # (larger objects are allocated directly old).   XXX adjust
    self.nursery_size = newsize
    self.largest_young_fixedsize = self.get_young_fixedsize(newsize)
    self.largest_young_var_basesize = self.get_young_var_basesize(newsize)
    scale = 0
    while (self.min_nursery_size << (scale + 1)) <= newsize:
        scale += 1
    self.nursery_scale = scale
    debug_print("nursery_size =", newsize)
    debug_print("largest_young_fixedsize =", self.largest_young_fixedsize)
    debug_print("largest_young_var_basesize =",
                self.largest_young_var_basesize)
    debug_print("nursery_scale =", scale)
    # we get the following invariant:
    assert self.nursery_size >= (self.min_nursery_size << scale)
    # Force a full collect to remove the current nursery whose size
    # no longer matches the bounds that we just computed.  This must
    # be done after changing the bounds, because it might re-create
    # a new nursery (e.g. if it invokes finalizers).
    self.semispace_collect()
    debug_stop("gc-set-nursery-size")
def _dump(self, addr, logname, backend=None):
    debug_start(logname)
    if have_debug_prints():
        #
        if backend is not None:
            debug_print('BACKEND', backend)
        #
        from pypy.jit.backend.hlinfo import highleveljitinfo
        if highleveljitinfo.sys_executable:
            debug_print('SYS_EXECUTABLE', highleveljitinfo.sys_executable)
        else:
            debug_print('SYS_EXECUTABLE', '??')
        #
        HEX = '0123456789ABCDEF'
        dump = []
        src = rffi.cast(rffi.CCHARP, addr)
        for p in range(self.get_relative_pos()):
            o = ord(src[p])
            dump.append(HEX[o >> 4])
            dump.append(HEX[o & 15])
        debug_print('CODE_DUMP',
                    '@%x' % addr,
                    '+0 ',       # backwards compatibility
                    ''.join(dump))
        #
    debug_stop(logname)
def debug_collect_start(self, requested_size):
    if 1:   # have_debug_prints():
        debug_start("gc-collect")
        debug_print()
        debug_print(".----------- Full collection -------------------")
        debug_print("| requested size:", requested_size)
def get_L2cache_linux2(filename="/proc/cpuinfo"):
    debug_start("gc-hardware")
    L2cache = sys.maxint
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        while True:
            start = _findend(data, '\ncache size', linepos)
            if start < 0:
                break    # done
            linepos = _findend(data, '\n', start)
            if linepos < 0:
                break    # no end-of-line??
            # *** data[start:linepos] == " : 2048 KB\n"
            start = _skipspace(data, start)
            if data[start] != ':':
                continue
            # *** data[start:linepos] == ": 2048 KB\n"
            start = _skipspace(data, start + 1)
            # *** data[start:linepos] == "2048 KB\n"
            end = start
            while '0' <= data[end] <= '9':
                end += 1
            # *** data[start:end] == "2048"
            if start == end:
                continue
            number = int(data[start:end])
            # *** data[end:linepos] == " KB\n"
            end = _skipspace(data, end)
            if data[end] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = number * 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(
            lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
def optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts):
    debug_start("jit-optimize")
    try:
        return _optimize_loop(metainterp_sd, old_loop_tokens, loop,
                              enable_opts)
    finally:
        debug_stop("jit-optimize")
def compiling_a_bridge(self):
    self.cpu.total_compiled_bridges += 1
    self.bridges_count += 1
    debug_start("jit-mem-looptoken-alloc")
    debug_print("allocating Bridge #", self.bridges_count,
                "of Loop #", self.number)
    debug_stop("jit-mem-looptoken-alloc")
def f(x):
    debug_start("mycat")
    debug_print("foo", 2, "bar", x)
    debug_stop("mycat")
    debug_flush()       # does nothing
    debug_offset()      # should not explode at least
    return have_debug_prints()
def disable_noninlinable_function(self, greenkey):
    cell = self.jit_cell_at_key(greenkey)
    cell.dont_trace_here = True
    debug_start("jit-disableinlining")
    loc = self.get_location_str(greenkey)
    debug_print("disabled inlining", loc)
    debug_stop("jit-disableinlining")
def send_loop_to_backend(metainterp_sd, loop, type):
    globaldata = metainterp_sd.globaldata
    loop_token = loop.token
    loop_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type)
    if not we_are_translated():
        show_loop(metainterp_sd, loop)
        loop.check_consistency()
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        metainterp_sd.cpu.compile_loop(loop.inputargs, loop.operations,
                                       loop.token)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        if type != "entry bridge":
            metainterp_sd.stats.compiled()
        else:
            loop._ignore_during_counting = True
    metainterp_sd.log("compiled new " + type)
def _dump(self, addr, logname, backend=None):
    debug_start(logname)
    if have_debug_prints():
        #
        if backend is not None:
            debug_print('BACKEND', backend)
        #
        from pypy.jit.backend.hlinfo import highleveljitinfo
        if highleveljitinfo.sys_executable:
            debug_print('SYS_EXECUTABLE', highleveljitinfo.sys_executable)
        #
        HEX = '0123456789ABCDEF'
        dump = []
        src = rffi.cast(rffi.CCHARP, addr)
        for p in range(self.get_relative_pos()):
            o = ord(src[p])
            dump.append(HEX[o >> 4])
            dump.append(HEX[o & 15])
        debug_print('CODE_DUMP',
                    '@%x' % addr,
                    '+0 ',       # backwards compatibility
                    ''.join(dump))
        #
    debug_stop(logname)
def dump_storage(storage, liveboxes):
    "For profiling only."
    from pypy.rlib.objectmodel import compute_unique_id
    debug_start("jit-resume")
    if have_debug_prints():
        debug_print('Log storage', compute_unique_id(storage))
        frameinfo = storage.rd_frame_info_list
        while frameinfo is not None:
            try:
                jitcodename = frameinfo.jitcode.name
            except AttributeError:
                jitcodename = str(compute_unique_id(frameinfo.jitcode))
            debug_print('\tjitcode/pc', jitcodename,
                        frameinfo.pc, frameinfo.exception_target,
                        'at', compute_unique_id(frameinfo))
            frameinfo = frameinfo.prev
        numb = storage.rd_numb
        while numb is not None:
            debug_print('\tnumb', str([untag(i) for i in numb.nums]),
                        'at', compute_unique_id(numb))
            numb = numb.prev
        for const in storage.rd_consts:
            debug_print('\tconst', const.repr_rpython())
        for box in liveboxes:
            if box is None:
                debug_print('\tbox', 'None')
            else:
                debug_print('\tbox', box.repr_rpython())
        if storage.rd_virtuals is not None:
            for virtual in storage.rd_virtuals:
                if virtual is None:
                    debug_print('\t\t', 'None')
                else:
                    virtual.debug_prints()
    debug_stop("jit-resume")
def __del__(self):
    debug_start("jit-mem-looptoken-free")
    debug_print("freeing Loop #", self.number, 'with',
                self.bridges_count, 'attached bridges')
    self.cpu.free_loop_and_bridges(self)
    self.cpu.total_freed_loops += 1
    self.cpu.total_freed_bridges += self.bridges_count
    debug_stop("jit-mem-looptoken-free")
def _emergency_initial_block(self, requested_size):
    # xxx before the GC is fully setup, we might get there.  Hopefully
    # we will only allocate a couple of strings, e.g. in read_from_env().
    # Just allocate them raw and leak them.
    debug_start("gc-initial-block")
    debug_print("leaking", requested_size, "bytes")
    debug_stop("gc-initial-block")
    return llmemory.raw_malloc(requested_size)
def debug_print(self, logops):
    debug_start('jit-short-boxes')
    for box, op in self.short_boxes.items():
        if op:
            debug_print(logops.repr_of_arg(box) + ': ' +
                        logops.repr_of_resop(op))
        else:
            debug_print(logops.repr_of_arg(box) + ': None')
    debug_stop('jit-short-boxes')
def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type):
    vinfo = jitdriver_sd.virtualizable_info
    if vinfo is not None:
        patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd)
    original_jitcell_token = loop.original_jitcell_token
    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    globaldata = metainterp_sd.globaldata
    original_jitcell_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_procedures(metainterp_sd, loop)
        loop.check_consistency()
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                  original_jitcell_token, loop.operations,
                                  type, greenkey)
        hooks.before_compile(debug_info)
    else:
        debug_info = None
        hooks = None
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = do_compile_loop(metainterp_sd, loop.inputargs, operations,
                                  original_jitcell_token, name=loopname)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile(debug_info)
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new " + type)
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset, name=loopname)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(
            original_jitcell_token)
def semispace_collect(self, size_changing=False):
    debug_start("gc-collect")
    debug_print()
    debug_print(".----------- Full collection ------------------")
    start_usage = self.free - self.tospace
    debug_print("| used before collection: ", start_usage, "bytes")
    # start_time = time.time()
    # llop.debug_print(lltype.Void, 'semispace_collect', int(size_changing))

    # Switch the spaces.  We copy everything over to the empty space
    # (self.fromspace at the beginning of the collection), and clear the old
    # one (self.tospace at the beginning).  Their purposes will be reversed
    # for the next collection.
    tospace = self.fromspace
    fromspace = self.tospace
    self.fromspace = fromspace
    self.tospace = tospace
    self.top_of_space = tospace + self.space_size
    scan = self.free = tospace
    self.starting_full_collect()
    self.collect_roots()
    if self.run_finalizers.non_empty():
        self.update_run_finalizers()
    scan = self.scan_copied(scan)
    if self.objects_with_finalizers.non_empty():
        scan = self.deal_with_objects_with_finalizers(scan)
    if self.objects_with_weakrefs.non_empty():
        self.invalidate_weakrefs()
    self.update_objects_with_id()
    self.finished_full_collect()
    self.debug_check_consistency()
    if not size_changing:
        llarena.arena_reset(fromspace, self.space_size, True)
        self.record_red_zone()
        self.execute_finalizers()
    # llop.debug_print(lltype.Void, 'collected', self.space_size,
    #                  size_changing, self.top_of_space - self.free)
    if have_debug_prints():
        # end_time = time.time()
        # elapsed_time = end_time - start_time
        # self.total_collection_time += elapsed_time
        self.total_collection_count += 1
        # total_program_time = end_time - self.program_start_time
        end_usage = self.free - self.tospace
        debug_print("| used after collection: ", end_usage, "bytes")
        debug_print("| freed: ", start_usage - end_usage, "bytes")
        debug_print("| size of each semispace: ", self.space_size, "bytes")
        debug_print("| fraction of semispace now used: ",
                    end_usage * 100.0 / self.space_size, "%")
        # ct = self.total_collection_time
        cc = self.total_collection_count
        debug_print("| number of semispace_collects: ", cc)
        # debug_print("| i.e.: ",
        #             cc / total_program_time, "per second")
        # debug_print("| total time in semispace_collect: ",
        #             ct, "seconds")
        # debug_print("| i.e.: ",
        #             ct * 100.0 / total_program_time, "%")
        debug_print("`----------------------------------------------")
    debug_stop("gc-collect")
def _check_rawsize_alloced(self, size_estimate):
    self.large_objects_collect_trigger -= size_estimate
    if self.large_objects_collect_trigger < 0:
        debug_start("gc-rawsize-collect")
        debug_print("allocated", (self._initial_trigger -
                                  self.large_objects_collect_trigger),
                    "bytes, triggering full collection")
        self.semispace_collect()
        debug_stop("gc-rawsize-collect")
def optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts,
                    inline_short_preamble=True, retraced=False):
    debug_start("jit-optimize")
    try:
        return _optimize_bridge(metainterp_sd, old_loop_tokens, bridge,
                                enable_opts, inline_short_preamble, retraced)
    finally:
        debug_stop("jit-optimize")
def _check_rawsize_alloced(self, size_estimate, can_collect=True):
    self.large_objects_collect_trigger -= size_estimate
    if can_collect and self.large_objects_collect_trigger < 0:
        debug_start("gc-rawsize-collect")
        debug_print("allocated", (self._initial_trigger -
                                  self.large_objects_collect_trigger),
                    "bytes, triggering full collection")
        self.semispace_collect()
        debug_stop("gc-rawsize-collect")
def disable_noninlinable_function(self, metainterp):
    greenkey = metainterp.greenkey_of_huge_function
    if greenkey is not None:
        cell = self.jit_cell_at_key(greenkey)
        cell.dont_trace_here = True
        debug_start("jit-disableinlining")
        sd = self.warmrunnerdesc.metainterp_sd
        loc = sd.state.get_location_str(greenkey)
        debug_print("disabled inlining", loc)
        debug_stop("jit-disableinlining")
def log_loop(self, inputargs, operations, number=0, type=None):
    if type is None:
        debug_start("jit-log-noopt-loop")
        self._log_operations(inputargs, operations)
        debug_stop("jit-log-noopt-loop")
    else:
        debug_start("jit-log-opt-loop")
        debug_print("# Loop", number, ":", type,
                    "with", len(operations), "ops")
        self._log_operations(inputargs, operations)
        debug_stop("jit-log-opt-loop")
def log_bridge(self, inputargs, operations, number=-1):
    if number == -1:
        debug_start("jit-log-noopt-bridge")
        self._log_operations(inputargs, operations)
        debug_stop("jit-log-noopt-bridge")
    else:
        debug_start("jit-log-opt-bridge")
        debug_print("# bridge out of Guard", number,
                    "with", len(operations), "ops")
        self._log_operations(inputargs, operations)
        debug_stop("jit-log-opt-bridge")
def get_total_memory_darwin(result):
    debug_start("gc-hardware")
    if result <= 0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal = ", result)
        if result > addressable_size:
            result = addressable_size
    debug_stop("gc-hardware")
    return result
def log_bridge(self, inputargs, operations, number=-1, ops_offset=None):
    if number == -1:
        debug_start("jit-log-noopt-bridge")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-noopt-bridge")
    else:
        debug_start("jit-log-opt-bridge")
        debug_print("# bridge out of Guard", number,
                    "with", len(operations), "ops")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-opt-bridge")
    return logops
def log_loop(self, inputargs, operations, number=0, type=None,
             ops_offset=None, name=''):
    if type is None:
        debug_start("jit-log-noopt-loop")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-noopt-loop")
    else:
        debug_start("jit-log-opt-loop")
        debug_print("# Loop", number, '(%s)' % name, ":", type,
                    "with", len(operations), "ops")
        logops = self._log_operations(inputargs, operations, ops_offset)
        debug_stop("jit-log-opt-loop")
    return logops
def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type):
    vinfo = jitdriver_sd.virtualizable_info
    if vinfo is not None:
        patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd)
    original_jitcell_token = loop.original_jitcell_token
    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    globaldata = metainterp_sd.globaldata
    original_jitcell_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_procedures(metainterp_sd, loop)
        loop.check_consistency()
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                  original_jitcell_token, loop.operations,
                                  type, greenkey)
        hooks.before_compile(debug_info)
    else:
        debug_info = None
        hooks = None
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations,
                                                 original_jitcell_token,
                                                 name=loopname)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile(debug_info)
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new " + type)
    #
    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset, name=loopname)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(
            original_jitcell_token)
def __init__(self, cpu, number):
    cpu.total_compiled_loops += 1
    self.cpu = cpu
    self.number = number
    self.bridges_count = 0
    # This growing list gives the 'descr_number' of all fail descrs
    # that belong to this loop or to a bridge attached to it.
    # Filled by the frontend calling record_faildescr_index().
    self.faildescr_indices = []
    self.invalidate_positions = []
    debug_start("jit-mem-looptoken-alloc")
    debug_print("allocating Loop #", self.number)
    debug_stop("jit-mem-looptoken-alloc")
def entry_point(argv):
    x = "got:"
    debug_start("mycat")
    if have_debug_prints():
        x += "b"
    debug_print("foo", 2, "bar", 3)
    debug_start("cat2")
    if have_debug_prints():
        x += "c"
    debug_print("baz")
    debug_stop("cat2")
    if have_debug_prints():
        x += "d"
    debug_print("bok")
    debug_stop("mycat")
    if have_debug_prints():
        x += "a"
    debug_print("toplevel")
    os.write(1, x + '.\n')
    return 0
def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True):
    """Optimize loop.operations to remove internal overheadish operations.
    """
    debug_start("jit-optimize")
    try:
        loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs,
                                                          loop.operations)
        optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts)
        if unroll:
            optimize_unroll(metainterp_sd, loop, optimizations,
                            inline_short_preamble)
        else:
            optimizer = Optimizer(metainterp_sd, loop, optimizations)
            optimizer.propagate_all_forward()
    finally:
        debug_stop("jit-optimize")
def compile(self):
    # ----
    debug_start('jit-backend-emit_ops')
    if self.nocast:
        self.compute_types()
    self.emit_load_inputargs()
    self.emit_preamble()
    self.emit_operations(self.cliloop.operations)
    self.emit_branches()
    self.emit_end()
    debug_stop('jit-backend-emit_ops')
    # ----
    debug_start('jit-backend-finish_code')
    res = self.finish_code()
    debug_stop('jit-backend-finish_code')
    return res
def send_bridge_to_backend(metainterp_sd, faildescr, inputargs, operations):
    n = metainterp_sd.cpu.get_fail_descr_number(faildescr)
    metainterp_sd.logger_ops.log_bridge(inputargs, operations, n)
    if not we_are_translated():
        show_loop(metainterp_sd)
        TreeLoop.check_consistency_of(inputargs, operations)
        pass
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations)
    finally:
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new bridge")
def entry_point(argv):
    x = "got:"
    debug_start("mycat")
    if have_debug_prints():
        x += "b"
    debug_print("foo", r_longlong(2), "bar", 3)
    debug_start("cat2")
    if have_debug_prints():
        x += "c"
    debug_print("baz")
    debug_stop("cat2")
    if have_debug_prints():
        x += "d"
    debug_print("bok")
    debug_stop("mycat")
    if have_debug_prints():
        x += "a"
    debug_print("toplevel")
    debug_flush()
    os.write(1, x + "." + str(debug_offset()) + '.\n')
    return 0
def get_L2cache_darwin():
    """Try to estimate the best nursery size at run-time, depending
    on the machine we are running on.
    """
    debug_start("gc-hardware")
    L2cache = get_darwin_sysctl_signed("hw.l2cachesize")
    L3cache = get_darwin_sysctl_signed("hw.l3cachesize")
    debug_print("L2cache =", L2cache)
    debug_print("L3cache =", L3cache)
    debug_stop("gc-hardware")
    mangled = L2cache + L3cache
    if mangled > 0:
        return mangled
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(
            lltype.Void,
            "Warning: cannot find your CPU L2 cache size with sysctl()")
        return -1
def _kill_old_loops_now(self):
    debug_start("jit-mem-collect")
    oldtotal = len(self.alive_loops)
    #print self.alive_loops.keys()
    debug_print("Current generation:", self.current_generation)
    debug_print("Loop tokens before:", oldtotal)
    max_generation = self.current_generation - (self.max_age - 1)
    for looptoken in self.alive_loops.keys():
        if (0 <= looptoken.generation < max_generation or
                looptoken.invalidated):
            del self.alive_loops[looptoken]
    newtotal = len(self.alive_loops)
    debug_print("Loop tokens freed: ", oldtotal - newtotal)
    debug_print("Loop tokens left: ", newtotal)
    #print self.alive_loops.keys()
    if not we_are_translated() and oldtotal != newtotal:
        looptoken = None
        from pypy.rlib import rgc
        # a single one is not enough for all tests :-(
        rgc.collect(); rgc.collect(); rgc.collect()
    debug_stop("jit-mem-collect")