def _do(self, goal, func, *args, **kwds):
    """Run one translation task *func* (named *goal*), unless already done.

    Logs the task title, brackets the work in a 'translation-task' debug
    section, and times it.  If the task raises Instrument, re-runs the
    driver up to 'compile' (which is expected not to return normally).
    """
    title = func.task_title
    if goal in self.done:
        self.log.info("already done: %s" % title)
        return
    else:
        self.log.info("%s..." % title)
    debug_start('translation-task')
    debug_print('starting', goal)
    self.timer.start_event(goal)
    try:
        instrument = False
        try:
            if goal in PROFILE:
                res = self._profile(goal, func)
            else:
                res = func()
        except Instrument:
            # Instrumentation requested: remember it and handle below,
            # outside the inner try.
            instrument = True
        if not func.task_idempotent:
            # Non-idempotent tasks are marked done so they are not re-run.
            self.done[goal] = True
        if instrument:
            # Restart the driver to (re)compile; this should not return.
            self.proceed('compile')
            assert False, 'we should not get here'
    finally:
        # Best-effort cleanup of the debug section and the timer; swallow
        # everything except KeyboardInterrupt/SystemExit so an error here
        # cannot mask the task's own outcome.
        try:
            debug_stop('translation-task')
            self.timer.end_event(goal)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            pass
    #import gc; gc.dump_rpy_heap('rpyheap-after-%s.dump' % goal)
    return res
def disable_noninlinable_function(self, greenkey):
    """Mark the JIT cell for *greenkey* so tracing never starts there again."""
    jit_cell = self.JitCell.ensure_jit_cell_at_key(greenkey)
    jit_cell.flags |= JC_DONT_TRACE_HERE
    debug_start("jit-disableinlining")
    debug_print("disabled inlining", self.get_location_str(greenkey))
    debug_stop("jit-disableinlining")
def dump(self, memo):
    """Log the exported state: next-iteration args, then the short boxes."""
    if not have_debug_prints():
        return
    debug_start("jit-log-exported-state")
    arg_reprs = [arg.repr_short(memo) for arg in self.next_iteration_args]
    debug_print("[" + ", ".join(arg_reprs) + "]")
    for sbox in self.short_boxes:
        debug_print(" " + sbox.repr(memo))
    debug_stop("jit-log-exported-state")
def load_linklet_from_fasl(file_name, set_version=False):
    """Load (and if necessary compile) a linklet from a fasl file.

    If set_version is True, the fasl is expected to be a pair of
    (version . linklet) s-expressions; the version is applied globally.
    Returns (linklet, version_sexp).
    """
    from pycket.fasl import Fasl
    from pycket.env import w_version
    from pycket.util import console_log
    from pycket.ast_vs_sexp import deserialize_loop
    debug_start("loading-linklet")
    debug_print("Loading linklet from fasl -- %s" % file_name)
    sexp = Fasl().to_sexp_from_file(file_name)
    version_sexp, linklet_sexp = W_String.make(""), None
    if set_version:
        # car is the version string, cdr the linklet s-expression
        version_sexp = sexp.car()
        linklet_sexp = sexp.cdr()
    else:
        linklet_sexp = sexp
    linklet = None
    if "zo" in file_name:
        # pre-compiled ".zo" files can be deserialized directly
        linklet = deserialize_loop(linklet_sexp)
    else:
        # otherwise fall back to compiling the s-expression at load time
        console_log("Run pycket with --make-linklet-zos to make the compiled zo files for bootstrap linklets", 1)
        compile_linklet = get_primitive("compile-linklet")
        linklet = compile_linklet.call_interpret([linklet_sexp, W_Symbol.make("linkl"), w_false, w_false, w_false])
    if set_version:
        ver = version_sexp.as_str_ascii()
        console_log("Setting the version to %s" % ver)
        w_version.set_version(ver)
    debug_stop("loading-linklet")
    return linklet, version_sexp
def set_nursery_size(self, newsize):
    """Resize the nursery, clamped to [min_nursery_size, space_size // 2],
    recompute the young-object size bounds, and force a full collection.
    """
    debug_start("gc-set-nursery-size")
    if newsize < self.min_nursery_size:
        newsize = self.min_nursery_size
    if newsize > self.space_size // 2:
        newsize = self.space_size // 2
    # Compute the new bounds for how large young objects can be
    # (larger objects are allocated directly old). XXX adjust
    self.nursery_size = newsize
    self.largest_young_fixedsize = self.get_young_fixedsize(newsize)
    self.largest_young_var_basesize = self.get_young_var_basesize(newsize)
    # nursery_scale is the largest shift s.t. min_nursery_size << scale
    # still fits in the new nursery.
    scale = 0
    while (self.min_nursery_size << (scale+1)) <= newsize:
        scale += 1
    self.nursery_scale = scale
    debug_print("nursery_size =", newsize)
    debug_print("largest_young_fixedsize =", self.largest_young_fixedsize)
    debug_print("largest_young_var_basesize =", self.largest_young_var_basesize)
    debug_print("nursery_scale =", scale)
    # we get the following invariant:
    assert self.nursery_size >= (self.min_nursery_size << scale)
    # Force a full collect to remove the current nursery whose size
    # no longer matches the bounds that we just computed. This must
    # be done after changing the bounds, because it might re-create
    # a new nursery (e.g. if it invokes finalizers).
    self.semispace_collect()
    debug_stop("gc-set-nursery-size")
def f(x):
    """Exercise the debug-print API end to end; returns have_debug_prints()."""
    debug_start("mycat")
    debug_print("foo", 2, "bar", x)
    debug_stop("mycat")
    debug_flush()   # does nothing
    debug_offset()  # should not explode at least
    return have_debug_prints()
def get_total_memory_linux(filename):
    """Read total physical memory (in bytes) from a /proc/meminfo-style file.

    Falls back to addressable_size when the file cannot be read or parsed,
    and clamps the result to addressable_size.
    """
    debug_start("gc-hardware")
    result = -1.0   # sentinel: "not found"
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            buf = os.read(fd, 4096)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        if buf.startswith('MemTotal:'):
            start = _skipspace(buf, len('MemTotal:'))
            stop = start
            # scan the run of digits following "MemTotal:"
            while stop < len(buf) and buf[stop].isdigit():
                stop += 1
            if start < stop:
                result = float(buf[start:stop]) * 1024.0   # assume kB
    if result < 0.0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal =", result)
        if result > addressable_size:
            result = addressable_size
    debug_stop("gc-hardware")
    return result
def compiling_a_bridge(self):
    """Record that a new bridge is being compiled for this loop token."""
    self.cpu.tracker.total_compiled_bridges += 1
    self.bridges_count += 1
    debug_start("jit-mem-looptoken-alloc")
    debug_print("allocating Bridge #", self.bridges_count, "of Loop #", self.number)
    debug_stop("jit-mem-looptoken-alloc")
def invalidate(self, descr_repr=None):
    """Invalidate all loops recorded for this quasi-immutable field.

    Every GUARD_NOT_INVALIDATED in the recorded loops (and attached
    bridges) must fail from now on.
    """
    debug_start("jit-invalidate-quasi-immutable")
    # When this is called, all the loops that we record become
    # invalid: all GUARD_NOT_INVALIDATED in these loops (and
    # in attached bridges) must now fail.
    if self.looptokens_wrefs is None:
        # can't happen, but helps compiled tests.
        # Bug fix: close the debug section before the early return;
        # previously this left "jit-invalidate-quasi-immutable" unbalanced.
        debug_stop("jit-invalidate-quasi-immutable")
        return
    wrefs = self.looptokens_wrefs
    self.looptokens_wrefs = []
    invalidated = 0
    for wref in wrefs:
        looptoken = wref()
        if looptoken is not None:
            invalidated += 1
            looptoken.invalidated = True
            self.cpu.invalidate_loop(looptoken)
            # NB. we must call cpu.invalidate_loop() even if
            # looptoken.invalidated was already set to True.
            # It's possible to invalidate several times the
            # same looptoken; see comments in jit.backend.model
            # in invalidate_loop().
            if not we_are_translated():
                self.cpu.stats.invalidated_token_numbers.add(
                    looptoken.number)
    debug_print("fieldname", descr_repr or "<unknown>",
                "invalidated", invalidated)
    debug_stop("jit-invalidate-quasi-immutable")
def get_L2cache_linux2_sparc():
    """Probe sysfs for the smallest per-CPU L2 cache size (SPARC Linux).

    Iterates cpu0, cpu1, ... until os.open/os.read fails; returns the
    minimum size found, or -1 (with a warning) when nothing was readable.
    """
    debug_start("gc-hardware")
    cpu = 0
    L2cache = sys.maxint
    while True:
        try:
            fd = os.open('/sys/devices/system/cpu/cpu' + assert_str0(str(cpu)) + '/l2_cache_size', os.O_RDONLY, 0644)
            try:
                line = os.read(fd, 4096)
            finally:
                os.close(fd)
            # strip the trailing newline before parsing
            end = len(line) - 1
            assert end > 0
            number = int(line[:end])
        except OSError:
            # no more CPUs (or unreadable entry): stop scanning
            break
        if number < L2cache:
            L2cache = number
        cpu += 1
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(lltype.Void,
            "Warning: cannot find your CPU L2 cache size in "
            "/sys/devices/system/cpu/cpuX/l2_cache_size")
        return -1
def disable_noninlinable_function(self, greenkey):
    """Flag the cell at *greenkey* so the JIT stops tracing from there."""
    target_cell = self.jit_cell_at_key(greenkey)
    target_cell.dont_trace_here = True
    debug_start("jit-disableinlining")
    debug_print("disabled inlining", self.get_location_str(greenkey))
    debug_stop("jit-disableinlining")
def detect_arch_version(filename="/proc/cpuinfo"): fd = os.open(filename, os.O_RDONLY, 0644) n = 0 debug_start("jit-backend-arch") try: buf = os.read(fd, 2048) if not buf: n = 6 # we assume ARMv6 as base case debug_print("Could not detect ARM architecture " "version, assuming", "ARMv%d" % n) finally: os.close(fd) # "Processor : ARMv%d-compatible processor rev 7 (v6l)" i = buf.find('ARMv') if i == -1: n = 6 debug_print("Could not detect architecture version, " "falling back to", "ARMv%d" % n) else: n = int(buf[i + 4]) if n < 6: raise ValueError("Unsupported ARM architecture version") debug_print("Detected", "ARMv%d" % n) if n > 7: n = 7 debug_print("Architecture version not explicitly supported, " "falling back to", "ARMv%d" % n) debug_stop("jit-backend-arch") return n
def get_L2cache_linux2_sparc():
    """Return the smallest per-CPU L2 cache size from sysfs (SPARC Linux),
    or -1 with a warning when no entry could be read."""
    debug_start("gc-hardware")
    cpu = 0
    L2cache = sys.maxint
    while True:
        try:
            fd = os.open(
                '/sys/devices/system/cpu/cpu' + assert_str0(str(cpu)) + '/l2_cache_size',
                os.O_RDONLY, 0644)
            try:
                line = os.read(fd, 4096)
            finally:
                os.close(fd)
            # drop the trailing newline, then parse the decimal value
            end = len(line) - 1
            assert end > 0
            number = int(line[:end])
        except OSError:
            # ran past the last CPU (or entry unreadable): stop
            break
        if number < L2cache:
            L2cache = number
        cpu += 1
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(
            lltype.Void,
            "Warning: cannot find your CPU L2 cache size in "
            "/sys/devices/system/cpu/cpuX/l2_cache_size")
        return -1
def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type,
                         orig_inpargs, memo):
    """Hand a finished loop to the CPU backend for assembling.

    Patches virtualizable loads if needed, numbers the loop, fires the
    before/after-compile hooks, and logs the result.
    """
    forget_optimization_info(loop.operations)
    forget_optimization_info(loop.inputargs)
    vinfo = jitdriver_sd.virtualizable_info
    if vinfo is not None:
        # make the loop reload the virtualizable's fields on entry
        vable = orig_inpargs[jitdriver_sd.index_of_virtualizable].getref_base()
        patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd, vable)
    original_jitcell_token = loop.original_jitcell_token
    globaldata = metainterp_sd.globaldata
    # assign the next global loop number
    original_jitcell_token.number = n = globaldata.loopnumbering
    globaldata.loopnumbering += 1
    if not we_are_translated():
        show_procedures(metainterp_sd, loop)
        loop.check_consistency()
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                  original_jitcell_token, loop.operations,
                                  type, greenkey)
        hooks.before_compile(debug_info)
    else:
        debug_info = None
        hooks = None
    operations = get_deep_immutable_oplist(loop.operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
        unique_id = jitdriver_sd.warmstate.get_unique_id(greenkey)
        asminfo = do_compile_loop(jitdriver_sd.index, unique_id,
                                  metainterp_sd, loop.inputargs, operations,
                                  original_jitcell_token, name=loopname,
                                  log=have_debug_prints(), memo=memo)
    finally:
        # always close the debug section, even if compilation raised
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile(debug_info)
    metainterp_sd.stats.add_new_loop(loop)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new " + type)
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n,
                                      type, ops_offset, name=loopname)
    #
    if metainterp_sd.warmrunnerdesc is not None:    # for tests
        metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token)
def get_L2cache_linux2_cpuinfo(filename="/proc/cpuinfo", label='cache size'):
    """Parse /proc/cpuinfo for the smallest per-CPU L2 cache size (bytes).

    Returns -1 (with a warning) if no usable "cache size" line was found.
    """
    debug_start("gc-hardware")
    L2cache = sys.maxint
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            # read the whole pseudo-file in 4K chunks
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        while True:
            start = _findend(data, '\n' + label, linepos)
            if start < 0:
                break    # done
            linepos = _findend(data, '\n', start)
            if linepos < 0:
                break    # no end-of-line??
            # *** data[start:linepos] == " : 2048 KB\n"
            start = _skipspace(data, start)
            if data[start] != ':':
                continue
            # *** data[start:linepos] == ": 2048 KB\n"
            start = _skipspace(data, start + 1)
            # *** data[start:linepos] == "2048 KB\n"
            end = start
            while '0' <= data[end] <= '9':
                end += 1
            # *** data[start:end] == "2048"
            if start == end:
                continue
            number = int(data[start:end])
            # *** data[end:linepos] == " KB\n"
            end = _skipspace(data, end)
            if data[end] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = number * 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(
            lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
def setup_once(self):
    """One-time assembler initialization: query the GC descriptor, then
    pre-build all shared slow-path stubs (failure recovery, write barriers,
    mallocs, cond_call, stack check, GIL release)."""
    # the address of the function called by 'new'
    gc_ll_descr = self.cpu.gc_ll_descr
    gc_ll_descr.initialize()
    if hasattr(gc_ll_descr, 'minimal_size_in_nursery'):
        self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery
    else:
        self.gc_minimal_size_in_nursery = 0
    if hasattr(gc_ll_descr, 'gcheaderbuilder'):
        self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header
    else:
        self.gc_size_of_header = WORD  # for tests
    self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn)
    self.memset_addr = self.cpu.cast_ptr_to_int(memset_fn)
    # failure-recovery and write-barrier stubs, non-float variants first
    self._build_failure_recovery(False, withfloats=False)
    self._build_failure_recovery(True, withfloats=False)
    self._build_wb_slowpath(False)
    self._build_wb_slowpath(True)
    self._build_wb_slowpath(False, for_frame=True)  # only one of those
    self.build_frame_realloc_slowpath()
    if self.cpu.supports_floats:
        self._build_failure_recovery(False, withfloats=True)
        self._build_failure_recovery(True, withfloats=True)
        self._build_wb_slowpath(False, withfloats=True)
        self._build_wb_slowpath(True, withfloats=True)
    self._build_propagate_exception_path()
    if gc_ll_descr.get_malloc_slowpath_addr is not None:
        # generate few slowpaths for various cases
        self.malloc_slowpath = self._build_malloc_slowpath(kind='fixed')
        self.malloc_slowpath_varsize = self._build_malloc_slowpath(
            kind='var')
    if hasattr(gc_ll_descr, 'malloc_str'):
        self.malloc_slowpath_str = self._build_malloc_slowpath(kind='str')
    else:
        self.malloc_slowpath_str = None
    if hasattr(gc_ll_descr, 'malloc_unicode'):
        self.malloc_slowpath_unicode = self._build_malloc_slowpath(
            kind='unicode')
    else:
        self.malloc_slowpath_unicode = None
    # indexed by (for_frame, and_float_regs) flags
    self.cond_call_slowpath = [self._build_cond_call_slowpath(False, False),
                               self._build_cond_call_slowpath(False, True),
                               self._build_cond_call_slowpath(True, False),
                               self._build_cond_call_slowpath(True, True)]
    self._build_stack_check_slowpath()
    self._build_release_gil(gc_ll_descr.gcrootmap)
    if not self._debug:
        # if self._debug is already set it means that someone called
        # set_debug by hand before initializing the assembler. Leave it
        # as it is
        debug_start('jit-backend-counts')
        self.set_debug(have_debug_prints())
        debug_stop('jit-backend-counts')
    # when finishing, we only have one value at [0], the rest dies
    self.gcmap_for_finish = lltype.malloc(jitframe.GCMAP, 1, flavor='raw',
                                          track_allocation=False)
    self.gcmap_for_finish[0] = r_uint(1)
def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs,
                           operations, original_loop_token):
    """Hand a finished bridge to the CPU backend for assembling, firing the
    before/after-compile-bridge hooks and logging the result."""
    if not we_are_translated():
        show_procedures(metainterp_sd)
        seen = dict.fromkeys(inputargs)
        TreeLoop.check_consistency_of_branch(operations, seen)
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        debug_info = JitDebugInfo(
            jitdriver_sd, metainterp_sd.logger_ops, original_loop_token,
            operations, "bridge", fail_descr=faildescr
        )
        hooks.before_compile_bridge(debug_info)
    else:
        hooks = None
        debug_info = None
    operations = get_deep_immutable_oplist(operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    try:
        asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs,
                                    operations, original_loop_token)
    finally:
        # always close the debug section, even if compilation raised
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile_bridge(debug_info)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new bridge")
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_bridge(inputargs, operations, None,
                                        faildescr, ops_offset)
def _dump(self, addr, logname, backend=None):
    """Hex-dump the machine code at *addr* into the debug log *logname*."""
    debug_start(logname)
    if have_debug_prints():
        if backend is not None:
            debug_print('BACKEND', backend)
        from rpython.jit.backend.hlinfo import highleveljitinfo
        executable = highleveljitinfo.sys_executable
        if executable:
            debug_print('SYS_EXECUTABLE', executable)
        else:
            debug_print('SYS_EXECUTABLE', '??')
        hexdigits = '0123456789ABCDEF'
        raw = rffi.cast(rffi.CCHARP, addr)
        pieces = []
        for idx in range(self.get_relative_pos()):
            byte = ord(raw[idx])
            pieces.append(hexdigits[byte >> 4])
            pieces.append(hexdigits[byte & 15])
        debug_print('CODE_DUMP', '@%x' % addr,
                    '+0 ',  # backwards compatibility
                    ''.join(pieces))
    debug_stop(logname)
def come_up(basename):
    """Bring up previously marshalled Tags, shapes and transformations
    from a '.docked' file: un-marshal, slurp, and replace the current Tags.
    """
    from theseus.shape import CompoundShape
    debug_start("theseus-come-up")
    try:
        # Bug fix: the early returns below used to skip debug_stop(),
        # leaving the "theseus-come-up" debug section unbalanced; the
        # try/finally guarantees it is closed on every exit path.
        path = basename + '.docked'
        if not os.path.exists(path):
            return
        try:
            f = open_file_as_stream(path, buffering=0)
        except OSError as e:
            os.write(2, "Error(come_up)%s -- %s\n" % (os.strerror(e.errno), path))
            return
        try:
            res = unmarshaller(f.readall())
        finally:
            f.close()
        # replace the current shapes/tags wholesale with the loaded ones
        del CompoundShape._shapes[:]
        W_Tag.tags.clear()
        new_tags = slurp_tags(res)
        for key, value in new_tags.items():
            W_Tag.tags[key] = value
    finally:
        debug_stop("theseus-come-up")
def get_L2cache_linux2_cpuinfo_s390x(filename="/proc/cpuinfo", label='cache3'):
    """Parse s390x /proc/cpuinfo "cache3" lines for the smallest cache size
    (bytes); returns -1 with a warning if none was found."""
    debug_start("gc-hardware")
    L2cache = sys.maxint
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            # read the whole pseudo-file in 4K chunks
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        while True:
            start = _findend(data, '\n' + label, linepos)
            if start < 0:
                break    # done
            linepos = _findend(data, '\n', start)
            if linepos < 0:
                break    # no end-of-line??
            # *** data[start:linepos] == " : level=2 type=Instruction scope=Private size=2048K ..."
            start = _skipspace(data, start)
            if data[start] != ':':
                continue
            # *** data[start:linepos] == ": level=2 type=Instruction scope=Private size=2048K ..."
            start = _skipspace(data, start + 1)
            # *** data[start:linepos] == "level=2 type=Instruction scope=Private size=2048K ..."
            # NOTE(review): fixed offset of 44 chars to reach the digits of
            # "size=" -- assumes the exact field layout above; verify against
            # the kernel's s390x /proc/cpuinfo format.
            start += 44
            end = start
            while '0' <= data[end] <= '9':
                end += 1
            # *** data[start:end] == "2048"
            if start == end:
                continue
            number = int(data[start:end])
            # *** data[end:linepos] == " KB\n"
            end = _skipspace(data, end)
            if data[end] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = number * 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
def _instantiate_linklet(file_name_for_log, linkl):
    """Instantiate *linkl* via the 'instantiate-linklet' primitive,
    logging progress under 'instantiating-linklet'."""
    debug_start("instantiating-linklet")
    debug_print("Instantiating : %s" % file_name_for_log)
    instantiator = get_primitive("instantiate-linklet")
    instance = instantiator.call_interpret([linkl, w_null, w_false, w_false])
    debug_print("DONE Instantiating %s ...." % file_name_for_log)
    debug_stop("instantiating-linklet")
    return instance
def log_short_preamble(self, inputargs, operations, memo=None):
    """Log the short preamble under 'jit-log-short-preamble' and return
    the logops used."""
    debug_start("jit-log-short-preamble")
    result = self._log_operations(inputargs, operations, ops_offset=None,
                                  memo=memo)
    debug_stop("jit-log-short-preamble")
    return result
def debug_print(self, logops):
    # Debugging helper, intentionally disabled (if 0); prints each short
    # box with the repr of its producing op, or 'None'.
    if 0:
        debug_start('jit-short-boxes')
        for box, op in self.short_boxes.items():
            producer = logops.repr_of_resop(op) if op else 'None'
            debug_print(logops.repr_of_arg(box) + ': ' + producer)
        debug_stop('jit-short-boxes')
def log_abort_loop(self, trace, memo=None):
    """Log an aborted trace under 'jit-abort-log'.

    Returns the logops, or None when debug prints are disabled.
    """
    debug_start("jit-abort-log")
    if not have_debug_prints():
        # Bug fix: close the section before the early return; previously
        # the debug_start above was left unbalanced, corrupting the
        # nesting of debug sections.  (Matches the corrected variant of
        # this method elsewhere in the codebase.)
        debug_stop("jit-abort-log")
        return
    inputargs, operations = self._unpack_trace(trace)
    logops = self._log_operations(inputargs, operations, ops_offset=None,
                                  memo=memo)
    debug_stop("jit-abort-log")
    return logops
def _invalid_read(self, message, offset, length, descr):
    """Log the details of an invalid raw-buffer read, dump the buffer
    state, and raise InvalidRawRead."""
    debug_start('jit-log-rawbuffer')
    for line in ['Invalid read: %s' % message,
                 " offset: %d" % offset,
                 " length: %d" % length,
                 " descr: %s" % self._repr_of_descr(descr)]:
        debug_print(line)
    self._dump_to_log()
    debug_stop('jit-log-rawbuffer')
    raise InvalidRawRead
def log_loop_from_trace(self, trace, memo):
    """Log an unoptimized trace under 'jit-log-noopt'; no-op when debug
    prints are disabled."""
    if not have_debug_prints():
        return
    inputargs, ops = self._unpack_trace(trace)
    debug_start("jit-log-noopt")
    debug_print("# Traced loop or bridge with", len(ops), "ops")
    result = self._log_operations(inputargs, ops, None, memo)
    debug_stop("jit-log-noopt")
    return result
def _check_rawsize_alloced(self, size_estimate):
    """Account *size_estimate* raw-allocated bytes; once the running
    trigger drops below zero, force a full collection."""
    self.large_objects_collect_trigger -= size_estimate
    if self.large_objects_collect_trigger >= 0:
        return
    debug_start("gc-rawsize-collect")
    allocated = self._initial_trigger - self.large_objects_collect_trigger
    debug_print("allocated", allocated, "bytes, triggering full collection")
    self.semispace_collect()
    debug_stop("gc-rawsize-collect")
def debug_print(self, logops):
    # Dead debugging code kept behind "if 0" on purpose: dumps each short
    # box together with the op that produces it (or "None").
    if 0:
        debug_start("jit-short-boxes")
        for box, op in self.short_boxes.items():
            suffix = logops.repr_of_resop(op) if op else "None"
            debug_print(logops.repr_of_arg(box) + ": " + suffix)
        debug_stop("jit-short-boxes")
def _check_rawsize_alloced(self, size_estimate):
    """Track raw-malloced bytes and kick off a major collection when the
    running trigger goes negative."""
    self.large_objects_collect_trigger -= size_estimate
    if self.large_objects_collect_trigger < 0:
        debug_start("gc-rawsize-collect")
        total = self._initial_trigger - self.large_objects_collect_trigger
        debug_print("allocated", total,
                    "bytes, triggering full collection")
        self.semispace_collect()
        debug_stop("gc-rawsize-collect")
def entry_point(argv):
    """Test entry point: check that debug sections survive fork().

    The child forks again, so three processes in total write into the
    same "foo" debug section.
    """
    debug_start("foo")
    debug_print("test line")
    childpid = os.fork()
    debug_print("childpid =", childpid)
    if childpid == 0:
        # we are in the child: fork once more
        childpid2 = os.fork()   # double-fork
        debug_print("childpid2 =", childpid2)
    debug_stop("foo")
    return 0
def _invalid_write(self, message, offset, length, descr, value):
    """Log the details of an invalid raw-buffer write, dump the buffer
    state, and raise InvalidRawWrite."""
    debug_start('jit-log-rawbuffer')
    for line in ['Invalid write: %s' % message,
                 " offset: %d" % offset,
                 " length: %d" % length,
                 " descr: %s" % self._repr_of_descr(descr),
                 " value: %s" % self._repr_of_value(value)]:
        debug_print(line)
    self._dump_to_log()
    debug_stop('jit-log-rawbuffer')
    raise InvalidRawWrite
def log_abort_loop(self, trace, memo=None):
    """Log an aborted trace under 'jit-abort-log'; the section is closed
    on every exit path."""
    debug_start("jit-abort-log")
    if not have_debug_prints():
        debug_stop("jit-abort-log")
        return
    inputargs, operations = self._unpack_trace(trace)
    result = self._log_operations(inputargs, operations, ops_offset=None,
                                  memo=memo)
    debug_stop("jit-abort-log")
    return result
def get_total_memory_darwin(result):
    """Sanitize a sysctl-derived total-memory value: fall back to
    addressable_size on failure, clamp to it otherwise."""
    debug_start("gc-hardware")
    if result <= 0:
        debug_print("get_total_memory() failed")
        result = addressable_size
    else:
        debug_print("memtotal = ", result)
        result = min(result, addressable_size)
    debug_stop("gc-hardware")
    return result
def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs,
                           operations, original_loop_token, memo):
    """Hand a finished bridge to the CPU backend for assembling.

    Fires the (optionally enabled) compile-bridge hooks, logs the result,
    and returns the backend's asminfo.
    """
    forget_optimization_info(operations)
    forget_optimization_info(inputargs)
    if not we_are_translated():
        show_procedures(metainterp_sd)
        seen = dict.fromkeys(inputargs)
        TreeLoop.check_consistency_of_branch(operations, seen)
    debug_info = None
    hooks = None
    if metainterp_sd.warmrunnerdesc is not None:
        hooks = metainterp_sd.warmrunnerdesc.hooks
        if hooks.are_hooks_enabled():
            debug_info = JitDebugInfo(jitdriver_sd, metainterp_sd.logger_ops,
                                      original_loop_token, operations, 'bridge',
                                      fail_descr=faildescr)
            hooks.before_compile_bridge(debug_info)
        else:
            # hooks exist but are disabled: skip the after-hook below too
            hooks = None
    operations = get_deep_immutable_oplist(operations)
    metainterp_sd.profiler.start_backend()
    debug_start("jit-backend")
    log = have_debug_prints() or jl.jitlog_enabled()
    try:
        asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs,
                                    operations, original_loop_token, log,
                                    memo)
    finally:
        # always close the debug section, even if compilation raised
        debug_stop("jit-backend")
    metainterp_sd.profiler.end_backend()
    if hooks is not None:
        debug_info.asminfo = asminfo
        hooks.after_compile_bridge(debug_info)
    if not we_are_translated():
        metainterp_sd.stats.compiled()
    metainterp_sd.log("compiled new bridge")
    #
    if asminfo is not None:
        ops_offset = asminfo.ops_offset
    else:
        ops_offset = None
    metainterp_sd.logger_ops.log_bridge(inputargs, operations, None,
                                        faildescr, ops_offset, memo=memo)
    #
    #if metainterp_sd.warmrunnerdesc is not None:    # for tests
    #    metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(
    #        original_loop_token)
    return asminfo
def alloc_hinted(hintp, map_size, hugetlb=False):
    """mmap *map_size* bytes of anonymous RWX memory near *hintp*,
    optionally backed by huge pages."""
    debug_start("jit-alloc")
    debug_print("Map size: %d" % map_size)
    debug_stop("jit-alloc")
    flags = MAP_PRIVATE | MAP_ANONYMOUS
    if hugetlb:
        flags |= MAP_HUGETLB
    prot = PROT_EXEC | PROT_READ | PROT_WRITE
    if we_are_translated():
        # keep the translator from constant-folding flags/prot away
        flags = NonConstant(flags)
        prot = NonConstant(prot)
    return c_mmap_safe(hintp, map_size, prot, flags, -1, 0)
def __init__(self, cpu, number):
    """Create loop token *number* for *cpu*, bumping the global counter."""
    cpu.tracker.total_compiled_loops += 1
    self.cpu = cpu
    self.number = number
    # bridges compiled out of this loop so far
    self.bridges_count = 0
    self.invalidate_positions = []
    # a list of weakrefs to looptokens that has been redirected to
    # this one
    self.looptokens_redirected_to = []
    debug_start("jit-mem-looptoken-alloc")
    debug_print("allocating Loop #", self.number)
    debug_stop("jit-mem-looptoken-alloc")
def finish_once(self):
    """At shutdown, dump every loop/bridge/entry run counter into the
    'jit-backend-counts' debug section (only in debug mode)."""
    if not self._debug:
        return
    debug_start('jit-backend-counts')
    for counter in self.loop_run_counters:
        if counter.type == 'l':
            label = 'TargetToken(%d)' % counter.number
        elif counter.type == 'b':
            label = 'bridge ' + str(counter.number)
        else:
            label = 'entry ' + str(counter.number)
        debug_print(label + ':' + str(counter.i))
    debug_stop('jit-backend-counts')
def get_L2cache_linux2_cpuinfo_s390x(filename="/proc/cpuinfo", label='cache2'):
    """Parse s390x /proc/cpuinfo "cache2" lines (via their "size=" field)
    for the smallest cache size in bytes; returns -1 with a warning when
    none was found."""
    debug_start("gc-hardware")
    L2cache = sys.maxint
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            # read the whole pseudo-file in 4K chunks
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        while True:
            start = _findend(data, '\n' + label, linepos)
            if start < 0:
                break    # done
            start = _findend(data, 'size=', start)
            if start < 0:
                break
            # value runs up to (but not including) the next space
            end = _findend(data, ' ', start) - 1
            if end < 0:
                break
            linepos = end
            size = data[start:end]
            last_char = len(size) - 1
            assert 0 <= last_char < len(size)
            if size[last_char] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = int(size[:last_char]) * 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(
            lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
def done(self):
    """Finish recording: drop helper dicts and log trace statistics."""
    from rpython.rlib.debug import debug_start, debug_stop, debug_print
    # the helper dicts are only needed while recording; free them now
    self._bigints_dict = {}
    self._refs_dict = llhelper.new_ref_dict_3()
    debug_start("jit-trace-done")
    debug_print("trace length: " + str(self._pos))
    debug_print(" total snapshots: " + str(self._total_snapshots))
    debug_print(" bigint consts: " + str(self._consts_bigint) + " " + str(len(self._bigints)))
    debug_print(" float consts: " + str(self._consts_float) + " " + str(len(self._floats)))
    debug_print(" ref consts: " + str(self._consts_ptr) + " " + str(len(self._refs)))
    debug_print(" descrs: " + str(len(self._descrs)))
    debug_stop("jit-trace-done")
    return 0  # completely different than TraceIter.done, but we have to
def log_bridge(self, inputargs, operations, extra=None, descr=None,
               ops_offset=None, memo=None):
    """Log a compiled bridge under the debug section matching *extra*
    (noopt / rewritten / compiling / optimized), and return the logops.

    Consistency fix: the guard's unique id is now wrapped in r_uint in
    every branch, as the final branch already did, so a negative id
    renders as an unsigned hex value instead of "-0x...".
    """
    if extra == "noopt":
        debug_start("jit-log-noopt-bridge")
        debug_print("# bridge out of Guard",
                    "0x%x" % r_uint(compute_unique_id(descr)),
                    "with", len(operations), "ops")
        logops = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-noopt-bridge")
    elif extra == "rewritten":
        debug_start("jit-log-rewritten-bridge")
        debug_print("# bridge out of Guard",
                    "0x%x" % r_uint(compute_unique_id(descr)),
                    "with", len(operations), "ops")
        logops = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-rewritten-bridge")
    elif extra == "compiling":
        debug_start("jit-log-compiling-bridge")
        logops = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-compiling-bridge")
    else:
        debug_start("jit-log-opt-bridge")
        debug_print("# bridge out of Guard",
                    "0x%x" % r_uint(compute_unique_id(descr)),
                    "with", len(operations), "ops")
        logops = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-opt-bridge")
    return logops
def get_L2cache_linux2_cpuinfo_s390x(filename="/proc/cpuinfo", label='cache2'):
    """Scan s390x /proc/cpuinfo "cache2" lines, extracting the "size="
    field, and return the smallest cache size in bytes (or -1 with a
    warning if none parses)."""
    debug_start("gc-hardware")
    L2cache = sys.maxint
    try:
        fd = os.open(filename, os.O_RDONLY, 0644)
        try:
            # slurp the pseudo-file in 4K chunks
            data = []
            while True:
                buf = os.read(fd, 4096)
                if not buf:
                    break
                data.append(buf)
        finally:
            os.close(fd)
    except OSError:
        pass
    else:
        data = ''.join(data)
        linepos = 0
        while True:
            start = _findend(data, '\n' + label, linepos)
            if start < 0:
                break    # done
            start = _findend(data, 'size=', start)
            if start < 0:
                break
            # the size value ends right before the next space
            end = _findend(data, ' ', start) - 1
            if end < 0:
                break
            linepos = end
            size = data[start:end]
            last_char = len(size)-1
            assert 0 <= last_char < len(size)
            if size[last_char] not in ('K', 'k'):    # assume kilobytes for now
                continue
            number = int(size[:last_char])* 1024
            # for now we look for the smallest of the L2 caches of the CPUs
            if number < L2cache:
                L2cache = number
    debug_print("L2cache =", L2cache)
    debug_stop("gc-hardware")
    if L2cache < sys.maxint:
        return L2cache
    else:
        # Print a top-level warning even in non-debug builds
        llop.debug_print(lltype.Void,
            "Warning: cannot find your CPU L2 cache size in /proc/cpuinfo")
        return -1
def log_loop(self, inputargs, operations, number=0, type=None,
             ops_offset=None, name='', memo=None):
    """Log a compiled loop under the debug section matching *type*
    (None=noopt, "rewritten", number==-2 means "compiling", otherwise
    optimized); returns the logops used."""
    if type is None:
        debug_start("jit-log-noopt-loop")
        debug_print("# Loop", number, '(%s)' % name, ":", "noopt",
                    "with", len(operations), "ops")
        result = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-noopt-loop")
    elif type == "rewritten":
        debug_start("jit-log-rewritten-loop")
        debug_print("# Loop", number, '(%s)' % name, ":", type,
                    "with", len(operations), "ops")
        result = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-rewritten-loop")
    elif number == -2:
        debug_start("jit-log-compiling-loop")
        result = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-compiling-loop")
    else:
        debug_start("jit-log-opt-loop")
        debug_print("# Loop", number, '(%s)' % name, ":", type,
                    "with", len(operations), "ops")
        result = self._log_operations(inputargs, operations, ops_offset,
                                      memo)
        debug_stop("jit-log-opt-loop")
    return result