def print_stack(self, limit=None, file=None):
    """Print the stack or traceback for this task's coroutine.

    Output resembles that of the traceback module, for the frames
    returned by get_stack().  ``limit`` is forwarded to get_stack();
    ``file`` is the destination stream (sys.stderr when None).
    """
    frames = []
    seen_files = set()
    for frame in self.get_stack(limit=limit):
        code = frame.f_code
        fname, lno = code.co_filename, frame.f_lineno
        if fname not in seen_files:
            seen_files.add(fname)
            linecache.checkcache(fname)
        src = linecache.getline(fname, lno, frame.f_globals)
        frames.append((fname, lno, code.co_name, src))
    exc = self._exception
    # Pick the header first, then emit everything in one pass.
    if not frames:
        header = 'No stack for %r' % self
    elif exc is not None:
        header = 'Traceback for %r (most recent call last):' % self
    else:
        header = 'Stack for %r (most recent call last):' % self
    print(header, file=file)
    traceback.print_list(frames, file=file)
    if exc is not None:
        for text in traceback.format_exception_only(exc.__class__, exc):
            print(text, file=file, end='')
def __call__(self, *args):
    """Execute the compiled execution graph on *args* and return the outputs.

    NOTE(review): Python 2 code (``print >> stream`` syntax); left as-is.
    """
    # Validate arity, then coerce every argument to its declared input type.
    assert len(args) == len(self.input_types), "Wrong number of inputs provided"
    self.args = tuple(core.as_valid_array(arg, intype) for (arg, intype) in zip(args, self.input_types))
    # Fire each instruction in order, optionally timing it for the profiler.
    for instr in self.eg.instrs:
        if profiler.on:
            tstart = time.time()
        try:
            instr.fire(self)
        except Exception as e:
            traceback.print_exc()
            if isinstance(instr, (ReturnByRef,ReturnByVal)):
                if core.get_config()["debug"]:
                    # Debug builds record the creation-time stack of the
                    # offending node; replay it so the user can find the
                    # construction site of the failing expression.
                    assert "stack" in instr.node_props
                    utils.colorprint(utils.Color.MAGENTA, "HERE'S THE STACK WHEN THE OFFENDING NODE WAS CREATED\n",o=sys.stderr)
                    print>>sys.stderr, ">>>>>>>>>>>>>>>>>>>>>>>>>>"
                    traceback.print_list(instr.node_props["stack"])
                    print>>sys.stderr, "<<<<<<<<<<<<<<<<<<<<<<<<<<"
                    raise e
                else:
                    utils.error("Didn't save the stack so I can't give you a nice traceback :(. Try running with CGT_FLAGS=debug=True")
                    raise e
            else:
                utils.error("Oy vey, an exception occurred in a %s Instruction. I don't know how to help you debug this one right now :(."%type(instr))
                raise e
        if profiler.on:
            profiler.update(instr, time.time()-tstart)
    # Read outputs from their storage locations; optionally copy so callers
    # cannot mutate internal buffers.
    outputs = [self.get(loc) for loc in self.output_locs]
    if self.copy_outputs:
        outputs = map(_copy, outputs)
    return outputs
def _task_print_stack(task, limit, file):
    """Print the stack (or traceback, if the task failed) of *task*.

    Frames come from ``task.get_stack(limit=limit)``; output goes to *file*.
    """
    frames = []
    seen = set()
    for frame in task.get_stack(limit=limit):
        code = frame.f_code
        fname = code.co_filename
        lno = frame.f_lineno
        if fname not in seen:
            seen.add(fname)
            linecache.checkcache(fname)
        src = linecache.getline(fname, lno, frame.f_globals)
        frames.append((fname, lno, code.co_name, src))
    exc = task._exception
    if not frames:
        print('No stack for {!r}'.format(task), file=file)
    elif exc is not None:
        print('Traceback for {!r} (most recent call last):'.format(task), file=file)
    else:
        print('Stack for {!r} (most recent call last):'.format(task), file=file)
    traceback.print_list(frames, file=file)
    if exc is not None:
        for text in traceback.format_exception_only(exc.__class__, exc):
            print(text, file=file, end='')
def format_thread(self):
    """Format a one-thread report (request, stack, SQL query) as a string.

    Compares against the previous snapshot kept in ``self._last`` and
    collapses unchanged sections to "Same." to keep the report short.
    """
    subject = SUBJECT_FORMAT % (self.thread_id, datetime.fromtimestamp(self.start), time.time() - self.start)
    frame = sys._current_frames()[self.thread_id]
    try:
        request = self.format_request(self.extract_request(frame))
        stack = traceback.extract_stack(frame)
        query = self.extract_sql(frame)
    finally:
        # Drop the frame reference promptly to avoid keeping the other
        # thread's locals alive via a reference cycle.
        del frame
    body = request, stack, query
    if self._last == body:
        # Nothing changed since the last report.
        return subject + "Same.\n"
    result = StringIO()
    result.write(subject)
    if request != self._last[0]:
        result.write(request)
    if stack != self._last[1]:
        result.write("Traceback:\n")
        traceback.print_list(stack, result)
    if query:
        result.write("SQL Query:%s\n" % (
            " Same." if query == self._last[2] else '\n' + query))
    self._last = body
    return result.getvalue()
def print_stack(self, *, limit=None, file=None):
    """Print the stack or traceback for this task's coroutine.

    Mirrors the traceback module's formatting for the frames returned
    by get_stack().  ``limit`` is forwarded to get_stack(); ``file`` is
    the output stream (sys.stderr when None).
    """
    summary = []
    cache_checked = set()
    for frame in self.get_stack(limit=limit):
        code = frame.f_code
        path = code.co_filename
        lineno = frame.f_lineno
        if path not in cache_checked:
            cache_checked.add(path)
            linecache.checkcache(path)
        text = linecache.getline(path, lineno, frame.f_globals)
        summary.append((path, lineno, code.co_name, text))
    exc = self._exception
    if not summary:
        print('No stack for %r' % self, file=file)
    elif exc is not None:
        print('Traceback for %r (most recent call last):' % self, file=file)
    else:
        print('Stack for %r (most recent call last):' % self, file=file)
    traceback.print_list(summary, file=file)
    if exc is not None:
        for text in traceback.format_exception_only(exc.__class__, exc):
            print(text, file=file, end='')
def handler(signum, frame):
    """Signal handler: dump the current stack and frame info, then exit(-1)."""
    stack_summary = traceback.extract_stack(frame, limit=None)
    traceback.print_list(stack_summary, file=None)
    print(signum, frame)
    print(dir(frame))
    sys.exit(-1)
def print_exc(typ, exc, tb):
    # Recursively print an exception chain (__cause__/__context__),
    # oldest exception first, mirroring the interpreter's formatting.
    # NOTE: nested helper — relies on enclosing-scope names `seen`,
    # `efile`, and `cleanup_traceback`.
    seen.add(exc)
    context = exc.__context__
    cause = exc.__cause__
    if cause is not None and cause not in seen:
        # Explicit `raise ... from cause` chain.
        print_exc(type(cause), cause, cause.__traceback__)
        print(
            "\nThe above exception was the direct cause "
            "of the following exception:\n",
            file=efile)
    elif (context is not None and
          not exc.__suppress_context__ and
          context not in seen):
        # Implicit chaining: raised while handling another exception.
        print_exc(type(context), context, context.__traceback__)
        print(
            "\nDuring handling of the above exception, "
            "another exception occurred:\n",
            file=efile)
    if tb:
        tbe = traceback.extract_tb(tb)
        print('Traceback (most recent call last):', file=efile)
        # Hide IDLE-internal frames from user-facing tracebacks.
        exclude = ("run.py", "rpc.py", "threading.py", "queue.py",
                   "debugger_r.py", "bdb.py")
        cleanup_traceback(tbe, exclude)
        traceback.print_list(tbe, file=efile)
    lines = traceback.format_exception_only(typ, exc)
    for line in lines:
        print(line, end='', file=efile)
def _task_print_stack(task, limit, file):
    """Print the stack of *task*, or its traceback if it raised.

    Frames are taken from ``task.get_stack(limit=limit)``; everything is
    written to *file*.
    """
    summary = []
    cache_refreshed = set()
    for frame in task.get_stack(limit=limit):
        code = frame.f_code
        path, lineno = code.co_filename, frame.f_lineno
        if path not in cache_refreshed:
            cache_refreshed.add(path)
            linecache.checkcache(path)
        source = linecache.getline(path, lineno, frame.f_globals)
        summary.append((path, lineno, code.co_name, source))
    exc = task._exception
    if not summary:
        header = 'No stack for %r' % task
    elif exc is not None:
        header = 'Traceback for %r (most recent call last):' % task
    else:
        header = 'Stack for %r (most recent call last):' % task
    print(header, file=file)
    traceback.print_list(summary, file=file)
    if exc is not None:
        for text in traceback.format_exception_only(exc.__class__, exc):
            print(text, file=file, end='')
def ok() -> bool:
    """Demonstrate exception handling: raise and catch a ValueError.

    Prints 111, the caught exception's traceback, and "finally"; always
    returns False.
    """
    try:
        print(111)
        raise ValueError("not int")
    except ValueError as e:
        # BUG FIX: traceback.print_list() requires a list of frame
        # summaries; calling it with no arguments raised a TypeError out
        # of this handler instead of returning False.
        traceback.print_list(traceback.extract_tb(e.__traceback__))
        return False
    finally:
        print("finally")
def print_traceback():
    """
    Print the traceback containing the method that triggered the query.
    Ignore the last 3 entries which would be the __getattribute__,
    warn_on_cold_cache, and the _print_traceback methods in this class.
    """
    # NOTE(review): the docstring says the last 3 entries are ignored, but
    # the slice drops only 2 (after limiting to 7) — confirm intended depth.
    stack = traceback.extract_stack(limit=7)[:-2]
    # TracebackLogger is a module-level file-like sink defined elsewhere.
    traceback.print_list(stack, file=TracebackLogger)
def stacktrace():
    """Return the current call stack, formatted, as a string (Python 2)."""
    buf = cStringIO.StringIO()
    stack = traceback.extract_stack()
    # Drop the two innermost entries — presumably this helper and its
    # immediate wrapper; confirm against callers.
    traceback.print_list(stack[:-2], file=buf)
    stacktrace_string = buf.getvalue()
    buf.close()
    return stacktrace_string
def new_test():
    """Load the 'best' result pickles from run_files/uniqes and print solve rates."""
    analyzer = PssAnalyzer()
    run_dir = os.path.join(os.getcwd(), "run_files", "uniqes")
    analyzer.appent_pattern(run_dir, ".*best.*")
    rates = analyzer.solved_percent_ext()
    print_list(rates)
def handle_unsuccess(ef, warning_level, test_name=None):
    """Report a failed check: one stack frame at HIGH level, colored message at LOW+."""
    if warning_level == WarningLevels.HIGH:
        # Third-from-last frame: the last two entries belong to this file.
        frame_summary = traceback.extract_stack()[-3]
        traceback.print_list([frame_summary])
    if warning_level >= WarningLevels.LOW:
        if test_name is None:
            # Walk two callers up to recover the originating test's name.
            test_name = inspect.getouterframes(inspect.currentframe())[2][3]
        message = "\t{} -> {}\n".format(test_name, ef.args[0])
        print_color(message, Style.RED)
def dump(cls, label):
    """Write a labelled stack dump of the caller to cls.dump_file (or stderr).

    NOTE(review): Python 2 code (`print >>` syntax); left as-is.
    """
    df = cls.dump_file or sys.stderr
    s = StringIO()
    print >> s, "\nDumping thread %s:" % (label, )
    try:
        # Raise/catch solely to obtain a frame object; f_back.f_back walks
        # out of this helper to the caller's caller.
        raise ZeroDivisionError
    except ZeroDivisionError:
        f = sys.exc_info()[2].tb_frame.f_back.f_back
        traceback.print_list(traceback.extract_stack(f, None), s)
    # Single write so the dump is not interleaved with other threads.
    df.write(s.getvalue())
def new_test():
    """Collect the 'best' runs under run_files/uniqes and print their solve percentages."""
    analyzer = PssAnalyzer()
    base = os.path.join(os.getcwd(), "run_files")
    analyzer.appent_pattern(os.path.join(base, "uniqes"), ".*best.*")
    print_list(analyzer.solved_percent_ext())
def dump(cls, label):
    """Append a labelled stack dump of the calling thread to cls.dump_file
    (falling back to stderr).

    NOTE(review): Python 2 code (`print >>` syntax); left as-is.
    """
    df = cls.dump_file or sys.stderr
    s = StringIO()
    print >>s, "\nDumping thread %s:" % (label, )
    try:
        # The raise/except dance is only a portable way to get a frame
        # object; two f_back hops skip this helper's frames.
        raise ZeroDivisionError
    except ZeroDivisionError:
        f = sys.exc_info()[2].tb_frame.f_back.f_back
        traceback.print_list(traceback.extract_stack(f, None), s)
    # Buffer first, then write once, to keep output atomic-ish.
    df.write(s.getvalue())
def die(m, s=''):
    """Emit an error message m (and optionally s) and exit with a return value 1"""
    msgb("MBUILD ERROR", "%s %s\n\n" % (m, s))
    etype, value, tb = sys.exc_info()
    if tb is None:
        # Not inside an exception handler: show where die() was called from
        # (dropping this frame itself).
        call_stack = traceback.extract_stack()[:-1]
        traceback.print_list(call_stack, file=sys.stdout)
    else:
        # An exception is active: print it in full.
        traceback.print_exception(etype, value, tb, file=sys.stdout)
    sys.exit(1)
def test_print_list():
    """print_list() should render each frame as a 'vi +line file # func call' entry."""
    # NOTE(review): the exact internal whitespace of this expected string
    # could not be recovered from the collapsed source — verify against
    # the original file before trusting a failing diff.
    expected_string = u"""
vi +21 traceback/tests.py  # _triple    one()
vi +11 traceback/tests.py  # one        two()
vi +10 traceback/tests.py  # two        h[1]
"""
    out = StringIO()
    print_list(extract_tb(_tb()), file=out)
    eq_(out.getvalue(), expected_string)
def check_node_not_in_scope(self):
    """Verify recorded tensor-less nodes are unused by their graphs.

    Returns False when there is nothing recorded, True when every
    recorded node is absent from its graph's expressions; raises
    ValueError (after printing the creation trace) on the first node
    still referenced.
    """
    if not self.node_without_tensor_info:
        return False
    for node, info in self.node_without_tensor_info.items():
        graph, creation_trace = info[0], info[1]
        for expr in graph._exprs:
            if node in expr.inputs or node in expr.outputs:
                # Show where the offending node was created before failing.
                traceback.print_list(creation_trace)
                raise ValueError(
                    "node({}) not in the graph:\n{}".format(node, graph))
    return True
def handle_keyboard_interrupt(sig_no, frame):
    """SIGINT handler: print the current stack, then run handle_exception
    with further Ctrl-C presses temporarily ignored; raises
    HandlerPassthrough when the handler requests a reload."""
    frames = list(traceback.walk_stack(frame))[::-1]
    traceback.print_list(traceback.StackSummary.extract(frames))
    # Ignore further SIGINTs while the handler itself runs.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        result = handle_exception(frames)
    finally:
        # Always reinstall this handler, even if handle_exception raised.
        signal.signal(signal.SIGINT, handle_keyboard_interrupt)
    if result.need_reload:
        raise HandlerPassthrough(result)
def trunc_traceback(xxx_todo_changeme, source_file):
    """
    Format a traceback where entries before a frame from source_file are
    omitted (unless the last frame is from source_file).
    Return the result as a unicode string.
    """
    (_typ, value, tb) = xxx_todo_changeme
    linecache.checkcache()
    efile = StringIO()
    if py3k:
        # Python 3: follow the full __cause__/__context__ chain.
        values = _iter_chain(value, tb)
    else:
        values = [(value, tb)]
    # The source_file and filename may differ in extension (pyc/py), so we
    # ignore the extension
    source_file = canonical_fn(source_file)
    for value, tb in values:
        if isinstance(value, str):
            # _iter_chain yields plain linking strings between exceptions.
            efile.write(value+'\n')
            continue
        tbe = traceback.extract_tb(tb)
        # This is a work around a really weird IronPython bug.
        while len(tbe)>1 and 'split_to_singles' in tbe[-1][0]:
            tbe.pop()
        if canonical_fn(tbe[-1][0]) != source_file:
            # If the last entry is from this file, don't remove
            # anything. Otherwise, remove lines before the current
            # frame.
            for i in range(len(tbe)-2, -1, -1):
                if canonical_fn(tbe[i][0]) == source_file:
                    tbe = tbe[i+1:]
                    break
        efile.write('Traceback (most recent call last):'+'\n')
        traceback.print_list(tbe, file=efile)
        lines = traceback.format_exception_only(type(value), value)
        for line in lines:
            efile.write(line)
    if not hasattr(efile, 'buflist'):
        # Py3k
        return efile.getvalue()
    else:
        # The following line replaces efile.getvalue(), because if it
        # includes both unicode strings and byte string with non-ascii
        # chars, it fails.
        return ''.join(unicodify(s) for s in efile.buflist)
def test_unsolved():
    """List the rooms never solved by the 'beam' runs (heavy roomset).

    NOTE(review): Python 2 code (print statement); left as-is.
    """
    p = PssAnalyzer()
    folder = os.path.join(os.getcwd(),"run_files")
    folder = os.path.join(folder,"uniqes")
    p.appent_pattern(folder, ".*beam.*")
    #p = p.select("AnytimeBest-d250_with_PowerHeuristic2")
    #p = p.select("AnytimeBeam-w20-.*")
    print "unsolved_rooms"
    unsolved_rooms = p.get_unsolved_rooms(roomset="heavy_roomset")
    print_list(unsolved_rooms)
def test_unsolved():
    """Print rooms left unsolved by the 'beam' runs on the heavy roomset.

    NOTE(review): Python 2 code (print statement); left as-is.
    """
    p = PssAnalyzer()
    folder = os.path.join(os.getcwd(), "run_files")
    folder = os.path.join(folder, "uniqes")
    p.appent_pattern(folder, ".*beam.*")
    #p = p.select("AnytimeBest-d250_with_PowerHeuristic2")
    #p = p.select("AnytimeBeam-w20-.*")
    print "unsolved_rooms"
    unsolved_rooms = p.get_unsolved_rooms(roomset="heavy_roomset")
    print_list(unsolved_rooms)
def t():
    """Report per-room runtimes for the best-performing configuration.

    Returns the full solve-percentage table.
    NOTE(review): Python 2 code (print statement); left as-is.
    """
    p = PssAnalyzer()
    p.load_bstf()
    r = p.solved_percent()
    # Pick the configuration with the highest solve rate.
    name, _ = do_by_key(max, r, 1)
    pss = p.select_first(name, '.*stat.*')
    print pss.roomset.name
    print pss.name
    print pss.solutions
    # Per-room runtimes, sorted by runtime.
    t = p.room_id_with_runtime_table(pss)
    print_list(do_by_key(sorted, t, 1))
    return r
def __init__(self, *args, **kwargs):
    """Build the exception message; when a `variable` kwarg is supplied and
    it carries a creation trace, the formatted backtrace is appended."""
    if kwargs:
        # `variable` is the only keyword accepted.
        # The call to list is needed for Python 3
        assert list(kwargs.keys()) == ["variable"]
        variable = list(kwargs.values())[0]
        tr = getattr(variable.tag, "trace", [])
        if type(tr) is list and len(tr) > 0:
            sio = StringIO()
            print("\nBacktrace when the variable is created:", file=sio)
            for subtr in variable.tag.trace:
                traceback.print_list(subtr, sio)
            args = args + (str(sio.getvalue()),)
    # Joined with newlines so each part prints on its own line.
    Exception.__init__(self, "\n".join(args))
def t():
    """Show runtimes per room for the configuration with the best solve rate.

    Returns the solve-percentage table.
    NOTE(review): Python 2 code (print statement); left as-is.
    """
    p = PssAnalyzer()
    p.load_bstf()
    r = p.solved_percent()
    # Best-scoring configuration by solve percentage.
    name,_ = do_by_key(max,r,1)
    pss = p.select_first(name,'.*stat.*')
    print pss.roomset.name
    print pss.name
    print pss.solutions
    t = p.room_id_with_runtime_table(pss)
    print_list( do_by_key(sorted,t,1) )
    return r
def __init__(self, *args, **kwargs):
    """Compose the exception message from *args*; if a `variable` kwarg is
    present and its tag records a creation trace, append that backtrace."""
    if kwargs:
        # The call to list is needed for Python 3
        assert list(kwargs.keys()) == ["variable"]
        var = list(kwargs.values())[0]
        tr = getattr(var.tag, 'trace', [])
        if type(tr) is list and len(tr) > 0:
            sio = StringIO()
            print("\nBacktrace when the variable is created:", file=sio)
            for subtr in var.tag.trace:
                traceback.print_list(subtr, sio)
            args = args + (str(sio.getvalue()), )
    # Needed to have the new line print correctly
    Exception.__init__(self, '\n'.join(args))
def print_exception():
    """Print the active exception's traceback and message to sys.stderr.

    Also records the exception in sys.last_type / sys.last_value /
    sys.last_traceback, mirroring the interactive interpreter.
    """
    import linecache
    linecache.checkcache()
    efile = sys.stderr
    typ, val, tb = excinfo = sys.exc_info()
    sys.last_type, sys.last_value, sys.last_traceback = excinfo
    tbe = traceback.extract_tb(tb)
    # BUG FIX: the original called print(efile, ...), passing the stream as
    # a *positional* argument — it printed the file object's repr to stdout
    # instead of writing to stderr.  Use the file= keyword.
    print('\nTraceback (most recent call last):', file=efile)
    traceback.print_list(tbe, file=efile)
    lines = traceback.format_exception_only(typ, val)
    for line in lines:
        # Lines from format_exception_only already end in '\n'.
        print(line, end='', file=efile)
def error(key, include_traceback=False):
    """Record the current exception under *key* in the module-level
    `errors` dict, optionally prefixed with its innermost frame, then
    clear the exception state.

    NOTE(review): Python 2 code (sys.exc_clear); left as-is.
    """
    exc_type, exc_value, _ = sys.exc_info()
    msg = StringIO()
    if include_traceback:
        # Innermost frame of the active traceback.
        frame = inspect.trace()[-1]
        filename = frame[1]
        lineno = frame[2]
        funcname = frame[3]
        code = ''.join(frame[4])
        traceback.print_list([(filename, lineno, funcname, code)], file=msg)
    if exc_type:
        msg.write(''.join(traceback.format_exception_only(exc_type, exc_value)))
    errors[key] = msg.getvalue()
    sys.exc_clear()
def test_solution_improvment():
    """Print rooms whose solutions improved across runs, plus a summary count.

    NOTE(review): Python 2 code (print statement); left as-is.
    """
    p = PssAnalyzer()
    folder = os.path.join(os.getcwd(),"run_files")
    folder = os.path.join(folder,"uniqes")
    p.appent_pattern(folder, ".*")
    #p.appent_pattern(folder, ".*limit.*")
    #p = p.select("AnytimeBest-d250_with_PowerHeuristic2")
    #p = p.select(".*", roomset_pattern="heavy_roomset")
    l = p.solution_imp()
    print_list(l)
    print len(l), "from", p.rooms_count()
def print_exception():
    """Print the active exception to stderr, hiding IDLE-internal frames.

    Also records it in sys.last_type / sys.last_value / sys.last_traceback.
    NOTE(review): Python 2 code (`print >>`); left as-is.
    """
    flush_stdout()
    efile = sys.stderr
    typ, val, tb = excinfo = sys.exc_info()
    sys.last_type, sys.last_value, sys.last_traceback = excinfo
    tbe = traceback.extract_tb(tb)
    print>>efile, '\nTraceback (most recent call last):'
    # Frames from these support modules are implementation detail.
    exclude = ("run.py", "rpc.py", "threading.py", "Queue.py",
               "RemoteDebugger.py", "bdb.py")
    cleanup_traceback(tbe, exclude)
    traceback.print_list(tbe, file=efile)
    lines = traceback.format_exception_only(typ, val)
    for line in lines:
        print>>efile, line,
def test_solution_improvment():
    """Report which rooms gained improved solutions and how many, in total.

    NOTE(review): Python 2 code (print statement); left as-is.
    """
    p = PssAnalyzer()
    folder = os.path.join(os.getcwd(), "run_files")
    folder = os.path.join(folder, "uniqes")
    p.appent_pattern(folder, ".*")
    #p.appent_pattern(folder, ".*limit.*")
    #p = p.select("AnytimeBest-d250_with_PowerHeuristic2")
    #p = p.select(".*", roomset_pattern="heavy_roomset")
    l = p.solution_imp()
    print_list(l)
    print len(l), "from", p.rooms_count()
def process_exception(ex, value, tb):
    """Demonstrate the traceback module's printing APIs on one exception.

    Prints the same exception via print_tb, print_exception, and
    print_list (from both extract_tb and extract_stack).
    """
    print("--------------------------------------")
    print(">>> print_tb:\n")
    traceback.print_tb(tb)
    print("--------------------------------------")
    print(">>> print_exception:\n")
    # BUG FIX: a stray trailing `0` after this call (`...)0`) made the
    # statement a SyntaxError; removed.
    traceback.print_exception(ex, value, tb)
    print("--------------------------------------")
    print(">>> print_list(extract_tb):\n")
    stack_summary = traceback.extract_tb(tb)
    print(stack_summary)
    traceback.print_list(stack_summary)
    print("--------------------------------------")
    print(">>> print_list(extract_stack):\n")
    traceback.print_list(traceback.extract_stack())
def get_variable_trace_string(v):
    """Return the creation backtrace stored on v.tag.trace, formatted, or ''."""
    out = StringIO()
    # For backward compatibility with old trace
    trace = getattr(v.tag, 'trace', [])
    if isinstance(trace, list) and trace:
        print(" \nBacktrace when that variable is created:\n", file=out)
        # The isinstance is needed to handle old pickled trace
        if isinstance(trace[0], tuple):
            # Old single-backtrace format: a flat list of frame tuples.
            traceback.print_list(v.tag.trace, out)
        else:
            # Print separate message for each element in the list of
            # batcktraces
            for sub in trace:
                traceback.print_list(sub, out)
    return out.getvalue()
def print_task_stack(task: asyncio.Task,
                     *,
                     file: IO = sys.stderr,
                     limit: int = DEFAULT_MAX_FRAMES,
                     capture_locals: bool = False) -> None:
    """Print the stack trace for an :class:`asyncio.Task`."""
    print(f'Stack for {task!r} (most recent call last):', file=file)
    tb = Traceback.from_task(task, limit=limit)
    frame_gen = walk_tb(cast(TracebackType, tb))
    summary = StackSummary.extract(
        cast(Generator, frame_gen),
        limit=limit,
        capture_locals=capture_locals,
    )
    print_list(summary, file=file)
def print_coro_stack(coro: Coroutine,
                     *,
                     file: IO = sys.stderr,
                     limit: int = DEFAULT_MAX_FRAMES,
                     capture_locals: bool = False) -> None:
    """Print the stack trace for a currently running coroutine."""
    print(f'Stack for {coro!r} (most recent call last):', file=file)
    tb = Traceback.from_coroutine(coro, limit=limit)
    frame_gen = walk_tb(cast(TracebackType, tb))
    summary = StackSummary.extract(
        cast(Generator, frame_gen),
        limit=limit,
        capture_locals=capture_locals,
    )
    print_list(summary, file=file)
def loadFile(self,relFilePath):
    """Read *relFilePath* (relative to self.directory), cache its base64
    contents plus content hash in self.fileBuffer, and return True on
    success, False (with a stack dump) on failure.

    NOTE(review): Python 2 / PyQt code (print statement, QString); left as-is.
    """
    # Drop any stale cached copy first.
    self.unLoadFile(relFilePath)
    f = QFile(self.directory.absolutePath() + '/' + relFilePath)
    if(f.open(QIODevice.ReadOnly)):
        fileContents=QString(f.readAll().toBase64())
        # Hash the base64 text so changes are cheap to detect later.
        self.hashValue.reset()
        self.hashValue.addData(fileContents)
        self.fileBuffer[relFilePath]=fileContents,QString(self.hashValue.result().toHex())
        #print "fileBuffer status: \n",self.fileBuffer
        #print "Alert: Loaded :", self.fileBuffer[fileName]
        return True
    else:
        print "Error: loadFile failed -> couldn't open: ", f.fileName()
        traceback.print_list(traceback.extract_stack())
        return False
def print_exception():
    """Print the active exception to stderr with IDLE-internal frames removed.

    Also records it in sys.last_type / sys.last_value / sys.last_traceback.
    """
    import linecache
    linecache.checkcache()
    flush_stdout()
    efile = sys.stderr
    typ, val, tb = excinfo = sys.exc_info()
    sys.last_type, sys.last_value, sys.last_traceback = excinfo
    entries = traceback.extract_tb(tb)
    print("Traceback (most recent call last):", file=efile)
    # Frames from these support files are implementation detail; hide them.
    exclude = ("run.py", "rpc.py", "threading.py", "queue.py",
               "RemoteDebugger.py", "bdb.py")
    cleanup_traceback(entries, exclude)
    traceback.print_list(entries, file=efile)
    for line in traceback.format_exception_only(typ, val):
        print(line, end="", file=efile)
def print_agen_stack(agen: AsyncGenerator,
                     *,
                     file: IO = sys.stderr,
                     limit: int = DEFAULT_MAX_FRAMES,
                     capture_locals: bool = False) -> None:
    """Print the stack trace for a currently running async generator."""
    print(f'Stack for {agen!r} (most recent call last):', file=file)
    tb = Traceback.from_agen(agen, limit=limit)
    frame_gen = walk_tb(cast(TracebackType, tb))
    summary = StackSummary.extract(
        cast(Generator, frame_gen),
        limit=limit,
        capture_locals=capture_locals,
    )
    print_list(summary, file=file)
class Breakpoint(theano.Op):
    """Theano Op that passes its first input through unchanged but drops
    into ipdb when ``cond(value)`` is true.

    NOTE(review): Python 2 code (tuple parameter in perform(), `print >>`);
    left as-is.
    """
    # Output 0 aliases input 0 (identity pass-through, no copy).
    view_map = {0: [0]}
    # Global switch so every breakpoint can be disabled at once.
    global_breakpoint_enable = False

    def __init__(self, var_names, cond, tb, py_vars, breakpoint_grad,
                 is_grad=False):
        self.var_names = var_names          # names of the watched variables
        self.cond = cond                    # predicate on the first input
        self.tb = tb                        # creation-time traceback entries
        self.py_vars = py_vars              # extra Python-side variables
        self.nvars = len(var_names)
        self.breakpoint_grad = breakpoint_grad
        self.is_grad = is_grad              # True when guarding a gradient

    def __eq__(self, other):
        # Equality/hash ignore py_vars and grad flags so equivalent
        # breakpoints merge during graph optimization.
        return (type(self) == type(other) and
                self.var_names == other.var_names and
                self.cond == other.cond and
                self.tb == other.tb)

    def __hash__(self):
        return reduce(
            operator.xor,
            map(hash, (type(self), self.var_names, self.cond, self.tb)))

    def make_node(self, *inputs):
        # Output has the same type as the first input.
        output = T.as_tensor_variable(inputs[0]).type()
        return theano.Apply(self, inputs, (output, ))

    def make_gpu_node(self, *inputs):
        return Breakpoint_GPU(self.var_names, self.cond, self.tb,
                              self.py_vars, self.breakpoint_grad,
                              self.is_grad)(*inputs)

    def infer_shape(self, node, input_shapes):
        return (input_shapes[0], )

    def perform(self, node, inputs, (output, )):
        # Identity: forward the first input.
        output[0] = inputs[0]
        if not Breakpoint.global_breakpoint_enable:
            return
        x = inputs[0]
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        if self.cond(x):
            # Bundle the watched values so they are inspectable in ipdb.
            vars = _BreakpointVars(
                dict(zip(self.var_names, map(np.array, inputs[1:]))),
                self.py_vars)
            if self.is_grad:
                place = 'theano gradient eval'
            else:
                place = 'theano eval'
            print >> sys.stderr, 'Breakpoint in %s, created at' % place
            print >> sys.stderr, ' ...'
            # Show the tail of the creation-time stack for context.
            traceback.print_list(self.tb[-4:], sys.stderr)
            ipdb.set_trace()
            pass  # in theano breakpoint
def print_stack(task):
    """Print the current stack of *task*, traceback-module style."""
    summary = []
    refreshed = set()
    for frame in get_stack(task):
        code = frame.f_code
        path, lineno = code.co_filename, frame.f_lineno
        if path not in refreshed:
            refreshed.add(path)
            linecache.checkcache(path)
        source = linecache.getline(path, lineno, frame.f_globals)
        summary.append((path, lineno, code.co_name, source))
    if not summary:
        print('No stack for %r' % task)
    else:
        print('Stack for %r (most recent call last):' % task)
        traceback.print_list(summary)
def _get_test_value(cls, v): """ Extract test value from variable v. Raises AttributeError if there is none. For a Constant, the test value is v.value. For a Shared variable, it is the internal value. For another Variable, it is the content of v.tag.test_value. """ # avoid circular import from theano.compile.sharedvalue import SharedVariable if isinstance(v, graph.Constant): return v.value elif isinstance(v, SharedVariable): return v.get_value(borrow=True, return_internal_type=True) elif isinstance(v, graph.Variable) and hasattr(v.tag, 'test_value'): # ensure that the test value is correct try: ret = v.type.filter(v.tag.test_value) except Exception as e: # Better error message. detailed_err_msg = ( "For compute_test_value, one input test value does not" " have the requested type.\n") tr = getattr(v.tag, 'trace', None) if tr: sio = StringIO.StringIO() traceback.print_list(tr, sio) tr = sio.getvalue() detailed_err_msg += ( " \nBacktrace when that variable is created:\n") detailed_err_msg += str(tr) detailed_err_msg += ( "\nThe error when converting the test value to that" " variable type:") # We need to only have 1 args and it should be of type # string. Otherwise, it print the tuple and so the # new line do not get printed. args = (detailed_err_msg,) + tuple(str(arg) for arg in e.args) e.args = ("\n".join(args),) raise return ret raise AttributeError('%s has no test value' % v)
def print_exception(source = None, filename = None):
    """Print the active exception to stderr (IDLE-internal frames removed,
    clickable links added) and optionally offer a fix suggestion.

    NOTE(review): Python 2 code (`print >>`); left as-is.
    """
    import linecache
    linecache.checkcache()
    flush_stdout()
    efile = sys.stderr
    typ, val, tb = excinfo = sys.exc_info()
    sys.last_type, sys.last_value, sys.last_traceback = excinfo
    tbe = traceback.extract_tb(tb)
    print>>efile, '\nTraceback (most recent call last):'
    # Hide frames from these support modules.
    exclude = ("run.py", "rpc.py", "threading.py", "Queue.py",
               "RemoteDebugger.py", "bdb.py", "Commands.py")
    cleanup_traceback(tbe, exclude)
    add_exception_link(tbe)
    traceback.print_list(tbe, file=efile)
    lines = traceback.format_exception_only(typ, val)
    for line in lines:
        print>>efile, line,
    if source is not None and filename is not None:
        # Offer an automated suggestion when we know the user's source.
        Suggest.exception_suggest(typ, val, tb, source, filename)
def print_exception(source=None, filename=None):
    """Report the active exception on stderr with internal frames stripped
    and exception links attached; suggest a fix when source is available.

    NOTE(review): Python 2 code (`print >>`); left as-is.
    """
    import linecache
    linecache.checkcache()
    flush_stdout()
    efile = sys.stderr
    typ, val, tb = excinfo = sys.exc_info()
    sys.last_type, sys.last_value, sys.last_traceback = excinfo
    tbe = traceback.extract_tb(tb)
    print >> efile, '\nTraceback (most recent call last):'
    # Frames from these runner/debugger modules are noise for users.
    exclude = ("run.py", "rpc.py", "threading.py", "Queue.py",
               "RemoteDebugger.py", "bdb.py", "Commands.py")
    cleanup_traceback(tbe, exclude)
    add_exception_link(tbe)
    traceback.print_list(tbe, file=efile)
    lines = traceback.format_exception_only(typ, val)
    for line in lines:
        print >> efile, line,
    if source is not None and filename is not None:
        Suggest.exception_suggest(typ, val, tb, source, filename)
def print_exc(typ, exc, tb):
    # Recursively print a chained exception (__cause__/__context__),
    # oldest first.  Nested helper: uses enclosing-scope `seen`, `efile`,
    # and `cleanup_traceback`; `seen` guards against cycles.
    seen.add(exc)
    context = exc.__context__
    cause = exc.__cause__
    if cause is not None and cause not in seen:
        # Explicit `raise ... from cause` chain.
        print_exc(type(cause), cause, cause.__traceback__)
        print("\nThe above exception was the direct cause "
              "of the following exception:\n", file=efile)
    elif context is not None and not exc.__suppress_context__ and context not in seen:
        # Implicit chain: raised while handling another exception.
        print_exc(type(context), context, context.__traceback__)
        print("\nDuring handling of the above exception, "
              "another exception occurred:\n", file=efile)
    if tb:
        tbe = traceback.extract_tb(tb)
        print("Traceback (most recent call last):", file=efile)
        # Hide IDLE-internal frames from user tracebacks.
        exclude = ("run.py", "rpc.py", "threading.py", "queue.py",
                   "debugger_r.py", "bdb.py")
        cleanup_traceback(tbe, exclude)
        traceback.print_list(tbe, file=efile)
    lines = traceback.format_exception_only(typ, exc)
    for line in lines:
        print(line, end="", file=efile)
def dump_exception(msg, f = sys.stdout, extra = None):
    """Write a timestamped report of the current exception to *f*.

    Exceptions carrying a custom `traceback` attribute (StdException, or
    any object exposing one) are rendered from that saved traceback;
    otherwise the live traceback is printed.  *extra* text, when given,
    is appended in its own separator-delimited section.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    if isinstance(exc_value, StdException):
        custom_tb = exc_value.traceback
    else:
        if hasattr(exc_value, 'traceback'):
            # Prefer the traceback the exception captured itself.
            exc_traceback = exc_value.traceback
        custom_tb = None
    f.write('%s %s:\n' % (datetime.now(), msg))
    f.write(SEPT)
    if custom_tb != None:
        f.write('Traceback (most recent call last):\n')
        print_list(custom_tb, file = f)
        f.write(format_exception_only(exc_type, exc_value)[0])
    else:
        print_exception(exc_type, exc_value, exc_traceback, file = f)
    f.write(SEPT)
    if extra != None:
        f.write(extra)
        f.write(SEPT)
    f.flush()
def astart_solved():
    """Print solve-rate tables for the astar runs, per roomset difficulty.

    NOTE(review): Python 2 code (print statement); left as-is.
    """
    pp = PssAnalyzer()
    folder = os.path.join(os.getcwd(),"run_files")
    folder = os.path.join(folder,"uniqes")
    pp.appent_pattern(folder, ".*astar.*")
    p = pp#.select(".*Power.*")
    easy = p.solved_percent_ext(roomset="easy_roomset")
    mild = p.solved_percent_ext(roomset="mild_roomset")
    heavy = p.solved_percent_ext(roomset="heavy_roomset")
    # Sort each table by its second column (solve rate).
    easy = do_by_key(sorted, easy, 1)
    mild = do_by_key(sorted, mild, 1)
    heavy = do_by_key(sorted, heavy, 1)
    print "easy"
    print_list(easy)
    print "mild"
    print_list(mild)
    print "heavy"
    print_list(heavy)
def new_test2(): pp = PssAnalyzer() #d =r"C:\Users\inesmeya\Documents\PythonxyWS\HW1\AI1\src\run_files\results\2011-05-06_at_19-36_best_first_depth0.pck" #p.load(d) folder = os.path.join(os.getcwd(),"run_files") folder = os.path.join(folder,"uniqes") pp.appent_pattern(folder, ".*beam.*") p = pp#.select(".*Power.*") easy = p.solved_percent_ext(roomset="easy_roomset") mild = p.solved_percent_ext(roomset="mild_roomset") heavy = p.solved_percent_ext(roomset="heavy_roomset") easy = do_by_key(sorted, easy, 1) mild = do_by_key(sorted, mild, 1) heavy = do_by_key(sorted, heavy, 1) print "easy" print_list(easy) print "mild" print_list(mild) print "heavy" print_list(heavy)
def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
    """
    Re-raise an exception while annotating the exception object with
    debug info.

    Parameters
    ----------
    node : Apply node
        The Apply node object that resulted in the raised exception.
    exc_info : tuple, optional
        A tuple containing the exception type, exception object and
        associated traceback, as would be returned by a call to
        `sys.exc_info()` (which is done if `None` is passed).
    storage_map: dict, optional
        storage map of the theano function that resulted in the
        raised exception.

    Notes
    -----
    This re-raises the exception described by `exc_info` (or the last
    one raised, if `exc_info` is omitted) and annotates the exception
    object with several new members which may be helpful for debugging
    Theano graphs. They are:

    * __op_instance__: The Op that is responsible for the exception
      being raised.
    * __thunk_trace__: A traceback corresponding to the code that
      actually generated the exception, if it is available.
    * __applynode_index__: The index of the Apply node corresponding
      to this op in `op.fgraph.toposort()`.

    The exception is not annotated if it is of type `KeyboardInterrupt`.

    NOTE(review): Python 2 code (three-argument raise, StringIO module);
    left as-is.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_trace = exc_info
    if exc_type == KeyboardInterrupt:
        # print a simple traceback from KeyboardInterrupt
        raise exc_type, exc_value, exc_trace
    # Locate the creation trace: prefer the output variable's, then the
    # Op's, else give up.
    try:
        trace = node.outputs[0].tag.trace
    except AttributeError:
        try:
            trace = node.op.tag.trace
        except AttributeError:
            trace = ()
    exc_value.__thunk_trace__ = trace
    exc_value.__op_instance__ = node
    if node in node.fgraph.toposort():
        exc_value.__applynode_index__ = node.fgraph.toposort().index(node)
    else:
        exc_value.__applynode_index__ = None
    hints = []
    detailed_err_msg = "\nApply node that caused the error: " + str(node)
    types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
    detailed_err_msg += "\nInputs types: %s\n" % types
    if thunk is not None:
        # The c-linker thunk exposes the actual input containers, letting
        # us report runtime shapes/strides/small values.
        if hasattr(thunk, 'inputs'):
            shapes = [getattr(ipt[0], 'shape', 'No shapes')
                      for ipt in thunk.inputs]
            strides = [getattr(ipt[0], 'strides', 'No strides')
                       for ipt in thunk.inputs]
            scalar_values = []
            for ipt in thunk.inputs:
                if getattr(ipt[0], "size", -1) <= 5:
                    scalar_values.append(ipt[0])
                else:
                    scalar_values.append("not shown")
        else:
            shapes = "The thunk don't have an inputs attributes."
            strides = "So we can't access the strides of inputs values"
            scalar_values = "And can't print its inputs scalar value"
        detailed_err_msg += ("Inputs shapes: %s" % shapes +
                             "\nInputs strides: %s" % strides +
                             "\nInputs values: %s\n" % scalar_values)
    else:
        hints.append(
            "HINT: Use another linker then the c linker to"
            " have the inputs shapes and strides printed.")
    # Print node backtrace
    tr = getattr(node.outputs[0].tag, 'trace', None)
    if tr:
        sio = StringIO.StringIO()
        traceback.print_list(tr, sio)
        tr = sio.getvalue()
        detailed_err_msg += "\nBacktrace when the node is created:\n"
        detailed_err_msg += str(tr)
    else:
        hints.append(
            "HINT: Re-running with most Theano optimization disabled could"
            " give you a back-trace of when this node was created. This can"
            " be done with by setting the Theano flag"
            " 'optimizer=fast_compile'. If that does not work,"
            " Theano optimizations can be disabled with 'optimizer=None'.")
    if theano.config.exception_verbosity == 'high':
        f = StringIO.StringIO()
        theano.printing.debugprint(node, file=f, stop_on_name=True,
                                   print_type=True)
        detailed_err_msg += "\nDebugprint of the apply node: \n"
        detailed_err_msg += f.getvalue()
    # Prints output_map
    if storage_map is not None:
        # Summarize per-variable memory use (shape, element size, total).
        detailed_err_msg += "\nStorage map footprint:\n"
        for k in storage_map.keys():
            if storage_map[k][0] is not None:
                detailed_err_msg += " - " + str(k) + ", "
                shapeinfo = None
                if hasattr(storage_map[k][0], 'shape'):
                    shapeinfo = storage_map[k][0].shape
                    if len(shapeinfo) != 0:
                        detailed_err_msg += "Shape: %s, " % str(shapeinfo)
                    else:
                        # 0-d arrays are reported as a single element.
                        detailed_err_msg += "Shape: (1,), "
                if hasattr(storage_map[k][0], 'dtype'):
                    dtype = storage_map[k][0].dtype
                    detailed_err_msg += "ElemSize: %s Byte(s)" % numpy.dtype(dtype).itemsize
                    if shapeinfo is None:
                        detailed_err_msg += "\n"
                    else:
                        detailed_err_msg += ", TotalSize: %s Byte(s)\n" % (numpy.dtype(dtype).itemsize * numpy.prod(shapeinfo))
                else:
                    # Non-ndarray storage: report the Python object size.
                    bytes = getsizeof(storage_map[k][0])
                    detailed_err_msg += "ElemSize: %s Byte(s)\n" % str(bytes)
    else:
        hints.append(
            "HINT: Use the Theano flag 'exception_verbosity=high'"
            " for a debugprint and storage map footprint of this apply node.")
    # Rebuild the exception with the annotated message, keeping the
    # original traceback.
    exc_value = exc_type(str(exc_value) + detailed_err_msg +
                         '\n' + '\n'.join(hints))
    raise exc_type, exc_value, exc_trace
def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
    """
    Re-raise an exception while annotating the exception object with
    debug info.

    Parameters
    ----------
    node : Apply node
        The Apply node object that resulted in the raised exception.
    thunk : optional
        Compiled thunk that was executing when the exception was raised;
        if it exposes an ``inputs`` attribute, input shapes/strides/values
        are added to the error message.
    exc_info : tuple, optional
        A tuple containing the exception type, exception object and
        associated traceback, as would be returned by a call to
        `sys.exc_info()` (which is done if `None` is passed).
    storage_map: dict, optional
        storage map of the theano function that resulted in the
        raised exception.

    Notes
    -----
    This re-raises the exception described by `exc_info` (or the last
    one raised, if `exc_info` is omitted) and annotates the exception
    object with several new members which may be helpful for debugging
    Theano graphs. They are:

     * __op_instance__: The Op that is responsible for the exception
       being raised.
     * __thunk_trace__: A traceback corresponding to the code that
       actually generated the exception, if it is available.
     * __applynode_index__: The index of the Apply node corresponding
       to this op in `op.fgraph.toposort()`.

    The exception is not annotated if it is of type `KeyboardInterrupt`.

    """
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_trace = exc_info
    if exc_type == KeyboardInterrupt:
        # print a simple traceback from KeyboardInterrupt
        reraise(exc_type, exc_value, exc_trace)

    # Find the creation trace of the offending node (output tag first,
    # then the op tag), falling back to an empty tuple.
    try:
        trace = node.outputs[0].tag.trace
    except AttributeError:
        try:
            trace = node.op.tag.trace
        except AttributeError:
            trace = ()
    exc_value.__thunk_trace__ = trace
    exc_value.__op_instance__ = node
    topo = node.fgraph.toposort()
    if node in topo:
        node_index = topo.index(node)
    else:
        node_index = None
    exc_value.__applynode_index__ = node_index

    hints = []
    detailed_err_msg = "\nApply node that caused the error: " + str(node)
    if exc_value.__applynode_index__ is not None:
        detailed_err_msg += "\nToposort index: %d" % node_index

    types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
    detailed_err_msg += "\nInputs types: %s\n" % types

    if thunk is not None:
        # Only thunks with an `inputs` attribute (e.g. from the py linker)
        # let us report concrete runtime shapes/strides/values.
        if hasattr(thunk, 'inputs'):
            shapes = [getattr(ipt[0], 'shape', 'No shapes')
                      for ipt in thunk.inputs]
            strides = [getattr(ipt[0], 'strides', 'No strides')
                       for ipt in thunk.inputs]
            scalar_values = []
            for ipt in thunk.inputs:
                # Only print small values; larger arrays would flood the
                # error message.
                if getattr(ipt[0], "size", -1) <= 5:
                    scalar_values.append(ipt[0])
                else:
                    scalar_values.append("not shown")
        else:
            shapes = "The thunk don't have an inputs attributes."
            strides = "So we can't access the strides of inputs values"
            scalar_values = "And can't print its inputs scalar value"
        clients = [[c[0] for c in var.clients] for var in node.outputs]
        detailed_err_msg += ("Inputs shapes: %s" % shapes +
                             "\nInputs strides: %s" % strides +
                             "\nInputs values: %s" % scalar_values)
        if hasattr(node.op, '__input_name__'):
            detailed_err_msg += "\nInputs name: %s\n" % str(
                node.op.__input_name__)

        detailed_err_msg += "\nOutputs clients: %s\n" % clients
    else:
        hints.append(
            "HINT: Use another linker then the c linker to"
            " have the inputs shapes and strides printed.")

    # Print node backtraces
    tr = getattr(node.outputs[0].tag, 'trace', [])
    if len(tr) > 0:
        detailed_err_msg += "\nBacktrace when the node is created:\n"
        # Print separate message for each element in the list of backtraces
        sio = StringIO()
        for subtr in tr:
            traceback.print_list(subtr, sio)
        detailed_err_msg += str(sio.getvalue())
    else:
        hints.append(
            "HINT: Re-running with most Theano optimization disabled could"
            " give you a back-trace of when this node was created. This can"
            " be done with by setting the Theano flag"
            " 'optimizer=fast_compile'. If that does not work,"
            " Theano optimizations can be disabled with 'optimizer=None'.")

    if theano.config.exception_verbosity == 'high':
        f = StringIO()
        theano.printing.debugprint(node, file=f, stop_on_name=True,
                                   print_type=True)
        detailed_err_msg += "\nDebugprint of the apply node: \n"
        detailed_err_msg += f.getvalue()

    # Prints output_map
    if theano.config.exception_verbosity == 'high' and storage_map is not None:
        detailed_err_msg += "\nStorage map footprint:\n"
        shared_input_list = [
            item for item in node.fgraph.inputs
            if isinstance(item, theano.compile.SharedVariable)]
        nonshared_input_list = [
            item for item in node.fgraph.inputs
            if not isinstance(item, theano.compile.SharedVariable)]
        storage_map_list = []
        total_size = 0
        total_size_inputs = 0
        for k in storage_map:
            storage_map_item = []
            # storage_map_item[0]: the variable
            storage_map_item.append(str(k))
            # storage_map_item[1]: the shape
            shapeinfo = None
            if hasattr(storage_map[k][0], 'shape'):
                shapeinfo = storage_map[k][0].shape
                if len(shapeinfo) != 0:
                    storage_map_item.append(shapeinfo)
                else:
                    # 0-d array: record an empty-shape marker.
                    storage_map_item.append(tuple())
            else:
                storage_map_item.append(None)
            # storage_map_item[2]: itemsize
            # storage_map_item[3]: bytes
            if hasattr(storage_map[k][0], 'dtype'):
                dtype = storage_map[k][0].dtype
                storage_map_item.append(numpy.dtype(dtype).itemsize)
                if shapeinfo is None:
                    storage_map_item.append(None)
                else:
                    sz = numpy.dtype(dtype).itemsize * numpy.prod(shapeinfo)
                    storage_map_item.append(sz)
                    total_size += sz
                    if not k.owner:
                        total_size_inputs += sz
                    else:
                        # If it is a view, don't count it twice.
                        if getattr(k.owner.op, 'view_map', None):
                            vmap = k.owner.op.view_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[
                                    k.owner.inputs[vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
                        # If it is a destroyed input, the input
                        # shouldn't be in the storage_map anymore
                        # except if there is a special flag used. So
                        # we still must check it.
                        if getattr(k.owner.op, 'destroy_map', None):
                            vmap = k.owner.op.destroy_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[
                                    k.owner.inputs[vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
            else:
                # No dtype: fall back to the Python-level shallow size.
                # (renamed from `bytes` to avoid shadowing the builtin)
                n_bytes = getsizeof(storage_map[k][0])
                storage_map_item.append(n_bytes)
                storage_map_item.append(None)
            # Flag of shared val
            # storage_map_item[4]
            if k in shared_input_list:
                storage_map_item.append(True)
            elif k in nonshared_input_list:
                storage_map_item.append(False)
            else:
                storage_map_item.append(None)
            storage_map_list.append(storage_map_item)
        from operator import itemgetter
        # Largest allocations first.
        storage_map_list.sort(key=itemgetter(3), reverse=True)
        for item in storage_map_list:
            if item[3] is None:
                continue
            detailed_err_msg += " - " + item[0] + ", "
            if item[4] is True:
                detailed_err_msg += "Shared Input, "
            elif item[4] is False:
                detailed_err_msg += "Input, "
            if item[1] is not None:
                detailed_err_msg += "Shape: %s, " % str(item[1])
            detailed_err_msg += "ElemSize: %s Byte(s)" % item[2]
            if item[3] is not None:
                detailed_err_msg += ", TotalSize: %s Byte(s)\n" % item[3]
            else:
                detailed_err_msg += "\n"
        detailed_err_msg += " TotalSize: %s Byte(s) %.3f GB\n" % (
            total_size, total_size / 1024. / 1024 / 1024)
        # BUGFIX: unit label was "BG"; it is gigabytes, like the line above.
        detailed_err_msg += " TotalSize inputs: %s Byte(s) %.3f GB\n" % (
            total_size_inputs, total_size_inputs / 1024. / 1024 / 1024)
    else:
        hints.append(
            "HINT: Use the Theano flag 'exception_verbosity=high'"
            " for a debugprint and storage map footprint of this apply node.")

    exc_value = exc_type(str(exc_value) + detailed_err_msg +
                         '\n' + '\n'.join(hints))
    reraise(exc_type, exc_value, exc_trace)
def __import__(self, apply_node, check=True, reason=None):
    """
    Given an apply_node, recursively search from this node to known graph,
    and then add all unknown variables and apply_nodes to this graph.

    Parameters
    ----------
    apply_node : Apply node
        Node whose ancestry is imported into this fgraph.
    check : bool, optional
        When True, verify that no imported node/variable already belongs
        to another fgraph, and that every graph input is accounted for.
    reason : optional
        Passed through to the 'on_import' callbacks.

    Raises
    ------
    Exception
        If a node or variable is already owned by another fgraph.
    MissingInputError
        If an imported variable is neither a constant, an owned variable,
        nor one of this graph's declared inputs.
    """
    node = apply_node

    # We import the nodes in topological order. We only are interested
    # in new nodes, so we use all variables we know of as if they were
    # the input set.  (the functions in the graph module only use the
    # input set to know where to stop going down)
    new_nodes = graph.io_toposort(self.variables, apply_node.outputs)

    if check:
        for node in new_nodes:
            if hasattr(node, 'fgraph') and node.fgraph is not self:
                raise Exception("%s is already owned by another fgraph" % node)
            for r in node.inputs:
                if hasattr(r, 'fgraph') and r.fgraph is not self:
                    raise Exception("%s is already owned by another fgraph" % r)
                if (r.owner is None and
                        not isinstance(r, graph.Constant) and
                        r not in self.inputs):
                    # Verbose error message
                    # Show a complete chain of variables from the missing
                    # input to an output
                    if config.exception_verbosity == 'high':

                        def find_path_to(output_var, input_var):
                            """
                            Returns a list of each variable on a (not
                            necessarily unique) path from input_var to
                            output_var, where each variable in the list has
                            the preceding variable as one of its inputs.
                            Returns None if no path exists.
                            """
                            # If output and input are the same we have a
                            # singleton path
                            if output_var is input_var:
                                return [output_var]

                            # If output has no inputs then there is no path
                            owner = output_var.owner
                            if owner is None:
                                return None

                            # If input_var is an input to the output node,
                            # there is a simple two element path
                            inputs = owner.inputs
                            if input_var in inputs:
                                return [input_var, output_var]

                            # Otherwise we must recurse by searching for a
                            # path to one of our inputs, then appending the
                            # output to that path
                            for ipt in inputs:
                                path = find_path_to(ipt, input_var)
                                if path is not None:
                                    path.append(output_var)
                                    return path

                            # Since none of the above methods returned a
                            # path, there is none
                            return None

                        # Try different outputs until we find one that has
                        # a path to the missing input
                        for output in self.outputs:
                            path = find_path_to(output, r)
                            if path is not None:
                                break

                        # if there is no path then r isn't really a graph
                        # input so we shouldn't be running error handler
                        # code in the first place
                        assert path is not None
                        tr = getattr(r.tag, 'trace', [])
                        detailed_err_msg = ""
                        if type(tr) is list and len(tr) > 0:
                            detailed_err_msg += (
                                "\nBacktrace when the variable is created:\n")
                            # Print separate message for each element in
                            # the list of backtraces
                            sio = StringIO()
                            for subtr in tr:
                                traceback.print_list(subtr, sio)
                            detailed_err_msg += str(sio.getvalue())
                        raise MissingInputError(
                            'A variable that is an input to the graph was '
                            'neither provided as an input to the function '
                            'nor given a value. A chain of variables '
                            'leading from this input to an output is %s. '
                            'This chain may not be unique' % str(path) +
                            detailed_err_msg)

                    # Standard error message
                    # BUGFIX: the adjacent literals previously lacked
                    # separating spaces, yielding "value.Use ... 'high',for".
                    raise MissingInputError((
                        "An input of the graph, used to compute %s, "
                        "was not provided and not given a value. "
                        "Use the Theano flag exception_verbosity='high', "
                        "for more information on this error."
                        % str(node)), r)

    for node in new_nodes:
        assert node not in self.apply_nodes
        self.__setup_node__(node)
        self.apply_nodes.add(node)
        for output in node.outputs:
            self.__setup_r__(output)
            self.variables.add(output)
        for i, input in enumerate(node.inputs):
            if input not in self.variables:
                self.__setup_r__(input)
                self.variables.add(input)
            self.__add_clients__(input, [(node, i)])
        assert node.fgraph is self
        self.execute_callbacks('on_import', node, reason)
def test_print_list():
    """Check that print_list renders an extracted traceback in the
    expected editor-link format."""
    expected = u' bbedit +21 traceback/tests.py # _triple\n one()\n bbedit +11 traceback/tests.py # one\n two()\n bbedit +10 traceback/tests.py # two\n h[1]\n'
    buf = StringIO()
    print_list(extract_tb(_tb()), file=buf)
    eq_(buf.getvalue(), expected)
def raise_with_op(node, thunk=None, exc_info=None):
    """
    Re-raise an exception while annotating the exception object with
    debug info.

    Parameters
    ----------
    node : Apply node
        The Apply node object that resulted in the raised exception.
    thunk : optional
        Compiled thunk being executed when the error occurred; used, if it
        has an ``inputs`` attribute, to report input shapes/strides/values.
    exc_info : tuple, optional
        A tuple containing the exception type, exception object and
        associated traceback, as would be returned by a call to
        `sys.exc_info()` (which is done if `None` is passed).

    Notes
    -----
    This re-raises the exception described by `exc_info` (or the last
    one raised, if `exc_info` is omitted) and annotates the exception
    object with several new members which may be helpful for debugging
    Theano graphs. They are:

     * __op_instance__: The Op that is responsible for the exception
       being raised.
     * __thunk_trace__: A traceback corresponding to the code that
       actually generated the exception, if it is available.
     * __applynode_index__: The index of the Apply node corresponding
       to this op in `op.fgraph.toposort()`.

    The exception is not annotated if it is of type `KeyboardInterrupt`.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_trace = exc_info
    if exc_type == KeyboardInterrupt:
        # print a simple traceback from KeyboardInterrupt
        # (Python 2 three-expression raise: re-raise with original traceback)
        raise exc_type, exc_value, exc_trace
    # Find the creation trace: prefer the node's own tag, fall back to the
    # op's tag, otherwise an empty tuple.
    try:
        trace = node.tag.trace
    except AttributeError:
        try:
            trace = node.op.tag.trace
        except AttributeError:
            trace = ()
    exc_value.__thunk_trace__ = trace
    exc_value.__op_instance__ = node
    if node in node.fgraph.toposort():
        exc_value.__applynode_index__ = node.fgraph.toposort().index(node)
    else:
        exc_value.__applynode_index__ = None
    # nose and unittest catch the exception and do not run the thunk_hook
    # so it can be useful to just blurt out errors right here
    # NOTE(review): `print_thunk_trace` is a function attribute presumably
    # set elsewhere in the file — verify it is always initialized.
    if raise_with_op.print_thunk_trace:
        log_thunk_trace(exc_value)
    hints = []
    detailed_err_msg = "\nApply node that caused the error: " + str(node)
    types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
    detailed_err_msg += "\nInputs types: %s\n" % types
    if thunk is not None:
        # Thunks from some linkers expose `inputs`; only then can we show
        # runtime shapes, strides and (scalar-sized) values.
        if hasattr(thunk, 'inputs'):
            shapes = [getattr(ipt[0], 'shape', 'No shapes')
                      for ipt in thunk.inputs]
            strides = [getattr(ipt[0], 'strides', 'No strides')
                       for ipt in thunk.inputs]
            scalar_values = []
            for ipt in thunk.inputs:
                # Only single-element values are printed.
                if getattr(ipt[0], "size", -1) == 1:
                    scalar_values.append(ipt[0])
                else:
                    scalar_values.append("not scalar")
        else:
            shapes = "The thunk don't have an inputs attributes."
            strides = "So we can't access the strides of inputs values"
            scalar_values = "And can't print its inputs scalar value"
        detailed_err_msg += ("Inputs shapes: %s" % shapes +
                             "\nInputs strides: %s" % strides +
                             "\nInputs scalar values: %s\n" % scalar_values)
    else:
        hints.append(
            "HINT: Use another linker then the c linker to"
            " have the inputs shapes and strides printed.")
    # Print node backtrace
    tr = getattr(node.tag, 'trace', None)
    if tr:
        sio = StringIO.StringIO()
        traceback.print_list(tr, sio)
        tr = sio.getvalue()
        detailed_err_msg += "\nBacktrace when the node is created:"
        detailed_err_msg += str(tr)
    else:
        hints.append(
            "HINT: Re-running with most Theano optimization disabled could"
            " give you a back-traces when this node was created. This can"
            " be done with by setting the Theano flags"
            " optimizer=fast_compile")
    if theano.config.exception_verbosity == 'high':
        # Append a full debugprint of the apply node for maximum verbosity.
        f = StringIO.StringIO()
        theano.printing.debugprint(node, file=f, stop_on_name=True,
                                   print_type=True)
        detailed_err_msg += "\nDebugprint of the apply node: \n"
        detailed_err_msg += f.getvalue()
    else:
        hints.append(
            "HINT: Use the Theano flag 'exception_verbosity=high'"
            " for a debugprint of this apply node.")
    # Rebuild the exception with the annotated message and re-raise it,
    # preserving the original traceback (Python 2 raise statement).
    exc_value = exc_type(str(exc_value) + detailed_err_msg +
                         '\n' + '\n'.join(hints))
    raise exc_type, exc_value, exc_trace