def _handle_failure(exc, exc_info):
    self.applet.busy = False
    self.applet.appletStateUpdateRequested.emit()
    self.applet.progressSignal.emit(100)
    traceback.print_exception(*exc_info)
    sys.stderr.write("Exception raised during tracking.  See traceback above.\n")
    self._drawer.TrackButton.setEnabled(True)
def main():
    shell = Shell()
    try:
        retval = shell.main(sys.argv)
    except KeyboardInterrupt:
        sys.exit(1)
    except SystemExit:
        raise
    except:
        skip_it = False
        exc_info = sys.exc_info()
        if hasattr(exc_info[0], "__name__"):
            exc_class, exc, tb = exc_info
            if isinstance(exc, IOError) and exc.args[0] == 32:
                # Skip 'IOError: [Errno 32] Broken pipe'.
                skip_it = True
            if not skip_it:
                tb_path, tb_lineno, tb_func = traceback.extract_tb(tb)[-1][:3]
                shell.log.error("%s (%s:%s in %s)", exc_info[1], tb_path,
                                tb_lineno, tb_func)
        else:  # string exception
            shell.log.error(exc_info[0])
        if not skip_it:
            if shell.log.isEnabledFor(logging.DEBUG) or shell.traceback:
                print()
                traceback.print_exception(*exc_info)
            sys.exit(1)
    else:
        sys.exit(retval)
def check_func(func, tests, comp=lambda x, y: x == y, in_print=repr, out_print=repr):
    """Test FUNC according to sequence TESTS.  Each item in TESTS consists of
    (I, V, D=None), where I is a tuple of inputs to FUNC (if not a tuple,
    (I,) is substituted) and V is the proper output according to comparison
    COMP.  Prints erroneous cases.  In case of error, uses D as the test
    description, or constructs a description from I and V otherwise.
    Returns 0 for all correct, or the number of tests failed."""
    code = 0
    for input, output, *desc in tests:
        try:
            val = test_eval(func, input)
        except:
            fail_msg = "Function {0} failed".format(func.__name__)
            if desc:
                print(fail_msg, desc[0])
            else:
                print(fail_msg, "with input", in_print(input))
            traceback.print_exception(*sys.exc_info(), limit=2)
            code += 1
            continue
        if not comp(val, output):
            wrong_msg = "Wrong result from {0}:".format(func.__name__)
            if desc:
                print(wrong_msg, desc[0])
            else:
                print(wrong_msg, "input", in_print(input))
            print(" returned", val, "not", out_print(output))
            code += 1
    return code
def formatResult(self, seekable, le, data, sw, sm):
    if seekable:
        # when le = 0 then we want to have 0x9000. here we only have the
        # effective le, which is either MAX_EXTENDED_LE or MAX_SHORT_LE,
        # depending on the APDU. Note that the following distinguisher has
        # one false positive
        if le > len(data) and le != MAX_EXTENDED_LE and le != MAX_SHORT_LE:
            sw = SW["WARN_EOFBEFORENEREAD"]

    if le != None:
        result = data[:le]
    else:
        result = data[:0]

    if sm:
        try:
            sw, result = self.SAM.protect_result(sw, result)
        except SwError as e:
            logging.info(e.message)
            import traceback
            traceback.print_exception(*sys.exc_info())
            sw = e.sw
            result = ""
            answer = self.formatResult(False, 0, result, sw, False)

    return R_APDU(result, inttostring(sw)).render()
def _handle_thread_exception(request, exc_info):
    """Default exception handler callback function.

    This just prints the exception info via ``traceback.print_exception``.
    """
    traceback.print_exception(*exc_info)
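# --- Usage sketch (illustrative, not from the original project): the handler
# above is normally registered as a thread-pool error callback; here it is
# simply invoked by hand with a freshly caught exception. `request` is whatever
# work item the pool would pass through; None stands in for it here.
import sys

try:
    1 / 0
except ZeroDivisionError:
    _handle_thread_exception(request=None, exc_info=sys.exc_info())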
def printExceptionDetailsToStdErr():
    """
    No idea if all of this is needed, in fact I know it is not. But for now
    why not. Taken straight from the python manual on Exceptions.
    """
    import sys, traceback
    exc_type, exc_value, exc_traceback = sys.exc_info()
    print2err("*** print_tb:")
    traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
    print2err("*** print_exception:")
    traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2,
                              file=sys.stdout)
    print2err("*** print_exc:")
    traceback.print_exc()
    print2err("*** format_exc, first and last line:")
    formatted_lines = traceback.format_exc().splitlines()
    print2err(str(formatted_lines[0]))
    print2err((formatted_lines[-1]))
    print2err("*** format_exception:")
    print2err(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    print2err("*** extract_tb:")
    print2err(repr(traceback.extract_tb(exc_traceback)))
    print2err("*** format_tb:")
    print2err(repr(traceback.format_tb(exc_traceback)))
    print2err("*** tb_lineno:" + str(exc_traceback.tb_lineno))
def periodic_tasks():
    '''run periodic checks'''
    if mpstate.status.setup_mode:
        return

    if mpstate.settings.heartbeat != 0:
        heartbeat_period.frequency = mpstate.settings.heartbeat

    if heartbeat_period.trigger() and mpstate.settings.heartbeat != 0:
        mpstate.status.counters['MasterOut'] += 1
        for master in mpstate.mav_master:
            send_heartbeat(master)

    if heartbeat_check_period.trigger():
        check_link_status()

    set_stream_rates()

    if battery_period.trigger():
        battery_report()

    # call optional module idle tasks. These are called at several hundred Hz
    for m in mpstate.modules:
        if hasattr(m, 'idle_task'):
            try:
                m.idle_task()
            except Exception, msg:
                if mpstate.settings.moddebug == 1:
                    print(msg)
                elif mpstate.settings.moddebug > 1:
                    import traceback
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_exception(exc_type, exc_value, exc_traceback,
                                              limit=2, file=sys.stdout)
def execute(self, cmd):
    """ Execute a given command """
    sys.stdout, self.stdout = self.stdout, sys.stdout
    sys.stderr, self.stderr = self.stderr, sys.stderr
    sys.stdin, self.stdin = self.stdin, sys.stdin
    sys.settrace(self.idle)
    try:
        try:
            r = eval(cmd, self.namespace, self.namespace)
            if r is not None:
                print(r)
        except SyntaxError:
            exec cmd in self.namespace
    except:
        if hasattr(sys, 'last_type') and sys.last_type == SystemExit:
            self.quit_handler()
        else:
            try:
                info = sys.exc_info()
                tb = info[2]
                if tb:
                    tb = tb.tb_next
                traceback.print_exception(info[0], info[1], tb)
            except:
                sys.stderr, self.stderr = self.stderr, sys.stderr
                traceback.print_exc()
    sys.settrace(None)
    sys.stdout, self.stdout = self.stdout, sys.stdout
    sys.stderr, self.stderr = self.stderr, sys.stderr
    sys.stdin, self.stdin = self.stdin, sys.stdin
def __call__(self, environ, start_response):
    try:
        return self.app(environ, start_response)
    except:
        traceback.print_exception(*sys.exc_info())
        pdb.post_mortem(sys.exc_info()[2])
        raise
def get_report(self, e):
    e = self.get_exception(e)
    s = "".join(traceback.format_exception(type(e), e, e.__traceback__))
    with captured_output("stderr") as sio:
        traceback.print_exception(type(e), e, e.__traceback__)
    self.assertEqual(sio.getvalue(), s)
    return s
def cleanMesh(mesh, connectivityFilter=False):
    try:
        t = time.clock()
        connect = vtk.vtkPolyDataConnectivityFilter()
        clean = vtk.vtkCleanPolyData()

        if (connectivityFilter):
            if vtk.vtkVersion.GetVTKMajorVersion() >= 6:
                connect.SetInputData(mesh)
            else:
                connect.SetInput(mesh)
            connect.SetExtractionModeToLargestRegion()
            clean.SetInputConnection(connect.GetOutputPort())
        else:
            if vtk.vtkVersion.GetVTKMajorVersion() >= 6:
                clean.SetInputData(mesh)
            else:
                clean.SetInput(mesh)

        clean.Update()
        print ("Surface cleaned")
        m2 = clean.GetOutput()
        print " ", m2.GetNumberOfPolys(), "polygons"
        elapsedTime(t)
        clean = None
        connect = None
        return m2
    except:
        print "Surface cleaning failed"
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                  limit=2, file=sys.stdout)
    return None
def _parseStatement(self, s):
    statement_re = re.compile('(.*)=(.*)')
    value_list_re = re.compile('([^,]*),?')
    if (not statement_re.match(s)):
        print 'syntax error (statement match): %s' % repr(s)
        return
    statement_split = statement_re.split(s)
    if (len(statement_split) != 4):
        print 'syntax error (statement split): %s' % repr(s)
        return
    (foo, name, value, bar) = statement_split
    value_split = value_list_re.split(value)
    if (len(value_split) < 2 or len(value_split) % 2 != 1):
        print 'syntax error (value split): %s' % (repr(value_split))
        return
    try:
        value_array = []
        value_split.reverse()
        value_split.pop()
        while (len(value_split) != 0):
            value_array.append(value_split.pop())
            value_split.pop()
    except:
        print traceback.print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)
        print 'syntax error (value to array): %s' % (repr(value_split))
        return
    return (name, value_array)
def process_feed_wrapper(self, feed):
    """ wrapper for ProcessFeed
    """
    start_time = datetime.datetime.now()
    try:
        pfeed = ProcessFeed(feed, self.options)
        ret_feed, ret_entries = pfeed.process()
        del pfeed
    except:
        (etype, eobj, etb) = sys.exc_info()
        print '[%d] ! -------------------------' % (feed.id,)
        print traceback.format_exception(etype, eobj, etb)
        traceback.print_exception(etype, eobj, etb)
        print '[%d] ! -------------------------' % (feed.id,)
        ret_feed = FEED_ERREXC
        ret_entries = {}

    delta = datetime.datetime.now() - start_time
    if delta.seconds > SLOWFEED_WARNING:
        comment = u' (SLOW FEED!)'
    else:
        comment = u''
    prints(u'[%d] Processed %s in %s [%s] [%s]%s' % (
        feed.id, feed.feed_url, unicode(delta),
        self.feed_trans[ret_feed],
        u' '.join(u'%s=%d' % (self.entry_trans[key], ret_entries[key])
                  for key in self.entry_keys),
        comment))

    self.feed_stats[ret_feed] += 1
    for key, val in ret_entries.items():
        self.entry_stats[key] += val

    return ret_feed, ret_entries
def Try(method, *values):
    try:
        return method(*values)
    except Exception as e:
        from .. import console
        traceback.print_exception(*sys.exc_info())
        console.pause()
def readCamera(self):
    cap = cv2.VideoCapture(0)
    frame = None
    success = False

    if not cap.isOpened():
        print("Failed to open camera!")
        return

    while True:
        try:
            success, frame = cap.read()
            if not success:
                print("cap.read() failed")
                yield trollius.From(trollius.sleep(1.0 / self.fps))
                continue
            self.broadcast(frame)
            if self.hasFrame:
                self.hasFrame(frame)
        except KeyboardInterrupt:
            self.loop.stop()
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=2, file=sys.stdout)
        yield trollius.From(trollius.sleep(1.0 / self.fps))

    cap.release()
def __exit__(self, type, value, tb):
    if type is None:
        try:
            self.gen.next()
        except StopIteration:
            return
        else:
            raise RuntimeError("generator didn't stop")
    else:
        if value is None:
            # Need to force instantiation so we can reliably
            # tell if we get the same exception back
            value = type()
        try:
            try:
                self.gen.next()
            except StopIteration:
                import traceback
                traceback.print_exception(type, value, tb)
                raise value
        except StopIteration as exc:
            # Suppress the exception *unless* it's the same exception that
            # was passed to throw().  This prevents a StopIteration
            # raised inside the "with" statement from being suppressed
            return exc is not value
def DbgTry(method, *values):
    try:
        return method(*values)
    except Exception as e:
        from . import breakpoint
        traceback.print_exception(*sys.exc_info())
        bp()
def default_excepthook(loop, exc_type, exc_value, exc_traceback):  # pragma: no cover
    """
    Default excepthook. Prints a traceback and stops the event loop to
    prevent deadlocks and livelocks.

    :param loop: event loop the callback belongs to
    :param exc_type: exception class of the thrown exception
    :param exc_value: exception instance of the thrown exception
    :param exc_traceback: traceback to the stack frame where the exception occurred

    :type loop: uv.Loop
    :type exc_type: Subclass[Exception]
    :type exc_value: Exception
    :type exc_traceback: traceback
    """
    print('Exception happened during callback execution!', file=sys.stderr)
    traceback.print_exception(exc_type, exc_value, exc_traceback)
    loop.stop()
def end(self):
    """Clean up the associated objects

    This coroutine calls :meth:`src.wsclass.WSClass.end` for all
    objects in ``self.ws_objects`` and it removes ``self`` from
    ``self.__class__.clients``.

    This coroutine is setup to be called when the WebSocket
    connection closes or when the program ends.
    """
    try:
        exceptions = []
        for ws_object in self.ws_objects.values():
            try:
                yield ws_object.end()
            except:
                exceptions.append(exc_info())

        for exception in exceptions:
            print_exception(*exception)

        self.__class__.clients.discard(self)

        msg.code_debug(
            msg.join_path(__name__, self.end.__qualname__),
            'Connection closed! {0} '
            '({0.request.remote_ip})'.format(self)
        )
    except:
        raise
def formatException(self, ei):
    tb = ei[2]
    while 1:
        if not tb.tb_next:
            break
        tb = tb.tb_next
    stack = []
    f = tb.tb_frame
    while f:
        stack.append(f)
        f = f.f_back
    stack.reverse()
    sio = cStringIO.StringIO()
    traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
    for frame in stack:
        print >> sio
        print >> sio, "Frame %s in %s at line %s" % (frame.f_code.co_name,
                                                     frame.f_code.co_filename,
                                                     frame.f_lineno)
        for key, value in frame.f_locals.items():
            print >> sio, "\t%20s = " % key,
            try:
                print >> sio, repr(value)
            except:
                print >> sio, "<ERROR WHILE PRINTING VALUE>"
    s = sio.getvalue()
    sio.close()
    if s[-1:] == "\n":
        s = s[:-1]
    return s
def post_mortem_excepthook(type, value, tb):
    """
    For post mortem exception handling, print a banner and enable post
    mortem debugging.
    """
    clear_post_mortem()
    if IS_IPYTHON:
        from IPython.core.getipython import get_ipython
        ipython_shell = get_ipython()
        ipython_shell.showtraceback((type, value, tb))
        p = pdb.Pdb(ipython_shell.colors)
    else:
        traceback.print_exception(type, value, tb, file=sys.stderr)
        p = pdb.Pdb()

    if not type == SyntaxError:
        # wait for stderr to print (stderr.flush does not work in this case)
        time.sleep(0.1)
        _print('*' * 40)
        _print('Entering post mortem debugging...')
        _print('*' * 40)
        # add ability to move between frames
        p.send_initial_notification = False
        p.reset()
        frame = tb.tb_frame
        prev = frame
        while frame.f_back:
            prev = frame
            frame = frame.f_back
        frame = prev
        # wait for stdout to print
        time.sleep(0.1)
        p.interaction(frame, tb)
def handle(self, csvpath, *args, **options):
    loader = ContributionLoader(
        source=options.get('source'),
        description='load from denormalized CSVs',
        imported_by="loadcontributions.py (%s)" % os.getenv('LOGNAME', 'unknown'),
    )
    try:
        input_iterator = VerifiedCSVSource(open(os.path.abspath(csvpath)),
                                           FIELDNAMES,
                                           skiprows=1 + int(options['skip']))
        output_func = chain_filters(
            LoaderEmitter(loader),
            #Every(self.COMMIT_FREQUENCY, lambda i: transaction.commit()),
            Every(self.COMMIT_FREQUENCY, progress_tick))
        record_processor = self.get_record_processor(loader.import_session)
        load_data(input_iterator, record_processor, output_func)
        transaction.commit()
    except KeyboardInterrupt:
        traceback.print_exception(*sys.exc_info())
        transaction.rollback()
        raise
    except:
        traceback.print_exception(*sys.exc_info())
        transaction.rollback()
        raise
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
def _load_neo_file(cls, filename, lazy):
    """ Returns a NEO io object and a list of contained blocks for a
    file name. This function also caches all loaded blocks

    :param str filename: The full path of the file (relative or absolute).
    :param bool lazy: Determines if lazy mode is used for NEO io.
    """
    if os.path.isdir(filename):
        for io in neo.io.iolist:
            if io.mode == 'dir':
                try:
                    n_io = io(filename)
                    block = n_io.read(lazy=lazy)
                    if io == neo.TdtIO and not block.segments:
                        # TdtIO can produce empty blocks for invalid dirs
                        continue
                    cls.block_indices[block] = 0
                    cls.loaded_blocks[filename] = [block]
                    return n_io, [block]
                except Exception, e:
                    sys.stderr.write(
                        'Load error for directory "%s":\n' % filename)
                    tb = sys.exc_info()[2]
                    while not ('self' in tb.tb_frame.f_locals and
                               tb.tb_frame.f_locals['self'] == n_io):
                        if tb.tb_next is not None:
                            tb = tb.tb_next
                        else:
                            break
                    traceback.print_exception(type(e), e, tb)
def _info(type, value, tb):
    if not _exception_in_progress.acquire(False):
        # Exceptions have piled up, so we use the default exception
        # handler for such exceptions
        _excepthook_save(type, value, tb)
        return

    dialog = dialogs.HigDialog(None, gtk.MESSAGE_WARNING, gtk.BUTTONS_NONE,
        _('A programming error has been detected'),
        _('It probably is not fatal, but should be reported '
          'to the developers nonetheless.'))
    #FIXME: add icon to this button
    RESPONSE_REPORT_BUG = 42
    dialog.add_buttons(gtk.STOCK_CLOSE, gtk.BUTTONS_CLOSE,
        _('_Report Bug'), RESPONSE_REPORT_BUG)
    dialog.set_default_response(RESPONSE_REPORT_BUG)
    report_button = dialog.action_area.get_children()[0]  # right to left
    report_button.grab_focus()

    # Details
    textview = gtk.TextView()
    textview.set_editable(False)
    textview.modify_font(pango.FontDescription('Monospace'))
    sw = gtk.ScrolledWindow()
    sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    sw.add(textview)
    frame = gtk.Frame()
    frame.set_shadow_type(gtk.SHADOW_IN)
    frame.add(sw)
    frame.set_border_width(6)
    textbuffer = textview.get_buffer()
    trace = StringIO()
    traceback.print_exception(type, value, tb, None, trace)
    textbuffer.set_text(trace.getvalue())
    textview.set_size_request(
        gtk.gdk.screen_width() / 3,
        gtk.gdk.screen_height() / 4)
    expander = gtk.Expander(_('Details'))
    expander.add(frame)
    dialog.vbox.add(expander)

    dialog.set_resizable(True)
    # on expand the details the dialog remains centered on screen
    dialog.set_position(gtk.WIN_POS_CENTER_ALWAYS)

    dialog.show_all()

    close_clicked = False
    while not close_clicked:
        resp = dialog.run()
        if resp == RESPONSE_REPORT_BUG:
            url = 'http://trac.gajim.org/wiki/WikiStart#howto_report_ticket'
            helpers.launch_browser_mailer('url', url)
        else:
            close_clicked = True

    dialog.destroy()
    _exception_in_progress.release()
def __exit__(self, type, exception, trace):
    if exception is None:
        # If exception is None, no exception occurred.
        step_failed = False
    elif isinstance(exception, StepFailed):
        step_failed = True
        print
        print 'Halting build step because of failure.'
        print
    else:
        step_failed = True
        print
        print 'The build step threw an exception...'
        print
        traceback.print_exception(type, exception, trace, file=sys.stdout)
        print

    if step_failed:
        self.status.ReportFail(self.name)
        print '@@@STEP_FAILURE@@@'
        if self.halt_on_fail:
            print
            print 'Entire build halted because %s failed.' % self.name
            raise StopBuild()
    else:
        self.status.ReportPass(self.name)
        print '@@@STEP_SUCCESS@@@'

    # Suppress any exception that occurred.
    return True
def Start(filename):
    global state
    global g
    result = ''
    # read data case line by line from file
    try:
        br = open(filename, 'r')
        for re in br:
            print 'Processing ' + re + '...'
            re = re.strip()
            calc_next(re)
            state = 0
            nfa = parse(re, 0, len(re) - 1)
            # calculate closure
            calc_closure()
            # test 01 string of length up to 6
            for length in range(1, 6 + 1):
                for num in range(0, (1 << length)):
                    if (test(closure[nfa[0]], nfa[1], 0, length, num)):
                        for i in range(length):
                            if ((num & (1 << i)) > 0):
                                result = result + '1'
                            else:
                                result = result + '0'
                        result = result + "\n"
        # Close the input stream
        br.close()
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print "*** print_exception:"
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2,
                                  file=sys.stdout)
        result = result + 'error'
    return result
def execute(meth, *args, **kwargs):
    """
    Execute *meth* in a Python thread, blocking the current coroutine/
    greenthread until the method completes.

    The primary use case for this is to wrap an object or module that is not
    amenable to monkeypatching or any of the other tricks that Eventlet uses
    to achieve cooperative yielding.  With tpool, you can force such objects
    to cooperate with green threads by sticking them in native threads, at
    the cost of some overhead.
    """
    setup()
    # if already in tpool, don't recurse into the tpool
    # also, call functions directly if we're inside an import lock, because
    # if meth does any importing (sadly common), it will hang
    my_thread = threading.currentThread()
    if my_thread in _threads or imp.lock_held() or _nthreads == 0:
        return meth(*args, **kwargs)

    e = event.Event()
    _reqq.put((e, meth, args, kwargs))

    rv = e.wait()
    if isinstance(rv, tuple) \
            and len(rv) == 3 \
            and isinstance(rv[1], EXC_CLASSES):
        (c, e, tb) = rv
        if not QUIET:
            traceback.print_exception(c, e, tb)
            traceback.print_stack()
        six.reraise(c, e, tb)
    return rv
def _greenlet_report_error(self, exc_info):
    import sys
    import traceback

    exception = exc_info[1]
    if isinstance(exception, gevent.greenlet.GreenletExit):
        self._report_result(exception)
        return

    exc_handler = False
    for lnk in self._links:
        if isinstance(lnk, gevent.greenlet.FailureSpawnedLink):
            exc_handler = True
            break

    if not exc_handler:
        try:
            traceback.print_exception(*exc_info)
        except:
            pass

    self._exception = exception
    if self._links and self._notifier is None:
        self._notifier = self.parent.loop.run_callback(self._notify_links)

    ## Only print errors
    if not exc_handler:
        info = str(self) + ' failed with '
        try:
            info += self._exception.__class__.__name__
        except Exception:
            info += str(self._exception) or repr(self._exception)
        sys.stderr.write(info + '\n\n')
def on_crash(self, msg, *fmt, **kwargs):
    print(msg.format(*fmt), file=sys.stderr)
    exc_info = sys.exc_info()
    try:
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, sys.stderr)
    finally:
        del(exc_info)
def main(solver=config.user_config.solver):
    logging.basicConfig(level=logging.CRITICAL, format='%(levelname)s: %(message)s')
    from minpower import solve
    dirNm = splitFilename(__file__)[0]
    if dirNm == '':
        dirNm = '.'
    excludeL = []
    for fileNm in os.listdir(dirNm):
        if fileNm in excludeL:
            continue
        testDir = joindir(dirNm, fileNm)
        if not os.path.isdir(testDir):
            continue
        print 'testing: ', fileNm
        wipeTestSlate(testDir)
        fResults = open(joindir(testDir, 'results.txt'), 'w+')
        fError = open(joindir(testDir, 'error.txt'), 'w+')
        sys.stdout = fResults  # switch output to results file

        if hasPyscript(testDir):
            sys.stdout = sys.__stdout__  # switch back to standard outputting
            os.system('python {s}'.format(s=hasPyscript(testDir)[0]))
        else:
            try:
                user_config.scenarios = 2
                solve.solve_problem(testDir)
                sys.stdout = sys.__stdout__  # switch back to standard outputting
                fError.close()
                os.remove(joindir(testDir, 'error.txt'))
            except:  # write the error to file
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_tb(exc_traceback, file=fError)
                traceback.print_exception(exc_type, exc_value, exc_traceback,
                                          file=fError)
                sys.stdout = sys.__stdout__  # switch back to standard outputting
                print '\t had error'  # note that this dir produced error
            else:
                sys.stdout = sys.__stdout__  # switch back to standard outputting
async def on_command_error(self, context, exception):
    if isinstance(exception, commands.CommandNotFound):
        return
    exception = exception.original
    traceback.print_exception(type(exception), exception,
                              exception.__traceback__, file=sys.stderr)
def error_handler(type, value, tb):
    with open('ERRORLOG.TXT', 'w') as errout:
        traceback.print_exception(type, value, tb, file=errout)
        traceback.print_exception(type, value, tb, file=sys.stdout)
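# --- Usage sketch (assumption: the handler above is intended as a process-wide
# hook, so uncaught exceptions are mirrored to ERRORLOG.TXT and stdout):
import sys

sys.excepthook = error_handler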
def format(self, record):
    try:
        console_normal = getattr(CONSOLE_COLOR, 'normal')
        if record.levelname == 'ERROR':
            console_color = getattr(CONSOLE_COLOR, self.msg_color['err_color'])
        elif record.levelname == 'CRITICAL':
            console_color = getattr(CONSOLE_COLOR, self.msg_color['err_color'])
        elif record.levelname == 'WARNING':
            console_color = getattr(CONSOLE_COLOR, self.msg_color['warn_color'])
        elif record.levelname == 'DEBUG':
            console_color = getattr(CONSOLE_COLOR, self.msg_color['dbg_color'])
        elif record.levelname == 'INFO':
            console_color = getattr(CONSOLE_COLOR, self.msg_color['info_color'])
        else:
            console_color = console_normal
    except AttributeError:
        color_keys = list(CONSOLE_COLOR.__dict__.keys())
        color_keys.remove('__doc__')
        color_keys.remove('__weakref__')
        color_keys.remove('__module__')
        color_keys.remove('__dict__')
        t, v, tb = sys.exc_info()
        sys.stderr.write('\n--- Logging error ---\n')
        traceback.print_exception(t, v, tb, None, sys.stderr)
        sys.stderr.write('\n--- Logging error ---\n')
        sys.stderr.write('Does not support color error, choose one from list:' +
                         '\n' + str(color_keys) + '\nEdit class CONSOLE to add more.\n')
        sys.exit()

    record.message = record.getMessage()
    record.message = record.message.replace(self.Keyword_tag_start, '')
    record.message = record.message.replace(self.Keyword_tag_end, '')
    if record.levelno > 20:
        record.message = console_color + record.message + console_normal
    if self.usesTime():
        record.asctime = self.formatTime(record, self.datefmt)
    #s = self.formatMessage(record)
    s = self._fmt % record.__dict__
    if record.exc_info:
        # Cache the traceback text to avoid converting it multiple times
        # (it's constant anyway)
        if not record.exc_text:
            record.exc_text = self.formatException(record.exc_info)
    '''
    if record.exc_text:
        if s[-1:] != "\n":
            s = s + "\n"
        s = s + record.exc_text
    if record.stack_info:
        if s[-1:] != "\n":
            s = s + "\n"
        s = s + self.formatStack(record.stack_info)
    '''
    if record.exc_text:
        if s[-1:] != "\n":
            s = s + "\n"
        try:
            s = s + record.exc_text
        except UnicodeError:
            # Sometimes filenames have non-ASCII chars, which can lead
            # to errors when s is Unicode and record.exc_text is str
            # See issue 8924.
            # We also use replace for when there are multiple
            # encodings, e.g. UTF-8 for the filesystem and latin-1
            # for a script. See issue 13232.
            s = s + record.exc_text.decode(sys.getfilesystemencoding(),
                                           'replace')
    return s
def excepthook(self, etype, evalue, tb):
    # write uncaught traceback to 'real' stderr, not zmq-forwarder
    traceback.print_exception(etype, evalue, tb, file=sys.__stderr__)
def new_excepthook(type, value, tb):
    # By default, Qt does not output any errors; this prevents that
    traceback.print_exception(type, value, tb)
def test_blast_output(outfile):
    # Try to auto-detect the format
    if 1:
        print("No parser specified.  I'll try to choose one for you based")
        print("on the format of the output file.")
        print("")

        parser_class = choose_parser(outfile)
        print("It looks like you have given output that should be parsed")
        print("with %s.%s.  If I'm wrong, you can select the correct parser"
              % (parser_class.__module__, parser_class.__name__))
        print("on the command line of this script (NOT IMPLEMENTED YET).")
    else:
        raise NotImplementedError("Biopython no longer has an HTML BLAST parser.")
    print("")

    scanner_class = parser_class()._scanner.__class__
    consumer_class = parser_class()._consumer.__class__

    # parser_class()._scanner.feed(
    #     open(outfile), ParserSupport.TaggingConsumer())
    print("I'm going to run the data through the parser to see what happens...")
    parser = parser_class()
    try:
        rec = parser.parse_file(outfile)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as x:
        exception_info = str(x)
        print("Dang, the parsing failed.")
    else:
        print("Parsing succeeded, no problems detected.")
        print("However, you should check to make sure the following scanner")
        print("trace looks reasonable.")
        print("")
        parser_class()._scanner.feed(
            open(outfile), ParserSupport.TaggingConsumer())
        return 0
    print("")

    print("Alright.  Let me try and figure out where in the parser the")
    print("problem occurred...")
    etype, value, tb = sys.exc_info()
    ftb = traceback.extract_tb(tb)
    ftb.reverse()
    class_found = None
    for err_file, err_line, err_function, err_text in ftb:
        if hasattr(consumer_class, err_function):
            class_found = consumer_class
            break
        elif hasattr(scanner_class, err_function):
            class_found = scanner_class
            break
    if class_found is None:
        print("Sorry, I could not pinpoint the error to the parser.")
        print("There's nothing more I can tell you.")
        print("Here's the traceback:")
        traceback.print_exception(etype, value, tb)
        return 1
    else:
        print("I found the problem in %s.%s.%s, line %d:"
              % (class_found.__module__, class_found.__name__,
                 err_function, err_line))
        print("    %s" % err_text)
        print("This output caused an %s to be raised with the" % etype)
        print("information %r." % exception_info)
        print("")

    print("Let me find the line in the file that triggers the problem...")
    parser = parser_class()
    scanner, consumer = parser._scanner, parser._consumer
    consumer = DebuggingConsumer(consumer)
    try:
        scanner.feed(open(outfile), consumer)
    except etype as x:
        pass
    else:
        print("Odd, the exception disappeared!  What happened?")
        return 3

    print("It's caused by line %d:" % consumer.linenum)
    lines = open(outfile).readlines()
    start, end = consumer.linenum - CONTEXT, consumer.linenum + CONTEXT + 1
    if start < 0:
        start = 0
    if end > len(lines):
        end = len(lines)
    ndigits = len(str(end))
    for linenum in range(start, end):
        line = chomp(lines[linenum])
        if linenum == consumer.linenum:
            prefix = '*'
        else:
            prefix = ' '
        s = "%s%*d %s" % (prefix, ndigits, linenum, line)
        s = s[:80]
        print(s)
    print("")

    if class_found == scanner_class:
        print("Problems in %s are most likely caused by changed formats."
              % class_found.__name__)
        print("You can start to fix this by going to line %d in module %s."
              % (err_line, class_found.__module__))
        print("Perhaps the scanner needs to be made more lenient by accepting")
        print("the changed format?")
        print("")

        if VERBOSITY <= 0:
            print("For more help, you can run this script in verbose mode")
            print("to see detailed information about how the scanner")
            print("identifies each line.")
        else:
            print("OK, let's see what the scanner's doing!")
            print("")
            print("*" * 20 + " BEGIN SCANNER TRACE " + "*" * 20)
            try:
                parser_class()._scanner.feed(
                    open(outfile), ParserSupport.TaggingConsumer())
            except etype as x:
                pass
            print("*" * 20 + " END SCANNER TRACE " + "*" * 20)
        print("")
    elif class_found == consumer_class:
        print("Problems in %s can be caused by two things:" % class_found.__name__)
        print("    - The format of the line parsed by '%s' changed." % err_function)
        print("    - The scanner misidentified the line.")
        print("Check to make sure '%s' should parse the line:" % err_function)
        s = "    %s" % chomp(lines[consumer.linenum])
        s = s[:80]
        print(s)
        print("If so, debug %s.%s.  Otherwise, debug %s."
              % (class_found.__name__, err_function, scanner_class.__name__))
def on_error(self, exc_info):
    if not isinstance(exc_info[1], UserCancelled):
        traceback.print_exception(*exc_info)
        self.show_error(str(exc_info[1]))
def plot(
    data=[],
    files=[],
    plot_params=plot_parameters(),
    combine=False,
    display=False,
    output_dir=".",
    echo_dart=None,
):
    """A function to plot JSON data

    Args:
        - data (list):
            - list of "plot_data" objects
            - should contain their own plot_parameters object
        - files (list):
            - list of JSON files
            - "plot_params" argument object will be applied to these files
        - combine (bool):
            - if specified, the plot_data objects from "data" and "files"
              will be combined into one "plot_data" object
            - the plot_params object will be used for this
    """
    try:
        # try here in case running as main on C++ output
        import timemory.options as options

        if echo_dart is None and options.echo_dart:
            echo_dart = True
        elif echo_dart is None:
            echo_dart = False
    except ImportError:
        pass

    if len(files) > 0:
        for filename in files:
            # print('Reading {}...'.format(filename))
            f = open(filename, "r")
            _data = read(json.load(f))
            f.close()
            _data.filename = filename
            _data.title = filename
            data.append(_data)

    data_sum = None
    if combine:
        for _data in data:
            if data_sum is None:
                data_sum = _data
            else:
                data_sum += _data
        data_sum.update_parameters(plot_params)
        data = [data_sum]

    for _data in data:
        try:
            # print('Plotting {}...'.format(_data.filename))
            plot_all(_data, display, output_dir, echo_dart)
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(
                exc_type, exc_value, exc_traceback, limit=5
            )
            print("Exception - {}".format(e))
            print('Error! Unable to plot "{}"...'.format(_data.filename))
def __init__(self, tools_dir, keycfg):
    try:
        if keycfg == "":
            keycfg = os.path.join(tools_dir, "key_config.xml")
            print("INFO: --keys option not given. ")
            print("INFO: Continuing with default configuration file: " + keycfg)

        config_file = os.path.join(tools_dir, "ssd_bin.cfg")

        # temporary directories
        tdir = tempfile.mkdtemp()
        self.TEMP_F_DIR = tdir

        # temporary files
        self.IV_BIN_FNAME = os.path.join(tdir, "iv.bin")
        self.IEK_BIN_FNAME = os.path.join(tdir, "iek.bin")
        self.SSD_MD_TO_SIGN_FNAME = os.path.join(tdir, "to_sign.xml")

        self.config_p.readfp(open(config_file))

        # Information in the config file is needed for generating the MD
        self.parse_config_file()

        if (self.IEK_ENC_ALGO == "RSA-2048"):
            self.dvc_key_fn = key_config_parser.get_rsa_pub_dvc_key(keycfg)
            self.dvc_key_id = key_config_parser.get_rsa_dvc_key_id_buf(keycfg)
        elif (self.IEK_ENC_ALGO == "AES-128"):
            self.dvc_key_fn = key_config_parser.get_aes_dvc_key(keycfg)
            self.dvc_key_id = key_config_parser.get_aes_dvc_key_id_buf(keycfg)
        else:
            print("Error: Unsupported IEK_ENC_ALGO from config")
            exit(1)

        self.oem_key_fn = key_config_parser.get_rsa_pri_oem_key(keycfg)
        self.oem_key_id = key_config_parser.get_rsa_oem_key_id_buf(keycfg)

        if (self.dvc_key_fn == '' or self.dvc_key_id == '' or
                self.oem_key_fn == '' or self.oem_key_id == ''):
            print("Error: Key config not correct")
            exit(1)

        # Remove previous temporary directory
        if True == os.path.exists(tdir) and True == os.path.isdir(tdir):
            shutil.rmtree(tdir)

        # Create temp directory for storing all temp files
        os.makedirs(tdir)

        # Initialization for encrypting&signing
        self.init_enc()
        self.init_sign('', '')
    except:
        print "Failed during init"
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print "*** print_tb:"
        traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
        print "*** print_exception:"
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2,
                                  file=sys.stdout)
        sys.exit(2)
def print_exception(exc, file=stderr):
    traceback.print_exception(type(exc), exc, exc.__traceback__, file=file)
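# --- Usage sketch for the wrapper above (assumes the module-level
# `from sys import stderr` and `import traceback` that the signature implies):
try:
    {}["missing"]
except KeyError as exc:
    print_exception(exc)  # full traceback of `exc` written to stderr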
owner_info = ['--organization', 'conda-forge']

print('Calculating the recipes which need to be turned into feedstocks.')
with tmp_dir('__feedstocks') as feedstocks_dir:
    feedstock_dirs = []
    for recipe_dir, name in list_recipes():
        feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')
        print('Making feedstock for {}'.format(name))

        try:
            subprocess.check_call(['conda', 'smithy', 'init', recipe_dir,
                                   '--feedstock-directory', feedstock_dir])
        except subprocess.CalledProcessError:
            traceback.print_exception(*sys.exc_info())
            continue

        if not is_merged_pr:
            # We just want to check that conda-smithy is doing its thing
            # without having any metadata issues.
            continue

        feedstock_dirs.append([feedstock_dir, name, recipe_dir])

        subprocess.check_call(['git', 'remote', 'add', 'upstream_with_token',
                               'https://*****:*****@github.com/conda-forge/{}-feedstock'
                               .format(os.environ['GH_TOKEN'], name)],
                              cwd=feedstock_dir)
        print_rate_limiting_info(gh_drone)
def plot_maximums(
    output_name,
    title,
    data,
    plot_params=plot_parameters(),
    display=False,
    output_dir=".",
    echo_dart=None,
):
    """A function to plot JSON data

    Args:
        - data (list):
            - list of "plot_data" objects
            - should contain their own plot_parameters object
        - files (list):
            - list of JSON files
            - "plot_params" argument object will be applied to these files
        - combine (bool):
            - if specified, the plot_data objects from "data" and "files"
              will be combined into one "plot_data" object
            - the plot_params object will be used for this
    """
    try:
        # try here in case running as main on C++ output
        import timemory.options as options

        if echo_dart is None and options.echo_dart:
            echo_dart = True
        elif echo_dart is None:
            echo_dart = False
    except ImportError:
        pass

    _combined = None
    for _data in data:
        if _combined is None:
            _combined = plot_data(
                filename=output_name,
                output_name=output_name,
                concurrency=_data.concurrency,
                mpi_size=_data.mpi_size,
                timemory_functions={},
                title=title,
                plot_params=plot_params,
            )
        _key = list(_data.timemory_functions.keys())[0]
        _obj = _data.timemory_functions[_key]
        _obj_name = "{}".format(_data.filename)
        _obj.func = _obj_name
        _combined.timemory_functions[_obj_name] = _obj

    try:
        # print('Plotting {}...'.format(_combined.filename))
        plot_all(_combined, display, output_dir, echo_dart)
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=5)
        print("Exception - {}".format(e))
        print('Error! Unable to plot "{}"...'.format(_combined.filename))
in_location = '/home/user/basic_website/test_data/python_errors.json'

my_data = None
with open(in_location, 'r') as infile:
    my_data = json.load(infile)

for data in my_data:
    scanner.append(data["error"])
    try:
        parser.parse()
    except Exception as e:
        print("Exited badly with exception: {}".format(e))
        print("Trying to get full error")
        exc_info = sys.exc_info()
        traceback.print_exception(*exc_info)
        del exc_info

    tokens = parser.get_tokens()
    print("****** Got tokens ******")
    for token in tokens:
        print(token)
    print("************************")

    scanner.exit_status()
    parser.exit_status()
    parser.print_unparsed_in_context()
    scanner.clear_stats()
    parser.clear_stats()
    parser.clear_tokens()
    input("Press enter to parse next error")
def _log_exception(*exc_info):
    thread_name = threading.current_thread().name
    logger.error("Unhandled exception in thread: '{}'".format(thread_name))
    sio = StringIO.StringIO()
    traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], file=sio)
    logger.error(sio.getvalue())
def print_exception():
    exc_type, exc_value, exc_traceback = sys.exc_info()
    traceback.print_exception(exc_type, exc_value, exc_traceback, limit=3,
                              file=sys.stdout)
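# --- Usage sketch: the helper above reads sys.exc_info(), so it is only
# meaningful while an exception is being handled.
try:
    int("not a number")
except ValueError:
    print_exception()  # at most 3 stack frames, printed to stdout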
class WSGIApplication(object):

    def __init__(self, document_root=None, **kw):
        '''
        Init a WSGIApplication.

        Args:
          document_root: document root path.
        '''
        self._running = False
        self._document_root = document_root

        self._interceptors = []
        self._template_engine = None

        self._get_static = {}
        self._post_static = {}

        self._get_dynamic = []
        self._post_dynamic = []

    def _check_not_running(self):
        if self._running:
            raise RuntimeError('Cannot modify WSGIApplication when running.')

    @property
    def template_engine(self):
        return self._template_engine

    @template_engine.setter
    def template_engine(self, engine):
        self._check_not_running()
        self._template_engine = engine

    def add_module(self, mod):
        self._check_not_running()
        m = mod if type(mod) == types.ModuleType else _load_module(mod)
        logging.info('Add module: %s' % m.__name__)
        for name in dir(m):
            fn = getattr(m, name)
            if callable(fn) and hasattr(fn, '__web_route__') and hasattr(fn, '__web_method__'):
                self.add_url(fn)

    def add_url(self, func):
        self._check_not_running()
        route = Route(func)
        if route.is_static:
            if route.method == 'GET':
                self._get_static[route.path] = route
            if route.method == 'POST':
                self._post_static[route.path] = route
        else:
            if route.method == 'GET':
                self._get_dynamic.append(route)
            if route.method == 'POST':
                self._post_dynamic.append(route)
        logging.info('Add route: %s' % str(route))

    def add_interceptor(self, func):
        self._check_not_running()
        self._interceptors.append(func)
        logging.info('Add interceptor: %s' % str(func))

    def run(self, port=9000, host='127.0.0.1'):
        from wsgiref.simple_server import make_server
        logging.info('application (%s) will start at %s:%s...' % (self._document_root, host, port))
        server = make_server(host, port, self.get_wsgi_application(debug=True))
        server.serve_forever()

    def get_wsgi_application(self, debug=False):
        self._check_not_running()
        if debug:
            self._get_dynamic.append(StaticFileRoute())
        self._running = True

        _application = Dict(document_root=self._document_root)

        def fn_route():
            request_method = ctx.request.request_method
            path_info = ctx.request.path_info
            if request_method == 'GET':
                fn = self._get_static.get(path_info, None)
                if fn:
                    return fn()
                for fn in self._get_dynamic:
                    args = fn.match(path_info)
                    if args:
                        return fn(*args)
                raise notfound()
            if request_method == 'POST':
                fn = self._post_static.get(path_info, None)
                if fn:
                    return fn()
                for fn in self._post_dynamic:
                    args = fn.match(path_info)
                    if args:
                        return fn(*args)
                raise notfound()
            raise badrequest()

        fn_exec = _build_interceptor_chain(fn_route, *self._interceptors)

        def wsgi(env, start_response):
            ctx.application = _application
            ctx.request = Request(env)
            response = ctx.response = Response()
            try:
                r = fn_exec()
                if isinstance(r, Template):
                    r = self._template_engine(r.template_name, r.model)
                if isinstance(r, unicode):
                    r = r.encode('utf-8')
                if r is None:
                    r = []
                start_response(response.status, response.headers)
                return r
            except RedirectError, e:
                response.set_header('Location', e.location)
                start_response(e.status, response.headers)
                return []
            except HttpError, e:
                start_response(e.status, response.headers)
                return ['<html><body><h1>', e.status, '</h1></body></html>']
            except Exception, e:
                logging.exception(e)
                if not debug:
                    start_response('500 Internal Server Error', [])
                    return ['<html><body><h1>500 Internal Server Error</h1></body></html>']
                exc_type, exc_value, exc_traceback = sys.exc_info()
                fp = StringIO()
                traceback.print_exception(exc_type, exc_value, exc_traceback, file=fp)
                stacks = fp.getvalue()
                fp.close()
                start_response('500 Internal Server Error', [])
                return [
                    r'''<html><body><h1>500 Internal Server Error</h1><div style="font-family:Monaco, Menlo, Consolas, 'Courier New', monospace;"><pre>''',
                    stacks.replace('<', '&lt;').replace('>', '&gt;'),
                    '</pre></div></body></html>']
async def on_command_error(self, ctx, error):
    if hasattr(ctx.command, 'on_error'):
        return

    error = getattr(error, 'original', error)

    if isinstance(error, (commands.CommandNotFound, utils.SilentError)):
        return
    elif isinstance(error, utils.CustomError):
        return await ctx.send(error.message)
    elif isinstance(error, commands.DisabledCommand):
        return await ctx.send(f'`{ctx.command}` has been disabled.')
    elif isinstance(error, commands.NoPrivateMessage):
        try:
            return await ctx.author.send(f'`{ctx.command}` cannot be used in Private Messages.')
        except:
            pass
    elif isinstance(error, commands.NotOwner):
        return await ctx.send('This command is reserved for bot owner.')
    elif isinstance(error, (commands.MissingRequiredArgument, commands.TooManyArguments)):
        return await ctx.send(f'Invalid number of arguments passed. Correct usage:\n'
                              f'`{utils.get_signature(ctx.command)}`')
    elif isinstance(error, commands.CommandOnCooldown):
        return await ctx.send(f'Command on cooldown for `{error.retry_after:.1f} seconds`.')
    elif isinstance(error, (commands.UserInputError, commands.ConversionError)):
        return await ctx.send(error)
    elif isinstance(error, (discord.Forbidden, menus.MenuError)):
        try:
            return await ctx.send(f'Bot is missing permissions to execute this command :(\n'
                                  f'Error: `{error}`')
        except discord.Forbidden:
            pass

    try:
        embed = discord.Embed(title=f'{type(error).__module__}: {type(error).__qualname__}',
                              description=str(error),
                              color=self.bot.color)
        if isinstance(ctx.channel, discord.TextChannel):
            embed.add_field(name='Context',
                            value=f'Guild: {ctx.guild} (`{ctx.guild.id}`)\n'
                                  f'Channel: {ctx.channel} (`{ctx.channel.id}`)\n'
                                  f'Member: {ctx.author} (`{ctx.author.id}`)',
                            inline=False)
        elif isinstance(ctx.channel, (discord.DMChannel, discord.GroupChannel)):
            embed.add_field(name='Context',
                            value=f'DM/Group Channel (`{ctx.channel.id}`)\n'
                                  f'User: {ctx.author} (`{ctx.author.id}`)',
                            inline=False)
        else:
            embed.add_field(name='Context', value='Unknown.', inline=False)
        embed.add_field(name='Invocation Text', value=f'```{ctx.message.content}```')
        await self.bot.errors_channel.send(embed=embed)
    except:
        print(f'Ignoring exception in command {ctx.command}:', file=sys.stderr)
        traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
def format_exc(exc_info=None):
    if exc_info is None:
        exc_info = sys.exc_info()
    out = StringIO()
    traceback.print_exception(*exc_info, **dict(file=out))
    return out.getvalue()
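# --- Usage sketch: with no argument this mirrors the stdlib
# traceback.format_exc() for the current exception; an exc_info tuple saved
# earlier can also be passed in and formatted later. Assumes the StringIO
# import the function above relies on.
try:
    raise RuntimeError("boom")
except RuntimeError:
    text = format_exc()
    assert "RuntimeError: boom" in text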
def xmlToCommon(path, destPath=None):
    """Converts an Afterbirth xml to the common format"""
    xml = ET.parse(path)
    root = xml.getroot()  # can be stage, rooms, etc

    rooms = root.findall("room")
    ret = []

    for roomNode in rooms:
        roomXmlProps = dict(roomNode.attrib)

        rtype = int(roomNode.get("type") or "1")
        del roomXmlProps["type"]
        rvariant = int(roomNode.get("variant") or "0")
        del roomXmlProps["variant"]
        rsubtype = int(roomNode.get("subtype") or "0")
        del roomXmlProps["subtype"]
        difficulty = int(roomNode.get("difficulty") or "0")
        del roomXmlProps["difficulty"]
        roomName = roomNode.get("name") or ""
        del roomXmlProps["name"]
        rweight = float(roomNode.get("weight") or "1")
        del roomXmlProps["weight"]
        shape = int(roomNode.get("shape") or "-1")
        del roomXmlProps["shape"]
        if shape == -1:
            shape = None

        width = int(roomNode.get("width") or "13") + 2
        height = int(roomNode.get("height") or "7") + 2
        dims = (width, height)
        for k, s in Room.Shapes.items():
            if s["Dims"] == dims:
                shape = k
                break

        shape = shape or 1
        del roomXmlProps["width"]
        del roomXmlProps["height"]

        lastTestTime = roomXmlProps.get("lastTestTime", None)
        if lastTestTime:
            try:
                lastTestTime = datetime.datetime.fromisoformat(lastTestTime)
                del roomXmlProps["lastTestTime"]
            except:
                print("Invalid test time string found", lastTestTime)
                traceback.print_exception(*sys.exc_info())
                lastTestTime = None

        doors = list(
            map(
                lambda door: [
                    int(door.get("x")) + 1,
                    int(door.get("y")) + 1,
                    door.get("exists", "0")[0] in "1tTyY",
                ],
                roomNode.findall("door"),
            ))

        room = Room(roomName, None, difficulty, rweight, rtype, rvariant,
                    rsubtype, shape, doors)
        room.xmlProps = roomXmlProps
        room.lastTestTime = lastTestTime
        ret.append(room)

        realWidth = room.info.dims[0]
        gridLen = room.info.gridLen()
        for spawn in roomNode.findall("spawn"):
            ex, ey, stackedEnts = (
                int(spawn.get("x")) + 1,
                int(spawn.get("y")) + 1,
                spawn.findall("entity"),
            )
            grindex = Room.Info.gridIndex(ex, ey, realWidth)
            if grindex >= gridLen:
                print(
                    f"Discarding the current entity stack due to invalid position! {room.getPrefix()}: {ex-1},{ey-1}"
                )
                continue

            ents = room.gridSpawns[grindex]
            for ent in stackedEnts:
                entityXmlProps = dict(ent.attrib)
                etype, evariant, esubtype, eweight = (
                    int(ent.get("type")),
                    int(ent.get("variant")),
                    int(ent.get("subtype")),
                    float(ent.get("weight")),
                )
                del entityXmlProps["type"]
                del entityXmlProps["variant"]
                del entityXmlProps["subtype"]
                del entityXmlProps["weight"]

                ents.append(
                    Entity(ex, ey, etype, evariant, esubtype, eweight,
                           entityXmlProps))

        room.gridSpawns = room.gridSpawns

    fileXmlProps = dict(root.attrib)

    return File(ret, fileXmlProps)
def bright_side_of_death():
    return tuple()[0]

try:
    lumberjack()
except IndexError:
    exc_type, exc_value, exc_traceback = sys.exc_info()
    print "*** print_tb:"
    traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
    print "*** print_exception:"
    traceback.print_exception(exc_type, exc_value, exc_traceback,
                              limit=2, file=sys.stdout)
    print "*** print_exc:"
    traceback.print_exc()
    print "*** format_exc, first and last line:"
    formatted_lines = traceback.format_exc().splitlines()
    print formatted_lines[0]
    print formatted_lines[-1]
    print "*** format_exception:"
    print repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
    print "*** extract_tb:"
    print repr(traceback.extract_tb(exc_traceback))
    print "*** format_tb:"
    print repr(traceback.format_tb(exc_traceback))
    print "*** tb_lineno:", exc_traceback.tb_lineno
def __exit__(self, *exc_info):
    if exc_info[0]:
        import traceback
        traceback.print_exception(*exc_info)
    self._sock.close()
def verify_reservation(self, reservation_id):
    """
    Fetch the reservation details below from the database and return them.
    -----------------------------------------------------------
    reservation number
    representative's name
    representative's email address
    representative's address
    plan name
    plan price
    plan maximum number of guests
    bed type
    plan services
    check-in date
    check-in time
    check-out date
    -----------------------------------------------------------
    @param reservation_id reservation number; only one such number exists (it is unique).
    @return data the reservation details, returned as a tuple.
    """
    query = """
        select
            reservation.reservation_id,
            customer.customer_name,
            customer.customer_email,
            customer.customer_address,
            plan.plan_name,
            plan.plan_price,
            plan.plan_persons,
            plan.plan_beds,
            plan.plan_service,
            reservation.checkin_date,
            reservation.checkin_time,
            reservation.checkout_date
        from
            reservation
            inner join customer on reservation.customer_id = customer.customer_id
            inner join plan on reservation.plan_id = plan.plan_id
        where
            reservation.reservation_id == :reservation_id
        """
    try:
        # FIX-ME: sort out the relative-path problem somehow! @knskw
        connect = sqlite3.connect('../res/lovely.db')
        cursor = connect.execute(query, {'reservation_id': reservation_id, })
        data = cursor.fetchone()
    except sqlite3.Error, e:
        """
        Traceback used here to track down errors.
        Needs to be removed later.
        """
        import traceback as tb
        import sys
        import cStringIO
        error = cStringIO.StringIO()
        tb.print_exception(type(e), e, sys.exc_info()[2], None, error)
        connect.rollback()
        raise e
def show_exception_and_exit(exc_type, exc_value, tb):
    import traceback
    traceback.print_exception(exc_type, exc_value, tb)
    input("Press key to exit.")
    sys.exit(-1)
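# --- Usage sketch (assumption: this is meant to be installed as the global
# exception hook so a console window stays open long enough to read the
# traceback before exiting):
import sys

sys.excepthook = show_exception_and_exit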
def execute(self, msg):
    def notImplemented(*argz, **args):
        """
        If an application tries to use a function which is not implemented
        by the currently emulated smartcard we raise an exception which
        should result in an appropriate response APDU being passed to the
        application.
        """
        raise SwError(SW["ERR_INSNOTSUPPORTED"])

    try:
        c = C_APDU(msg)
    except ValueError as e:
        logging.warning(str(e))
        return self.formatResult(False, 0, "",
                                 SW["ERR_INCORRECTPARAMETERS"], False)

    self.logAPDU(parsed=c, unparsed=msg)

    # Handle Class Byte
    # {{{
    class_byte = c.cla
    SM_STATUS = None
    logical_channel = 0
    command_chaining = 0
    header_authentication = 0

    # Ugly Hack for OpenSC-explorer
    if (class_byte == 0xb0):
        logging.debug("Open SC APDU")
        SM_STATUS = "No SM"

    # If Bit 8,7,6 == 0 then first industry values are used
    if (class_byte & 0xE0 == 0x00):
        # Bit 1 and 2 specify the logical channel
        logical_channel = class_byte & 0x03
        # Bit 3 and 4 specify secure messaging
        secure_messaging = class_byte >> 2
        secure_messaging &= 0x03
        if (secure_messaging == 0x00):
            SM_STATUS = "No SM"
        elif (secure_messaging == 0x01):
            SM_STATUS = "Proprietary SM"  # Not supported ?
        elif (secure_messaging == 0x02):
            SM_STATUS = "Standard SM"
        elif (secure_messaging == 0x03):
            SM_STATUS = "Standard SM"
            header_authentication = 1
    # If Bit 8,7 == 01 then further industry values are used
    elif (class_byte & 0x0C == 0x0C):
        # Bit 1 to 4 specify logical channel. 4 is added, value range is
        # from four to nineteen
        logical_channel = class_byte & 0x0f
        logical_channel += 4
        # Bit 6 indicates secure messaging
        secure_messaging = class_byte >> 6
        if (secure_messaging == 0x00):
            SM_STATUS = "No SM"
        elif (secure_messaging == 0x01):
            SM_STATUS = "Standard SM"
    else:
        # Bit 8 is set to 1, which is not specified by ISO 7816-4
        SM_STATUS = "Proprietary SM"

    # In both cases Bit 5 specifies command chaining
    command_chaining = class_byte >> 5
    command_chaining &= 0x01
    # }}}

    sm = False
    try:
        if SM_STATUS == "Standard SM" or SM_STATUS == "Proprietary SM":
            c = self.SAM.parse_SM_CAPDU(c, header_authentication)
            logging.info("Decrypted APDU:\n%s", str(c))
            sm = True
        sw, result = self.ins2handler.get(c.ins, notImplemented)(c.p1, c.p2,
                                                                 c.data)
        answer = self.formatResult(Iso7816OS.seekable(c.ins),
                                   c.effective_Le, result, sw, sm)
    except SwError as e:
        logging.info(e.message)
        import traceback
        traceback.print_exception(*sys.exc_info())
        sw = e.sw
        result = ""
        answer = self.formatResult(False, 0, result, sw, sm)

    return answer
def check_imports(no_check=None, extra=[], skip_func=None):
    """
    tests all bundled modules are importable
    just add "PEERDIR(library/python/import_test)" to your CMakeLists.txt and
    "from import_test import test_imports" to your python test source file.
    """
    exceptions = [
        '__yt_entry_point__',
        'boto.*',
        'celery.backends.database.*',  # needs optional dependency sqlalchemy
        'click._winconsole',
        'common.*',  # sandbox.common
        'dask.*', 'flaky.flaky_pytest_plugin', 'flask.ext.__init__',
        'future.backports.email.policy',  # email backport is incomplete in v0.16.0.
        'future.moves.dbm.ndbm',
        'gensim.models.lda_worker', 'gensim.models.lda_dispatcher',
        'gensim.models.lsi_dispatcher', 'gensim.models.lsi_worker',
        'gensim.similarities.index',
        'kernel.*',  # skynet/kernel
        'kombu.transport.sqlalchemy.*',
        'IPython.*', 'ipykernel.pylab.backend_inline',
        'lxml.cssselect', 'lxml.html.ElementSoup', 'lxml.html.diff',
        'lxml.html._diffcommand', 'lxml.html._html5builder',
        'lxml.html.html5parser', 'lxml.html.soupparser', 'lxml.html.usedoctest',
        'lxml.isoschematron.__init__', 'lxml.usedoctest',
        'py._code._assertionnew', 'py._code._assertionold',
        'rbtools.clients.tests',
        'requests.__init__', 'requests.packages.chardet.chardetect',
        'requests.packages.urllib3.contrib.ntlmpool',
        'setuptools.*', '_pytest.*',
        '__tests__.*',  # all test modules get imported when tests are run
        "yt.packages.*", "yt.wrapper.cypress_fuse",
        "tornado.platform.*", "tornado.curl_httpclient",
        "google.protobuf.internal.cpp_message", "google.protobuf.pyext.cpp_message",
        "numpy.distutils.*", "numpy.core.setup", "numpy.core.cversions",
        "numpy.lib.__init__", "numpy.doc.*", "numpy.testing.__init__",
        "numpy.ma.version", "numpy.matrixlib.__init__", "numpy.testing.*",
        "numpy.__init__", "numpy.core.code_generators.generate_numpy_api",
        "numpy.doc.basics", "numpy.doc.broadcasting",
        "scipy.misc.__init__",  # XXX: fixme
        "kazoo.handlers.*",
        "psutil._pssunos", "psutil._psbsd", "psutil._psosx", "psutil._pswindows",
        "psutil._psutil_common", "psutil._psutil_bsd", "psutil._psutil_osx",
        "psutil._psutil_sunos", "psutil._psutil_windows",
        "trollius.py33_winapi", "trollius.windows_events", "trollius.windows_utils",
        "trollius.test_utils", "trollius.test_support",
        "PIL.ImageCms", "PIL.ImageGL", "PIL.ImageGrab", "PIL.ImageQt", "PIL.ImageTk",
        "coloredlogs.cli",
        "dateutil.tzwin", "dateutil.tz.win",
        "django.contrib.*", "django.core.management.commands.*",
        "django.db.backends.*", "django.db.migrations.*",
        "django.template.backends.jinja2", "pytest_django.compat",
        "matplotlib.backends.*", "matplotlib.sphinxext.*", "matplotlib.testing.*",
        "mpl_toolkits.*", "mwlib.*",
        "networkx.algorithms.connectivity.__init__",
        "networkx.algorithms.flow.__init__", "networkx.testing.__init__",
        "nile.drivers.yql.*", "nile.style.jupyter_monitor",
        "pandas.io.*", "pandas.plotting.*", "pandas.tseries.*", "pandas.util.*",
        "parsel.unified",
        'partd.pandas', 'partd.zmq',
        "ptpython.contrib.asyncssh_repl", "ptpython.ipython",
        "prompt_toolkit.clipboard.pyperclip",
        "prompt_toolkit.eventloop.asyncio_posix",
        "prompt_toolkit.eventloop.asyncio_win32",
        "prompt_toolkit.eventloop.win32",
        "prompt_toolkit.terminal.conemu_output",
        "prompt_toolkit.win32_types",
        "prompt_toolkit.terminal.win32_input",
        "prompt_toolkit.terminal.win32_output",
        "prompt_toolkit.input.win32", "prompt_toolkit.input.win32_pipe",
        "prompt_toolkit.output.conemu", "prompt_toolkit.output.win32",
        "prompt_toolkit.output.windows10",
        "backports.__init__",
        "pygments.sphinxext",
        "raven.contrib.*", "raven.handlers.logbook", "raven.utils.testutils",
        "semantic_version.django_fields",
        "sklearn.utils.*", "statsmodels.*",
        "skimage.io._plugins.*", "skimage.measure.mc_meta.visual_test",
        "subvertpy.ra_svn",  # can only be imported after subvertpy.ra
        "superfcgi.*",
        "tenacity.tornadoweb",
        "thrift.TSCons", "thrift.TTornado", "thrift.transport.*",
        "twisted.*",
        "uwsgidecorators",
        "vcr.stubs.*",
        "watchdog.*", "werkzeug.*", "ws4py.*",
        'wtforms.ext.django.*',
        "services.lfm.*",
        "sqlalchemy.testing",
        "gevent.win32util",
        "library.python.ctypes.__init__",
        "celery.events.cursesmon",
        "billiard.popen_forkserver", "billiard.forkserver",
        "celery.contrib.sphinx",
        "flask_wtf.i18n",
        "playhouse.apsw_ext",
        "botocore.vendored.requests.packages.urllib3.contrib.pyopenssl",
        "cssutils._fetchgae",
        "catboost.widget.*",
        "kubiki.geobase",
        "hypothesis.extra.*",
        "jsonrpc.backend.*",
    ] + list(no_check or [])

    if sys.version_info.major == 3:
        exceptions += [
            "antigravity", "lzma", "dbm.ndbm", "tkinter", "msvcrt", "msilib.*",
            "_msi", "winreg", "asyncio.test_utils", "dbm.gnu",
            "multiprocessing.popen_spawn_win32", "encodings.mbcs", "turtle",
            "ctypes.wintypes", "asyncio.windows_utils",
            "distutils.msvc9compiler", "distutils._msvccompiler",
            "urllib3.packages.ordered_dict", "encodings.oem", "crypt",
            "asyncio.windows_events", "encodings.cp65001", "curses.*",
            "distutils.command.bdist_msi", "yaml.cyaml",
            "vh.ext.nirvana.nirvana_api_bridge", "aiohttp.pytest_plugin",
        ]

    patterns = [re.escape(s).replace(r'\*', r'.*') for s in exceptions]
    rx = re.compile('^({})$'.format('|'.join(patterns)))

    failed = []
    for module in sys.extra_modules:
        if rx.search(module):
            continue
        if skip_func and skip_func(module):
            continue
        if module == '__main__' and 'if __name__ ==' not in importer.get_source(module):
            print('SKIP:', module, '''without "if __name__ == '__main__'" check''')
            continue
        try:
            print('TRY:', module)
            if module == '__main__':
                importer.load_module('__main__', '__main__py')
            elif module.endswith('.__init__'):
                __import__(module[:-len('.__init__')])
            else:
                __import__(module)
            print('OK:', module)
        except Exception as e:
            print('FAIL:', module, e, file=sys.stderr)
            traceback.print_exception(*sys.exc_info())
            failed.append('{}: {}'.format(module, e))

    if failed:
        raise ImportError('modules not imported:\n' + '\n'.join(failed))
def excepthook(*args):
    import traceback
    traceback.print_exception(*args)
def xmlToCommon(path, destPath=None):
    """Converts an Afterbirth xml to the common format"""
    xml = ET.parse(path)
    root = xml.getroot()  # can be stage, rooms, etc

    rooms = root.findall('room')
    ret = []

    for roomNode in rooms:
        roomXmlProps = dict(roomNode.attrib)

        rtype = int(roomNode.get('type') or '1')
        del roomXmlProps['type']
        rvariant = int(roomNode.get('variant') or '0')
        del roomXmlProps['variant']
        rsubtype = int(roomNode.get('subtype') or '0')
        del roomXmlProps['subtype']
        difficulty = int(roomNode.get('difficulty') or '0')
        del roomXmlProps['difficulty']
        roomName = roomNode.get('name') or ''
        del roomXmlProps['name']
        rweight = float(roomNode.get('weight') or '1')
        del roomXmlProps['weight']
        shape = int(roomNode.get('shape') or '-1')
        del roomXmlProps['shape']
        if shape == -1:
            shape = None

        width = int(roomNode.get('width') or '13') + 2
        height = int(roomNode.get('height') or '7') + 2
        dims = (width, height)
        for k, s in Room.Shapes.items():
            if s['Dims'] == dims:
                shape = k
                break

        shape = shape or 1
        del roomXmlProps['width']
        del roomXmlProps['height']

        lastTestTime = roomXmlProps.get('lastTestTime', None)
        if lastTestTime:
            try:
                lastTestTime = datetime.datetime.fromisoformat(lastTestTime)
                del roomXmlProps['lastTestTime']
            except:
                print('Invalid test time string found', lastTestTime)
                traceback.print_exception(*sys.exc_info())
                lastTestTime = None

        doors = list(
            map(
                lambda door: [
                    int(door.get('x')) + 1,
                    int(door.get('y')) + 1,
                    door.get('exists', "0")[0] in "1tTyY"
                ],
                roomNode.findall('door')))

        room = Room(roomName, None, difficulty, rweight, rtype, rvariant,
                    rsubtype, shape, doors)
        room.xmlProps = roomXmlProps
        room.lastTestTime = lastTestTime
        ret.append(room)

        realWidth = room.info.dims[0]
        gridLen = room.info.gridLen()
        for spawn in roomNode.findall('spawn'):
            ex, ey, stackedEnts = int(spawn.get('x')) + 1, int(
                spawn.get('y')) + 1, spawn.findall('entity')
            grindex = Room.Info.gridIndex(ex, ey, realWidth)
            if grindex >= gridLen:
                print(
                    f'Discarding the current entity stack due to invalid position! {room.getPrefix()}: {ex-1},{ey-1}'
                )
                continue

            ents = room.gridSpawns[grindex]
            for ent in stackedEnts:
                entityXmlProps = dict(ent.attrib)
                etype, evariant, esubtype, eweight = int(ent.get('type')), int(
                    ent.get('variant')), int(ent.get('subtype')), float(
                        ent.get('weight'))
                del entityXmlProps['type']
                del entityXmlProps['variant']
                del entityXmlProps['subtype']
                del entityXmlProps['weight']

                ents.append(
                    Entity(ex, ey, etype, evariant, esubtype, eweight,
                           entityXmlProps))

        room.gridSpawns = room.gridSpawns

    fileXmlProps = dict(root.attrib)

    return File(ret, fileXmlProps)
def print_exec(stream):
    # Write the currently handled exception to the given stream
    # (positional args: etype, value, tb, limit=None, file=stream).
    ei = sys.exc_info()
    traceback.print_exception(ei[0], ei[1], ei[2], None, stream)
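# Hedged usage sketch (an assumption, not from the original source): print_exec writes the active
# exception to an arbitrary stream, e.g. an io.StringIO buffer, which is handy for capturing
# tracebacks for logs or tests.
import io

buf = io.StringIO()
try:
    1 / 0
except ZeroDivisionError:
    print_exec(buf)
assert 'ZeroDivisionError' in buf.getvalue()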
def main():
    experiment_name = "blocks_world_experiments"
    experiment = "./results/" + experiment_name
    print("EXPERIMENT NAME: ", experiment_name)

    # Create the experiment folder
    if not os.path.exists(experiment):
        os.makedirs(experiment)

    # Define log settings
    log_path = experiment + '/train_baseline.log'
    multiprocess_logging_manager = MultiprocessingLoggerManager(
        file_path=log_path, logging_level=logging.INFO)
    master_logger = multiprocess_logging_manager.get_logger("Master")
    master_logger.log("----------------------------------------------------------------")
    master_logger.log(" STARTING NEW EXPERIMENT ")
    master_logger.log("----------------------------------------------------------------")

    with open("data/blocks/config.json") as f:
        config = json.load(f)
    with open("data/shared/contextual_bandit_constants.json") as f:
        constants = json.load(f)
    print(json.dumps(config, indent=2))
    setup_validator = BlocksSetupValidator()
    setup_validator.validate(config, constants)

    # log core experiment details
    master_logger.log("CONFIG DETAILS")
    for k, v in sorted(config.items()):
        master_logger.log(" %s --- %r" % (k, v))
    master_logger.log("CONSTANTS DETAILS")
    for k, v in sorted(constants.items()):
        master_logger.log(" %s --- %r" % (k, v))

    master_logger.log("START SCRIPT CONTENTS")
    with open(__file__) as f:
        for line in f.readlines():
            master_logger.log(">>> " + line.strip())
    master_logger.log("END SCRIPT CONTENTS")

    action_space = ActionSpace(config)
    meta_data_util = MetaDataUtil()

    # Create vocabulary
    vocab = dict()
    vocab_list = open("./Assets/vocab_both").readlines()
    for i, tk in enumerate(vocab_list):
        token = tk.strip().lower()
        vocab[token] = i
    vocab["$UNK$"] = len(vocab_list)
    config["vocab_size"] = len(vocab_list) + 1

    # Number of processes
    num_processes = 6

    try:
        # Create the model
        master_logger.log("CREATING MODEL")
        model_type = IncrementalModelEmnlp
        shared_model = model_type(config, constants)

        # make the shared model use share memory
        shared_model.share_memory()

        master_logger.log("MODEL CREATED")
        print("Created Model...")

        # Read the dataset
        all_train_data = DatasetParser.parse("trainset.json", config)
        num_train = int(0.8 * len(all_train_data))
        train_split = all_train_data[:num_train]
        tune_split = list(all_train_data[num_train:])
        shuffle(train_split)  # shuffle the split to break ties

        master_logger.log("Created train dataset of size %d " % len(train_split))
        master_logger.log("Created tuning/validation dataset of size %d " % len(tune_split))

        processes = []

        # Split the train data between processes
        train_split_process_chunks = []
        chunk_size = int(len(train_split) / num_processes)
        pad = 0
        for i in range(0, num_processes):
            chunk = train_split[pad: pad + chunk_size]
            pad += chunk_size
            train_split_process_chunks.append(chunk)

        simulator_file = "./simulators/blocks/retro_linux_build.x86_64"

        # Start the training thread(s)
        ports = find_k_ports(num_processes)
        for i, port in enumerate(ports):
            train_chunk = train_split_process_chunks[i]
            tmp_config = {k: v for k, v in config.items()}
            tmp_config["port"] = port
            if i == num_processes - 1:
                # Master client which does testing. Don't want each client to do testing.
                tmp_tune_split = tune_split
            else:
                tmp_tune_split = []
            print("Client " + str(i) + " getting a validation set of size ", len(tmp_tune_split))
            server = BlocksServer(tmp_config, action_space, vocab=vocab)
            client_logger = multiprocess_logging_manager.get_logger(i)
            p = mp.Process(target=TmpAsynchronousContextualBandit.do_train,
                           args=(simulator_file, shared_model, tmp_config, action_space,
                                 meta_data_util, constants, train_chunk, tmp_tune_split,
                                 experiment, experiment_name, i, server,
                                 client_logger, model_type))
            p.daemon = False
            p.start()
            processes.append(p)

        for p in processes:
            p.join()

    except Exception:
        exc_info = sys.exc_info()
        traceback.print_exception(*exc_info)
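# Hedged sketch (assumption): find_k_ports is not defined in this snippet. One plausible
# implementation reserves k free TCP ports by binding throwaway sockets on port 0. This is only
# an illustration of the idea, not the project's actual helper.
import socket

def find_k_free_ports(k):  # hypothetical stand-in for find_k_ports
    sockets, ports = [], []
    for _ in range(k):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))              # ask the OS for any free port
        ports.append(s.getsockname()[1])
        sockets.append(s)            # hold the socket so the same port is not handed out twice
    for s in sockets:
        s.close()                    # note: a small race remains once the sockets are released
    return ports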
def main(args):
    outcome = {'result': 'failure'}
    try:
        stunnel_exists = is_stunnel_installed()
        if not stunnel_exists:
            logging.warning('stunnel does not seem to exist. Did you install it?')
            logging.warning('Aborting.')
            return {'result': 'notexists'}
        stunnel_running = is_stunnel_running(args)
        if stunnel_running:
            logging.warning('stunnel seems to already be running. '
                            'Please check and if this is expected (e.g., stunnel '
                            'running for another application) '
                            'please consider providing a different --stunnel_conf_path.')
            logging.warning('In case you want to delete the current client configuration '
                            'and create a new one, please do a --cleanup first.')
            logging.warning('Aborting.')
            return {'result': 'running'}

        if not os.path.isdir(args.stunnel_conf_path):
            os.mkdir(args.stunnel_conf_path)

        # 1. create client
        cl = DsfmClient(args.apikey, args.instance_crn, args.stunnel_conf_path,
                        args.dsfm_setup_url)

        # 2. try to login to bluemix and receive an iam token
        logging.info('Trying to retrieve a login token based on the provided apikey.')
        cl.login()
        if not cl.logged_in:
            return outcome

        # 3. create local ssl key
        ssl_cnf = SSLClientConf(args.email, args.ssl_subject, args.domain,
                                cli_dir=args.stunnel_conf_path)
        logging.info('Creating local ssl keys.')
        ret = cl.create_ssl_key(ssl_cnf)
        if not ret:
            logging.warning('Failed to create ssl keys.')
            return outcome

        logging.info('Asking for authentication and authorization against {}.'.format(
            cl.dsfm_setup_url))
        logging.info('This might take a while.')

        # 4. authenticate with service and get the key certified
        ret = cl.dsfm_authenticate_setup(ssl_cnf)
        if not ret:
            logging.warning('Failed to authenticate with dsfm service.')
            return outcome
        logging.info('Authorization success. '
                     'Setting up end-to-end ssl encryption using stunnel.')

        # 5. setup and launch stunnel, verify that it works
        ret = cl.configure_stunnel(ssl_cnf)
        if not ret:
            logging.warning('Failed to configure stunnel.')
            return outcome

        # 6. test that connection works, do a simple memcache get stats
        logging.info('Setup complete, checking memcache protocol with a non-mutable stats command.')
        ret, out, err = call('echo stats | nc localhost 11211 -4')
        logging.debug('memcache server response stdout={0} stderr={1}'.format(out, err))
        if ret != 0:
            logging.warning('Failed to run memcache command.')
            return outcome
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                  limit=2, file=sys.stdout)
        return outcome

    return {'result': 'success', 'client-conf': cl.__dict__}
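# Hedged sketch (assumption): `call` above returns (returncode, stdout, stderr) for a shell
# command but is not defined in this snippet. A plausible subprocess-based equivalent is shown
# below; the name call_hypothetical marks it as an illustration, not the project's helper.
import subprocess

def call_hypothetical(cmd):
    # Run the command through the shell and capture both output streams.
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return proc.returncode, out.decode(errors='replace'), err.decode(errors='replace')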