def server(self, server_id):
    '''Get the server associated with an id; create one if none exists.'''
    try:
        if server_id in self.server_dict:
            server_info = self.server_dict[server_id]
            return server_info['proxy']
        else:
            url_fmt = "tcp://127.0.0.1:%i"
            rep_url = url_fmt % get_unused_ip_port()
            pub_url = url_fmt % get_unused_ip_port()
            out_url = url_fmt % get_unused_ip_port()
            DEBUG("%s \n\t RPC on %s \n\t pub on %s \n\t out on %s"
                  % (server_id, rep_url, pub_url, out_url))
            server = ZMQServer.spawn_server(self.classpath, rep_url,
                                            pub_url, out_url)
            proxy = ZMQ_RPC(rep_url)
            self.server_dict[server_id] = {
                'server': server,
                'proxy': proxy,
                'rep_url': rep_url,
                'pub_url': pub_url,
                'out_url': out_url
            }
            return proxy
    except Exception as err:
        print('Error getting server', server_id)
        print(str(err.__class__.__name__), ":", err)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        traceback.print_tb(exc_traceback, limit=30)
        return None
def __call__(self, *args, **kwargs):
    argtypes = self._argtypes_
    if self.callable is not None:
        if len(args) == len(argtypes):
            pass
        elif self._flags_ & _rawffi.FUNCFLAG_CDECL:
            if len(args) < len(argtypes):
                plural = "s" if len(argtypes) > 1 else ""
                raise TypeError(
                    "This function takes at least %d argument%s (%s given)"
                    % (len(argtypes), plural, len(args)))
            else:
                # For cdecl functions, we allow more actual arguments
                # than the length of the argtypes tuple.
                args = args[:len(self._argtypes_)]
        else:
            plural = "s" if len(self._argtypes_) > 1 else ""
            raise TypeError(
                "This function takes %d argument%s (%s given)"
                % (len(self._argtypes_), plural, len(args)))

        try:
            newargs = self._convert_args_for_callback(argtypes, args)
        except (UnicodeError, TypeError, ValueError) as e:
            raise ArgumentError(str(e))
        try:
            res = self.callable(*newargs)
        except Exception:
            exc_info = sys.exc_info()
            traceback.print_tb(exc_info[2], file=sys.stderr)
            print("%s: %s" % (exc_info[0].__name__, exc_info[1]),
                  file=sys.stderr)
            return 0
        if self._restype_ is not None:
            return res
        return
def _run(self, lines=None):
    line = ""
    while lines is None or len(lines) > 0:
        try:
            for function in self.pre_hook:
                function()
            if lines is not None and len(lines) > 0:
                line = lines.pop(0)
            else:
                line = input("%s> " % self.prompt)
        except EOFError:
            print()  # line break (there probably was none after the prompt)
            break
        except KeyboardInterrupt:
            print()  # only clear the current command
            continue

        try:
            self.parse_and_execute(line)
            for function in self.post_hook:
                function()
        except Exception:
            exctype, value = sys.exc_info()[:2]
            if exctype == SystemExit:
                raise
            print("%s: %s" % (exctype, value))
            if self.env.get("print_backtrace", "") != "":
                traceback.print_tb(sys.exc_info()[2])
def __catch_errors(self, *args, ord_by=None):
    try:
        assert self.__desc_asc in ('desc', 'asc'), \
            'Parameter desc_asc should be desc or asc but %s given' % self.__desc_asc
        assert isinstance(self.__search_text, str), \
            'Parameter search_text should be string but %s given' % type(self.__search_text)
        assert isinstance(args[0], dict), \
            'Args should be dictionaries with class of model but %s inspected' % type(args[0])
        assert isinstance(self.__pagination, bool), \
            'Parameter pagination should be boolean but %s given' % type(self.__pagination)
        assert isinstance(self.__page, int) and \
            isinstance(self.__items_per_page, int) and self.__page >= 0, \
            'Parameter page is not integer, or page < 1.'
        assert (getattr(args[0]['class'], str(ord_by), False) is not False) or \
            isinstance(ord_by, (int, list, tuple)), \
            'Bad value for parameter "order_by". ' \
            'You requested an attribute which is not in class %s or gave a bad kwarg type. ' \
            'Can be string, list or tuple; %s given' % \
            (args[0]['class'], type(ord_by))
        assert isinstance(self.__return_objects, bool), \
            'Parameter "return_objects" must be boolean but %s given' % type(self.__return_objects)
    except AssertionError as e:
        _, _, tb = sys.exc_info()
        traceback.print_tb(tb)
        tb_info = traceback.extract_tb(tb)
        filename_, line_, func_, text_ = tb_info[-1]
        message = 'An error occurred on File "{file}" line {line}\n {assert_message}'.format(
            line=line_, assert_message=e.args, file=filename_)
        raise errors.BadDataProvided({'message': message})
def transition_update(self, new_status):
    # print("Transition Update of app", self.id, "to", new_status)
    try:
        transform = self.getTransform()
        if self.tasks_id.startswith("00"):
            # Master job
            if new_status == "new":
                # something went wrong with submission
                for sj in self._getParent().subjobs:
                    sj.application.transition_update(new_status)
            if transform:
                stripProxy(transform).setMasterJobStatus(
                    self._getParent(), new_status)
        else:
            if transform:
                stripProxy(transform).setAppStatus(self, new_status)
    except Exception as x:
        import traceback
        import sys
        logger.error(
            "Exception in call to transform[%s].setAppStatus(%i, %s)",
            self.tasks_id, self.id, new_status)
        logger.error(getName(x) + " : " + str(x))
        tb = sys.exc_info()[2]
        if tb:
            traceback.print_tb(tb)
        else:
            logger.error("No Traceback available")
        logger.error("%s", x)
def show_command_error(self, e):
    '''display a command error'''
    if isinstance(e, CommandError):
        (etype, evalue, etraceback) = e.exception_info
        inner_exception = e.inner_exception
        message = e.message
        force_traceback = False
    else:
        (etype, evalue, etraceback) = sys.exc_info()
        inner_exception = e
        message = "uncaught exception"
        force_traceback = True

    if isinstance(inner_exception, LdbError):
        (ldb_ecode, ldb_emsg) = inner_exception.args
        self.errf.write("ERROR(ldb): {0!s} - {1!s}\n".format(message, ldb_emsg))
    elif isinstance(inner_exception, AssertionError):
        self.errf.write("ERROR(assert): {0!s}\n".format(message))
        force_traceback = True
    elif isinstance(inner_exception, RuntimeError):
        self.errf.write("ERROR(runtime): {0!s} - {1!s}\n".format(message, evalue))
    elif type(inner_exception) is Exception:
        self.errf.write("ERROR(exception): {0!s} - {1!s}\n".format(message, evalue))
        force_traceback = True
    elif inner_exception is None:
        self.errf.write("ERROR: {0!s}\n".format(message))
    else:
        self.errf.write("ERROR({0!s}): {1!s} - {2!s}\n".format(
            str(etype), message, evalue))
        force_traceback = True

    if force_traceback or samba.get_debug_level() >= 3:
        traceback.print_tb(etraceback)
def readCamera(self):
    cap = cv2.VideoCapture(0)
    frame = None
    success = False
    if not cap.isOpened():
        print("Failed to open camera!")
        return
    while True:
        try:
            success, frame = cap.read()
            if not success:
                print("cap.read() failed")
                yield trollius.From(trollius.sleep(1.0 / self.fps))
                continue
            self.broadcast(frame)
            if self.hasFrame:
                self.hasFrame(frame)
        except KeyboardInterrupt:
            self.loop.stop()
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=2, file=sys.stdout)
        yield trollius.From(trollius.sleep(1.0 / self.fps))
    cap.release()
def ructf_error(status=110, message=None, error=None, exception=None,
                request=None, reply=None, body=None):
    if message:
        sys.stdout.write(message)
        sys.stdout.write("\n")

    sys.stderr.write("{}\n".format(status))
    if error:
        sys.stderr.write(error)
        sys.stderr.write("\n")
    if request or reply:
        sys.stderr.write(make_err_message(message, request, reply))
        sys.stderr.write("\n")
    if body:
        sys.stderr.write("BODY:\n")
        sys.stderr.write(body)
        sys.stderr.write("\n")
    if exception:
        sys.stderr.write("Exception: {}\n".format(exception))
        traceback.print_tb(exception.__traceback__, file=sys.stderr)

    sys.stderr.flush()
    sys.exit(status)
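# A minimal standalone sketch (not part of the snippet above): in Python 3 a
# caught exception carries its own traceback on `__traceback__`, which is what
# ructf_error() relies on instead of calling sys.exc_info().
import sys
import traceback

def demo_traceback_attribute():
    try:
        1 / 0
    except ZeroDivisionError as exc:
        # equivalent to traceback.print_tb(sys.exc_info()[2], file=sys.stderr)
        traceback.print_tb(exc.__traceback__, file=sys.stderr)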
def check(text, fdo):
    import traceback
    started = False
    in_part = False
    in_chapter = False
    in_section = False
    in_subsection = False
    line_number = 0
    fdo.write(u"""%s\n<node TEXT="Readings">\n""" % MINDMAP_PREAMBLE)
    for line in text.split('\n'):
        line = line.strip()
        try:
            started, in_part, in_chapter, in_section, in_subsection = parse(
                line, started, in_part, in_chapter, in_section, in_subsection)
        except KeyError:
            # print_tb returns None, so print the traceback and the context
            # separately instead of printing its return value
            traceback.print_tb(sys.exc_info()[2])
            print('\n', line_number, line)
            sys.exit()
        line_number += 1
    if in_subsection:
        fdo.write(u"""</node>""")  # close the last subsection
    if in_section:
        fdo.write(u"""</node>""")  # close the last section
    if in_chapter:
        fdo.write(u"""</node>""")  # close the last chapter
    if in_part:
        fdo.write(u"""</node>""")  # close the last part
    fdo.write(u"""</node>\n</node>\n</node>\n""")  # close the last entry
    fdo.write(u"""</node>\n</map>\n""")  # close the document
def excepthook(exception_type, exception_value, tb):
    """Default exception handler"""
    import traceback
    if debug:
        traceback.print_tb(tb)
    showerror('Fatal Error',
              '%s: %s' % (exception_type.__name__, exception_value))
    sys.exit(1)
def excepthook(exception_type, exception_value, traceback_obj):
    """Global function to catch unhandled exceptions."""
    separator = '-' * 80
    notice = \
        """An unhandled exception occurred. Please report this error using\n"""\
        """GitHub Issues <https://github.com/gimu/hitagi-reader/issues>."""\
        """\n\nException saved in error.log"""\
        """\n\nError information:\n"""
    time_string = time.strftime("%Y-%m-%d, %H:%M:%S")

    tbinfofile = io.StringIO()
    traceback.print_tb(traceback_obj, None, tbinfofile)
    tbinfofile.seek(0)
    tbinfo = tbinfofile.read()

    # Create error message
    error_msg = '%s: \n%s' % (str(exception_type), str(exception_value))
    sections = [separator, time_string, separator, error_msg, separator, tbinfo]

    # Combine and write to file
    msg = '\n'.join(sections)
    try:
        f = open('error.log', 'w')
        f.write(msg)
        f.close()
    except IOError:
        pass

    # GUI message
    error_box = QMessageBox()
    error_box.setWindowTitle('Error occurred')
    error_box.setText(str(notice) + str(msg))
    error_box.exec_()
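# Hedged usage sketch: handlers like the two excepthook() functions above only
# take effect once installed as the process-wide hook. The handler signature
# (type, value, traceback) is exactly what Python passes to sys.excepthook.
import sys

sys.excepthook = excepthook  # uncaught exceptions are now routed to the handler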
def run(self, edit):
    try:
        configuration = PluginUtils.get_project_configuration(
            self.view.window(), self.view.file_name())
        base_path = configuration.get("base-path")
        test_path = configuration.get("test-path")

        if not base_path or not test_path:
            sublime.message_dialog(
                "This project has not been configured for Jasmine boilerplate "
                "generation.\n\nRight click on the base and test folders to "
                "configure.")
            return

        output_files = AngularJasmineBoilerplateCommand.run_command(
            self, base_path, test_path)

        if output_files[0].find(OUTPUT_ALREADY_EXISTS) != -1:
            # TODO: Update when we support multiple files
            if sublime.ok_cancel_dialog(
                    "Boilerplate file " + output_files[0], "Overwrite"):
                output_files = AngularJasmineBoilerplateCommand.run_command(
                    self, base_path, test_path, True)
            else:
                return

        for file in output_files:
            if file:
                self.view.window().open_file(test_path + "/" + file)
    except:
        print("Unexpected error({0}): {1}".format(sys.exc_info()[0],
                                                  sys.exc_info()[1]))
        traceback.print_tb(sys.exc_info()[2])
        sublime.message_dialog(
            "Unable to generate Jasmine boilerplate.\n\nEnsure that the "
            "AngularJS service or controller is annotated correctly.")
def OnSaveButtonClick(self, event):
    if self.btn_apply.Enabled:
        if not self.main_frame.YesNoMessageDialog(
                _(u'Para que as modificações sejam salvas é preciso aplicá-las. Continua?'),
                'E-Dictor'):
            return
        else:
            self.OnApplyButtonClick(None)
    wildcard = _(u"Todos arquivos (*.*)|*.*")
    wildcard = _(u"Arquivos de preferências (*.cfg)|*.cfg;*.CFG|") + wildcard
    ext = '.cfg'
    dir = os.getcwd()
    file_name = '*' + ext
    dlg = wx.FileDialog(
        self, message=_(u"Salvar preferências em arquivo"),
        defaultDir=dir, defaultFile=file_name, wildcard=wildcard,
        style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
    )
    ret = False
    if dlg.ShowModal() == wx.ID_OK:
        try:
            path = dlg.GetPath()
            if not path.lower().endswith(ext):
                path += ext
            __builtin__.log(u'Log: [op:SavePref] [params:CFG, ' + path + ']\n')
            cfg_file = codecs.open(path.encode('utf-8'), 'w', 'utf-8')
            __builtin__.cfg.set(u'File Settings', u'Recent files', "")
            __builtin__.cfg.write(cfg_file)
            ret = True
        except:
            msg = str(sys.exc_info()[0]) + ':' + str(sys.exc_info()[1]) + \
                '\n' + str(sys.exc_info()[2])
            __builtin__.log(str(sys.exc_info()[0]) + ':' +
                            str(sys.exc_info()[1]) + '\n')
            traceback.print_tb(sys.exc_info()[2], None,
                               open(__builtin__.log_file.name, "a"))
            wx.MessageBox(_(u"Não foi possível salvar o arquivo."), "E-Dictor")
    dlg.Destroy()
    return ret
def main(solver=config.user_config.solver):
    logging.basicConfig(level=logging.CRITICAL,
                        format='%(levelname)s: %(message)s')
    from minpower import solve
    dirNm = splitFilename(__file__)[0]
    if dirNm == '':
        dirNm = '.'
    excludeL = []
    for fileNm in os.listdir(dirNm):
        if fileNm in excludeL:
            continue
        testDir = joindir(dirNm, fileNm)
        if not os.path.isdir(testDir):
            continue
        print('testing: ', fileNm)
        wipeTestSlate(testDir)
        fResults = open(joindir(testDir, 'results.txt'), 'w+')
        fError = open(joindir(testDir, 'error.txt'), 'w+')
        sys.stdout = fResults  # switch output to results file

        if hasPyscript(testDir):
            sys.stdout = sys.__stdout__  # switch back to standard output
            os.system('python {s}'.format(s=hasPyscript(testDir)[0]))
        else:
            try:
                user_config.scenarios = 2
                solve.solve_problem(testDir)
                sys.stdout = sys.__stdout__  # switch back to standard output
                fError.close()
                os.remove(joindir(testDir, 'error.txt'))
            except:
                # write the error to file
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_tb(exc_traceback, file=fError)
                traceback.print_exception(exc_type, exc_value, exc_traceback,
                                          file=fError)
                sys.stdout = sys.__stdout__  # switch back to standard output
                print('\t had error')  # note that this dir produced an error
            else:
                sys.stdout = sys.__stdout__  # switch back to standard output
def changed_listConditions(self, sel):
    o = self.builder.get_object
    p = self.builder_panes.get_object
    m, i = sel.get_selected()
    t = p('txtCondHint').get_buffer()
    self.plugin_cond_changed = True
    if i is not None:
        item = m[i][0]
        item_plugin = all_plugins[item]
        t.set_text(item_plugin.desc_string_gui())
        self.plugin_cond = item_plugin
        self.plugin_cond.set_forward_button(o('btnForward'))
        if item_plugin.stock:
            self.pane_CondDef = item_plugin.get_pane()
            self.enable_next = True
        else:
            try:
                self.pane_CondDef = item_plugin.get_pane()
                self.enable_next = True
            except Exception:
                if PLUGIN_TEMP_FOLDER:
                    _x_info = sys.exc_info()
                    sys.stderr.write(
                        "%s: %s\n" % (_x_info[0].__name__, _x_info[1]))
                    traceback.print_tb(_x_info[2])
                self.pane_CondDef = None
                self.enable_next = False
        self.pane_CondDef_changed = True
        self.refresh_buttons()
    else:
        t.set_text('')
        self.plugin_cond = None
        self.pane_CondDef = None
        self.enable_next = False
        self.refresh_buttons()
def do_import(self, args):
    self.ClearPages()
    result = []
    fPath = args
    if os.path.isdir(fPath):
        # os.path.walk was removed in Python 3; os.walk covers the same ground
        for dirpath, dirnames, filenames in os.walk(fPath):
            self.SearchQTI1(result, dirpath, filenames)
    else:
        head, tail = os.path.split(fPath)
        self.SearchQTI1(result, head, [tail])
    if len(result) == 0:
        print("No QTI v1 files found in %s" % fPath)
    else:
        for fName, fPath, doc in result:
            print("Processing: %s" % fPath)
            try:
                results = doc.MigrateV2(self.cp)
                for doc, metadata, log in results:
                    if isinstance(doc.root, qti2.QTIAssessmentItem):
                        print("AssessmentItem: %s" % doc.root.identifier)
                    else:
                        print("<%s>" % doc.root.xmlname)
                    for line in log:
                        print("\t%s" % line)
            except:
                exc_type, value, tb = sys.exc_info()
                print("Unexpected error: %s (%s)" % (exc_type, value))
                traceback.print_tb(tb)
def collect_statistics():
    global PerformanceCounters
    global CounterLock

    completed = 0
    try:
        RootLock.acquire()
        for mesg in RootProcess.recvmesgs():
            src, tstamp, tup = mesg
            event_type, count = tup

            CounterLock.acquire()
            if PerformanceCounters.get(src) is not None:
                if PerformanceCounters[src].get(event_type) is None:
                    PerformanceCounters[src][event_type] = count
                else:
                    PerformanceCounters[src][event_type] += count

                if event_type == 'totaltime':
                    completed += 1
                    if TotalUnits is not None and completed == TotalUnits:
                        raise KeyboardInterrupt()
            else:
                Log.debug("Unknown proc: " + str(src))
            CounterLock.release()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        err_info = sys.exc_info()
        Log.debug("Caught unexpected global exception: %r", e)
        traceback.print_tb(err_info[2])
def render_single_annotation(annotation_path):
    svg_file = annotation_path.replace('.yaml', '.svg')
    png_file = annotation_path.replace('.yaml', '.png')
    log = dict()
    with open(annotation_path, 'r') as f:
        # safe_load avoids the deprecated default-Loader yaml.load call
        log = yaml.safe_load(f)
    try:
        gv = render_pipeline([log])
        dot = subprocess.Popen(['dot', '-Tsvg'],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        dot.stdin.write(gv)
        dot.stdin.close()
        svg = dot.stdout.read()
        with open(svg_file, 'wb') as f:  # dot emits bytes on Python 3
            f.write(svg)
        dot = subprocess.Popen(['dot', '-Tpng'],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        dot.stdin.write(gv)
        dot.stdin.close()
        png = dot.stdout.read()
        with open(png_file, 'wb') as f:
            f.write(png)
    except:
        print(sys.exc_info())
        import traceback
        traceback.print_tb(sys.exc_info()[2])
def showTraceback(self):
    import traceback
    exc_type, value = sys.exc_info()[:2]
    print("________________________\n")
    print("Exception", exc_type, ":", value)
    traceback.print_tb(sys.exc_info()[2])
    print("________________________\n")
def parseText(self, article_text):
    try:
        #link = simpleWiki.getMediaWikiFirstLink(article_text)
        link = simpleWiki.getNthLink(article_text, 2)
        writeLink(self.title, link, self.linksFile)
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, limit=1, file=sys.stderr)
    '''
    article_text = removeBalanced(article_text, '{{', '}}')
    #article_text = removeBalanced(article_text, '(', ')')
    # article_text = re.sub(r'\{\{[\s\S]*?\}\}', '', article_text)
    article_text = re.sub(r'\[\[([Ii]mage|[Ff]ile)[\s\S]*?\]\]\n', '', article_text)  # remove image links
    # article_text = re.sub(r'\([\s\S]*?\)', '', article_text)  # remove parentheses
    article_text = re.sub(r'<\!--[\s\S]*?-->', '', article_text)  # remove html remarks
    article_text = re.sub(r'<!--[\s\S]*?-->', '', article_text)  # remove html remarks
    article_text = re.sub(r'\:\'\'.*?\'\'', '', article_text)  # remove wiki italics
    article_text = re.sub(r'<ref[\s\S]*?</ref>', '', article_text)  # remove refs
    article_text = re.sub(r'\(from \[\[[\s\S]*?\)', '', article_text)
    article_text = re.sub(r'\[\[wikt\:[\s\S]*?\]\]', '', article_text)  # wiktionary links
    if verbose:
        print article_text
    firstlink = getFirstLink(article_text)
    writeLink(self.title, firstlink, self.linksFile)
    '''
    return True
def sendResponse(self, response):
    responses = []
    if hasattr(response, '__iter__'):
        for r in response:
            if r is None or r.Response is None or r.Response == '':
                continue
            responses.append(r)
    elif response is not None and response.Response is not None and \
            response.Response != '':
        responses.append(response)

    for response in responses:
        try:
            response = self.postProcess(response)
            if response.Type == ResponseType.Say:
                self.bot.msg(response.Target.encode('utf-8'),
                             response.Response.encode('utf-8'))
            elif response.Type == ResponseType.Do:
                self.bot.describe(response.Target.encode('utf-8'),
                                  response.Response.encode('utf-8'))
            elif response.Type == ResponseType.Notice:
                self.bot.notice(response.Target.encode('utf-8'),
                                response.Response.encode('utf-8'))
            elif response.Type == ResponseType.Raw:
                self.bot.sendLine(response.Response.encode('utf-8'))
        except Exception:
            # ^ dirty, but I don't want any commands to kill the bot,
            # especially if I'm working on it live
            print("Python Execution Error sending responses '{0}': {1}".format(
                responses, str(sys.exc_info())))
            traceback.print_tb(sys.exc_info()[2])
def handle_indexing(app, mysql):
    with app.app_context():
        while True:
            try:
                g.conn = mysql.connect()
                g.cursor = g.conn.cursor()
                g.conn.begin()
                # run the indexing service every 300 seconds
                time.sleep(300)
                sqlpie.Indexer().index_documents()
                g.conn.commit()
            except Exception as e:
                if sqlpie.Util.is_debug():
                    traceback.print_tb(sys.exc_info()[2])
                try:
                    g.conn.rollback()
                except:
                    pass
            finally:
                # if the MySQL Server is not running, this will fail.
                try:
                    g.cursor.close()
                    g.conn.close()
                except:
                    pass
def printException():
    print('=' * 50)
    print('Exception:', sys.exc_info()[1])
    print("getcwd()=%s;curdir=%s" % (os.getcwd(), os.curdir))
    print('Traceback:')
    traceback.print_tb(sys.exc_info()[2])
    print('=' * 50)
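# The same pattern reduced to its core, as a runnable standalone sketch:
# sys.exc_info() is only meaningful inside an `except` block, and
# traceback.print_tb() prints just the stack portion (no type or message).
import sys
import traceback

try:
    raise ValueError("boom")
except ValueError:
    traceback.print_tb(sys.exc_info()[2])   # stack only
    print('Exception:', sys.exc_info()[1])  # message printed separately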
def handle_error(self, wrapper, exception, traceback_):
    print("exception %s in wrapper %s" % (exception, wrapper),
          file=sys.stderr)
    self.num_errors += 1
    if 0:  # verbose?
        import traceback
        traceback.print_tb(traceback_)
    return True
def printExceptionDetailsToStdErr():
    """
    No idea if all of this is needed; in fact I know it is not. But for now,
    why not. Taken straight from the Python manual on exceptions.
    """
    import sys, traceback
    exc_type, exc_value, exc_traceback = sys.exc_info()
    print2err("*** print_tb:")
    traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
    print2err("*** print_exception:")
    traceback.print_exception(exc_type, exc_value, exc_traceback,
                              limit=2, file=sys.stdout)
    print2err("*** print_exc:")
    traceback.print_exc()
    print2err("*** format_exc, first and last line:")
    formatted_lines = traceback.format_exc().splitlines()
    print2err(str(formatted_lines[0]))
    print2err(formatted_lines[-1])
    print2err("*** format_exception:")
    print2err(repr(traceback.format_exception(exc_type, exc_value,
                                              exc_traceback)))
    print2err("*** extract_tb:")
    print2err(repr(traceback.extract_tb(exc_traceback)))
    print2err("*** format_tb:")
    print2err(repr(traceback.format_tb(exc_traceback)))
    print2err("*** tb_lineno:" + str(exc_traceback.tb_lineno))
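# Hedged note: since Python 3.10 the (type, value, traceback) triple toured in
# the snippet above can be replaced by passing the exception object alone; a
# small sketch of the newer calling convention:
import traceback

try:
    {}['missing']
except KeyError as exc:
    traceback.print_exception(exc)  # Python >= 3.10; older versions need the triple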
def excepthook(excType, excValue, tracebackobj):
    """
    stolen from ERIC IDE!
    Global function to catch unhandled exceptions.

    @param excType exception type
    @param excValue exception value
    @param tracebackobj traceback object
    """
    separator = '-' * 80
    logFile = "error.log"
    notice = \
        """An unhandled exception occurred. Please report the problem\n"""\
        """via email to <%s>.\n"""\
        """A log has been written to "%s".\n\nError information:\n""" % \
        ("*****@*****.**", os.getcwd())
    versionInfo = "OptiSim Version:\t" + getattr(ui.mainwindow, "__version__")
    timeString = time.strftime("%Y-%m-%d, %H:%M:%S")

    tbinfofile = io.StringIO()
    traceback.print_tb(tracebackobj, None, tbinfofile)
    tbinfofile.seek(0)
    tbinfo = tbinfofile.read()
    errmsg = '%s: \n%s' % (str(excType), str(excValue))
    sections = [separator, timeString, versionInfo, separator, errmsg,
                separator, tbinfo]
    msg = '\n'.join(sections)
    try:
        f = open(logFile, "w")
        f.write(msg)
        f.close()
    except IOError:
        pass
    errorbox = QMessageBox()
    errorbox.setText(str(notice) + str(msg))
    errorbox.exec_()
def report_all(hs, qcx2_res, SV=True, **kwargs):
    allres = init_allres(hs, qcx2_res, SV=SV, **kwargs)
    #if not 'kwargs' in vars():
        #kwargs = dict(rankres=True, stem=False, matrix=False, pdf=False,
                      #hist=False, oxford=False, ttbttf=False, problems=False,
                      #gtmatches=False)
    try:
        dump_all(allres, **kwargs)
    except Exception as ex:
        import sys
        import traceback
        print('\n\n-----------------')
        print('report_all(hs, qcx2_res, SV=%r, **kwargs=%r' % (SV, kwargs))
        print('Caught Error in rr2.dump_all')
        print(repr(ex))
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print("*** print_tb:")
        traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
        print("*** print_exception:")
        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                  limit=2, file=sys.stdout)
        print('Caught Error in rr2.dump_all')
        print('-----------------\n')
        raise
        return allres, ex  # unreachable: the raise above re-raises
    return allres
def main(self, options=["--disablewallet", "--testnet",
                        "--disableexchangerates"]):
    parser = argparse.ArgumentParser(
        description="OpenBazaar Test Framework",
        usage="python3 test_framework.py [options]"
    )
    parser.add_argument('-b', '--binary', required=True,
                        help="the openbazaar-go binary")
    parser.add_argument('-d', '--bitcoind', help="the bitcoind binary")
    parser.add_argument('-t', '--tempdir', action='store_true',
                        help="temp directory to store the data folders",
                        default="/tmp/")
    args = parser.parse_args(sys.argv[1:])
    self.binary = args.binary
    self.temp_dir = args.tempdir
    self.bitcoind = args.bitcoind
    self.options = options

    failure = False
    try:
        self.setup_network()
        self.run_test()
    except TestFailure as e:
        print(repr(e))
        failure = True
    except Exception as e:
        print("Unexpected exception caught during testing: " + repr(e))
        traceback.print_tb(sys.exc_info()[2])
        failure = True

    if not failure:
        self.teardown(True)
    else:
        self.teardown(True)

    if failure:
        sys.exit(1)
def fail_info(self, resultproxy):
    try:
        self.result(resultproxy)
    except:
        t, v, tb = sys.exc_info()
        traceback.print_tb(tb)
        return (t, v)
def run(self):
    try:
        self._cmdline.this_module_name = self.__class__.__module__
        if multiprocessing.get_start_method() == 'spawn':
            common.set_global_options(self._cmdline)
            common.sysinit()

        signal.signal(signal.SIGTERM, self._sighandler)
        self.id = self._channel(self._dp_name, self.__class__)
        common.set_current_process(self.id)
        pattern.initialize(self.id)
        if hasattr(self._cmdline, 'clock') and \
                self._cmdline.clock == 'Lamport':
            self._logical_clock = 0
        self._log = logging.getLogger(str(self))
        self._start_comm_thread()
        self._lock = threading.Lock()
        self._lock.acquire()
        self._wait_for_go()

        if not hasattr(self, '_da_run_internal'):
            self._log.error("Process does not have entry point!")
            sys.exit(1)

        result = self._da_run_internal()
        self.report_times()

    except Exception as e:
        sys.stderr.write("Unexpected error at process %s:%r" % (str(self), e))
        traceback.print_tb(e.__traceback__)
    except KeyboardInterrupt as e:
        self._log.debug("Received KeyboardInterrupt, exiting")
        pass
def run_games(first_agent, second_agent, first_agent_turn, num_games,
              update_param=0, quiet=False,
              first_file_name="./data/first_save",
              second_file_name="./data/second_save",
              first_weights_file_name="./data/first_weights",
              second_weights_file_name="./data/second_weights",
              first_result_file_name="./data/first_results",
              second_result_file_name="./data/second_results",
              first_m_result_file_name="./data/first_m_results",
              second_m_result_file_name="./data/second_m_results",
              play_against_self=False):
    """
    first_agent: instance of Agent which reflects first agent
    second_agent: instance of Agent which reflects second agent
    first_agent_turn: True if turn is of the first agent
    num_games: total number of games to run without training
    num_training: total number of training games to run
    """
    try:
        write_str = "num_moves,win,reward,max_q_value\n"

        if first_agent.is_learning_agent:
            first_f = open_file(first_file_name, header=write_str)
            first_w_deq = deque()
            first_f_res = open_file(first_result_file_name)
            first_writer_res = csv.writer(first_f_res, lineterminator='\n')
            first_f_m_res = open_file(first_m_result_file_name)
            first_writer_m_res = csv.writer(first_f_m_res, lineterminator='\n')
            first_f_str = ""
            first_writer_w_list = []

        if second_agent.is_learning_agent:
            second_f = open_file(second_file_name, header=write_str)
            second_w_deq = deque()
            second_f_res = open_file(second_result_file_name)
            second_writer_res = csv.writer(second_f_res, lineterminator='\n')
            second_f_m_res = open_file(second_m_result_file_name)
            second_writer_m_res = csv.writer(second_f_m_res, lineterminator='\n')
            second_f_str = ""
            second_writer_w_list = []

        # learn weights
        # save weights
        # test using weights
        # change agent

        print('starting game', 0)
        for i in range(num_games):
            if (i + 1) % NOTIFY_FREQ == 0:
                print('Starting game', (i + 1))

            rules = ClassicGameRules()

            if first_agent.has_been_learning_agent:
                first_agent.start_learning()
            if second_agent.has_been_learning_agent:
                second_agent.start_learning()

            game = rules.new_game(first_agent, second_agent,
                                  first_agent_turn, quiet=quiet)
            num_moves, game_state = game.run()

            if first_agent.is_learning_agent:
                reward = first_agent.episode_rewards
                win = 1 if game_state.is_first_agent_win() else 0
                init_state = GameState(the_player_turn=first_agent_turn)
                max_q_value = first_agent.compute_value_from_q_values(init_state)
                w_str = str(num_moves) + "," + str(win) + "," + str(reward) + \
                    "," + str(max_q_value) + "\n"
                first_f_str += w_str

                if (i + 1) % WEIGHTS_SAVE_FREQ == 0:
                    if len(first_w_deq) != 0 and \
                            len(first_w_deq) % NUM_WEIGHTS_REM == 0:
                        first_w_deq.popleft()
                    first_w_deq.append(np.array(first_agent.weights))

                if (i + 1) % WRITE_FREQ == 0:
                    first_f.write(first_f_str)
                    first_f_str = ""

            if second_agent.is_learning_agent:
                reward = second_agent.episode_rewards
                win = 1 if game_state.is_second_agent_win() else 0
                init_state = GameState(the_player_turn=first_agent_turn)
                max_q_value = second_agent.compute_value_from_q_values(init_state)
                w_str = str(num_moves) + "," + str(win) + "," + str(reward) + \
                    "," + str(max_q_value) + "\n"
                second_f_str += w_str

                if (i + 1) % WEIGHTS_SAVE_FREQ == 0:
                    if len(second_w_deq) != 0 and \
                            len(second_w_deq) % NUM_WEIGHTS_REM == 0:
                        second_w_deq.popleft()
                    second_w_deq.append(np.array(second_agent.weights))

                if (i + 1) % WRITE_FREQ == 0:
                    second_f.write(second_f_str)
                    second_f_str = ""

            if (i + 1) % TEST_FREQ == 0:
                if first_agent.is_learning_agent:
                    first_agent.stop_learning()
                if second_agent.is_learning_agent:
                    second_agent.stop_learning()

                result_f = []
                result_s = []

                print('starting', TEST_GAMES, 'tests')
                result_f, result_s = \
                    multiprocess(rules, first_agent, second_agent,
                                 first_agent_turn, quiet=True)

                if first_agent.has_been_learning_agent:
                    first_writer_res.writerow(result_f[0])
                    first_writer_m_res.writerow(result_f[1])
                if second_agent.has_been_learning_agent:
                    second_writer_res.writerow(result_s[0])
                    second_writer_m_res.writerow(result_s[1])

            if first_agent.has_been_learning_agent and play_against_self:
                if (i + 1) % CHANGE_AGENT_FREQ == 0:
                    weights = first_w_deq[-1]
                    second_agent = QLearningAgent(weights=weights,
                                                  is_learning_agent=False)

            if first_agent.has_been_learning_agent and update_param:
                first_agent.update_parameters(update_param, (i + 1))
            if second_agent.has_been_learning_agent and update_param:
                second_agent.update_parameters(update_param, (i + 1))

    except Exception as e:
        print(sys.exc_info()[0])
        traceback.print_tb(e.__traceback__)

    finally:
        if first_agent.has_been_learning_agent:
            first_f.close()
            first_f_res.close()
            first_f_m_res.close()

            first_f_w = open_file(first_weights_file_name)
            first_writer_w = csv.writer(first_f_w, lineterminator='\n')
            first_writer_w.writerows(first_w_deq)
            first_f_w.close()

        if second_agent.has_been_learning_agent:
            second_f.close()
            second_f_res.close()
            second_f_m_res.close()

            second_f_w = open_file(second_weights_file_name)
            second_writer_w = csv.writer(second_f_w, lineterminator='\n')
            second_writer_w.writerows(second_w_deq)
            second_f_w.close()
def _print_debug():
    if BENTOMAKER_DEBUG:
        tb = sys.exc_info()[2]
        traceback.print_tb(tb)
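# Hedged usage sketch: _print_debug() above only does something useful when
# called from inside an `except` handler, since sys.exc_info() is empty
# otherwise. risky_operation() below is a hypothetical callable.
try:
    risky_operation()
except Exception:
    _print_debug()  # prints the traceback only if BENTOMAKER_DEBUG is set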
def main():
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option(
        "--nocleanup", dest="nocleanup", default=False, action="store_true",
        help="Leave lathaands and test.* datadir on exit or error")
    parser.add_option(
        "--srcdir", dest="srcdir", default="../../src",
        help="Source directory containing lathaand/lathaan-cli (default: %default%)")
    parser.add_option(
        "--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
        help="Root directory for datadirs")
    (options, args) = parser.parse_args()

    os.environ['PATH'] = options.srcdir + ":" + os.environ['PATH']

    check_json_precision()

    success = False
    nodes = []
    try:
        print("Initializing test directory " + options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)

        nodes = start_nodes(2, options.tmpdir)
        connect_nodes(nodes[1], 0)
        sync_blocks(nodes)

        run_test(nodes)

        success = True

    except AssertionError as e:
        print("Assertion failed: " + str(e))
    except Exception as e:
        print("Unexpected exception caught during testing: " + str(e))
        traceback.print_tb(sys.exc_info()[2])

    if not options.nocleanup:
        print("Cleaning up")
        stop_nodes(nodes)
        wait_lathaands()
        shutil.rmtree(options.tmpdir)

    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
def main():
    try:
        func1()
    except Exception as e:
        exc_type, exc_value, exc_traceback_obj = sys.exc_info()
        traceback.print_tb(exc_traceback_obj)
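# Many snippets in this file pass limit= to print_tb; a quick hedged sketch of
# what it does: a positive limit keeps only that many outermost frames, and a
# negative limit keeps the innermost frames (Python >= 3.5).
import sys
import traceback

def a(): b()
def b(): raise RuntimeError("deep")

try:
    a()
except RuntimeError:
    traceback.print_tb(sys.exc_info()[2], limit=1)  # only the outermost frame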
def main(args):
    """
    Main function of receiver_test.py. This sets up the various configuration
    options and starts the UDP command handler.
    """

    # Parse the command line
    config = parseConfig(args)
    ip, port = config['args']
    port = int(port, 10)

    # Setup logging
    logger = logging.getLogger(__name__)
    logFormat = logging.Formatter('%(asctime)s [%(levelname)-8s] %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    logFormat.converter = time.gmtime
    logHandler = logging.StreamHandler(sys.stdout)
    logHandler.setFormatter(logFormat)
    logger.addHandler(logHandler)
    logger.setLevel(logging.DEBUG)

    # Get current MJD and MPM
    mjd, mpm = getTime()

    # Report on who we are
    shortRevision = __revision__.split()[1]
    shortDate = ' '.join(__date__.split()[1:4])

    logger.info('Starting receiver_test.py with PID %i', os.getpid())
    logger.info('Receiver - FRB Announcement Capture - Test Client')
    logger.info('Version: %s', __version__)
    logger.info('Revision: %s', shortRevision)
    logger.info('Last Changed: %s', shortDate)
    logger.info('Current MJD: %i', mjd)
    logger.info('Current MPM: %i', mpm)
    logger.info('All dates and times are in UTC except where noted')

    # Setup the configuration
    config = {
        'MESSAGEINHOST': ip,
        'MESSAGEINPORT': port,
    }

    # Setup the communications channels
    frbComms = Communicate(config)
    frbComms.start()

    # Setup handler for SIGTERM so that we aren't left in a funny state
    def HandleSignalExit(signum, frame, logger=logger, CommInstance=frbComms):
        logger.info('Exiting on signal %i', signum)

        # Shutdown receiver_test and close the communications channels
        tStop = time.time()
        logger.info('Shutting down receiver_test.py, please wait...')
        logger.info('Shutdown completed in %.3f seconds', time.time() - tStop)
        CommInstance.stop()

        # Exit
        logger.info('Finished')
        logging.shutdown()
        sys.exit(0)

    # Hook in the signal handler - SIGTERM
    signal.signal(signal.SIGTERM, HandleSignalExit)

    # Loop and process the MCS data packets as they come in - exit if ctrl-c
    # is received
    logger.info('Receiving line open.')
    while True:
        try:
            frbComms.receiveNotification()
        except KeyboardInterrupt:
            logger.info('Exiting on ctrl-c')
            break
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            logger.error("receiver_test.py failed with: %s at line %i",
                         str(e), exc_traceback.tb_lineno)

            ## Grab the full traceback and save it to a string via StringIO
            ## (io.StringIO replaces the Python 2 StringIO module)
            fileObject = io.StringIO()
            traceback.print_tb(exc_traceback, file=fileObject)
            tbString = fileObject.getvalue()
            fileObject.close()

            ## Print the traceback to the logger as a series of DEBUG messages
            for line in tbString.split('\n'):
                logger.debug("%s", line)
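# The capture-to-logger idiom used in main() above, distilled: print_tb writes
# to any file-like object, so an io.StringIO buffer turns the traceback into a
# string that can be fed through a logger line by line.
import io
import logging
import sys
import traceback

def log_traceback(logger):
    # must be called from inside an except block
    buf = io.StringIO()
    traceback.print_tb(sys.exc_info()[2], file=buf)
    for line in buf.getvalue().split('\n'):
        logger.debug("%s", line)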
def packetProcessor(self):
    """
    Using two deques (one inbound, one outbound), deal with bursty UDP
    traffic by having a separate thread for processing commands.
    """
    exitCondition = False
    while True:
        while len(self.queueIn) > 0:
            try:
                data, dest = self.queueIn.popleft()
                if data == 'STOP_THREAD':
                    exitCondition = True
                    break

                # Below is a four-element tuple of:
                #  * destination
                #  * command name (i.e. "TRF")
                #  * command arguments (i.e. RA, Dec, etc.)
                #  * reference number
                destination, command, packed_data, reference = \
                    self.processNotification(data)

                #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                #!!! Here's where to trigger decision-making for whether to
                #!!! observe or not.
                #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                #
                # The event information, packed_data, can also be packed into a
                # struct (see processNotification). It can be unpacked into a
                # tuple with e.g.
                # unpacked_data = struct.unpack('>64sffd', packed_data).
                #
                # However, this client program passes information to here as a
                # list. The list is packed as eventName, eventRA, eventDec,
                # eventTimestamp, eventAdd(=event duration=eventDur).
                #
                # Event duration (eventDur) passed from the VLA will be
                # NEGATIVE OR ZERO to signify the end of the VLA's observation.
                # It will be POSITIVE with units of seconds to signify the
                # "timeout" of an observation---that is, the receiving
                # telescope should aim to observe for eventDur seconds unless
                # it receives an event with eventDur<=0. This "end event"
                # message will have the same event name+number as the "start
                # event" message for that VLA pointing.

                # If we've received a VLA event packet...
                if packed_data is not None:
                    # Unpack the event information
                    eventName, eventRA, eventDec, eventTime, eventDur = packed_data

                    # Is this a "VLA observation" event or some other event?
                    # (note, only obs events are supported on VLA-end right now)
                    if "VLA_FRB_SESSION" in eventName:
                        # Start observing or end observing?
                        if eventDur > 0:
                            # DECIDE WHETHER TO START OBSERVATION.
                            self.logger.info(
                                "Found START notice for session %s" % eventName)
                            self.logger.debug(
                                "Session info: %s"
                                % ' '.join(str(val) for val in packed_data))
                            self.logger.info(
                                "I will now observe RA/Dec %f %f for %f seconds."
                                % (eventRA, eventDec, eventDur))
                        else:
                            # DECIDE WHETHER TO STOP OBSERVATION.
                            self.logger.info(
                                "Found END notice for session %s" % eventName)
                            self.logger.debug(
                                "Session info: %s"
                                % ' '.join(str(val) for val in packed_data))
                            self.logger.info(
                                "I will now CEASE observation of (%f, %f)."
                                % (eventRA, eventDec))
                #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                self.logger.error(
                    "packetProcessor failed with: %s at line %i",
                    str(e), exc_traceback.tb_lineno)

                ## Grab the full traceback and save it to a string via StringIO
                fileObject = io.StringIO()
                traceback.print_tb(exc_traceback, file=fileObject)
                tbString = fileObject.getvalue()
                fileObject.close()

                ## Print the traceback to the logger as a series of DEBUG messages
                for line in tbString.split('\n'):
                    self.logger.debug("%s", line)

        if exitCondition:
            break
        time.sleep(0.010)
def _date_input_check(date, version):
    """Function to check date entered by user.

    Parameters
    ----------
    date : {string or list},
        Input data, where ``date`` is a single date string, two dates
        representing a range, or several dates that represent individual
        days of interest.
    version: {int},
        An integer between 1 and 2 representing the version of GDELT used.

    Returns
    -------
    self : None
        Returns self.

    :param date:
    :param version:
    """
    if isinstance(date, str):
        if date != "":
            if parse(date) > datetime.datetime.now():  # pragma: no cover
                raise ValueError('Your date is greater than the current date. '
                                 'Please enter a relevant date.')
            elif parse(date) < parse('Feb 18 2015') and int(
                    version) != 1:  # pragma: no cover
                raise ValueError(
                    "GDELT 2.0 only supports 'Feb 18 2015 - Present' "
                    "queries currently. Try another date.")

        if version == 1 and parse(date).date() == \
                datetime.datetime.now().date():
            raise ValueError(
                ("You entered today's date for a GDELT 1.0 query. GDELT 1.0's "
                 "most recent data is always the"
                 " trailing day (i.e. {0}). Please retry your query.").format(
                    np.datetime64(datetime.datetime.now().date()) -
                    np.timedelta64(1, 'D')))

        # GDELT releases yesterday's data at 6AM today
        if datetime.datetime.now().hour <= 6 and parse(date).date() == (
                datetime.datetime.now().date() - datetime.timedelta(days=1)
        ) and version == 1:  # pragma: no cover
            raise BaseException(
                'GDELT 1.0 posts the latest daily update by 6AM EST. '
                'The next update will appear in {0}'.format(
                    str(datetime.datetime.combine(
                        datetime.datetime.now(),
                        datetime.datetime.min.time()) +
                        datetime.timedelta(hours=6, minutes=00, seconds=00) -
                        datetime.datetime.now())))

    elif isinstance(date, list) or isinstance(date, np.ndarray):
        newdate = []
        for l in date:
            if len(l) == 4:
                test = (str(datetime.datetime.strptime(l, '%Y')))
                newdate.append(test)
            elif len(l) == 6:  # pragma: no cover
                test = str(datetime.datetime.strptime(l, '%Y%m'))
                newdate.append(test)
            else:
                try:
                    test = str(parse(str(l)))
                except:  # pragma: no cover
                    test = l
                newdate.append(test)
            if parse(test) < parse('Feb 18 2015') and version == 2:
                raise ValueError(
                    "GDELT 2.0 only supports 'Feb 18 2015 - "
                    "Present' queries currently. Try another date.")
        date = newdate

        if len(date) == 1:
            try:
                if parse("".join(date)) > datetime.datetime.now():
                    raise ValueError(
                        'Your date is greater than the current date. '
                        'Please enter a relevant date.')
                elif version == 2 and parse("".join(date)) < \
                        parse('Feb 18 2015') and int(
                        version) != 1:  # pragma: no cover
                    raise ValueError(
                        "GDELT 2.0 only supports 'Feb 18 2015 - "
                        "Present' queries currently. Try another date.")
            except:
                # exc_type, exc_value, exc_traceback = sys.exc_info()
                # traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
                # traceback.print_exception(exc_type, exc_value, exc_traceback,
                #                           limit=2, file=sys.stdout)
                raise ValueError(
                    "One or more of your input date strings does not parse to "
                    "a date format. Check input.")

            # if datetime.datetime.now().hour <= 6 and parse(
            #         "".join(date)).date() == (
            #         datetime.datetime.now().date() - datetime.timedelta(
            #             days=1)):
            #     raise BaseException('GDELT 1.0 posts the latest daily update '
            #                         'by 6AM EST. The next update will appear '
            #                         'in {0}'.format(str(
            #         datetime.datetime.combine(
            #             datetime.datetime.now(), datetime.datetime.min.time()
            #         ) + datetime.timedelta(hours=6, minutes=00, seconds=00) -
            #         datetime.datetime.now())))
            #
            # return "".join(date)

        elif len(date) == 2 and (isinstance(date, list) or
                                 isinstance(date, np.ndarray)):
            try:
                list(map(parse, date))
            except Exception as exc:  # pragma: no cover
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
                traceback.print_exception(exc_type, exc_value, exc_traceback,
                                          limit=2, file=sys.stdout)
                raise ValueError(
                    "One or more of your input date strings does not parse to "
                    "a date format. Check input.")
            if not bool(parse(date[0]) < parse(date[1])):
                raise ValueError(
                    'Start date greater than or equal to end date. '
                    'Check your entered date query.')
            elif not np.all(np.logical_not(
                    np.array(list(map(parse, date))) >
                    datetime.datetime.now())):
                raise ValueError(
                    "One of your dates is greater than the current date. "
                    "Check your entered date query.")
            # elif np.any(
            #         np.logical_not(np.array(list(map(parse, date)
            #                                      )) > parse("Feb 18 2015"))) == \
            #         True and int(version) != 1:
            #     raise ValueError(
            #         "GDELT 2.0 only supports 'Feb 18 2015 - Present' queries "
            #         "currently. Try another date."
            #     )
            # elif version == 1:
            #     # if not np.all(
            #     #         np.logical_not(np.array(list(map(lambda x: parse(x),
            #     #                                          date)),
            #     #                                 dtype='datetime64[D]') >=
            #     #                        np.datetime64(
            #     #                            datetime.datetime.now().date()))):
            #     #     raise ValueError(
            #     #         "You have today's date in your query for GDELT 1.0. "
            #     #         " GDELT 1.0's most recent data"
            #     #         "is always the trailing day (i.e. {0}). Please retry "
            #     #         "your query.".format(
            #     #             np.datetime64(datetime.datetime.now().date()) -
            #     #             np.timedelta64(1, 'D'))
            #     #     )
            #     if datetime.datetime.now().hour <= 6 and (
            #             datetime.datetime.now().date() - datetime.timedelta(
            #                 days=1)) in list(
            #             map(lambda x: parse(x).date(), date)):
            #         if datetime.datetime.now().hour < 6:
            #             raise BaseException('GDELT 1.0 posts the latest daily '
            #                                 'update by 6AM EST. The next '
            #                                 'update will appear in {0}'.format(
            #                 str(datetime.datetime.combine(
            #                     datetime.datetime.now(),
            #                     datetime.datetime.min.time()
            #                 ) + datetime.timedelta(
            #                     hours=6, minutes=00, seconds=00) -
            #                 datetime.datetime.now())))

        elif len(date) > 2:
            try:
                map(parse, date)
            except Exception as exc:  # pragma: no cover
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
                traceback.print_exception(exc_type, exc_value, exc_traceback,
                                          limit=2, file=sys.stdout)
                raise ValueError(
                    "One or more of your input date strings does not parse "
                    "to a date format. Check input.")

            if not np.all(np.logical_not(
                    np.array(list(map(parse, date))) >
                    datetime.datetime.now())):
                raise ValueError(
                    "One or more of your input date strings is greater than"
                    " today's date. Check input.")
            elif np.any(np.logical_not(
                    np.array(list(map(parse, date))) >
                    parse("Feb 18 2015"))) == True and \
                    int(version) != 1:  # pragma: no cover
                raise ValueError(
                    "GDELT 2.0 only supports 'Feb 18 2015 - Present' queries "
                    "currently. Try another date.")
            elif version == 1:
                if not np.all(np.logical_not(
                        np.array(list(map(lambda x: parse(x), date)),
                                 dtype='datetime64[D]') >=
                        np.datetime64(
                            datetime.datetime.now().date()))):  # pragma: no cover
                    raise ValueError(
                        "You have today's date in your query for GDELT 1.0. "
                        "GDELT 1.0's most recent data is always the trailing "
                        "day (i.e. {0}). Please retry your query.".format(
                            np.datetime64(datetime.datetime.now().date()) -
                            np.timedelta64(1, 'D')))
                if datetime.datetime.now().hour <= 6 and (
                        datetime.datetime.now().date() -
                        datetime.timedelta(days=1)) in list(
                        map(lambda x: parse(x).date(), date)):  # pragma: no cover
                    if datetime.datetime.now().hour < 6:
                        raise BaseException(
                            'GDELT 1.0 posts the latest daily update by 6AM '
                            'EST. The next update will appear in {0}'.format(
                                str(datetime.datetime.combine(
                                    datetime.datetime.now(),
                                    datetime.datetime.min.time()) +
                                    datetime.timedelta(hours=6, minutes=00,
                                                       seconds=00) -
                                    datetime.datetime.now())))
def main(self):
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False,
                      action="store_true",
                      help="Leave zurcoinds and test.* datadir on exit or error")
    parser.add_option("--noshutdown", dest="noshutdown", default=False,
                      action="store_true",
                      help="Don't stop zurcoinds after the test execution")
    parser.add_option("--srcdir", dest="srcdir",
                      default=os.path.normpath(
                          os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
                      help="Source directory containing zurcoind/zurcoin-cli (default: %default)")
    parser.add_option("--tmpdir", dest="tmpdir",
                      default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    parser.add_option("--tracerpc", dest="trace_rpc", default=False,
                      action="store_true",
                      help="Print out all RPC calls as they are made")
    parser.add_option("--portseed", dest="port_seed", default=os.getpid(),
                      type='int',
                      help="The seed to use for assigning port numbers (default: current process id)")
    parser.add_option("--coveragedir", dest="coveragedir",
                      help="Write tested RPC commands into this directory")
    self.add_options(parser)
    (self.options, self.args) = parser.parse_args()

    # backup dir variable for removal at cleanup
    self.options.root, self.options.tmpdir = \
        self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)

    if self.options.trace_rpc:
        logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)

    if self.options.coveragedir:
        enable_coverage(self.options.coveragedir)

    PortSeed.n = self.options.port_seed

    os.environ['PATH'] = self.options.srcdir + ":" + \
        self.options.srcdir + "/qt:" + os.environ['PATH']

    check_json_precision()

    success = False
    try:
        os.makedirs(self.options.tmpdir, exist_ok=False)
        self.setup_chain()
        self.setup_network()
        self.run_test()
        success = True
    except JSONRPCException as e:
        print("JSONRPC error: " + e.error['message'])
        traceback.print_tb(sys.exc_info()[2])
    except AssertionError as e:
        print("Assertion failed: " + str(e))
        traceback.print_tb(sys.exc_info()[2])
    except KeyError as e:
        print("key not found: " + str(e))
        traceback.print_tb(sys.exc_info()[2])
    except Exception as e:
        print("Unexpected exception caught during testing: " + repr(e))
        traceback.print_tb(sys.exc_info()[2])
    except KeyboardInterrupt as e:
        print("Exiting after " + repr(e))

    if not self.options.noshutdown:
        print("Stopping nodes")
        stop_nodes(self.nodes)
    else:
        print("Note: zurcoinds were not stopped and may still be running")

    if not self.options.nocleanup and not self.options.noshutdown and success:
        print("Cleaning up")
        shutil.rmtree(self.options.tmpdir)
        if not os.listdir(self.options.root):
            os.rmdir(self.options.root)
    else:
        print("Not cleaning up dir %s" % self.options.tmpdir)

    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
def __init__(self, tools_dir, keycfg):
    try:
        if keycfg == "":
            keycfg = os.path.join(tools_dir, "key_config.xml")
            print("INFO: --keys option not given.")
            print("INFO: Continuing with default configuration file: " + keycfg)
        config_file = os.path.join(tools_dir, "ssd_bin.cfg")

        # temporary directories
        tdir = tempfile.mkdtemp()
        self.TEMP_F_DIR = tdir
        # temporary files
        self.IV_BIN_FNAME = os.path.join(tdir, "iv.bin")
        self.IEK_BIN_FNAME = os.path.join(tdir, "iek.bin")
        self.SSD_MD_TO_SIGN_FNAME = os.path.join(tdir, "to_sign.xml")

        with open(config_file) as conf_fp:
            if PY2:
                # noinspection PyDeprecation
                self.config_p.readfp(conf_fp)
            else:
                self.config_p.read_file(conf_fp)

        # Information in the config file is needed for generating the MD
        self.parse_config_file()

        if self.IEK_ENC_ALGO == "RSA-2048":
            self.dvc_key_fn = key_config_parser.get_rsa_pub_dvc_key(keycfg)
            self.dvc_key_id = key_config_parser.get_rsa_dvc_key_id_buf(keycfg)
        elif self.IEK_ENC_ALGO == "AES-128":
            self.dvc_key_fn = key_config_parser.get_aes_dvc_key(keycfg)
            self.dvc_key_id = key_config_parser.get_aes_dvc_key_id_buf(keycfg)
        else:
            print("Error: Unsupported IEK_ENC_ALGO from config")
            exit(1)

        self.oem_key_fn = key_config_parser.get_rsa_pri_oem_key(keycfg)
        self.oem_key_id = key_config_parser.get_rsa_oem_key_id_buf(keycfg)

        if (self.dvc_key_fn == '' or self.dvc_key_id == '' or
                self.oem_key_fn == '' or self.oem_key_id == ''):
            print("Error: Key config not correct")
            exit(1)

        # Remove previous temporary directory
        if os.path.exists(tdir) and os.path.isdir(tdir):
            shutil.rmtree(tdir)

        # Create temp directory for storing all temp files
        os.makedirs(tdir)

        # Initialization for encrypting & signing
        self.init_enc()
        self.init_sign('', '')
    except:
        print("Failed during init")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print("*** print_tb:")
        traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
        print("*** print_exception:")
        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                  limit=2, file=sys.stdout)
        sys.exit(2)
    logger.debug(pwd)
    db_params["password"] = pwd
else:
    logger.info("No password given")

db_connected = False
db_connection = None
while not db_connected:
    try:
        db_connection = DatabaseConnection(db_params)
        db_connected = True
    except DatabaseConnectionError as e:
        logger.error("Couldn't connect to database. ({}) "
                     "Password wrong or not set?".format(e))
        import traceback
        # format_tb returns the traceback as strings; print_tb would log None
        logger.error("".join(traceback.format_tb(e.__traceback__)))
        pw = getpass.getpass("Type database password: ")
        db_params["password"] = pw

# FLASK SETUP
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'

if not hasattr(args, "disable_ssl") or args.disable_ssl is False:
    logger.info("Using SSL connection.")
    sslify = SSLify(app)
else:
    logger.info("Not using SSL connection.")

app.debug = False
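# Why format_tb above: traceback.print_tb() writes directly to a stream and
# returns None, so logging its return value would record the string "None"
# while the real traceback bypasses the logger. A small sketch of the helper:
import traceback

def traceback_as_string(tb):
    # returns the rendered stack as one string, suitable for logger.error()
    return "".join(traceback.format_tb(tb))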
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import traceback

from vPiP import *
from vPiP.renderers.norwegianSpiral import renderNorwegianSpiral

Vpip = vPiP.Vpip
filename = "/home/pi/vPiP/testImages/HamptonCourt.jpg"
#filename = "../testImages/TyneBridge.jpg"
#filename = "../testImages/SydneyOpera.jpg"
#filename = "../testImages/SydneyOperaNight.jpg"
#filename = "../testImages/HamptonCourt.jpg"

with Vpip() as p:
    # p.setShowDrawing(True)
    p.setPlotting(True)
    try:
        p.drawTo(250, 250)
        p.goHome()
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print("test3 main thread exception : %s" % exc_type)
        traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
def run(self, chain=None, chargeType='bcc', chargeVal=None, guessCharge=False,
        multiplicity='1', atomType='gaff', force=False, basename=None,
        debug=False, outTopol='all', engine='tleap', allhdg=False,
        timeTol=36000, qprog='sqm', ekFlag=None, outType='mol2'):
    ccpnProject = self.project
    if chain:
        other = [chain]
    else:
        other = self.getHeteroMols()
    if not other:
        print("WARN: no molecules entitled for ACPYPE")
        return None
    acpypeDict = {}
    for chain in other:
        if chargeVal is None and not guessCharge:
            chargeVal = chain.molecule.formalCharge
        # pdbCode = ccpnProject.name
        res = chain.findFirstResidue()
        resName = res.ccpCode.upper()
        if chargeVal is None:
            print("Running ACPYPE for '%s : %s' and trying to guess net charge"
                  % (resName, chain.molecule.name))
        else:
            print("Running ACPYPE for '%s : %s' with charge '%s'"
                  % (resName, chain.molecule.name, chargeVal))
        random.seed()
        d = [random.choice(string.ascii_letters) for x in range(10)]
        randString = "".join(d)
        randString = 'test'
        dirTemp = '/tmp/ccpn2acpype_%s' % randString
        if not os.path.exists(dirTemp):
            os.mkdir(dirTemp)
        if outType == 'mol2':
            resTempFile = os.path.join(dirTemp, '%s.mol2' % resName)
        else:
            resTempFile = os.path.join(dirTemp, '%s.pdb' % resName)
        entry = ccpnProject.currentNmrEntryStore.findFirstEntry()
        strucGen = entry.findFirstStructureGeneration()
        refStructure = strucGen.structureEnsemble.sortedModels()[0]
        if outType == 'mol2':
            mol2Format = Mol2Format.Mol2Format(ccpnProject)
            mol2Format.writeChemComp(
                resTempFile, chemCompVar=chain.findFirstResidue().chemCompVar,
                coordSystem='pdb', minimalPrompts=True,
                forceNamingSystemName='XPLOR')
        else:
            pdbFormat = PdbFormat.PdbFormat(ccpnProject)
            pdbFormat.writeCoordinates(
                resTempFile, exportChains=[chain], structures=[refStructure],
                minimalPrompts=True, forceNamingSystemName='XPLOR')
        origCwd = os.getcwd()
        os.chdir(dirTemp)
        t0 = time.time()
        print(header)
        try:
            molecule = ACTopol(
                resTempFile, chargeType=chargeType, chargeVal=chargeVal,
                debug=debug, multiplicity=multiplicity, atomType=atomType,
                force=force, outTopol=outTopol, engine=engine, allhdg=allhdg,
                basename=basename, timeTol=timeTol, qprog=qprog,
                ekFlag='''"%s"''' % ekFlag)
            if not molecule.acExe:
                molecule.printError("no 'antechamber' executable... aborting!")
                hint1 = "HINT1: is 'AMBERHOME' or 'ACHOME' environment variable set?"
                hint2 = ("HINT2: is 'antechamber' in your $PATH?\n"
                         "    What 'which antechamber' in your terminal says?\n"
                         "    'alias' doesn't work for ACPYPE.")
                molecule.printMess(hint1)
                molecule.printMess(hint2)
                sys.exit(1)
            molecule.createACTopol()
            molecule.createMolTopol()
            acpypeFailed = False
        except:
            # a stray bare `raise` here would abort before acpypeFailed is set
            _exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
            print("ACPYPE FAILED: %s" % exceptionValue)
            if debug:
                traceback.print_tb(exceptionTraceback, file=sys.stdout)
            acpypeFailed = True
        execTime = int(round(time.time() - t0))
        if execTime == 0:
            msg = "less than a second"
        else:
            msg = elapsedTime(execTime)
        try:
            rmtree(molecule.tmpDir)
        except:
            pass  # tolerate cleanup failure rather than re-raising
        print("Total time of execution: %s" % msg)
        if not acpypeFailed:
            acpypeDict[resName] = [x for x in dirWalk(
                os.path.join(dirTemp, '%s.acpype' % resName))]
        else:
            acpypeDict[resName] = []
            # sys.exit(1)
        os.chdir(origCwd)
    self.acpypeDictFiles = acpypeDict
def cal_yinzi0(celuename, train_da0, s_t, e_t, res_t, train_res):
    '''
    Compute the per-period statistical factors for every parameter set.
    :param d: data used to compute the period factors
    :param s_t: start time of the period
    :param now_time: current time
    :param train_res: [whether training results are passed in, sample training results]
    :return: ['end', 'max_back', 'trade_nums', 'sharp_rate', 'all_tradedays',
        'profit_days', 'win_rate', 'win_mean_num', 'loss_mean_num', 'max_num',
        'min_num', 'mean_num', 'std_num', ...]
    '''
    df_zong0 = pd.DataFrame()  # holds the statistical factors per parameter set
    # compute the statistical factors for each parameter set
    train_da0 = train_da0.copy()
    # iterate over all parameter sets and compute this period's statistics
    for i, d in train_da0.groupby('canshu'):
        try:
            d.fillna(0, inplace=True)
            # training window start
            df_zong0.loc[i, 'train_s'] = s_t
            # training window end
            df_zong0.loc[i, 'train_e'] = e_t
            # prediction period
            df_zong0.loc[i, 'res_t'] = res_t
            df_zong0.loc[i, 'canshu'] = i
            df_zong0.loc[i, 'celuename'] = celuename
            # NaN check: a direct `== np.NAN` comparison is always False
            if float(d['end'].sum()) == 0 or np.isnan(d['end'].sum()):
                continue
            df_zong0.loc[i, '本周期总收益'] = float(d['end'].sum())
            df_zong0.loc[i, '最近周期收益'] = float(d.iloc[-1]['end'].sum())
            df_zong0.loc[i, '最大回撤'] = d['max_back'].min()
            df_zong0.loc[i, '最大值'] = (d['end'].cumsum()).max()
            df_zong0.loc[i, '收益std'] = d['end'].std()
            df_zong0.loc[i, '偏度'] = d['end'].skew()
            df_zong0.loc[i, '峰度'] = d['end'].kurt()
            df_zong0.loc[i, '平均月收益'] = d['end'].mean()
            df_zong0.loc[i, '平均月最大收益'] = d['max_sum'].mean()
            df_zong0.loc[i, '平均月最大回撤'] = d['max_back'].mean()
            df_zong0.loc[i, '平均月夏普率'] = d['sharp_rate'].mean()
            df_zong0.loc[i, '平均月交易次数'] = d['trade_nums'].mean()
            df_zong0.loc[i, '月均交易天数'] = d['total_days'].mean()
            df_zong0.loc[i, '月均盈利天数'] = d['profit_days'].mean()
            df_zong0.loc[i, '月均开单收益std'] = d['std_num'].mean()
            df_zong0.loc[i, '月均开单最大收益'] = d['max_num'].mean()
            df_zong0.loc[i, '月均亏单平均亏损'] = d['loss_mean_num'].mean()
            df_zong0.loc[i, '月均胜单平均盈利'] = d['win_mean_num'].mean()
            df_zong0.loc[i, '月均胜单平均盈利偏度'] = d['win_mean_num'].skew()
            df_zong0.loc[i, '月均胜单平均盈利std'] = d['win_mean_num'].std()
            df_zong0.loc[i, '月均交易胜率'] = d['win_rate'].mean()
            df_zong0.loc[i, '月均交易胜率偏度'] = d['win_rate'].skew()
            df_zong0.loc[i, '月均交易胜率std'] = d['win_rate'].std()
            df_zong0.loc[i, '月均开单平均收益'] = d['mean_num'].mean()
            df_zong0.loc[i, '月均开单平均收益偏度'] = d['mean_num'].skew()
            df_zong0.loc[i, '月均开单平均收益std'] = d['mean_num'].std()
            df_zong0.loc[i, '回撤std'] = d['max_back'].std() * -1
            df_zong0.loc[i, '盈撤比'] = \
                (d['max_sum'].mean() / (-1 * d['max_back'].mean())) \
                if (d['max_back'].mean()) != 0 else 0
            df_zong0.loc[i, '盈利因子01'] = \
                d['max_sum'].sum() * d['end'].mean() / (d['end'].std()) \
                if (d['end'].std() != 0) else 0
            if train_res.empty:
                df_zong0.loc[i, '预测周期真实收益'] = float(0)
                # c=1, y=1,
            else:
                # training results
                d1 = train_res
                df_zong0.loc[i, '预测周期真实收益'] = \
                    float(d1.loc[d1['canshu'] == i, 'end'].sum())
        except Exception as e:
            exc_type, exc_value, exc_traceback_obj = sys.exc_info()
            traceback.print_tb(exc_traceback_obj)
            print('per-parameter statistics failed:', d.tail())
        finally:
            df_zong0.fillna(0, inplace=True)
            # print(df_zong0.tail())
    return df_zong0
def redplt(args=None): description = \ """redplt This command is to be run in the "raw_data" directory containing night-by-night directories of data for |hipercam|, ULTRACAM or ULTRASPEC. It attempts to generate plots of any runs it finds with corresponding reduce log files inside a sub-directory "reduce", and then stores these inside a sub-directory "meta". The purpose of these plots is so that they can be attached to the run logs as a quick look on past runs. The code assumes that aperture 1 contains the target while aperture 2 has the best comparison. It produces plots in which the top panel shows the target divided by the comparison, and the bottom panel shows the comparison alone to give some sense of clouds. Runs with fewer than 20 points or lasting less than 10 minutes will not be plotted. """ parser = argparse.ArgumentParser(description=description) parser.add_argument( "-f", dest="full", action="store_true", help="carry out full re-computation of plots", ) args = parser.parse_args() cwd = os.getcwd() if os.path.basename(cwd) != "raw_data": print("redplt must be run in a directory called 'raw_data'") print("redplt aborted", file=sys.stderr) return if cwd.find("ultracam") > -1: instrument = "ULTRACAM" itype = 'U' source = 'ul' cnams = ('1', '2', '3') cols = {'1': "red", '2': "green", '3': "blue"} elif cwd.find("ultraspec") > -1: instrument = "ULTRASPEC" itype = 'U' source = 'ul' cnams = ('1', ) cols = {'1': "blue"} elif cwd.find("hipercam") > -1: instrument = "HiPERCAM" itype = 'H' source = 'hl' cnams = ('1', '2', '3', '4', '5') cols = { '1': "blue", '2': "green", '3': "orange", '4': "red", '5': "mud" } else: print("cannot find either ultracam, ultraspec or hipercam in path") print("redplt aborted", file=sys.stderr) return linstrument = instrument.lower() # Now the actual work. Next are regular expressions to match run # directories, nights, and run files nre = re.compile(r"^\d\d\d\d-\d\d-\d\d$") lre = re.compile(r"^run\d\d\d\d?\.log$") # Get list of night directories nnames = [ nname for nname in os.listdir(".") if nre.match(nname) and os.path.isdir(nname) and os.path.exists(os.path.join(nname, 'reduce')) ] nnames.sort() if len(nnames) == 0: print("no night directories found", file=sys.stderr) print("redplt aborted", file=sys.stderr) return for nname in nnames: print(f"Night {nname}") # reduce and meta directories rdir = os.path.join(nname, 'reduce') mdir = os.path.join(nname, 'meta') # load all the run names that can be found in reduce runs = [run[:-4] for run in os.listdir(rdir) if lre.match(run)] runs.sort() if len(runs) == 0: print(f' No run logs found in {rdir}; skipping') continue # ensure meta directory exists os.makedirs(mdir, exist_ok=True) # Minimum number of points / minutes to bother with NMIN, TMIN = 20, 10 # Create plots, where possible.
for run in runs: rlog = os.path.join(rdir, run + '.log') pname = os.path.join(mdir, run + '.png') if not args.full and os.path.exists(pname): print(f' Plot {pname} exists and will not be re-computed') continue # OK attempt a plot try: hlog = hcam.hlog.Hlog.read(rlog) # Two panels, target / comparison and comparison fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) cnams = sorted(list(hlog.keys())) apnames = hlog.apnames # use the next to work out optimal plot ranges tymin, tymax, cymin, cymax = 4 * [None] for cnam in cnams: if cnam in apnames: apnams = apnames[cnam] if '1' in apnams: targ = hlog.tseries(cnam, '1') if '2' in apnams: comp = hlog.tseries(cnam, '2') # run some checks ts = targ.t[~targ.get_mask(hcam.BAD_TIME) & ~comp.get_mask(hcam.BAD_TIME)] if len(ts) < NMIN: print( f'{run}, CCD={cnam} has too few points ({len(ts)} < {NMIN})' ) continue tmins = 1440 * (ts.max() - ts.min()) if tmins < TMIN: print( f'{run}, CCD={cnam} is too short ({tmins} < {TMIN} mins)' ) continue targ /= comp ndat = len(targ) if ndat > 3000: # stop plotting too many points to keep # size down binsize = ndat // 1500 targ.bin(binsize) comp.bin(binsize) (_d, _d), (tylo, _d), (_d, tyhi) = targ.percentile( [5, 95], bitmask=hcam.BAD_TIME) (_d, _d), (cylo, _d), (_d, cyhi) = comp.percentile( [5, 95], bitmask=hcam.BAD_TIME) if tymax is not None: off = tymax - tylo targ += off tymax += tyhi - tylo else: tymin, tymax = tylo, tyhi if cymax is not None: off = cymax - cylo comp += off cymax += cyhi - cylo else: cymin, cymax = cylo, cyhi targ.mplot(ax1, utils.rgb(cols[cnam]), ecolor='0.5', bitmask=hcam.BAD_TIME) comp.mplot(ax2, utils.rgb(cols[cnam]), ecolor='0.5', bitmask=hcam.BAD_TIME) else: # run some checks ts = targ.t[~targ.get_mask(hcam.BAD_TIME)] if len(ts) < NMIN: print( f'{run}, CCD={cnam} has too few points ({len(ts)} < {NMIN})' ) continue tmins = 1440 * (ts.max() - ts.min()) if tmins < TMIN: print( f'{run}, CCD={cnam} is too short ({tmins} < {TMIN} mins)' ) continue ndat = len(targ) if ndat > 3000: # stop plotting too many points to keep # size down binsize = ndat // 1500 targ.bin(binsize) (_d, _d), (tylo, _d), (_d, tyhi) = targ.percentile( [5, 95], bitmask=hcam.BAD_TIME) if tymax is not None: off = tymax - tylo targ += off tymax += tyhi - tylo else: tymin, tymax = tylo, tyhi targ.mplot(ax1, utils.rgb(cols[cnam]), ecolor='0.5', bitmask=hcam.BAD_TIME) if tymin is not None: yrange = tymax - tymin ax1.set_ylim(tymin - yrange / 4, tymax + yrange / 4) if cymin is not None: yrange = cymax - cymin ax2.set_ylim(cymin - yrange / 4, cymax + yrange / 4) ax1.set_ylabel('Target / Comparison') ax1.set_title(f'{nname}, {run}') ax2.set_ylabel('Comparison') ax2.set_xlabel('Time [MJD]') plt.savefig(pname) print(f'Written {pname}') plt.close() except: exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_tb(exc_traceback, limit=1, file=sys.stderr) traceback.print_exc(file=sys.stderr) print("Problem reading log for run =", run)
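The handler above prints a one-frame traceback with print_tb and then the full report with print_exc; the difference is easy to see in isolation. A minimal sketch, independent of the pipeline above:

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    # The first frame only (a positive limit counts from the outermost call) ...
    traceback.print_tb(sys.exc_info()[2], limit=1, file=sys.stderr)
    # ... versus the full report, including the final "ZeroDivisionError: ..." line.
    traceback.print_exc(file=sys.stderr)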
' SOURCE VIDEO: ' + videoChoice), in_reply_to_status_id=tweet['id']) while True: date = datetime.now() print('Current time is ' + time_str(date)) date = round_to_next_hour(date) print('Next run is at ' + time_str(date)) dt = datetime(date.year, date.month, date.day, date.hour, date.minute, date.second, 0).timestamp() pause.until(dt) fails = 0 while fails < 3: try: run() break except Exception as e: fails += 1 print("Failed!") traceback.print_tb(e.__traceback__) finally: active = psutil.Process(os.getpid()) print("There are " + str(len(active.children())) + " child processes") for child in active.children(): child.kill() if fails == 3: print('Program failed 3 times at ' + time_str(date)) #break
enable_bigint = True except Exception: pass # Request full stack traces for Python errors. gdb.execute('set python print-stack full') # Tell GDB not to ask the user about the things we tell it to do. gdb.execute('set confirm off', False) # Some print settings that make testing easier. gdb.execute('set print static-members off') gdb.execute('set print address off') gdb.execute('set print pretty off') gdb.execute('set width 0') try: # testscript is set on the GDB command line, via: # --eval-command python testscript=... execfile(testscript, globals(), locals()) except AssertionError as err: header = '\nAssertion traceback' if active_fragment: header += ' for ' + active_fragment sys.stderr.write(header + ':\n') (t, v, tb) = sys.exc_info() traceback.print_tb(tb) sys.stderr.write('\nTest assertion failed:\n') sys.stderr.write(str(err)) sys.exit(1)
def dumpException(e): print("EXCEPTION:", e) traceback.print_tb(e.__traceback__)
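A usage sketch for the helper above: any exception caught in Python 3 carries its traceback on __traceback__, which is exactly what dumpException forwards to print_tb.

try:
    {}['missing']
except Exception as e:
    dumpException(e)  # prints "EXCEPTION: 'missing'" followed by the frame list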
async def on_command_error(ctx: commands.Context, error: commands.CommandError): if ctx.kwargs.get("resolved", False): return if isinstance(error, commands.CommandInvokeError): if isinstance(error.original, discord.errors.Forbidden): await ctx.author.send(embed=bot.create_error_embed( "You ran the command `{}`, but I don't " "have permission to send " "messages in that channel!".format(ctx.command))) return print(type(error.original)) if isinstance(error, commands.BotMissingPermissions): missing = [ perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms ] if len(missing) > 2: perms_formatted = '{}, and {}'.format(", ".join(missing[:-1]), missing[-1]) else: perms_formatted = ' and '.join(missing) try: await ctx.reply( f"In order to run these commands, I need the following permission(s): {perms_formatted}" ) except discord.errors.Forbidden: await ctx.author.send(embed=bot.create_error_embed( "You ran the command `{}`, but I don't " "have permission to send " "messages in that channel!".format(ctx.command))) return if isinstance(error, commands.CommandNotFound) or isinstance( error, commands.DisabledCommand): return if isinstance(error, commands.CheckFailure): try: await ctx.send(embed=bot.create_error_embed( "You don't have permission to do that, {}.".format( ctx.message.author.mention))) return except discord.errors.Forbidden: await ctx.author.send(embed=bot.create_error_embed( "You ran the command `{}`, but I don't " "have permission to send " "messages in that channel!".format(ctx.command))) return try: embed = discord.Embed( title="MonkeyUtils experienced an error in a command.", colour=discord.Colour.red()) embed.description = format_exc()[:2000] embed.add_field(name="Command passed error", value=str(error)) if ctx.message.application is not None and ctx.message.application.get( "original_content") is not None: embed.add_field( name="Context", value=ctx.message.application.get("original_content")) else: embed.add_field(name="Context", value=ctx.message.content) print_tb(error.__traceback__) if hasattr(error, "original"): print_tb(error.original.__traceback__) guild_error_channel_id = await bot.mongo.discord_db.channels.find_one( { "guild_id": ctx.guild.id, "error_channel": True }) if guild_error_channel_id is None: guild_error_channel_id = config.error_channel_id else: guild_error_channel_id = guild_error_channel_id.get( "_id", None) error_channel = bot.get_channel(guild_error_channel_id) if error_channel is None: error_channel = bot.get_channel(config.error_channel_id) await error_channel.send(embed=embed) try: await ctx.reply(embed=embed) except discord.errors.HTTPException as http_e: if http_e.code == 400: await ctx.send(embed=bot.create_error_embed( "The original message was deleted! " "Could not reply with error. Please don't run commands " "in a channel that auto-deletes.")) await ctx.send(embed=embed) print(f"{http_e.code = }, {http_e.args = }") # bot.restart() except Exception as e: print("Error in sending error to discord. Error was {}".format( error)) print("Error sending to discord was {}".format(e))
def print_traceback(self, exc_desc, exc_tb): """Print exception description and traceback to standard error.""" import traceback sys.stderr.write("\nTraceback (most recent call last):\n") traceback.print_tb(exc_tb) sys.stderr.write("%s\n\n" % exc_desc)
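Where this method glues the header, frames and description together by hand, the standard library can render the same report in one call; a small sketch using traceback.print_exception instead:

import sys
import traceback

try:
    int("not a number")
except ValueError:
    etype, value, tb = sys.exc_info()
    # Writes "Traceback (most recent call last):", the frames,
    # and the final "ValueError: ..." line in a single call.
    traceback.print_exception(etype, value, tb, file=sys.stderr)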
def main(self): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option( "--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_option( "--srcdir", dest="srcdir", default="../../src", help= "Source directory containing animecoind/animecoin-cli (default: %default)" ) parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"), help="Root directory for datadirs") parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") self.add_options(parser) (self.options, self.args) = parser.parse_args() if self.options.trace_rpc: import logging logging.basicConfig(level=logging.DEBUG) os.environ['PATH'] = self.options.srcdir + ":" + os.environ['PATH'] check_json_precision() success = False try: if not os.path.isdir(self.options.tmpdir): os.makedirs(self.options.tmpdir) self.setup_chain() self.setup_network() self.run_test() success = True except JSONRPCException as e: print("JSONRPC error: " + e.error['message']) traceback.print_tb(sys.exc_info()[2]) except AssertionError as e: print("Assertion failed: " + str(e)) traceback.print_tb(sys.exc_info()[2]) except Exception as e: print("Unexpected exception caught during testing: " + str(e)) traceback.print_tb(sys.exc_info()[2]) if not self.options.noshutdown: print("Stopping nodes") stop_nodes(self.nodes) wait_bitcoinds() else: print("Note: bitcoinds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown: print("Cleaning up") shutil.rmtree(self.options.tmpdir) if success: print("Tests successful") sys.exit(0) else: print("Failed") sys.exit(1)
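The three except clauses above differ only in their message; the shared "print a label, then the current traceback" step can be factored out. A sketch under that assumption (report() and risky() are illustrative names, not part of the test framework):

import sys
import traceback

def report(label):
    print(label)
    traceback.print_tb(sys.exc_info()[2])

def risky():
    assert False, "boom"

try:
    risky()
except AssertionError as e:
    report("Assertion failed: " + str(e))
except Exception as e:
    report("Unexpected exception caught during testing: " + str(e))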
def provider_handle_client(provider_lock, provider_int, conn, addr, PROVIDER_APPEAL_ONLY, STOP_ON_MAX): customer_address = 0x000000000 state = 0 with conn: conn.settimeout(1) print(shorten_addr(customer_address), "New Connection: ", addr) state = 0 # Getting Address while True: terminated = False provider_lock.acquire() try: terminated = provider_int.terminated() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if (terminated): conn.close() print(shorten_addr(customer_address), "Closing connection", addr) return try: msg = receive_dict(conn) if msg is None: break except: continue # get next message if state == 0: #print(addr, "Received: " + str(msg)) if not msg: print(addr, "ERROR: connection ended?") return if "type" in msg and msg[ "type"] == "address" and "address" in msg: customer_address = msg["address"] print(shorten_addr(customer_address), "Got Address. (" + str(addr) + ")") # Creating contract and sending it: sub_add = None provider_lock.acquire() try: sub_add = provider_int.create_subscription( customer_address) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if not sub_add: print(shorten_addr(customer_address), "No subscription address! Returning.") return print(shorten_addr(customer_address), "Sending subscription...") send_dict(conn, { "type": "subscription", "address": sub_add }) state = 1 print(shorten_addr(customer_address), "Waiting for msg") continue # Getting questions and sending answers if state == 1: active = True provider_lock.acquire() try: active = provider_int.is_subscription_active( customer_address) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if not active: print(shorten_addr(customer_address), "Subscription no longer active.") conn.close() print(shorten_addr(customer_address), "Closing connection", addr) return provider_lock.acquire() try: demanding = customer_address in provider_int.customers_demanded except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if demanding: continue # Dismiss Broken Messages if not msg: break if "type" not in msg: continue # Handle Msgs by Types elif msg["type"] == "new_question": # Decode Message question = Coder.stream_to_encoded(msg["question"]) hashes = str_to_bytes(msg["hashes"]) unanswered = msg["unanswered"] signature = str_to_bytes(msg["signature"]) print(shorten_addr(customer_address), "Got new question:", Coder.str_question(question)) # Register Ack closed = [] provider_lock.acquire() try: closed = provider_int.register_ack( customer_address, Ack(hashes, unanswered, signature)) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if closed is None: print(shorten_addr(customer_address), "Invalid ack! Ignoring.") continue for q in closed: print(shorten_addr(customer_address), "Got new answer ack:", Coder.str_question(q)) # Check for Overflow overflow = False provider_lock.acquire() try: overflow = provider_int.can_overflow(customer_address) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if overflow: print(shorten_addr(customer_address), "Max queries met! Ignoring.") continue #Register Question qa = QA(question) h = qa.get_hash() if h not in hashes: print(shorten_addr(customer_address), "Question not in hashes! Ignoring.") ret = False provider_lock.acquire() try: ret = provider_int.register_question( customer_address, question) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if not ret: print(shorten_addr(customer_address), "Invalid question! Ignoring.") elif msg["type"] == "ack": # Decode Msg hashes = str_to_bytes(msg["hashes"]) unanswered = msg["unanswered"] signature = str_to_bytes(msg["signature"]) # Register Ack closed = [] provider_lock.acquire() try: closed = provider_int.register_ack( customer_address, Ack(hashes, unanswered, signature)) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if closed is None: print(shorten_addr(customer_address), "Got useless ack...") elif len(closed) < 1: print(shorten_addr(customer_address), "Got stale ack") else: for q in closed: print(shorten_addr(customer_address), "Got new answer ack:", Coder.str_question(q)) elif msg["type"] == "send_answer": # Check if specific question needed (currently useless) question = None if "question" in msg: question = Coder.stream_to_encoded(msg["question"]) q = question if q is not None: q = Coder.str_question(q) print(shorten_addr(customer_address), "Asking for new answers, prefered question:", q) # Get all qas qas = [] provider_lock.acquire() try: qas = provider_int.get_new_answers(customer_address) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() # Send all answers questions = [ Coder.encoded_to_stream(qa.get_question()) for qa in qas ] answers = [ Coder.encoded_to_stream(qa.get_answer()) for qa in qas ] #print(shorten_addr(customer_address), "Almost sent answers:", len(answers)) if PROVIDER_APPEAL_ONLY: questions = answers = [] send_dict( conn, { "type": "answer", "questions": questions, "answers": answers }) print(shorten_addr(customer_address), "Sent answers:", len(answers)) else: print(shorten_addr(customer_address), "??? Received: " + str(msg)) print(shorten_addr(customer_address), "Ended Connection.")
def create_device(thing_group_name="AC"): logger.debug(">create_device '{}'".format(thing_group_name)) response = {} try: r = iot_cli.create_keys_and_certificate(setAsActive=True) certificate_arn = r['certificateArn'] certificate_id = r['certificateId'] certificate_pem = r['certificatePem'] private_key = r['keyPair']['PrivateKey'] logger.info("CERT ARN: {}\nCERT ID : {}\nCERT PEM:\n{}".format( certificate_arn, certificate_id, certificate_pem)) serial_number = create_random_name(4) dev_name = "{}-{}".format(PREFIX, serial_number) response['dev_name'] = dev_name logger.info("DEV NAME: {}".format(dev_name)) with open("/tmp/device_name.{}".format(dev_name), "w") as dev_file: dev_file.write("%s" % dev_name) with open("/tmp/{}.pem.cer".format(dev_name), "w") as cert_file: cert_file.write("%s" % certificate_pem) with open("/tmp/{}.pem.key".format(dev_name), "w") as key_file: key_file.write("%s" % private_key) try: r = iot_cli.create_thing_group( thingGroupName=thing_group_name, thingGroupProperties={ "thingGroupDescription": "This is group for {} devices".format(thing_group_name) }) except Exception as e: traceback.print_tb(e.__traceback__, limit=10, file=sys.stdout) logger.debug("Error creating thing group") r = iot_cli.create_thing( thingName=dev_name, #thingTypeName='AC', attributePayload={ "attributes": { "Location": "Brazil", "SerialNumber": serial_number } }) policy_name = "{}-{}".format(dev_name, "Policy") default_iot_policy = load_iot_policy() r = iot_cli.create_policy( policyName=policy_name, policyDocument=json.dumps(default_iot_policy)) #attach r = iot_cli.attach_thing_principal(thingName=dev_name, principal=certificate_arn) r = iot_cli.attach_policy(policyName=policy_name, target=certificate_arn) r = iot_cli.add_thing_to_thing_group( thingGroupName=thing_group_name, #thingGroupArn='string', thingName=dev_name, #thingArn='string', #overrideDynamicGroups=True|False ) r = iot_data_cli.update_thing_shadow( thingName=dev_name, payload='{"state":{"desired":{"debug":"off"}}}') r = iot_cli.describe_endpoint(endpointType='iot:Data-ATS') iot_endpoint = r["endpointAddress"] device_type = thing_group_name cfg_file = prepare_config_file(dev_name, iot_endpoint, certificate_pem, private_key, device_type) upload_file_to_s3(cfg_file) cfg_file_url = create_presigned_s3_url(cfg_file) container_data = create_container(dev_name, cfg_file_url) include_device_in_catalog(dev_name, iot_endpoint, certificate_pem, container_data) response['endpoint'] = iot_endpoint response['task_arn'] = container_data["taskArn"] response['cluster_arn'] = container_data["clusterArn"] response['config_file_url'] = cfg_file_url response['result'] = "OK" except ClientError as e: #if e.response['Error']['Code'] == 'EntityAlreadyExists': logger.error("Unexpected error: %s", e) response['result'] = "ERROR" response['error-msg'] = str(e) return generate_error_response(response) return generate_response(response)
def handle_appeals_provider(provider_lock, provider_int, PROVIDER_WAIT_APPEAL, PROVIDER_DROP_APPEAL): #main logic of provider while (True): #Check if provider terminated terminated = False provider_lock.acquire() try: terminated = provider_int.terminated() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if (terminated): print("[x] Closing appeals provider") return time.sleep(0.1) # Check for all appeals appeals = [] provider_lock.acquire() try: appeals = provider_int.check_for_appeals() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if PROVIDER_DROP_APPEAL: appeals = [] # Resolve every appeal if able unresolved = [] for appeal in appeals: customer_address = appeal.get_customer_address() deadline_block, question = appeal.get_end_of_service_block( ), appeal.get_question() if PROVIDER_WAIT_APPEAL and w3.eth.blockNumber < deadline_block - 2: continue resolved = False provider_lock.acquire() try: resolved = provider_int.resolve_appeal(customer_address, appeal) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) print(shorten_addr(customer_address), "Couldn't resolve appeal! Is answer incorrect?") finally: provider_lock.release() if resolved: print(shorten_addr(customer_address), "Resolved appeal") else: print(shorten_addr(customer_address), "Appealed an unsent question") unresolved.append(appeal) # Set unresolved appeals to urgent provider_lock.acquire() try: resolved = provider_int.set_urgent(unresolved) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() # Check for status of demands solved = [] provider_lock.acquire() try: solved = provider_int.check_demands() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() for address in solved: print(shorten_addr(address), "Has resolved the signature demand") # Execute unresolved timed demands solved = [] provider_lock.acquire() try: solved = provider_int.exec_demands() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() for address in solved: print(shorten_addr(address), "Channel closed - demand not resolved") # Try withdrawing funds if w3.eth.blockNumber % 10 == 0: amount = 0 provider_lock.acquire() try: amount = provider_int.withdraw_all() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if amount > 0: print("Withdrew funds:", amount)
def solve_provider(provider_lock, provider_int, solver, PROVIDER_WRONG_ANSWER, PROVIDER_IMMIDIEATE_DEMAND): solved_counter = 0 while True: # Check if Provider terminated terminated = False provider_lock.acquire() try: terminated = provider_int.terminated() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if (terminated): print("[x] Closing solve provider") return time.sleep(0.1) # Get Urgent to Solve urgent = [] question = None customer_address = None qa = None provider_lock.acquire() try: urgent = provider_int.get_urgent() qa = provider_int.get_next_question() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if qa is not None: customer_address = qa.get_asker() question = qa.get_question() # Get most Urgent Question if len(urgent) > 0: closest = urgent[0].get_end_of_service_block() for appeal in urgent: add = appeal.get_customer_address() deadline_block, q = appeal.get_end_of_service_block( ), appeal.get_question() if deadline_block < closest: customer_address = add question = q closest = deadline_block # Sleep if no question to solve if question is None: time.sleep(0.5) continue # Solve most recent or urgent answer = solver(question, wrong=PROVIDER_WRONG_ANSWER) print(shorten_addr(customer_address), "Solved:", Coder.str_question(question), "->", Coder.str_answer(answer)) provider_lock.acquire() try: provider_int.register_answer(customer_address, question, answer) if (PROVIDER_IMMIDIEATE_DEMAND): provider_int.demand_signature(customer_address, question, answer) except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release()
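The provider threads above repeat the same acquire/try/print_tb/finally block around every interface call; a small wrapper can express it once. A sketch, reusing the provider_lock and provider_int names from the snippets above:

import traceback

def locked_call(lock, func, *args, default=None):
    """Call func(*args) under lock; print the traceback and return default on error."""
    lock.acquire()
    try:
        return func(*args)
    except Exception as e:
        traceback.print_tb(e.__traceback__)
        print("ERROR:", e)
        return default
    finally:
        lock.release()

# e.g. terminated = locked_call(provider_lock, provider_int.terminated, default=False)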
def main(self, argsOverride=None, bitcoinConfDict=None, wallets=None): """ argsOverride: pass your own values for sys.argv in this field (or pass None) to use sys.argv bitcoinConfDict: Pass a dictionary of values you want written to bitcoin.conf. If you have a key with multiple values, pass a list of the values as the value, for example: { "debug":["net","blk","thin","lck","mempool","req","bench","evict"] } This framework provides values for the necessary fields (like regtest=1). But you can override these defaults by setting them in this dictionary. wallets: Pass a list of wallet filenames. Each wallet file will be copied into the node's directory before starting the node. """ parser = optparse.OptionParser(usage="%prog [options]") parser.add_option( "--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_option( "--srcdir", dest="srcdir", default=os.path.normpath( os.path.dirname(os.path.realpath(__file__)) + "/../../../src"), help= "Source directory containing bitcoind/bitcoin-cli (default: %default)" ) testname = "".join( filter(lambda x: x in string.ascii_lowercase, basename(argv[0]))) default_tempdir = tempfile.mkdtemp(prefix="test_" + testname + "_") parser.add_option( "--tmppfx", dest="tmppfx", default=None, help= "Directory custom prefix for data directories, if specified, overrides tmpdir" ) parser.add_option("--tmpdir", dest="tmpdir", default=default_tempdir, help="Root directory for data directories.") parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_option( "--portseed", dest="port_seed", default=os.getpid(), type='int', help= "The seed to use for assigning port numbers (default: current process id)" ) parser.add_option("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") # BU: added for tests using randomness (e.g. excessive.py) parser.add_option( "--randomseed", dest="randomseed", help= "Set RNG seed for tests that use randomness (ignored otherwise)") parser.add_option( "--no-ipv6-rpc-listen", dest="no_ipv6_rpc_listen", default=False, action="store_true", help="Switch off listening on the IPv6 ::1 localhost RPC port. " "This is meant to deal with travis which is currently not supporting IPv6 sockets." ) parser.add_option( "--electrum.exec", dest="electrumexec", help="Set a custom path to the electrum server executable", default=None) parser.add_option( "--gitlab", dest="gitlab", default=False, action="store_true", help= "Changes root directory for gitlab artifact exporting. Overrides tmpdir and tmppfx" ) self.add_options(parser) (self.options, self.args) = parser.parse_args(argsOverride) if self.options.gitlab is True: basedir = os.path.normpath( os.path.dirname(os.path.realpath(__file__)) + "/../../qa_tests") if os.path.exists(basedir) == False: try: os.mkdir(path=basedir, mode=0o700) except FileExistsError as _: # ignore pass self.options.tmpdir = tempfile.mkdtemp(prefix="test_" + testname + "_", dir=basedir) UtilOptions.no_ipv6_rpc_listen = self.options.no_ipv6_rpc_listen UtilOptions.electrumexec = self.options.electrumexec # BU: initialize RNG seed based on time if no seed specified if self.options.randomseed: self.randomseed = int(self.options.randomseed) else: self.randomseed = int(time.time()) random.seed(self.randomseed) logging.info("Random seed: %s" % self.randomseed) if self.options.tmppfx is not None and self.options.gitlab is False: i = self.options.port_seed # find a short path that's easy to remember compared to mkdtemp while os.path.exists(self.options.tmppfx + os.sep + testname[0:-2] + str(i)): i += 1 self.options.tmpdir = self.options.tmppfx + os.sep + testname[ 0:-2] + str(i) if self.options.trace_rpc: logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) if self.options.coveragedir: enable_coverage(self.options.coveragedir) PortSeed.n = self.options.port_seed os.environ['PATH'] = self.options.srcdir + ":" + os.path.join( self.options.srcdir, "qt") + ":" + os.environ['PATH'] self.bitcoindBin = os.path.join(self.options.srcdir, "bitcoind") check_json_precision() # By setting the environment variable BITCOIN_CONF_OVERRIDE to "key=value,key2=value2,..." you can inject bitcoin configuration into every test baseConf = os.environ.get("BITCOIN_CONF_OVERRIDE") if baseConf is None: baseConf = {} else: lines = baseConf.split(",") baseConf = {} for line in lines: (key, val) = line.split("=") baseConf[key.strip()] = val.strip() if bitcoinConfDict is None: bitcoinConfDict = {} bitcoinConfDict.update(baseConf) success = False try: try: os.makedirs(self.options.tmpdir, exist_ok=False) except FileExistsError as e: assert ( self.options.tmpdir.count(os.sep) >= 2 ) # sanity check that tmpdir is not the top level before I delete stuff for n in range( 0, 8 ): # delete the nodeN directories so their contents dont affect the new test d = self.options.tmpdir + os.sep + ("node%d" % n) try: shutil.rmtree(d) except FileNotFoundError: pass # Not pretty but, I changed the function signature # of setup_chain to allow customization of the setup.
# However derived object may still use the old format if self.setup_chain.__defaults__ is None: self.setup_chain() else: self.setup_chain(bitcoinConfDict, wallets) self.setup_network() self.run_test() success = True except JSONRPCException as e: logging.error("JSONRPC error: " + e.error['message']) typ, value, tb = sys.exc_info() traceback.print_tb(tb) if self.drop_to_pdb: pdb.post_mortem(tb) except AssertionError as e: logging.error("Assertion failed: " + str(e)) typ, value, tb = sys.exc_info() traceback.print_tb(tb) if self.drop_to_pdb: pdb.post_mortem(tb) except KeyError as e: logging.error("key not found: " + str(e)) typ, value, tb = sys.exc_info() traceback.print_tb(tb) if self.drop_to_pdb: pdb.post_mortem(tb) except Exception as e: logging.error("Unexpected exception caught during testing: " + repr(e)) typ, value, tb = sys.exc_info() traceback.print_tb(tb) if self.drop_to_pdb: pdb.post_mortem(tb) except KeyboardInterrupt as e: logging.error("Exiting after " + repr(e)) if not self.options.noshutdown: logging.info("Stopping nodes") if hasattr( self, "nodes"): # nodes may not exist if there's a startup error stop_nodes(self.nodes) wait_bitcoinds() else: logging.warning( "Note: bitcoinds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success: logging.info("Cleaning up") shutil.rmtree(self.options.tmpdir) else: logging.info("Not cleaning up dir %s" % self.options.tmpdir) if os.getenv("PYTHON_DEBUG", ""): # Dump the end of the debug logs, to aid in debugging rare # travis failures. import glob filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log") MAX_LINES_TO_PRINT = 1000 for f in filenames: print("From", f, ":") from collections import deque print("".join(deque(open(f), MAX_LINES_TO_PRINT))) if success: logging.info("Tests successful") return 0 else: logging.error("Failed") return 1
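The drop_to_pdb path above pairs print_tb with pdb.post_mortem so the failing frame can be inspected interactively after the trace is printed. The core of that pattern in isolation:

import pdb
import sys
import traceback

try:
    {}['missing']
except Exception:
    _, _, tb = sys.exc_info()
    traceback.print_tb(tb)
    pdb.post_mortem(tb)  # interactive prompt positioned at the failing frame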
def init_provider(address, host, port): provider_int = None # Get Behavior of Provider from user PROVIDER_APPEAL_ONLY = False PROVIDER_WAIT_APPEAL = False PROVIDER_DROP_APPEAL = False PROVIDER_WRONG_ANSWER = False PROVIDER_IMMIDIEATE_DEMAND = False STOP_ON_MAX = False ''' value = input("Should the provider send answers? (y/n):") if value == "y": PROVIDER_APPEAL_ONLY = False value = input("Should the provider wait for the last minute with appeals? (y/n):") if value == "y": PROVIDER_WAIT_APPEAL = True value = input("Should the provider drop appeals? (y/n):") if value == "y": PROVIDER_DROP_APPEAL = True value = input("Should the provider provide wrong answers (in appeals also)? (y/n):") if value == "y": PROVIDER_WRONG_ANSWER = True value = input("Should the provider demand a signature for every answer? (y/n):") if value == "y": PROVIDER_IMMIDIEATE_DEMAND = True value = input("Should the provider stop answering when max queries met? (y/n):") if value == "y": STOP_ON_MAX = True''' global MAX_QUERIES if not STOP_ON_MAX: MAX_QUERIES = 0 # Create all threads provider_int = ProviderInterface(address, MAX_QUERIES) provider_lock = Lock() to_join = [] x = Thread(target=handle_appeals_provider, args=(provider_lock, provider_int, PROVIDER_WAIT_APPEAL, PROVIDER_DROP_APPEAL)) x.start() to_join.append(x) x = Thread(target=solve_provider, args=(provider_lock, provider_int, Solver.solve, PROVIDER_WRONG_ANSWER, PROVIDER_IMMIDIEATE_DEMAND)) x.start() to_join.append(x) x = Thread(target=handle_input_provider, args=(provider_lock, provider_int)) x.start() to_join.append(x) # Receive connections with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((host, port)) s.listen() print("[x] Started listening: ", (host, port)) s.settimeout(1) while True: try: conn, addr = s.accept() except socket.timeout: terminated = False provider_lock.acquire() try: terminated = provider_int.terminated() except Exception as e: traceback.print_tb(e.__traceback__) print("ERROR:", e) finally: provider_lock.release() if (terminated): break else: x = Thread(target=provider_handle_client, args=(provider_lock, provider_int, conn, addr, PROVIDER_APPEAL_ONLY, STOP_ON_MAX)) x.start() to_join.append(x) for x in to_join: x.join() s.close() print("[x] Closing server")
def newTS(request, initial_ids=""): # Default local variables query_submit = False message = None annotation_info = None annotation_matches = None drugind_json = None activity_info = None activity_matches = None homolog_type_value = None allTags = Tag.allUserTagNames(request.user) compoundDbs = readSources("unichem") proteinDbs = readSources("uniprot") defaultCompoundDb = "1" defaultProteinDb = "ACC+ID" groupingCol = 0 similarityJobs = [(job.id, str(job)) for job in Job.objects.filter( user=request.user, application__category_id=5)] # Default GET request variables id_type = 'compound' ids = list() include_activity = False source_id = 1 similarity_job_id = -1 # Retrieve GET request variables if 'id_type' in request.GET: id_type = request.GET['id_type'] #see if we can identify the entity type of any initial_ids elif initial_ids != "": ii_array = initial_ids.split(",") if len(ii_array) > 0: type_guess = getEntityType(ii_array[0]) if type_guess != "unknown": id_type = type_guess if 'ids' in request.GET: if id_type == 'homolog-target': ids = request.GET.getlist('ids') else: ids = list(request.GET['ids'].split()) initial_ids = " ".join(ids) if 'include_activity' in request.GET: include_activity = True if 'tags' in request.GET: for c in [ compound.cid for compound in Compound.byTagNames( request.GET.getlist("tags"), request.user) ]: ids.append(c) if 'source_id' in request.GET: source_id = request.GET['source_id'] if 'similarity_job_id' in request.GET: similarity_job_id = int(request.GET['similarity_job_id']) # Generate content try: idMapping = {} if id_type == 'compound' and source_id != '1': idMapping = mapToChembl(ids, source_id) ids = list(idMapping.keys()) elif id_type == 'target' and source_id != 'ACC': idMapping = mapToUniprot(ids, source_id) ids = list(idMapping.keys()) elif id_type == 'homolog-target': #full_ids = ids # Full context for homolog handling #ids = [ i.split(',')[2] for i in full_ids ] # Bare Accession IDs for old target-search # Context dictionary for homolog handling # ex: # homolog_context = { # 'P29274': {'paralog': 'P30542'}, # 'P29275': {'paralog': 'P30542'}, # 'P0DMS8': {'paralog': 'P30542'}, # } homolog_context = dict() for i in ids: [relation, src_id, homolog_id] = i.split(',') if homolog_id not in homolog_context.keys(): homolog_context[homolog_id] = dict() if src_id == homolog_id: continue # Don't bother with "X is a homolog of X" if relation not in homolog_context[homolog_id].keys(): homolog_context[homolog_id][relation] = set() homolog_context[homolog_id][relation].add(src_id) ids = list(homolog_context.keys() ) # Bare Accession IDs for old target-search # Prepare homolog relation descriptions homolog_desc = dict() for homolog_id, relations in homolog_context.items(): desc_parts = list() for relation, src_ids in sorted(relations.items()): desc_parts.append("{} of {}".format( relation, ', '.join(sorted(list(src_ids))))) homolog_desc[homolog_id] = '; '.join(desc_parts) if len(ids) != 0: query_submit = True queryIdCol = { "id": "query_id", "sql": None, "table": "Query ID", "name": "Query ID", "desc": "Original compound ID prior to ChEMBL conversion", "visible": True, } headerTextCol = { "id": "header_text", "sql": None, "table": "Header Text", "name": "Header Text", "desc": "Description text to show in row-group headers (i.e. source query IDs, translations, etc.)", "visible": False, } originalQueryCol = { "id": "original_query_id", "sql": None, "table": "Original Query ID", "name": "Original Query ID", "desc": "The compound that the current query compound was originally derived from, based on similarity", "visible": True, } myAnnotationSearch = AnnotationWithDrugIndSearch(id_type, ids) annotation_info = myAnnotationSearch.table_info annotation_matches = myAnnotationSearch.get_grouped_results() drugind_json = drugIndicationData(myAnnotationSearch.drugind_objs) # Exclude ActivitySearch from search-by-target by default if id_type in ['target', 'homolog-target' ] and not include_activity: activity_info = None activity_matches = None else: myActivitySearch = ActivitySearch(id_type, ids) activity_info = myActivitySearch.table_info activity_matches = myActivitySearch.get_grouped_results() if len(idMapping) != 0: groupingCol += 1 addMappedQueryColumn(idMapping, queryIdCol, annotation_info, annotation_matches, activity_info, activity_matches) if similarity_job_id != -1: similarityMapping = readSimilarityMappingData( request.user, similarity_job_id) if len( idMapping ) != 0: # need to compose our mapping with previous mapping similarityMapping = composeMaps(idMapping, similarityMapping) #print("similarity mapping: \n"+str(similarityMapping)) if len(similarityMapping) != 0: groupingCol += 1 addMappedQueryColumn(similarityMapping, originalQueryCol, annotation_info, annotation_matches, activity_info, activity_matches) #if similarity_job_id != -1, then read job_<similarity_job_id> # map chembl id (2nd column) back to the original compound id (first column) # insert new column to show original compound id. if ts_paralog_cache(): homolog_type_value = 'paralog-cache' else: homolog_type_value = 'paralog' initial_ids = initial_ids.replace(",", " ") except Exception as e: print("exception in newTS:", sys.exc_info()) traceback.print_tb(sys.exc_info()[2]) message = str(e) context = { 'query_submit': query_submit, 'message': message, 'id_type': id_type, 'annotation_info': annotation_info, 'annotation_matches': annotation_matches, 'drugind_json': json.dumps(drugind_json), 'activity_info': activity_info, 'activity_matches': activity_matches, 'tags': allTags, 'compoundDbs': compoundDbs, 'defaultCompoundDb': defaultCompoundDb, 'proteinDbs': proteinDbs, 'defaultProteinDb': defaultProteinDb, 'groupingCol': groupingCol, 'similarityJobs': similarityJobs, 'homolog_type_value': homolog_type_value, 'initial_ids': initial_ids, } return render(request, 'targetsearch/new_ts.html', context)
def attack(self): """Launch the attacks based on the preferences set by the command line""" self._init_attacks() for attack_module in self.attacks: if attack_module.do_get is False and attack_module.do_post is False: continue print('') if attack_module.require: attack_name_list = [attack.name for attack in self.attacks if attack.name in attack_module.require and (attack.do_get or attack.do_post)] if attack_module.require != attack_name_list: print(_("[!] Missing dependencies for module {0}:").format(attack_module.name)) print(" {0}".format(",".join([attack for attack in attack_module.require if attack not in attack_name_list]))) continue else: attack_module.load_require([attack for attack in self.attacks if attack.name in attack_module.require]) attack_module.log_green(_("[*] Launching module {0}"), attack_module.name) already_attacked = self.persister.count_attacked(attack_module.name) if already_attacked: attack_module.log_green( _("[*] {0} pages were previously attacked and will be skipped"), already_attacked ) generator = attack_module.attack() answer = "0" skipped = 0 while True: try: original_request_or_exception = next(generator) if isinstance(original_request_or_exception, BaseException): raise original_request_or_exception except KeyboardInterrupt as exception: print('') print(_("Attack process was interrupted. Do you want to:")) print(_("\tr) stop everything here and generate the (R)eport")) print(_("\tn) move to the (N)ext attack module (if any)")) print(_("\tq) (Q)uit without generating the report")) print(_("\tc) (C)ontinue the current attack")) while True: try: answer = input("? ").strip().lower() except UnicodeDecodeError: pass if answer not in ("r", "n", "q", "c"): print(_("Invalid choice. Valid choices are r, n, q and c.")) else: break if answer in ("r", "n"): break elif answer == "c": continue else: # if answer is q, raise KeyboardInterrupt and it will stop cleanly raise exception except (ConnectionError, Timeout, ChunkedEncodingError, ContentDecodingError): sleep(1) skipped += 1 continue except StopIteration: break except Exception as exception: # Catch every possible exceptions and print it exception_traceback = sys.exc_info()[2] print(exception.__class__.__name__, exception) print_tb(exception_traceback) if self._bug_report: traceback_file = str(uuid1()) with open(traceback_file, "w") as traceback_fd: print_tb(exception_traceback, file=traceback_fd) print("{}: {}".format(exception.__class__.__name__, exception), file=traceback_fd) print("Occurred in {} on {}".format(attack_module.name, self.target_url), file=traceback_fd) print("{}. Requests {}. OS {}".format(WAPITI_VERSION, requests.__version__, sys.platform)) try: upload_request = Request( "https://wapiti3.ovh/upload.php", file_params=[["crash_report", [traceback_file, open(traceback_file, "rb").read()]]] ) page = self.crawler.send(upload_request) print(_("Sending crash report {} ... {}").format(traceback_file, page.content)) except RequestException: print(_("Error sending crash report")) os.unlink(traceback_file) else: if original_request_or_exception and original_request_or_exception.path_id is not None: self.persister.set_attacked(original_request_or_exception.path_id, attack_module.name) if hasattr(attack_module, "finish"): attack_module.finish() if skipped: print(_("{} requests were skipped due to network issues").format(skipped)) if answer == "r": break # if self.crawler.get_uploads(): # print('') # print(_("Upload scripts found:")) # print("----------------------") # for upload_form in self.crawler.get_uploads(): # print(upload_form) if not self.output_file: if self.report_generator_type == "html": self.output_file = self.COPY_REPORT_DIR else: filename = "{}_{}".format( self.server.replace(":", "_"), strftime("%m%d%Y_%H%M", self.report_gen.scan_date) ) if self.report_generator_type == "txt": extension = ".txt" elif self.report_generator_type == "json": extension = ".json" else: extension = ".xml" self.output_file = filename + extension for payload in self.persister.get_payloads(): if payload.type == "vulnerability": self.report_gen.add_vulnerability( category=payload.category, level=payload.level, request=payload.evil_request, parameter=payload.parameter, info=payload.info ) elif payload.type == "anomaly": self.report_gen.add_anomaly( category=payload.category, level=payload.level, request=payload.evil_request, parameter=payload.parameter, info=payload.info ) elif payload.type == "additional": self.report_gen.add_additional( category=payload.category, level=payload.level, request=payload.evil_request, parameter=payload.parameter, info=payload.info ) self.report_gen.generate_report(self.output_file) print('') print(_("Report")) print("------") print(_("A report has been generated in the file {0}").format(self.output_file)) if self.report_generator_type == "html": print(_("Open {0} with a browser to see this report.").format(self.report_gen.final_path))
def panic(self, info, exception): self.errMsg("Uncaught exception encountered in %s (%s)" % (info, exception)) buffer = StringIO.StringIO() traceback.print_tb(sys.exc_info()[2], 2, buffer) self.outMsg(buffer.getvalue()) buffer.close()
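panic() captures the frame listing into an in-memory buffer rather than writing straight to stderr; on Python 3 the same trick uses io.StringIO. A minimal sketch:

import io
import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    buf = io.StringIO()
    traceback.print_tb(sys.exc_info()[2], 2, buf)  # at most two frames
    text = buf.getvalue()  # the same text print_tb would have written
    buf.close()
    print(text)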
from __builtin__ import str import boto3 import os from os.path import basename, dirname, realpath import pytest import re import sys import traceback import json try: import aws_fpga_test_utils from aws_fpga_test_utils.AwsFpgaTestBase import AwsFpgaTestBase import aws_fpga_utils except ImportError as e: traceback.print_tb(sys.exc_info()[2]) print("error: {}\nMake sure to source shared/bin/setup_test_env.sh".format( sys.exc_info()[1])) sys.exit(1) logger = aws_fpga_utils.get_logger(__name__) class TestCreateSDAccelAfi(AwsFpgaTestBase): ''' Pytest test class. NOTE: Cannot have an __init__ method. Create AFI from xclbin. '''
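The import guard at the top of this module is itself a reusable pattern: print the traceback of the failed import, tell the user what to fix, and exit non-zero. In isolation (the module and message are illustrative, not the ones used above):

import sys
import traceback

try:
    import some_optional_helpers  # illustrative module name
except ImportError:
    traceback.print_tb(sys.exc_info()[2])
    print("error: {}\nMake sure the project environment is set up".format(
        sys.exc_info()[1]))
    sys.exit(1)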